-rw-r--r--  .mailmap | 14
-rw-r--r--  Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml | 83
-rw-r--r--  Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml | 88
-rw-r--r--  Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml | 70
-rw-r--r--  Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/msm/gmu.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml | 32
-rw-r--r--  Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml | 13
-rw-r--r--  Documentation/devicetree/bindings/display/panel/apple,summit.yaml | 58
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml | 72
-rw-r--r--  Documentation/devicetree/bindings/display/panel/visionox,rm692e5.yaml | 77
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml | 99
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml | 1
-rw-r--r--  Documentation/gpu/amdgpu/amdgpu-glossary.rst | 45
-rw-r--r--  Documentation/gpu/amdgpu/display/dc-glossary.rst | 6
-rw-r--r--  Documentation/gpu/drm-internals.rst | 7
-rw-r--r--  MAINTAINERS | 40
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.c | 17
-rw-r--r--  drivers/base/component.c | 14
-rw-r--r--  drivers/bus/mhi/host/boot.c | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/adp/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/adp/Makefile | 5
-rw-r--r--  drivers/gpu/drm/adp/adp-mipi.c | 276
-rw-r--r--  drivers/gpu/drm/adp/adp_drv.c | 612
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 249
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 177
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 464
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 89
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 206
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h | 133
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 177
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc24.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 201
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 221
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 264
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 675
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 671
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 623
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 659
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 598
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 577
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 199
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 75
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 296
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c | 106
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 120
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h | 94
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h | 148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 221
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_interface.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/amd_acpi.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 27
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c | 78
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 32
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h | 147
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h | 143
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c | 384
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 64
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 64
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 489
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi83.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 6
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 61
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_dma_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c | 54
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/drm_gem_dma_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c | 12
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 30
-rw-r--r--  drivers/gpu/drm/drm_mipi_dbi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 54
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 8
-rw-r--r--  drivers/gpu/drm/drm_writeback.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 24
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_color.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_gamma.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_merge.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mdp_rdma.c | 4
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 29
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 72
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 17
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 10
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h | 19
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 140
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 233
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 468
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 40
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 30
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 21
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 298
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 25
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 19
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_drm.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 193
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 32
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 21
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 120
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 31
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 124
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 324
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_dsc_helper.h | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 54
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.c | 19
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 4
-rw-r--r--  drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml | 1
-rw-r--r--  drivers/gpu/drm/msm/registers/display/hdmi.xml | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/ioctl.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.c | 12
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 31
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-ilitek-ili9882t.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-raydium-rm67200.c | 499
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 64
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 107
-rw-r--r--  drivers/gpu/drm/panel/panel-summit.c | 132
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-rm692e5.c | 442
-rw-r--r--  drivers/gpu/drm/panthor/panthor_fw.c | 9
-rw-r--r--  drivers/gpu/drm/panthor/panthor_fw.h | 6
-rw-r--r--  drivers/gpu/drm/panthor/panthor_heap.c | 62
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c | 8
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 114
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 16
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c | 16
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 1574
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 278
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c | 80
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 1798
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 25
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_internal.h | 91
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 39
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c | 4
-rw-r--r--  drivers/gpu/drm/tests/drm_atomic_state_test.c | 24
-rw-r--r--  drivers/gpu/drm/tests/drm_format_helper_test.c | 81
-rw-r--r--  drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c | 254
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c | 41
-rw-r--r--  drivers/gpu/drm/tiny/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/tiny/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tiny/appletbdrm.c | 841
-rw-r--r--  drivers/gpu/drm/tiny/arcpgu.c | 4
-rw-r--r--  drivers/gpu/drm/tiny/repaper.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_backup.c | 207
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 247
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 577
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 83
-rw-r--r--  drivers/gpu/drm/vboxvideo/hgsmi_base.c | 37
-rw-r--r--  drivers/gpu/drm/vboxvideo/vboxvideo_guest.h | 2
-rw-r--r--  drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c | 46
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 81
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 1
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 202
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h | 36
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_shrinker.c | 258
-rw-r--r--  drivers/gpu/drm/xe/xe_shrinker.h | 18
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_sys_mgr.c | 3
-rw-r--r--  drivers/gpu/host1x/debug.c | 9
-rw-r--r--  drivers/gpu/host1x/debug.h | 1
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 38
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 23
-rw-r--r--  drivers/gpu/ipu-v3/ipu-csi.c | 108
-rw-r--r--  drivers/gpu/ipu-v3/ipu-ic.c | 73
-rw-r--r--  drivers/gpu/ipu-v3/ipu-image-convert.c | 48
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prv.h | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-vdi.c | 11
-rw-r--r--  drivers/staging/fbtft/fbtft-core.c | 4
-rw-r--r--  include/drm/display/drm_dp_helper.h | 2
-rw-r--r--  include/drm/drm_format_helper.h | 3
-rw-r--r--  include/drm/drm_gem.h | 14
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 2
-rw-r--r--  include/drm/drm_kunit_helpers.h | 2
-rw-r--r--  include/drm/drm_mipi_dsi.h | 2
-rw-r--r--  include/drm/drm_print.h | 41
-rw-r--r--  include/drm/gpu_scheduler.h | 122
-rw-r--r--  include/drm/ttm/ttm_backup.h | 74
-rw-r--r--  include/drm/ttm/ttm_bo.h | 93
-rw-r--r--  include/drm/ttm/ttm_pool.h | 8
-rw-r--r--  include/drm/ttm/ttm_tt.h | 69
-rw-r--r--  include/dt-bindings/clock/qcom,dsi-phy-28nm.h | 9
-rw-r--r--  include/linux/component.h | 4
-rw-r--r--  include/uapi/linux/kfd_ioctl.h | 10
-rw-r--r--  include/uapi/linux/kfd_sysfs.h | 3
-rw-r--r--  include/video/imx-ipu-image-convert.h | 32
-rw-r--r--  include/video/imx-ipu-v3.h | 14
520 files changed, 18159 insertions, 8614 deletions
diff --git a/.mailmap b/.mailmap
index a897c16d3bae..428672d12ba3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -200,10 +200,11 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
<dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
Dikshita Agarwal <quic_dikshita@quicinc.com> <dikshita@codeaurora.org>
-Dmitry Baryshkov <dbaryshkov@gmail.com>
-Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
-Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
-Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_eremin@mentor.com>
+Dmitry Baryshkov <lumag@kernel.org> <dbaryshkov@gmail.com>
+Dmitry Baryshkov <lumag@kernel.org> <[dbaryshkov@gmail.com]>
+Dmitry Baryshkov <lumag@kernel.org> <dmitry_baryshkov@mentor.com>
+Dmitry Baryshkov <lumag@kernel.org> <dmitry_eremin@mentor.com>
+Dmitry Baryshkov <lumag@kernel.org> <dmitry.baryshkov@linaro.org>
Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
@@ -323,7 +324,8 @@ Jeff Johnson <jeff.johnson@oss.qualcomm.com> <quic_jjohnson@quicinc.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
-Jeffrey Hugo <quic_jhugo@quicinc.com> <jhugo@codeaurora.org>
+Jeff Hugo <jeff.hugo@oss.qualcomm.com> <jhugo@codeaurora.org>
+Jeff Hugo <jeff.hugo@oss.qualcomm.com> <quic_jhugo@quicinc.com>
Jens Axboe <axboe@kernel.dk> <axboe@suse.de>
Jens Axboe <axboe@kernel.dk> <jens.axboe@oracle.com>
Jens Axboe <axboe@kernel.dk> <axboe@fb.com>
@@ -613,6 +615,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
+Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
+Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
diff --git a/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml b/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml
new file mode 100644
index 000000000000..5e6da66499a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/apple,h7-display-pipe-mipi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple pre-DCP display controller MIPI interface
+
+maintainers:
+ - Sasha Finkelstein <fnkl.kernel@gmail.com>
+
+description:
+ The MIPI controller part of the pre-DCP Apple display controller
+
+allOf:
+ - $ref: dsi-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - apple,t8112-display-pipe-mipi
+ - apple,t8103-display-pipe-mipi
+ - const: apple,h7-display-pipe-mipi
+
+ reg:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Input port. Always connected to the primary controller
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Output MIPI DSI port to the panel
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - ports
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ dsi@28200000 {
+ compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi";
+ reg = <0x28200000 0xc000>;
+ power-domains = <&ps_dispdfr_mipi>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dfr_mipi_in_adp: endpoint {
+ remote-endpoint = <&dfr_adp_out_mipi>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ dfr_panel_in: endpoint {
+ remote-endpoint = <&dfr_mipi_out_panel>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml b/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml
new file mode 100644
index 000000000000..102fb1804c0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/apple,h7-display-pipe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple pre-DCP display controller
+
+maintainers:
+ - Sasha Finkelstein <fnkl.kernel@gmail.com>
+
+description:
+ A secondary display controller used to drive the "touchbar" on
+ certain Apple laptops.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - apple,t8112-display-pipe
+ - apple,t8103-display-pipe
+ - const: apple,h7-display-pipe
+
+ reg:
+ items:
+ - description: Primary register block, controls planes and blending
+ - description:
+ Contains other configuration registers like interrupt
+ and FIFO control
+
+ reg-names:
+ items:
+ - const: be
+ - const: fe
+
+ power-domains:
+ description:
+ Phandles to pmgr entries that are needed for this controller to turn on.
+ Aside from that, their specific functions are unknown
+ maxItems: 2
+
+ interrupts:
+ items:
+ - description: Unknown function
+ - description: Primary interrupt. Vsync events are reported via it
+
+ interrupt-names:
+ items:
+ - const: be
+ - const: fe
+
+ iommus:
+ maxItems: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Output port. Always connected to apple,h7-display-pipe-mipi
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/apple-aic.h>
+ display-pipe@28200000 {
+ compatible = "apple,t8103-display-pipe", "apple,h7-display-pipe";
+ reg = <0x28200000 0xc000>,
+ <0x28400000 0x4000>;
+ reg-names = "be", "fe";
+ power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 502 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 506 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "be", "fe";
+ iommus = <&displaydfr_dart 0>;
+
+ port {
+ dfr_adp_out_mipi: endpoint {
+ remote-endpoint = <&dfr_mipi_in_adp>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
index ffbd1dc9470e..2aab33cd0017 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
@@ -231,6 +231,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 7
maxItems: 7
clock-names:
items:
@@ -248,29 +249,12 @@ allOf:
contains:
enum:
- qcom,msm8916-dsi-ctrl
- then:
- properties:
- clocks:
- maxItems: 6
- clock-names:
- items:
- - const: mdp_core
- - const: iface
- - const: bus
- - const: byte
- - const: pixel
- - const: core
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,msm8953-dsi-ctrl
- qcom,msm8976-dsi-ctrl
then:
properties:
clocks:
+ minItems: 6
maxItems: 6
clock-names:
items:
@@ -291,6 +275,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 7
maxItems: 7
clock-names:
items:
@@ -311,6 +296,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 7
maxItems: 7
clock-names:
items:
@@ -328,28 +314,13 @@ allOf:
contains:
enum:
- qcom,msm8998-dsi-ctrl
- - qcom,sm6125-dsi-ctrl
- - qcom,sm6350-dsi-ctrl
- then:
- properties:
- clocks:
- maxItems: 6
- clock-names:
- items:
- - const: byte
- - const: byte_intf
- - const: pixel
- - const: core
- - const: iface
- - const: bus
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,sc7180-dsi-ctrl
- qcom,sc7280-dsi-ctrl
+ - qcom,sdm845-dsi-ctrl
+ - qcom,sm6115-dsi-ctrl
+ - qcom,sm6125-dsi-ctrl
+ - qcom,sm6350-dsi-ctrl
+ - qcom,sm6375-dsi-ctrl
- qcom,sm6150-dsi-ctrl
- qcom,sm7150-dsi-ctrl
- qcom,sm8150-dsi-ctrl
@@ -361,6 +332,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 6
maxItems: 6
clock-names:
items:
@@ -380,6 +352,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 9
maxItems: 9
clock-names:
items:
@@ -393,27 +366,6 @@ allOf:
- const: pixel
- const: core
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm845-dsi-ctrl
- - qcom,sm6115-dsi-ctrl
- - qcom,sm6375-dsi-ctrl
- then:
- properties:
- clocks:
- maxItems: 6
- clock-names:
- items:
- - const: byte
- - const: byte_intf
- - const: pixel
- - const: core
- - const: iface
- - const: bus
-
unevaluatedProperties: false
examples:
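
[Editor's note] The consolidated rule above requires exactly six clocks named "byte", "byte_intf", "pixel", "core", "iface" and "bus" for controllers such as qcom,sdm845-dsi-ctrl. A minimal DTS sketch of a conforming node follows; the unit address, provider phandles and SDM845 clock IDs are illustrative assumptions, not taken from this patch:

    dsi@ae94000 {
        compatible = "qcom,sdm845-dsi-ctrl", "qcom,mdss-dsi-ctrl";
        reg = <0x0ae94000 0x400>;
        reg-names = "dsi_ctrl";

        /* Six clocks, in the clock-names order the schema requires */
        clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
                 <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
                 <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
                 <&dispcc DISP_CC_MDSS_ESC0_CLK>,
                 <&dispcc DISP_CC_MDSS_AHB_CLK>,
                 <&gcc GCC_DISP_AXI_CLK>;
        clock-names = "byte", "byte_intf", "pixel",
                      "core", "iface", "bus";
    };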
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
index 6b57ce41c95f..d0ce85a08b6d 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
@@ -15,6 +15,8 @@ description:
properties:
"#clock-cells":
const: 1
+ description:
+ See include/dt-bindings/clock/qcom,dsi-phy-28nm.h for clock IDs.
"#phy-cells":
const: 0
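
[Editor's note] A hedged sketch of how a consumer uses the single clock cell documented above, assuming the header added by this series exports DSI_BYTE_PLL_CLK and DSI_PIXEL_PLL_CLK for the PHY's two PLL outputs (the names should be checked against include/dt-bindings/clock/qcom,dsi-phy-28nm.h):

    #include <dt-bindings/clock/qcom,dsi-phy-28nm.h>

    /* The display clock controller feeds from the two DSI PHY PLL outputs */
    &dispcc {
        clocks = <&dsi0_phy DSI_BYTE_PLL_CLK>,
                 <&dsi0_phy DSI_PIXEL_PLL_CLK>;
    };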
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml
index ab884e236429..4392aa7a4ffe 100644
--- a/Documentation/devicetree/bindings/display/msm/gmu.yaml
+++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml
@@ -123,6 +123,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,adreno-gmu-623.0
- qcom,adreno-gmu-635.0
- qcom,adreno-gmu-660.1
- qcom,adreno-gmu-663.0
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
index a90a8b3f1a9e..5fac3e266703 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
@@ -52,6 +52,13 @@ patternProperties:
items:
- const: qcom,sa8775p-dp
+ "^phy@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ const: qcom,sa8775p-edp-phy
+
required:
- compatible
@@ -61,6 +68,7 @@ examples:
- |
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
#include <dt-bindings/clock/qcom,sa8775p-gcc.h>
#include <dt-bindings/interconnect/qcom,sa8775p-rpmh.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
@@ -158,6 +166,26 @@ examples:
};
};
+ mdss0_dp0_phy: phy@aec2a00 {
+ compatible = "qcom,sa8775p-edp-phy";
+
+ reg = <0x0aec2a00 0x200>,
+ <0x0aec2200 0xd0>,
+ <0x0aec2600 0xd0>,
+ <0x0aec2000 0x1c8>;
+
+ clocks = <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "aux",
+ "cfg_ahb";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&vreg_l1c>;
+ vdda-pll-supply = <&vreg_l4a>;
+ };
+
displayport-controller@af54000 {
compatible = "qcom,sa8775p-dp";
@@ -186,9 +214,9 @@ examples:
assigned-clocks = <&dispcc_mdss_dptx0_link_clk_src>,
<&dispcc_mdss_dptx0_pixel0_clk_src>;
- assigned-clock-parents = <&mdss0_edp_phy 0>, <&mdss0_edp_phy 1>;
+ assigned-clock-parents = <&mdss0_dp0_phy 0>, <&mdss0_dp0_phy 1>;
- phys = <&mdss0_edp_phy>;
+ phys = <&mdss0_dp0_phy>;
phy-names = "dp";
operating-points-v2 = <&dp_opp_table>;
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml
index 1ea50a2c7c8e..59192c59ddb9 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml
@@ -30,10 +30,14 @@ properties:
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 2
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
@@ -91,9 +95,9 @@ examples:
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
- interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>,
- <&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "mdp0-mem", "mdp1-mem";
+ interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_DISPLAY_CFG 0>;
+ interconnect-names = "mdp0-mem", "cpu-cfg";
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml
index 24cece1e888b..a1c53e191033 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml
@@ -29,10 +29,14 @@ properties:
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 2
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
@@ -75,12 +79,17 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
+ #include <dt-bindings/interconnect/qcom,sm8650-rpmh.h>
display-subsystem@ae00000 {
compatible = "qcom,sm8650-mdss";
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
+ interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_DISPLAY_CFG 0>;
+ interconnect-names = "mdp0-mem", "cpu-cfg";
+
resets = <&dispcc_core_bcr>;
power-domains = <&dispcc_gdsc>;
diff --git a/Documentation/devicetree/bindings/display/panel/apple,summit.yaml b/Documentation/devicetree/bindings/display/panel/apple,summit.yaml
new file mode 100644
index 000000000000..f081755325e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/apple,summit.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/apple,summit.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple "Summit" display panel
+
+maintainers:
+ - Sasha Finkelstein <fnkl.kernel@gmail.com>
+
+description:
+ An OLED panel used as a touchbar on certain Apple laptops.
+ Contains a backlight device, which controls brightness of the panel itself.
+ The backlight common properties are included for this reason
+
+allOf:
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/leds/backlight/common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - apple,j293-summit
+ - apple,j493-summit
+ - const: apple,summit
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - max-brightness
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "apple,j293-summit", "apple,summit";
+ reg = <0>;
+ max-brightness = <255>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&dfr_bridge_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml
index e80fc7006984..548f5ac14500 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml
@@ -40,6 +40,8 @@ properties:
- auo,g185han01
# AU Optronics Corporation 19.0" (1280x1024) TFT LCD panel
- auo,g190ean01
+ # BOE AV123Z7M-N17 12.3" (1920x720) LVDS TFT LCD panel
+ - boe,av123z7m-n17
# Kaohsiung Opto-Electronics Inc. 10.1" WUXGA (1920 x 1200) LVDS TFT LCD panel
- koe,tx26d202vm0bwa
# Lincoln Technology Solutions, LCD185-101CT 10.1" TFT 1920x1200
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index e3ee3a332bb7..b0de4fd6f3d4 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -63,6 +63,8 @@ properties:
- auo,t215hvn01
# Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
- avic,tm070ddh03
+ # BOE AV101HDT-a10 10.1" 1280x720 LVDS panel
+ - boe,av101hdt-a10
# BOE BP082WX1-100 8.2" WXGA (1280x800) LVDS panel
- boe,bp082wx1-100
# BOE BP101WX1-100 10.1" WXGA (1280x800) LVDS panel
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml
new file mode 100644
index 000000000000..54c9c0ef45ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm67200.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm67200.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium RM67200 based MIPI-DSI panels
+
+maintainers:
+ - Sebastian Reichel <sebastian.reichel@collabora.com>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - wanchanglong,w552793baa
+ - const: raydium,rm67200
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: 2.8V Logic voltage
+
+ iovcc-supply:
+ description: 1.8V IO voltage
+
+ vsp-supply:
+ description: positive 5.5V voltage
+
+ vsn-supply:
+ description: negative 5.5V voltage
+
+ backlight: true
+ port: true
+ reset-gpios: true
+
+required:
+ - compatible
+ - port
+ - reg
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "wanchanglong,w552793baa", "raydium,rm67200";
+ reg = <0>;
+
+ vdd-supply = <&regulator1>;
+ iovcc-supply = <&regulator2>;
+ vsp-supply = <&regulator3>;
+ vsn-supply = <&regulator4>;
+ reset-gpios = <&gpiobank 42 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,rm692e5.yaml b/Documentation/devicetree/bindings/display/panel/visionox,rm692e5.yaml
new file mode 100644
index 000000000000..d4b4672815fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/visionox,rm692e5.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/visionox,rm692e5.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Visionox RM692E5 6.55" 2400x1080 120Hz MIPI-DSI Panel
+
+maintainers:
+ - Danila Tikhonov <danila@jiaxyga.com>
+
+description:
+ The Visionox RM692E5 is a generic DSI Panel IC used to control
+ AMOLED panels.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - visionox,rm692e5
+ - items:
+ - enum:
+ - nothing,rm692e5-spacewar
+ - const: visionox,rm692e5
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: 3.3V source voltage rail
+
+ vddio-supply:
+ description: 1.8V I/O source voltage rail
+
+ reset-gpios: true
+ port: true
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - vdd-supply
+ - vddio-supply
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "nothing,rm692e5-spacewar",
+ "visionox,rm692e5";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 44 GPIO_ACTIVE_LOW>;
+
+ vdd-supply = <&vdd_oled>;
+ vddio-supply = <&vdd_io_oled>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml
index 46d956e63338..f546d481b7e5 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop2.yaml
@@ -14,12 +14,14 @@ description:
maintainers:
- Sandy Huang <hjc@rock-chips.com>
- Heiko Stuebner <heiko@sntech.de>
+ - Andy Yan <andyshrk@163.com>
properties:
compatible:
enum:
- rockchip,rk3566-vop
- rockchip,rk3568-vop
+ - rockchip,rk3576-vop
- rockchip,rk3588-vop
reg:
@@ -37,10 +39,21 @@ properties:
- const: gamma-lut
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 4
description:
- The VOP interrupt is shared by several interrupt sources, such as
- frame start (VSYNC), line flag and other status interrupts.
+ For VOP versions before rk3576, the interrupt is shared by several
+ interrupt sources, such as frame start (VSYNC), line flag and other status interrupts.
+ From rk3576 onwards, there is a system interrupt for bus errors, and
+ every video port has its own independent interrupts for vsync and
+ other video-port-related errors.
+
+ interrupt-names:
+ items:
+ - const: sys
+ - const: vp0
+ - const: vp1
+ - const: vp2
# See compatible-specific constraints below.
clocks:
@@ -124,43 +137,100 @@ allOf:
properties:
compatible:
contains:
- const: rockchip,rk3588-vop
+ enum:
+ - rockchip,rk3566-vop
+ - rockchip,rk3568-vop
then:
properties:
clocks:
- minItems: 7
+ maxItems: 5
+
clock-names:
- minItems: 7
+ maxItems: 5
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names: false
ports:
required:
- port@0
- port@1
- port@2
- - port@3
+
+ rockchip,vo1-grf: false
+ rockchip,vop-grf: false
+ rockchip,pmu: false
required:
- rockchip,grf
- - rockchip,vo1-grf
- - rockchip,vop-grf
- - rockchip,pmu
- else:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3576-vop
+ then:
properties:
+ clocks:
+ maxItems: 5
+
+ clock-names:
+ maxItems: 5
+
+ interrupts:
+ minItems: 4
+
+ interrupt-names:
+ minItems: 4
+
+ ports:
+ required:
+ - port@0
+ - port@1
+ - port@2
+
rockchip,vo1-grf: false
rockchip,vop-grf: false
- rockchip,pmu: false
+ required:
+ - rockchip,grf
+ - rockchip,pmu
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3588-vop
+ then:
+ properties:
clocks:
- maxItems: 5
+ minItems: 7
+ maxItems: 9
+
clock-names:
- maxItems: 5
+ minItems: 7
+ maxItems: 9
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names: false
ports:
required:
- port@0
- port@1
- port@2
+ - port@3
+
+ required:
+ - rockchip,grf
+ - rockchip,vo1-grf
+ - rockchip,vop-grf
+ - rockchip,pmu
additionalProperties: false
@@ -188,6 +258,7 @@ examples:
"dclk_vp1",
"dclk_vp2";
power-domains = <&power RK3568_PD_VO>;
+ rockchip,grf = <&grf>;
iommus = <&vop_mmu>;
vop_out: ports {
#address-cells = <1>;
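
[Editor's note] A sketch of the new per-port interrupt layout described above for rk3576; the GIC_SPI numbers and unit address are placeholders, not values from this patch:

    vop@27d00000 {
        compatible = "rockchip,rk3576-vop";
        /* One system interrupt plus one per video port, per the schema */
        interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "sys", "vp0", "vp1", "vp2";
    };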
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 735c7f06c24e..fc8e82cb28a9 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -25,6 +25,7 @@ properties:
- renesas,r9a07g044-mali
- renesas,r9a07g054-mali
- rockchip,px30-mali
+ - rockchip,rk3562-mali
- rockchip,rk3568-mali
- rockchip,rk3576-mali
- const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
diff --git a/Documentation/gpu/amdgpu/amdgpu-glossary.rst b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
index 00a47ebb0b0f..1e9283e076ba 100644
--- a/Documentation/gpu/amdgpu/amdgpu-glossary.rst
+++ b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
@@ -12,6 +12,9 @@ we have a dedicated glossary for Display Core at
The number of CUs that are active on the system. The number of active
CUs may be less than SE * SH * CU depending on the board configuration.
+ CE
+ Constant Engine
+
CP
Command Processor
@@ -68,6 +71,9 @@ we have a dedicated glossary for Display Core at
IB
Indirect Buffer
+ IMU
+ Integrated Management Unit (Power Management support)
+
IP
Intellectual Property blocks
@@ -80,6 +86,12 @@ we have a dedicated glossary for Display Core at
KIQ
Kernel Interface Queue
+ MC
+ Memory Controller
+
+ ME
+ MicroEngine (Graphics)
+
MEC
MicroEngine Compute
@@ -92,6 +104,9 @@ we have a dedicated glossary for Display Core at
MQD
Memory Queue Descriptor
+ PFP
+ Pre-Fetch Parser (Graphics)
+
PPLib
PowerPlay Library - PowerPlay is the power management component.
@@ -99,7 +114,10 @@ we have a dedicated glossary for Display Core at
Platform Security Processor
RLC
- RunList Controller
+ RunList Controller. This name is a remnant of past ages and doesn't have
+ much meaning today. It's a group of general-purpose helper engines for
+ the GFX block. It's involved in GFX power management and SR-IOV, among
+ other things.
SDMA
System DMA
@@ -110,14 +128,35 @@ we have a dedicated glossary for Display Core at
SH
SHader array
- SMU
- System Management Unit
+ SMU/SMC
+ System Management Unit / System Management Controller
+
+ SRLC
+ Save/Restore List Control
+
+ SRLG
+ Save/Restore List GPM_MEM
+
+ SRLS
+ Save/Restore List SRM_MEM
SS
Spread Spectrum
+ TA
+ Trusted Application
+
+ TOC
+ Table of Contents
+
+ UVD
+ Unified Video Decoder
+
VCE
Video Compression Engine
VCN
Video Codec Next
+
+ VPE
+ Video Processing Engine
diff --git a/Documentation/gpu/amdgpu/display/dc-glossary.rst b/Documentation/gpu/amdgpu/display/dc-glossary.rst
index 0b0ffd428dd2..7dc034e9e586 100644
--- a/Documentation/gpu/amdgpu/display/dc-glossary.rst
+++ b/Documentation/gpu/amdgpu/display/dc-glossary.rst
@@ -167,9 +167,6 @@ consider asking in the amdgfx and update this page.
MALL
Memory Access at Last Level
- MC
- Memory Controller
-
MPC/MPCC
Multiple pipes and plane combine
@@ -232,6 +229,3 @@ consider asking in the amdgfx and update this page.
VRR
Variable Refresh Rate
-
- UVD
- Unified Video Decoder
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index cb9ae282771c..94f93fd3b8a0 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -208,6 +208,13 @@ follows:
``CONFIG_VIRTIO_UML`` and ``CONFIG_UML_PCI_OVER_VIRTIO`` are not
included in it because they are only required for User Mode Linux.
+KUnit Coverage Rules
+~~~~~~~~~~~~~~~~~~~~
+
+KUnit support is gradually added to the DRM framework and helpers. There's no
+general requirement for the framework and helpers to have KUnit tests at the
+moment. However, patches that are affecting a function or helper already
+covered by KUnit tests must provide tests if the change calls for one.
Legacy Support Code
===================
diff --git a/MAINTAINERS b/MAINTAINERS
index cddcb097f7f3..0fa53d0bb346 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
-M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+R: Rodrigo Siqueira <siqueira@igalia.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
AMD DISPLAY CORE - DML
-M: Chaitanya Dhere <chaitanya.dhere@amd.com>
+M: Austin Zheng <austin.zheng@amd.com>
M: Jun Lei <jun.lei@amd.com>
S: Supported
F: drivers/gpu/drm/amd/display/dc/dml/
@@ -7149,6 +7149,14 @@ S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/sun4i/sun8i*
+DRM DRIVER FOR APPLE TOUCH BARS
+M: Aun-Ali Zaidi <admin@kodeit.net>
+M: Aditya Garg <gargaditya08@live.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/gpu/drm/tiny/appletbdrm.c
+
DRM DRIVER FOR ARM PL111 CLCD
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
@@ -7347,7 +7355,8 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/mgag200/
DRM DRIVER FOR MI0283QT
-S: Orphan
+M: Alex Lanzano <lanzano.alex@gmail.com>
+S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
F: drivers/gpu/drm/tiny/mi0283qt.c
@@ -7380,7 +7389,7 @@ F: include/uapi/drm/msm_drm.h
DRM DRIVER for Qualcomm display hardware
M: Rob Clark <robdclark@gmail.com>
M: Abhinav Kumar <quic_abhinavk@quicinc.com>
-M: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+M: Dmitry Baryshkov <lumag@kernel.org>
R: Sean Paul <sean@poorly.run>
R: Marijn Suijten <marijn.suijten@somainline.org>
L: linux-arm-msm@vger.kernel.org
@@ -7392,6 +7401,7 @@ T: git https://gitlab.freedesktop.org/drm/msm.git
F: Documentation/devicetree/bindings/display/msm/
F: drivers/gpu/drm/ci/xfails/msm*
F: drivers/gpu/drm/msm/
+F: include/dt-bindings/clock/qcom,dsi-phy-28nm.h
F: include/uapi/drm/msm_drm.h
DRM DRIVER FOR NOVATEK NT35510 PANELS
@@ -7449,7 +7459,8 @@ F: Documentation/devicetree/bindings/display/bridge/ps8640.yaml
F: drivers/gpu/drm/bridge/parade-ps8640.c
DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
-S: Orphan
+M: Alex Lanzano <lanzano.alex@gmail.com>
+S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/repaper.txt
F: drivers/gpu/drm/tiny/repaper.c
@@ -7826,6 +7837,22 @@ F: drivers/gpu/host1x/
F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
+DRM DRIVERS FOR PRE-DCP APPLE DISPLAY OUTPUT
+M: Sasha Finkelstein <fnkl.kernel@gmail.com>
+R: Janne Grunau <j@jannau.net>
+L: dri-devel@lists.freedesktop.org
+L: asahi@lists.linux.dev
+S: Maintained
+W: https://asahilinux.org
+B: https://github.com/AsahiLinux/linux/issues
+C: irc://irc.oftc.net/asahi-dev
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml
+F: Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml
+F: Documentation/devicetree/bindings/display/panel/apple,summit.yaml
+F: drivers/gpu/drm/adp/
+F: drivers/gpu/drm/panel/panel-summit.c
+
DRM DRIVERS FOR RENESAS R-CAR
M: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
M: Tomi Valkeinen <tomi.valkeinen+renesas@ideasonboard.com>
@@ -19425,7 +19452,7 @@ F: drivers/clk/qcom/
F: include/dt-bindings/clock/qcom,*
QUALCOMM CLOUD AI (QAIC) DRIVER
-M: Jeffrey Hugo <quic_jhugo@quicinc.com>
+M: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
R: Carl Vanderlip <quic_carlv@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
@@ -19655,7 +19682,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
-M: Xinhui Pan <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
B: https://gitlab.freedesktop.org/drm/amd/-/issues
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index aa07e67400ef..da1ac89bb78f 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -349,8 +349,6 @@ static irqreturn_t mailbox_irq_handler(int irq, void *p)
trace_mbox_irq_handle(MAILBOX_NAME, irq);
/* Schedule a rx_work to call the callback functions */
queue_work(mb_chann->work_q, &mb_chann->rx_work);
- /* Clear IOHUB register */
- mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
return IRQ_HANDLED;
}
@@ -367,6 +365,9 @@ static void mailbox_rx_worker(struct work_struct *rx_work)
return;
}
+again:
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
while (1) {
/*
* If return is 0, keep consuming next message, until there is
@@ -380,10 +381,18 @@ static void mailbox_rx_worker(struct work_struct *rx_work)
if (unlikely(ret)) {
MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
WRITE_ONCE(mb_chann->bad_state, true);
- disable_irq(mb_chann->msix_irq);
- break;
+ return;
}
}
+
+ /*
+ * The hardware will not generate an interrupt if the firmware creates
+ * a new response right after the driver clears the interrupt register.
+ * Check the interrupt register to make sure there is no new response
+ * before exiting.
+ */
+ if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
+ goto again;
}
int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 5d10600bbc25..a482708566bc 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -569,6 +569,20 @@ void component_master_del(struct device *parent,
}
EXPORT_SYMBOL_GPL(component_master_del);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops)
+{
+ struct aggregate_device *adev;
+
+ guard(mutex)(&component_mutex);
+ adev = __aggregate_find(parent, ops);
+ if (!adev)
+ return false;
+
+ return adev->bound;
+}
+EXPORT_SYMBOL_GPL(component_master_is_bound);
+
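component_master_is_bound() is exported without kerneldoc; the obvious consumer
is a driver that must not touch an aggregate device until the master has bound.
A hypothetical sketch (example_wait_for_master() is a stand-in, and the
declaration is assumed to land in <linux/component.h>):

#include <linux/component.h>
#include <linux/errno.h>

/*
 * parent and ops must be the same pair that was registered with
 * component_master_add_with_match().
 */
static int example_wait_for_master(struct device *parent,
				   const struct component_master_ops *ops)
{
	if (!component_master_is_bound(parent, ops))
		return -EPROBE_DEFER;	/* retry once the master binds */

	return 0;
}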
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index c8e48f621a8c..efa3b6dddf4d 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -608,7 +608,7 @@ fw_load_ready_state:
return;
error_ready_state:
- if (fw_load_type == MHI_FW_LOAD_FBC) {
+ if (mhi_cntrl->fbc_image) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 693f338e5b28..1be14d8634f4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -448,6 +448,8 @@ source "drivers/gpu/drm/mcde/Kconfig"
source "drivers/gpu/drm/tidss/Kconfig"
+source "drivers/gpu/drm/adp/Kconfig"
+
source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 186b611a88b5..ed54a546bbe2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -207,6 +207,7 @@ obj-y += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_ADP) += adp/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
diff --git a/drivers/gpu/drm/adp/Kconfig b/drivers/gpu/drm/adp/Kconfig
new file mode 100644
index 000000000000..9fcc27eb200d
--- /dev/null
+++ b/drivers/gpu/drm/adp/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_ADP
+ tristate "DRM Support for pre-DCP Apple display controllers"
+ depends on DRM && OF && ARM64
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_PANEL_BRIDGE
+ select VIDEOMODE_HELPERS
+ select DRM_MIPI_DSI
+ help
+ Choose this option if you have an Apple Arm laptop with a touchbar.
+
+ If M is selected, this module will be called adpdrm.
diff --git a/drivers/gpu/drm/adp/Makefile b/drivers/gpu/drm/adp/Makefile
new file mode 100644
index 000000000000..8e7b618edd35
--- /dev/null
+++ b/drivers/gpu/drm/adp/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+adpdrm-y := adp_drv.o
+adpdrm-mipi-y := adp-mipi.o
+obj-$(CONFIG_DRM_ADP) += adpdrm.o adpdrm-mipi.o
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
new file mode 100644
index 000000000000..ad80542b60ed
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define DSI_GEN_HDR 0x6c
+#define DSI_GEN_PLD_DATA 0x70
+
+#define DSI_CMD_PKT_STATUS 0x74
+
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_RD_CMD_BUSY BIT(6)
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+struct adp_mipi_drv_private {
+ struct mipi_dsi_host dsi;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ void __iomem *mipi;
+};
+
+#define mipi_to_adp(x) container_of(x, struct adp_mipi_drv_private, dsi)
+
+static int adp_dsi_gen_pkt_hdr_write(struct adp_mipi_drv_private *adp, u32 hdr_val)
+{
+ int ret;
+ u32 val, mask;
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_CMD_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to get available command FIFO\n");
+ return ret;
+ }
+
+ writel(hdr_val, adp->mipi + DSI_GEN_HDR);
+
+ mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, (val & mask) == mask,
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to write command FIFO\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp_dsi_write(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_packet *packet)
+{
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret;
+ __le32 word;
+ u32 val;
+
+ while (len) {
+ if (len < pld_data_bytes) {
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ len = 0;
+ } else {
+ memcpy(&word, tx_buf, pld_data_bytes);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_W_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev,
+ "failed to get available write payload FIFO\n");
+ return ret;
+ }
+ }
+
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return adp_dsi_gen_pkt_hdr_write(adp, le32_to_cpu(word));
+}
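The payload therefore goes out through DSI_GEN_PLD_DATA four bytes at a time,
with a short tail zero-padded into its own word. A worked illustration of the
packing, as plain userspace C (the assertions assume a little-endian host,
which matches the byte order writel() then puts on the wire):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const uint8_t payload[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	uint32_t w0 = 0, w1 = 0;

	memcpy(&w0, payload, 4);	/* one full 32-bit word */
	memcpy(&w1, payload + 4, 2);	/* 2-byte tail, zero-padded */

	assert(w0 == 0x44332211u);	/* little-endian host view */
	assert(w1 == 0x00006655u);
	return 0;
}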
+
+static int adp_dsi_read(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_msg *msg)
+{
+ int i, j, ret, len = msg->rx_len;
+ u8 *buf = msg->rx_buf;
+ u32 val;
+
+ /* Wait for the end of the read operation */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_RD_CMD_BUSY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Timeout during read operation\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ /* The read FIFO must not be empty before all bytes are read */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_R_EMPTY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Read payload FIFO is empty\n");
+ return ret;
+ }
+
+ val = readl(adp->mipi + DSI_GEN_PLD_DATA);
+ for (j = 0; j < 4 && j + i < len; j++)
+ buf[i + j] = val >> (8 * j);
+ }
+
+ return ret;
+}
+
+static ssize_t adp_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
+
+ ret = adp_dsi_write(adp, &packet);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = adp_dsi_read(adp, msg);
+ if (ret)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
+ }
+
+ return nb_bytes;
+}
+
+static int adp_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ return 0;
+}
+
+static void adp_dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops adp_dsi_component_ops = {
+ .bind = adp_dsi_bind,
+ .unbind = adp_dsi_unbind,
+};
+
+static int adp_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct drm_bridge *next;
+ int ret;
+
+ next = devm_drm_of_get_bridge(adp->dsi.dev, adp->dsi.dev->of_node, 1, 0);
+ if (IS_ERR(next))
+ return PTR_ERR(next);
+
+ adp->next_bridge = next;
+
+ drm_bridge_add(&adp->bridge);
+
+ ret = component_add(host->dev, &adp_dsi_component_ops);
+ if (ret) {
+ pr_err("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&adp->bridge);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+
+ component_del(host->dev, &adp_dsi_component_ops);
+ drm_bridge_remove(&adp->bridge);
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
+ .transfer = adp_dsi_host_transfer,
+ .attach = adp_dsi_host_attach,
+ .detach = adp_dsi_host_detach,
+};
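These host ops are what a downstream panel driver ultimately exercises: the DCS
helpers in <drm/drm_mipi_dsi.h> build a packet and route it through .transfer,
i.e. adp_dsi_host_transfer() above. A hypothetical panel fragment
(example_panel_unblank() is not part of this series):

#include <drm/drm_mipi_dsi.h>

static int example_panel_unblank(struct mipi_dsi_device *dsi)
{
	int ret;

	/* both helpers end up in the host's .transfer callback */
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_display_on(dsi);
}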
+
+static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct adp_mipi_drv_private *adp =
+ container_of(bridge, struct adp_mipi_drv_private, bridge);
+
+ return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+}
+
+static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
+ .attach = adp_dsi_bridge_attach,
+};
+
+static int adp_mipi_probe(struct platform_device *pdev)
+{
+ struct adp_mipi_drv_private *adp;
+
+ adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
+ if (!adp)
+ return -ENOMEM;
+
+ adp->mipi = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adp->mipi)) {
+ dev_err(&pdev->dev, "failed to map mipi mmio");
+ return PTR_ERR(adp->mipi);
+ }
+
+ adp->dsi.dev = &pdev->dev;
+ adp->dsi.ops = &adp_dsi_host_ops;
+ adp->bridge.funcs = &adp_dsi_bridge_funcs;
+ adp->bridge.of_node = pdev->dev.of_node;
+ adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
+ dev_set_drvdata(&pdev->dev, adp);
+ return mipi_dsi_host_register(&adp->dsi);
+}
+
+static void adp_mipi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adp_mipi_drv_private *adp = dev_get_drvdata(dev);
+
+ mipi_dsi_host_unregister(&adp->dsi);
+}
+
+static const struct of_device_id adp_mipi_of_match[] = {
+ { .compatible = "apple,h7-display-pipe-mipi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_mipi_of_match);
+
+static struct platform_driver adp_mipi_platform_driver = {
+ .driver = {
+ .name = "adp-mipi",
+ .of_match_table = adp_mipi_of_match,
+ },
+ .probe = adp_mipi_probe,
+ .remove = adp_mipi_remove,
+};
+
+module_platform_driver(adp_mipi_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe MIPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
new file mode 100644
index 000000000000..0eeb9e5fab26
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#define ADP_INT_STATUS 0x34
+#define ADP_INT_STATUS_INT_MASK 0x7
+#define ADP_INT_STATUS_VBLANK 0x1
+#define ADP_CTRL 0x100
+#define ADP_CTRL_VBLANK_ON 0x12
+#define ADP_CTRL_FIFO_ON 0x601
+#define ADP_SCREEN_SIZE 0x0c
+#define ADP_SCREEN_HSIZE GENMASK(15, 0)
+#define ADP_SCREEN_VSIZE GENMASK(31, 16)
+
+#define ADBE_FIFO 0x10c0
+#define ADBE_FIFO_SYNC 0xc0000000
+
+#define ADBE_BLEND_BYPASS 0x2020
+#define ADBE_BLEND_EN1 0x2028
+#define ADBE_BLEND_EN2 0x2074
+#define ADBE_BLEND_EN3 0x202c
+#define ADBE_BLEND_EN4 0x2034
+#define ADBE_MASK_BUF 0x2200
+
+#define ADBE_SRC_START 0x4040
+#define ADBE_SRC_SIZE 0x4048
+#define ADBE_DST_START 0x4050
+#define ADBE_DST_SIZE 0x4054
+#define ADBE_STRIDE 0x4038
+#define ADBE_FB_BASE 0x4030
+
+#define ADBE_LAYER_EN1 0x4020
+#define ADBE_LAYER_EN2 0x4068
+#define ADBE_LAYER_EN3 0x40b4
+#define ADBE_LAYER_EN4 0x40f4
+#define ADBE_SCALE_CTL 0x40ac
+#define ADBE_SCALE_CTL_BYPASS 0x100000
+
+#define ADBE_LAYER_CTL 0x1038
+#define ADBE_LAYER_CTL_ENABLE 0x10000
+
+#define ADBE_PIX_FMT 0x402c
+#define ADBE_PIX_FMT_XRGB32 0x53e4001
+
+static int adp_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The modesetting driver does not check the non-desktop connector
+ * property and keeps the device open and locked. If the touchbar daemon
+ * opens the device first, modesetting breaks the whole X session.
+ * Simply refuse to open the device for X11 server processes as a
+ * workaround.
+ */
+ if (current->comm[0] == 'X')
+ return -EBUSY;
+
+ return drm_open(inode, filp);
+}
+
+static const struct file_operations adp_fops = {
+ .owner = THIS_MODULE,
+ .open = adp_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+};
+
+static int adp_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ args->height = ALIGN(args->height, 64);
+ args->size = args->pitch * args->height;
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
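Padding the height to a multiple of 64 makes the dumb buffer slightly taller
than the mode. A worked example with hypothetical numbers (the 2008x60
XRGB8888 mode below is assumed purely for illustration, not taken from this
driver):

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)((a) - 1)))

int main(void)
{
	uint32_t pitch = 2008 * 4;		/* XRGB8888: 4 bytes/pixel */
	uint32_t rows  = ALIGN_UP(60, 64);	/* height padded 60 -> 64 */

	assert(pitch == 8032 && rows == 64);
	assert(pitch * rows == 514048);		/* resulting buffer size */
	return 0;
}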
+
+static const struct drm_driver adp_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &adp_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(adp_drm_gem_dumb_create),
+ .name = "adp",
+ .desc = "Apple Display Pipe DRM Driver",
+ .major = 0,
+ .minor = 1,
+};
+
+struct adp_drv_private {
+ struct drm_device drm;
+ struct drm_crtc crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_bridge *next_bridge;
+ void __iomem *be;
+ void __iomem *fe;
+ u32 *mask_buf;
+ u64 mask_buf_size;
+ dma_addr_t mask_iova;
+ int be_irq;
+ int fe_irq;
+ spinlock_t irq_lock;
+ struct drm_pending_vblank_event *event;
+};
+
+#define to_adp(x) container_of(x, struct adp_drv_private, drm)
+#define crtc_to_adp(x) container_of(x, struct adp_drv_private, crtc)
+
+static int adp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+static void adp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp;
+ struct drm_rect src_rect;
+ struct drm_gem_dma_object *obj;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ u32 src_pos, src_size, dst_pos, dst_size;
+
+ if (!plane || !new_state)
+ return;
+
+ fb = new_state->fb;
+ if (!fb)
+ return;
+ adp = to_adp(plane->dev);
+
+ drm_rect_fp_to_int(&src_rect, &new_state->src);
+ src_pos = src_rect.x1 << 16 | src_rect.y1;
+ dst_pos = new_state->dst.x1 << 16 | new_state->dst.y1;
+ src_size = drm_rect_width(&src_rect) << 16 | drm_rect_height(&src_rect);
+ dst_size = drm_rect_width(&new_state->dst) << 16 |
+ drm_rect_height(&new_state->dst);
+ writel(src_pos, adp->be + ADBE_SRC_START);
+ writel(src_size, adp->be + ADBE_SRC_SIZE);
+ writel(dst_pos, adp->be + ADBE_DST_START);
+ writel(dst_size, adp->be + ADBE_DST_SIZE);
+ writel(fb->pitches[0], adp->be + ADBE_STRIDE);
+ obj = drm_fb_dma_get_gem_obj(fb, 0);
+ if (obj)
+ writel(obj->dma_addr + fb->offsets[0], adp->be + ADBE_FB_BASE);
+
+ writel(BIT(0), adp->be + ADBE_LAYER_EN1);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN2);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN3);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_SCALE_CTL_BYPASS, adp->be + ADBE_SCALE_CTL);
+ writel(ADBE_LAYER_CTL_ENABLE | BIT(0), adp->be + ADBE_LAYER_CTL);
+ writel(ADBE_PIX_FMT_XRGB32, adp->be + ADBE_PIX_FMT);
+}
+
+static void adp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = to_adp(plane->dev);
+
+ writel(0x0, adp->be + ADBE_LAYER_EN1);
+ writel(0x0, adp->be + ADBE_LAYER_EN2);
+ writel(0x0, adp->be + ADBE_LAYER_EN3);
+ writel(0x0, adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_LAYER_CTL_ENABLE, adp->be + ADBE_LAYER_CTL);
+}
+
+static const struct drm_plane_helper_funcs adp_plane_helper_funcs = {
+ .atomic_check = adp_plane_atomic_check,
+ .atomic_update = adp_plane_atomic_update,
+ .atomic_disable = adp_plane_atomic_disable,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS
+};
+
+static const struct drm_plane_funcs adp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_GEM_SHADOW_PLANE_FUNCS
+};
+
+static const u32 plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+#define ALL_CRTCS 1
+
+static struct drm_plane *adp_plane_new(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *plane;
+
+ plane = __drmm_universal_plane_alloc(drm, sizeof(struct drm_plane), 0,
+ ALL_CRTCS, &adp_plane_funcs,
+ plane_formats, ARRAY_SIZE(plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, "plane");
+ if (!plane) {
+ drm_err(drm, "failed to allocate plane");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_plane_helper_add(plane, &adp_plane_helper_funcs);
+ return plane;
+}
+
+static void adp_enable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+}
+
+static int adp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_enable_vblank(adp);
+
+ return 0;
+}
+
+static void adp_disable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl & ~ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+}
+
+static void adp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_disable_vblank(adp);
+}
+
+static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+
+ writel(BIT(0), adp->be + ADBE_BLEND_EN2);
+ writel(BIT(4), adp->be + ADBE_BLEND_EN1);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN3);
+ writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN4);
+}
+
+static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
+ writel(0x0, adp->be + ADBE_BLEND_EN2);
+ writel(0x0, adp->be + ADBE_BLEND_EN1);
+ writel(0x0, adp->be + ADBE_BLEND_EN3);
+ writel(0x0, adp->be + ADBE_BLEND_BYPASS);
+ writel(0x0, adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_off(crtc);
+}
+
+static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ u32 frame_num = 1;
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
+ u64 new_size = ALIGN(new_state->mode.hdisplay *
+ new_state->mode.vdisplay * 4, PAGE_SIZE);
+
+ if (new_size != adp->mask_buf_size) {
+ if (adp->mask_buf)
+ dma_free_coherent(crtc->dev->dev, adp->mask_buf_size,
+ adp->mask_buf, adp->mask_iova);
+ adp->mask_buf = NULL;
+ if (new_size != 0) {
+ adp->mask_buf = dma_alloc_coherent(crtc->dev->dev, new_size,
+ &adp->mask_iova, GFP_KERNEL);
+ /* guard against allocation failure before touching the buffer */
+ if (adp->mask_buf) {
+ memset(adp->mask_buf, 0xFF, new_size);
+ writel(adp->mask_iova, adp->be + ADBE_MASK_BUF);
+ }
+ }
+ adp->mask_buf_size = new_size;
+ }
+ writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
+ //FIXME: use adbe flush interrupt
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_vblank_get(crtc);
+ adp->event = crtc->state->event;
+ }
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static const struct drm_crtc_funcs adp_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = adp_crtc_enable_vblank,
+ .disable_vblank = adp_crtc_disable_vblank,
+};
+
+
+static const struct drm_crtc_helper_funcs adp_crtc_helper_funcs = {
+ .atomic_enable = adp_crtc_atomic_enable,
+ .atomic_disable = adp_crtc_atomic_disable,
+ .atomic_flush = adp_crtc_atomic_flush,
+};
+
+static int adp_setup_crtc(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = adp_plane_new(adp);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drm_crtc_init_with_planes(drm, &adp->crtc, primary,
+ NULL, &adp_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&adp->crtc, &adp_crtc_helper_funcs);
+ return 0;
+}
+
+static const struct drm_mode_config_funcs adp_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int adp_setup_mode_config(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ int ret;
+ u32 size;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ /*
+ * Query the screen size and restrict the frame buffer size to the
+ * screen size aligned to the next multiple of 64. This is not strictly
+ * necessary but can serve as a simple check for non-desktop devices:
+ * Xorg's modesetting driver ignores the connector's "non-desktop"
+ * property, but the max frame buffer width or height is easy to check,
+ * and a device can be rejected if, for example, its max width/height
+ * is smaller than 120.
+ * A touchbar daemon is not limited by this small framebuffer size.
+ */
+ size = readl(adp->fe + ADP_SCREEN_SIZE);
+
+ drm->mode_config.min_width = 32;
+ drm->mode_config.min_height = 32;
+ drm->mode_config.max_width = ALIGN(FIELD_GET(ADP_SCREEN_HSIZE, size), 64);
+ drm->mode_config.max_height = ALIGN(FIELD_GET(ADP_SCREEN_VSIZE, size), 64);
+ drm->mode_config.preferred_depth = 24;
+ drm->mode_config.prefer_shadow = 0;
+ drm->mode_config.funcs = &adp_mode_config_funcs;
+
+ ret = adp_setup_crtc(adp);
+ if (ret) {
+ drm_err(drm, "failed to create crtc");
+ return ret;
+ }
+
+ adp->encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DSI, NULL);
+ if (IS_ERR(adp->encoder)) {
+ drm_err(drm, "failed to init encoder");
+ return PTR_ERR(adp->encoder);
+ }
+ adp->encoder->possible_crtcs = ALL_CRTCS;
+
+ ret = drm_bridge_attach(adp->encoder, adp->next_bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ drm_err(drm, "failed to init bridge chain");
+ return ret;
+ }
+
+ adp->connector = drm_bridge_connector_init(drm, adp->encoder);
+ if (IS_ERR(adp->connector))
+ return PTR_ERR(adp->connector);
+
+ drm_connector_attach_encoder(adp->connector, adp->encoder);
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ drm_err(drm, "failed to initialize vblank");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ return 0;
+}
+
+static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *adp)
+{
+ struct device *dev = &pdev->dev;
+
+ adp->be = devm_platform_ioremap_resource_byname(pdev, "be");
+ if (IS_ERR(adp->be)) {
+ dev_err(dev, "failed to map display backend mmio");
+ return PTR_ERR(adp->be);
+ }
+
+ adp->fe = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(adp->fe)) {
+ dev_err(dev, "failed to map display pipe mmio");
+ return PTR_ERR(adp->fe);
+ }
+
+ adp->be_irq = platform_get_irq_byname(pdev, "be");
+ if (adp->be_irq < 0)
+ return adp->be_irq;
+
+ adp->fe_irq = platform_get_irq_byname(pdev, "fe");
+ if (adp->fe_irq < 0)
+ return adp->fe_irq;
+
+ return 0;
+}
+
+static irqreturn_t adp_fe_irq(int irq, void *arg)
+{
+ struct adp_drv_private *adp = (struct adp_drv_private *)arg;
+ u32 int_status;
+ u32 int_ctl;
+
+ spin_lock(&adp->irq_lock);
+
+ int_status = readl(adp->fe + ADP_INT_STATUS);
+ if (int_status & ADP_INT_STATUS_VBLANK) {
+ drm_crtc_handle_vblank(&adp->crtc);
+ spin_lock(&adp->crtc.dev->event_lock);
+ if (adp->event) {
+ int_ctl = readl(adp->fe + ADP_CTRL);
+ if ((int_ctl & 0xF00) == 0x600) {
+ drm_crtc_send_vblank_event(&adp->crtc, adp->event);
+ adp->event = NULL;
+ drm_crtc_vblank_put(&adp->crtc);
+ }
+ }
+ spin_unlock(&adp->crtc.dev->event_lock);
+ }
+
+ writel(int_status, adp->fe + ADP_INT_STATUS);
+
+ spin_unlock(&adp->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int adp_drm_bind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+ int err;
+
+ adp_disable_vblank(adp);
+ writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+
+ adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
+ if (IS_ERR(adp->next_bridge)) {
+ dev_err(dev, "failed to find next bridge");
+ return PTR_ERR(adp->next_bridge);
+ }
+
+ err = adp_setup_mode_config(adp);
+ if (err < 0)
+ return err;
+
+ err = request_irq(adp->fe_irq, adp_fe_irq, 0, "adp-fe", adp);
+ if (err)
+ return err;
+
+ err = drm_dev_register(&adp->drm, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void adp_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+ free_irq(adp->fe_irq, adp);
+}
+
+static const struct component_master_ops adp_master_ops = {
+ .bind = adp_drm_bind,
+ .unbind = adp_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int adp_probe(struct platform_device *pdev)
+{
+ struct device_node *port;
+ struct component_match *match = NULL;
+ struct adp_drv_private *adp;
+ int err;
+
+ adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
+
+ spin_lock_init(&adp->irq_lock);
+
+ dev_set_drvdata(&pdev->dev, &adp->drm);
+
+ err = adp_parse_of(pdev, adp);
+ if (err < 0)
+ return err;
+
+ port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!port)
+ return -ENODEV;
+
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
+
+ return component_master_add_with_match(&pdev->dev, &adp_master_ops, match);
+}
+
+static void adp_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &adp_master_ops);
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id adp_of_match[] = {
+ { .compatible = "apple,h7-display-pipe", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_of_match);
+
+static struct platform_driver adp_platform_driver = {
+ .driver = {
+ .name = "adp",
+ .of_match_table = adp_of_match,
+ },
+ .probe = adp_probe,
+ .remove = adp_remove,
+};
+
+module_platform_driver(adp_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2b1990ea9639..2a9a41f4e748 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1192,6 +1192,7 @@ struct amdgpu_device {
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
bool debug_exp_resets;
+ bool debug_disable_gpu_ring_reset;
bool enforce_isolation[MAX_XCP];
/* Added this mutex for cleaner shader isolation between GFX and compute processes */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index d11593cd1922..ffd4c64e123c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -392,6 +392,9 @@ static void aca_banks_generate_cper(struct amdgpu_device *adev,
struct aca_bank_node *node;
struct aca_bank *bank;
+ if (!adev->cper.enabled)
+ return;
+
if (!banks || !count) {
dev_warn(adev->dev, "fail to generate cper records\n");
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index b84a3489b116..6f62e5d80ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -76,6 +76,12 @@ struct ras_query_context;
#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define ACA_BANK_ERR_CE_DE_DECODE(bank) \
+ ((ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \
+ ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS])) ? \
+ ACA_ERROR_TYPE_DEFERRED : \
+ ACA_ERROR_TYPE_CE)
+
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index deb0785350e8..4926996f94da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -579,7 +579,7 @@ static int acp_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool acp_is_idle(void *handle)
+static bool acp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b8d4e07d2043..b7f8f2ff143d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -394,6 +394,10 @@ static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
characteristics.max_input_signal;
atif->backlight_caps.ac_level = characteristics.ac_level;
atif->backlight_caps.dc_level = characteristics.dc_level;
+ atif->backlight_caps.data_points = characteristics.number_of_points;
+ memcpy(atif->backlight_caps.luminance_data,
+ characteristics.data_points,
+ sizeof(atif->backlight_caps.luminance_data));
out:
kfree(info);
return err;
@@ -1277,11 +1281,7 @@ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
- caps->caps_valid = atif->backlight_caps.caps_valid;
- caps->min_input_signal = atif->backlight_caps.min_input_signal;
- caps->max_input_signal = atif->backlight_caps.max_input_signal;
- caps->ac_level = atif->backlight_caps.ac_level;
- caps->dc_level = atif->backlight_caps.dc_level;
+ memcpy(caps, &atif->backlight_caps, sizeof(*caps));
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 0312231b703e..4cec3a873995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -555,48 +555,6 @@ out_put:
return r;
}
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src)
-{
- struct amdgpu_device *peer_adev = src;
- struct amdgpu_device *adev = dst;
- int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
-
- if (ret < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, ret);
- ret = 0;
- }
- return (uint8_t)ret;
-}
-
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min)
-{
- struct amdgpu_device *adev = dst, *peer_adev;
- int num_links;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
- return 0;
-
- if (src)
- peer_adev = src;
-
- /* num links returns 0 for indirect peers since indirect route is unknown. */
- num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
- if (num_links < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, num_links);
- num_links = 0;
- }
-
- /* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
- return (num_links * 16 * 25000)/BITS_PER_BYTE;
-}
-
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 236b73e283e8..b6ca41859b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -193,7 +193,7 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
@@ -213,9 +213,8 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
}
static inline
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- return 0;
}
static inline
@@ -255,11 +254,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags, int8_t *xcp_id);
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src);
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 8dfdb18197c4..6e861d08d044 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -193,4 +193,5 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
.hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 9abf29b58ac7..c820418e8ccd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -419,5 +419,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index e2ae714a700f..0c0998477598 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -509,6 +509,17 @@ static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v9_4_3_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ uint32_t reg_offset = get_sdma_rlc_reg_offset(adev, engine, queue);
+ uint32_t status = RREG32(regSDMA_RLC0_CONTEXT_STATUS + reg_offset);
+ uint32_t doorbell_off = RREG32(regSDMA_RLC0_DOORBELL_OFFSET + reg_offset);
+ bool is_active = !!REG_GET_FIELD(status, SDMA_RLC0_CONTEXT_STATUS, SELECTED);
+
+ return is_active ? doorbell_off >> 2 : 0;
+}
+
const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
@@ -543,5 +554,6 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
.clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_4_3_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 62176d607bef..2887b6f3eaa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -1084,6 +1084,12 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1112,5 +1118,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index 9efd2dd4fdd7..db577c2a847a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -65,3 +65,5 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index c718bedda0ca..ac9ad505f9d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -682,5 +682,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v10_set_address_watch,
.clear_address_watch = kgd_gfx_v10_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index a4ba49cb22db..e0e6a6a49d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -800,6 +800,12 @@ static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v11_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -824,5 +830,6 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.set_address_watch = kgd_gfx_v11_set_address_watch,
.clear_address_watch = kgd_gfx_v11_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v11_hqd_reset
+ .hqd_reset = kgd_gfx_v11_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v11_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 0dfe7093bd8a..6f0dc23c901b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -361,6 +361,12 @@ static uint32_t kgd_gfx_v12_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v12_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.init_interrupts = init_interrupts_v12,
.hqd_dump = hqd_dump_v12,
@@ -374,4 +380,5 @@ const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.set_wave_launch_mode = kgd_gfx_v12_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v12_set_address_watch,
.clear_address_watch = kgd_gfx_v12_clear_address_watch,
+ .hqd_sdma_get_doorbell = kgd_gfx_v12_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 441568163e20..84135eb90660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1131,9 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
- if (!amdgpu_gpu_recovery)
- return 0;
-
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
@@ -1182,9 +1179,6 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t low, high, pipe_reset_data = 0;
uint64_t queue_addr = 0;
- if (!amdgpu_gpu_recovery)
- return 0;
-
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
@@ -1229,6 +1223,13 @@ unlock_out:
return queue_addr;
}
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1258,5 +1259,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index b6a91a552aa4..90c8fa13d519 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -111,3 +111,5 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ea3f7ee18923..2ac6d4fa0601 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -370,40 +370,32 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+/**
+ * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
+ * @bo: the BO to remove the eviction fences from.
+ *
+ * This function should only be used on release, when all references to the BO
+ * have already been dropped. We remove the eviction fences from the private
+ * copy of the dma_resv object here since that is what is used during release
+ * to determine whether the BO is idle or not.
+ */
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- struct amdgpu_bo *root = bo;
- struct amdgpu_vm_bo_base *vm_bo;
- struct amdgpu_vm *vm;
- struct amdkfd_process_info *info;
- struct amdgpu_amdkfd_fence *ef;
- int ret;
-
- /* we can always get vm_bo from root PD bo.*/
- while (root->parent)
- root = root->parent;
+ struct dma_resv *resv = &bo->tbo.base._resv;
+ struct dma_fence *fence, *stub;
+ struct dma_resv_iter cursor;
- vm_bo = root->vm_bo;
- if (!vm_bo)
- return 0;
-
- vm = vm_bo->vm;
- if (!vm)
- return 0;
-
- info = vm->process_info;
- if (!info || !info->eviction_fence)
- return 0;
+ dma_resv_assert_held(resv);
- ef = container_of(dma_fence_get(&info->eviction_fence->base),
- struct amdgpu_amdkfd_fence, base);
-
- BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
- dma_resv_unlock(bo->tbo.base.resv);
+ stub = dma_fence_get_stub();
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (!to_amdgpu_amdkfd_fence(fence))
+ continue;
- dma_fence_put(&ef->base);
- return ret;
+ dma_resv_replace_fences(resv, fence->context, stub,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ dma_fence_put(stub);
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
@@ -603,12 +595,6 @@ kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
- int ret;
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- return ret;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 20c474a32852..3f291b30b79f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -57,6 +57,8 @@ void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
enum amdgpu_cper_type type,
enum cper_error_severity sev)
{
+ char record_id[16];
+
hdr->signature[0] = 'C';
hdr->signature[1] = 'P';
hdr->signature[2] = 'E';
@@ -71,7 +73,13 @@ void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
amdgpu_cper_get_timestamp(&hdr->timestamp);
- snprintf(hdr->record_id, 8, "%d", atomic_inc_return(&adev->cper.unique_id));
+ snprintf(record_id, 9, "%d:%X",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0,
+ atomic_inc_return(&adev->cper.unique_id));
+ memcpy(hdr->record_id, record_id, 8);
+
snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
adev->pdev->vendor, adev->pdev->device);
/* pmfw version should be part of creator_id according to CPER spec */
@@ -112,18 +120,15 @@ static int amdgpu_cper_entry_fill_section_desc(struct amdgpu_device *adev,
section_desc->revision_major = CPER_SEC_MAJOR_REV_22;
section_desc->sec_offset = section_offset;
section_desc->sec_length = section_length;
- section_desc->valid_bits.fru_id = 1;
section_desc->valid_bits.fru_text = 1;
section_desc->flag_bits.primary = 1;
section_desc->severity = sev;
section_desc->sec_type = sec_type;
- if (adev->smuio.funcs &&
- adev->smuio.funcs->get_socket_id)
- snprintf(section_desc->fru_text, 20, "OAM%d",
- adev->smuio.funcs->get_socket_id(adev));
- /* TODO: fru_id is 16 bytes in CPER spec, but driver defines it as 20 bytes */
- snprintf(section_desc->fru_id, 16, "%llx", adev->unique_id);
+ snprintf(section_desc->fru_text, 20, "OAM%d",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0);
if (bp_threshold)
section_desc->flag_bits.exceed_err_threshold = 1;
@@ -304,6 +309,7 @@ int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
return ret;
amdgpu_cper_ring_write(ring, fatal, fatal->record_length);
+ kfree(fatal);
return 0;
}
@@ -326,6 +332,7 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
return ret;
amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
+ kfree(bp_threshold);
return 0;
}
@@ -376,7 +383,7 @@ int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
amdgpu_cper_entry_fill_hdr(adev, corrected, AMDGPU_CPER_TYPE_RUNTIME, sev);
- /* Combine CE and UE in cper record */
+ /* Combine CE and DE in cper record */
list_for_each_entry(node, &banks->list, node) {
bank = &node->bank;
reg_data[CPER_ACA_REG_CTL_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CTL]);
@@ -402,6 +409,7 @@ int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
}
amdgpu_cper_ring_write(ring, corrected, corrected->record_length);
+ kfree(corrected);
return 0;
}
@@ -538,15 +546,23 @@ static int amdgpu_cper_ring_init(struct amdgpu_device *adev)
int amdgpu_cper_init(struct amdgpu_device *adev)
{
+ int r;
+
if (!amdgpu_aca_is_enabled(adev))
return 0;
+ r = amdgpu_cper_ring_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to initialize cper ring, r = %d\n", r);
+ return r;
+ }
+
mutex_init(&adev->cper.cper_lock);
adev->cper.enabled = true;
adev->cper.max_count = CPER_MAX_ALLOWED_COUNT;
- return amdgpu_cper_ring_init(adev);
+ return 0;
}
int amdgpu_cper_fini(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 824f9da5b6ce..7b50741dc097 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -364,5 +364,9 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+
+ drm_info(dev, "AMDGPU device coredump file has been created\n");
+ drm_info(dev, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
+ dev->primary->index);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 17e5967bfa60..9e9fec5b52de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1662,6 +1662,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ /* resizing on Dell G5 SE platforms causes problems with runtime pm */
+ if ((amdgpu_runtime_pm != 0) &&
+ adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+ adev->pdev->device == 0x731f &&
+ adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ return 0;
+
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
DRM_WARN("System can't access extended configuration space, please check!!\n");
@@ -3098,7 +3105,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_fru_get_product_info(adev);
- r = amdgpu_cper_init(adev);
+ if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev))
+ r = amdgpu_cper_init(adev);
init_failed:
@@ -4332,10 +4340,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* for throttling interrupt) = 60 seconds.
*/
ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
- ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
- ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -4367,7 +4373,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return -ENOMEM;
/* detect hw virtualization here */
- amdgpu_detect_virtualization(adev);
+ amdgpu_virt_init(adev);
amdgpu_device_get_pcie_info(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1819166cb4cf..ce08c428ba4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -137,6 +137,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
+ AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -2223,6 +2224,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable experimental reset features\n");
adev->debug_exp_resets = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_RING_RESET) {
+ pr_info("debug: ring reset disabled\n");
+ adev->debug_disable_gpu_ring_reset = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2557,7 +2563,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
int r;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s4 = false;
if (r)
return r;
@@ -2569,8 +2574,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_resume(drm_dev, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ adev->in_s4 = false;
+
+ return r;
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2583,6 +2593,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 89109eb2ce16..1ae88c459da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -31,6 +31,7 @@
#define FRU_EEPROM_MADDR_6 0x60000
#define FRU_EEPROM_MADDR_8 0x80000
+#define FRU_EEPROM_MADDR_INV 0xFFFFF
static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
{
@@ -104,6 +105,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_8;
return true;
+ case IP_VERSION(13, 0, 12):
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_INV;
+ return true;
default:
return false;
}
@@ -120,6 +125,10 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (!is_fru_eeprom_supported(adev, &fru_addr))
return 0;
+ /* FRU data available, but no direct EEPROM access */
+ if (fru_addr == FRU_EEPROM_MADDR_INV)
+ return 0;
+
if (!adev->fru_info) {
adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
if (!adev->fru_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index b9bd6654f317..a194bf3347cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1665,24 +1665,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
}
mutex_lock(&adev->enforce_isolation_mutex);
-
for (i = 0; i < num_partitions; i++) {
- if (adev->enforce_isolation[i] && !partition_values[i]) {
+ if (adev->enforce_isolation[i] && !partition_values[i])
/* Going from enabled to disabled */
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
- if (adev->enable_mes && adev->gfx.enable_cleaner_shader)
- amdgpu_mes_set_enforce_isolation(adev, i, false);
- } else if (!adev->enforce_isolation[i] && partition_values[i]) {
+ else if (!adev->enforce_isolation[i] && partition_values[i])
/* Going from disabled to enabled */
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- if (adev->enable_mes && adev->gfx.enable_cleaner_shader)
- amdgpu_mes_set_enforce_isolation(adev, i, true);
- }
adev->enforce_isolation[i] = partition_values[i];
}
-
mutex_unlock(&adev->enforce_isolation_mutex);
+ amdgpu_mes_update_enforce_isolation(adev);
+
return count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index c6e5c50a3322..4eefa17fa39b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -269,7 +269,7 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
* @mc: memory controller structure holding memory information
* @gart_placement: GART placement policy with respect to VRAM
*
- * Function will place try to place GART before or after VRAM.
+ * Function will try to place GART before or after VRAM.
 * If GART size is bigger than the space left then we adjust GART size.
 * Thus the function will never fail.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 459a30fe239f..bd7fc123b8f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "amdgpu_irq.h"
+#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
/* VA hole for 48bit addresses on Vega10 */
@@ -174,28 +175,6 @@ struct amdgpu_gmc_funcs {
bool (*need_reset_on_init)(struct amdgpu_device *adev);
};
-struct amdgpu_xgmi_ras {
- struct amdgpu_ras_block_object ras_block;
-};
-
-struct amdgpu_xgmi {
- /* from psp */
- u64 node_id;
- u64 hive_id;
- /* fixed per family */
- u64 node_segment_size;
- /* physical node (0-3) */
- unsigned physical_node_id;
- /* number of nodes (0-4) */
- unsigned num_physical_nodes;
- /* gpu list in the same hive */
- struct list_head head;
- bool supported;
- struct ras_common_if *ras_if;
- bool connected_to_cpu;
- struct amdgpu_xgmi_ras *ras;
-};
-
struct amdgpu_mem_partition_info {
union {
struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7d4395a5d8ac..b0a88f92cd82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -78,6 +78,9 @@ struct amdgpu_ih_ring {
#define amdgpu_ih_ts_after(t1, t2) \
(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+#define amdgpu_ih_ts_after_or_equal(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
+
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
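
The new amdgpu_ih_ts_after_or_equal() macro uses the same trick as amdgpu_ih_ts_after(): the 48-bit IH timestamps are shifted into the top 48 bits of a 64-bit value so that signed subtraction handles wraparound naturally. A self-contained sketch of the arithmetic (standalone C with made-up timestamp values, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Shift the 48-bit timestamps left by 16 so the signed 64-bit
     * subtraction wraps correctly at the 48-bit boundary.
     */
    #define ih_ts_after(t1, t2) \
            (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
    #define ih_ts_after_or_equal(t1, t2) \
            (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)

    int main(void)
    {
            uint64_t t_old = 0xFFFFFFFFFFFFULL; /* just before the 48-bit wrap */
            uint64_t t_new = 0x5ULL;            /* just after the wrap */

            printf("%d\n", ih_ts_after(t_old, t_new));          /* 1: t_new is later */
            printf("%d\n", ih_ts_after_or_equal(t_old, t_old)); /* 1: equal case */
            return 0;
    }
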
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 732744488b03..43fc941dfa57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -124,7 +124,7 @@ static int isp_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool isp_is_idle(void *handle)
+static bool isp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index b03664c66dd6..4f3b7b5d9c1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -50,6 +50,7 @@ struct amdgpu_isp {
struct mfd_cell *isp_cell;
struct resource *isp_res;
struct resource *isp_i2c_res;
+ struct resource *isp_gpio_res;
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 100f04475943..1d26be3c6d9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -130,29 +130,47 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_vm_put_task_info(ti);
}
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
/* attempt a per ring reset */
- if (amdgpu_gpu_recovery &&
- ring->funcs->reset) {
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "Ring reset disabled by debug mask\n");
+ } else if (amdgpu_gpu_recovery && ring->funcs->reset) {
+ bool is_guilty;
+
dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
/* stop the scheduler, but don't mess with the
* bad job yet because if ring reset fails
* we'll fall back to full GPU reset.
*/
drm_sched_wqueue_stop(&ring->sched);
+
+ /* for engine resets, we need to reset the engine,
+ * but individual queues may be unaffected.
+ * check here to make sure the accounting is correct.
+ */
+ if (ring->funcs->is_guilty)
+ is_guilty = ring->funcs->is_guilty(ring);
+ else
+ is_guilty = true;
+
+ if (is_guilty)
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+
r = amdgpu_ring_reset(ring, job->vmid);
if (!r) {
if (amdgpu_ring_sched_ready(ring))
drm_sched_stop(&ring->sched, s_job);
- atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
+ if (is_guilty) {
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ amdgpu_fence_driver_force_completion(ring);
+ }
if (amdgpu_ring_sched_ready(ring))
drm_sched_start(&ring->sched, 0);
+ dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
goto exit;
}
dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
}
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
@@ -411,8 +429,24 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+/*
+ * This is a duplicate function from DRM scheduler sched_internal.h.
+ * The plan is to remove it when amdgpu_job_stop_all_jobs_on_sched is removed,
+ * due to the latter being incorrect and racy.
+ *
+ * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
+ */
+static struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
@@ -425,7 +459,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
- while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_signal(&s_fence->scheduled);
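
The duplicated helper recovers the enclosing job from its embedded spsc queue node via container_of(). A generic standalone illustration of that intrusive-node pattern (hypothetical types, not the DRM scheduler's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct queue_node {
            struct queue_node *next;
    };

    struct job {
            int id;
            struct queue_node node; /* embedded, like drm_sched_job::queue_node */
    };

    int main(void)
    {
            struct job j = { .id = 42 };
            struct queue_node *popped = &j.node;

            /* Recover the enclosing job from the popped node. */
            struct job *owner = container_of(popped, struct job, node);
            printf("%d\n", owner->id); /* prints 42 */
            return 0;
    }
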
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 05c73bf7541c..27bfe9c8af06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -459,7 +459,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- for (j = 0; j < adev->vcn.num_enc_rings; j++)
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
if (adev->vcn.inst[i].ring_enc[j].sched.ready)
++num_rings;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 32b27a1658e7..709c11cbeabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1681,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
}
/* Fix me -- node_id is used to identify the correct MES instances in the future */
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+ uint32_t node_id, bool enable)
{
struct mes_misc_op_input op_input = {0};
int r;
@@ -1703,6 +1704,23 @@ error:
return r;
}
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i])
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+ else
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ return r;
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 2df2444ee892..68d640aaa2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -56,7 +56,7 @@ enum amdgpu_mes_priority_level {
struct amdgpu_mes_funcs;
-enum admgpu_mes_pipe {
+enum amdgpu_mes_pipe {
AMDGPU_MES_SCHED_PIPE = 0,
AMDGPU_MES_KIQ_PIPE,
AMDGPU_MAX_MES_PIPES = 2,
@@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 96f4b8904e9a..80cd6f5273db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1295,28 +1295,36 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
- /* We only remove the fence if the resv has individualized. */
- WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
- && bo->base.resv != &bo->base._resv);
- if (bo->base.resv == &bo->base._resv)
- amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+ /*
+ * We lock the private dma_resv object here; since the BO is about to be
+ * released, nobody else should have a pointer to it.
+ * So if this locking fails, something is wrong with the reference
+ * counting.
+ */
+ if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
+ return;
+
+ amdgpu_amdkfd_remove_all_eviction_fences(abo);
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
- return;
+ goto out;
- if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
- return;
+ r = dma_resv_reserve_fences(&bo->base._resv, 1);
+ if (r)
+ goto out;
- r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
- if (!WARN_ON(r)) {
- amdgpu_vram_mgr_set_cleared(bo->resource);
- amdgpu_bo_fence(abo, fence, false);
- dma_fence_put(fence);
- }
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+ if (WARN_ON(r))
+ goto out;
+
+ amdgpu_vram_mgr_set_cleared(bo->resource);
+ dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
- dma_resv_unlock(bo->base.resv);
+out:
+ dma_resv_unlock(&bo->base._resv);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 3c3312bbfee8..285e3aa2bb2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2799,20 +2799,100 @@ static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
return -EINVAL;
}
+static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int count)
+{
+ int j;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ for (j = 0; j < count; j++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ return -ENOMEM;
+ }
+
+ amdgpu_ras_reserve_page(adev, bps[j].retired_page);
+
+ memcpy(&data->bps[data->count], &(bps[j]),
+ sizeof(struct eeprom_table_record));
+ data->count++;
+ data->space_left--;
+ }
+
+ return 0;
+}
+
+static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+
+ for (i = 0; i < adev->umc.retire_unit; i++)
+ bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps) {
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ } else {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
+ return -EINVAL;
+ }
+ } else {
+ if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
+ if (nps == AMDGPU_NPS1_PARTITION_MODE)
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ else
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
+}
+
+static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ } else {
+ if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+ return -EINVAL;
+ }
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
+ adev->umc.retire_unit);
+}
+
/* it deals with VRAM only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages, bool from_rom)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data *data;
struct ras_err_data err_data;
- struct eeprom_table_record *err_rec;
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i, j, loop_cnt = 1;
- bool find_pages_per_pa = false;
+ uint32_t i;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2823,114 +2903,46 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record), GFP_KERNEL);
if (!err_data.err_addr) {
dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
- err_rec = err_data.err_addr;
- loop_cnt = adev->umc.retire_unit;
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
}
mutex_lock(&con->recovery_lock);
- data = con->eh_data;
- if (!data) {
- /* Returning 0 as the absence of eh_data is acceptable */
- goto free;
- }
-
- for (i = 0; i < pages; i++) {
- if (from_rom &&
- control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
- if (!find_pages_per_pa) {
- if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
- if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
- /* may use old RAS TA, use PA to find pages in
- * one row
- */
- if (amdgpu_umc_pages_in_a_row(adev, &err_data,
- bps[i].retired_page <<
- AMDGPU_GPU_PAGE_SHIFT)) {
- ret = -EINVAL;
- goto free;
- } else {
- find_pages_per_pa = true;
- }
- } else {
- /* unsupported cases */
- ret = -EOPNOTSUPP;
- goto free;
- }
- }
- } else {
- if (amdgpu_umc_pages_in_a_row(adev, &err_data,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
- ret = -EINVAL;
- goto free;
- }
- }
- } else {
- if (from_rom && !find_pages_per_pa) {
- if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
- /* bad page in any NPS mode in eeprom */
- if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
- ret = -EINVAL;
+
+ if (from_rom) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
/* deal with retire_unit records at a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
goto free;
- }
+ i += (adev->umc.retire_unit - 1);
} else {
- /* legacy bad page in eeprom, generated only in
- * NPS1 mode
- */
- if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
- /* old RAS TA or ASICs which don't support to
- * convert addrss via mca address
- */
- if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
- find_pages_per_pa = true;
- err_rec = &bps[i];
- loop_cnt = 1;
- } else {
- /* non-nps1 mode, old RAS TA
- * can't support it
- */
- ret = -EOPNOTSUPP;
- goto free;
- }
- }
+ break;
}
-
- if (!find_pages_per_pa)
- i += (adev->umc.retire_unit - 1);
} else {
- err_rec = &bps[i];
+ break;
}
}
-
- for (j = 0; j < loop_cnt; j++) {
- if (amdgpu_ras_check_bad_page_unlock(con,
- err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
-
- if (!data->space_left &&
- amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
- ret = -ENOMEM;
+ for (; i < pages; i++) {
+ ret = __amdgpu_ras_convert_rec_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
goto free;
- }
-
- amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
-
- memcpy(&data->bps[data->count], &(err_rec[j]),
- sizeof(struct eeprom_table_record));
- data->count++;
- data->space_left--;
}
+ } else {
+ ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
free:
if (from_rom)
kfree(err_data.err_addr);
-out:
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2969,24 +2981,14 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
/* only new entries are saved */
if (save_count > 0) {
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
+ for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[control->ras_num_recs],
- save_count)) {
+ &data->bps[bad_page_num + i * adev->umc.retire_unit],
+ 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
- } else {
- for (i = 0; i < unit_num; i++) {
- if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num + i * adev->umc.retire_unit],
- 1)) {
- dev_err(adev->dev, "Failed to save EEPROM table data!");
- return -EIO;
- }
- }
}
-
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
}
@@ -3002,7 +3004,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras_context.ras->eeprom_control;
struct eeprom_table_record *bps;
- int ret;
+ int ret, i = 0;
/* no bad page record, skip eeprom access */
if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
@@ -3016,13 +3018,23 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
if (ret) {
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
- if (control->ras_num_recs > 1 &&
- adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
- if ((bps[0].address == bps[1].address) &&
- (bps[0].mem_channel == bps[1].mem_channel))
- control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
- else
- control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
+ } else {
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
+ break;
+ }
+ }
}
ret = amdgpu_ras_eeprom_check(control);
@@ -3428,12 +3440,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
return ret;
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
- control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
-
- /* default status is MCA storage */
- if (control->ras_num_recs <= 1 &&
- adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
- control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ control->ras_num_pa_recs = control->ras_num_recs;
if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
@@ -5131,9 +5138,9 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
"socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
socket_id, aid_id, fw_status);
- if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
+ if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
dev_info(adev->dev,
- "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
+ "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
socket_id, aid_id, fw_status);
}
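
The rewritten load path classifies EEPROM records by looking for runs of retire_unit consecutive entries that share an address and memory channel (physical-address records); anything else is counted as MCA records. A standalone sketch of that classification loop (illustrative record values, retire_unit of 2):

    #include <stdint.h>
    #include <stdio.h>

    struct rec {
            uint64_t address;
            uint32_t mem_channel;
    };

    int main(void)
    {
            struct rec bps[] = {
                    { 0x1000, 0 }, { 0x1000, 0 }, /* one PA group */
                    { 0x2000, 1 },                /* trailing MCA record */
            };
            int num_recs = 3, retire_unit = 2;
            int pa_recs = 0, mca_recs = 0, i;

            for (i = 0; i < num_recs; i++) {
                    if (num_recs - i >= retire_unit &&
                        bps[i].address == bps[i + 1].address &&
                        bps[i].mem_channel == bps[i + 1].mem_channel) {
                            pa_recs += retire_unit;
                            i += retire_unit - 1;
                    } else {
                            mca_recs += num_recs - i;
                            break;
                    }
            }
            printf("pa=%d mca=%d\n", pa_recs, mca_recs); /* pa=2 mca=1 */
            return 0;
    }
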
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index cc4586581dba..764e9fa0a914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -47,7 +47,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
-#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
+#define AMDGPU_RAS_GPU_ERR_GENERIC(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 83b54efcaa87..09a6f8bc1a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -727,11 +727,9 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
- control->ras_fri)
% control->ras_max_record_count;
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
- control->ras_num_bad_pages = control->ras_num_recs;
- else
- control->ras_num_bad_pages =
- control->ras_num_recs * adev->umc.retire_unit;
+ control->ras_num_mca_recs += num;
+ control->ras_num_bad_pages += num * adev->umc.retire_unit;
+
Out:
kfree(buf);
return res;
@@ -749,7 +747,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if it exceeds.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages > ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
@@ -808,7 +806,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
*/
if (amdgpu_bad_page_threshold != 0 &&
control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_bad_pages < ras->bad_page_cnt_threshold)
+ control->ras_num_bad_pages <= ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
@@ -852,6 +850,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res, i;
+ uint64_t nps = AMDGPU_NPS1_PARTITION_MODE;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -865,9 +864,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return -EINVAL;
}
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
/* set the new channel index flag */
for (i = 0; i < num; i++)
- record[i].retired_page |= UMC_CHANNEL_IDX_V2;
+ record[i].retired_page |= (nps << UMC_NPS_SHIFT);
mutex_lock(&control->ras_tbl_mutex);
@@ -881,7 +883,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
/* clear channel index flag, the flag is only saved on eeprom */
for (i = 0; i < num; i++)
- record[i].retired_page &= ~UMC_CHANNEL_IDX_V2;
+ record[i].retired_page &= ~(nps << UMC_NPS_SHIFT);
return res;
}
@@ -1392,6 +1394,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
return 0;
}
@@ -1412,11 +1416,8 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
- control->ras_num_bad_pages = control->ras_num_recs;
- else
- control->ras_num_bad_pages =
- control->ras_num_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = control->ras_num_pa_recs +
+ control->ras_num_mca_recs * adev->umc.retire_unit;
if (hdr->header == RAS_TABLE_HDR_VAL) {
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
@@ -1455,7 +1456,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res);
return -EINVAL;
}
- if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) {
+ if (ras->bad_page_cnt_threshold >= control->ras_num_bad_pages) {
/* This means that, the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
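
With rec_type gone, the bad-page total is now derived from both counters: each PA record retires one page, while each MCA record retires retire_unit pages. A small sketch of the accounting and the health-percent formula from the hunks above (illustrative numbers):

    #include <stdio.h>

    int main(void)
    {
            unsigned int ras_num_pa_recs = 8;  /* one page per PA record */
            unsigned int ras_num_mca_recs = 2; /* retire_unit pages per MCA record */
            unsigned int retire_unit = 16;
            unsigned int threshold = 100;

            unsigned int bad_pages = ras_num_pa_recs +
                                     ras_num_mca_recs * retire_unit;
            printf("bad pages: %u\n", bad_pages); /* 40 */

            if (bad_pages <= threshold)
                    printf("health: %u%%\n",
                           (threshold - bad_pages) * 100 / threshold); /* 60% */
            return 0;
    }
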
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 81d55cb7b397..13f7eda9a696 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -43,19 +43,6 @@ enum amdgpu_ras_eeprom_err_type {
AMDGPU_RAS_EEPROM_ERR_COUNT,
};
-/*
- * one UMC MCA address could map to multiply physical address (PA),
- * such as 1:16, we use eeprom_table_record.address to store MCA
- * address and use eeprom_table_record.retired_page to save PA.
- *
- * AMDGPU_RAS_EEPROM_REC_PA: one record store one PA
- * AMDGPU_RAS_EEPROM_REC_MCA: one record store one MCA address
- */
-enum amdgpu_ras_eeprom_rec_type {
- AMDGPU_RAS_EEPROM_REC_PA,
- AMDGPU_RAS_EEPROM_REC_MCA,
-};
-
struct amdgpu_ras_eeprom_table_header {
uint32_t header;
uint32_t version;
@@ -100,6 +87,12 @@ struct amdgpu_ras_eeprom_control {
*/
u32 ras_num_bad_pages;
+ /* Number of records that store an MCA address */
+ u32 ras_num_mca_recs;
+
+ /* Number of records that store a physical address */
+ u32 ras_num_pa_recs;
+
/* First record index to read, 0-based.
* Range is [0, num_recs-1]. This is
* an absolute index, starting right after
@@ -120,7 +113,6 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
- enum amdgpu_ras_eeprom_rec_type rec_type;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index f53887e2f528..d55c8b7fdb59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -349,6 +349,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
0xffffffffffffffff : ring->buf_mask;
+ /* Initialize cached_rptr to 0 */
+ ring->cached_rptr = 0;
/* Allocate ring buffer */
if (ring->is_mes_queue) {
@@ -576,12 +578,32 @@ out:
return result;
}
+static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ amdgpu_virt_req_ras_cper_dump(ring->adev, false);
+
+ return amdgpu_debugfs_ring_read(f, buf, size, pos);
+}
+
static const struct file_operations amdgpu_debugfs_ring_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_ring_read,
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_virt_ring_read,
+ .llseek = default_llseek
+};
+
static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -669,9 +691,14 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
- debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
- &amdgpu_debugfs_ring_fops,
- ring->ring_size + 12);
+ if (amdgpu_sriov_vf(adev))
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_virt_ring_fops,
+ ring->ring_size + 12);
+ else
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_ring_fops,
+ ring->ring_size + 12);
if (ring->mqd_obj) {
sprintf(name, "amdgpu_mqd_%s", ring->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7372e4aed6b0..b4fd1e17205e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -37,7 +37,7 @@ struct amdgpu_job;
struct amdgpu_vm;
/* max number of rings */
-#define AMDGPU_MAX_RINGS 132
+#define AMDGPU_MAX_RINGS 133
#define AMDGPU_MAX_HWIP_RINGS 64
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_SW_GFX_RINGS 2
@@ -238,6 +238,7 @@ struct amdgpu_ring_funcs {
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
+ bool (*is_guilty)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -307,6 +308,8 @@ struct amdgpu_ring {
bool is_sw_ring;
unsigned int entry_index;
+ /* store the cached rptr to restore after reset */
+ uint64_t cached_rptr;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 174badca27e7..3a4cef896018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -355,23 +356,44 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- u32 i;
+ u64 i, num_ring;
u64 mask = 0;
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring, *page = NULL;
if (!adev)
return -ENODEV;
- mask = BIT_ULL(adev->sdma.num_instances) - 1;
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
+ /* Calculate the maximum possible mask value
+ * based on the number of SDMA instances and rings
+ */
+ mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;
+
if ((val & mask) == 0)
return -EINVAL;
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
- if (val & BIT_ULL(i))
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+ if (val & BIT_ULL(i * num_ring))
ring->sched.ready = true;
else
ring->sched.ready = false;
+
+ if (page) {
+ if (val & BIT_ULL(i * num_ring + 1))
+ page->sched.ready = true;
+ else
+ page->sched.ready = false;
+ }
}
/* publish sched.ready flag update effective immediately across smp */
smp_rmb();
@@ -381,16 +403,37 @@ static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- u32 i;
+ u64 i, num_ring;
u64 mask = 0;
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring, *page = NULL;
if (!adev)
return -ENODEV;
+
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+
if (ring->sched.ready)
- mask |= 1 << i;
+ mask |= BIT_ULL(i * num_ring);
+ else
+ mask &= ~BIT_ULL(i * num_ring);
+
+ if (page) {
+ if (page->sched.ready)
+ mask |= BIT_ULL(i * num_ring + 1);
+ else
+ mask &= ~BIT_ULL(i * num_ring + 1);
+ }
}
*val = mask;
@@ -460,3 +503,123 @@ void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
}
}
+
+/**
+ * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
+ * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
+ *
+ * This function allows KFD and AMDGPU to register their own callbacks for handling
+ * pre-reset and post-reset operations for engine reset. These are needed because engine
+ * reset will stop all queues on that engine.
+ */
+void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
+{
+ if (!funcs)
+ return;
+
+ /* Ensure the reset_callback_list is initialized */
+ if (!adev->sdma.reset_callback_list.next) {
+ INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
+ }
+ /* Initialize the list node in the callback structure */
+ INIT_LIST_HEAD(&funcs->list);
+
+ /* Add the callback structure to the global list */
+ list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
+}
+
+/**
+ * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: ID of the SDMA engine instance to reset
+ * @suspend_user_queues: whether to suspend user queues during the reset
+ *
+ * This function performs the following steps:
+ * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
+ * 2. Resets the specified SDMA engine instance.
+ * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, bool suspend_user_queues)
+{
+ struct sdma_on_reset_funcs *funcs;
+ int ret = 0;
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
+ struct amdgpu_ring *page_ring = &sdma_instance->page;
+ bool gfx_sched_stopped = false, page_sched_stopped = false;
+
+ /* Suspend KFD if suspend_user_queues is true. This prevents the destruction
+ * of in-flight healthy user queue packets and avoids race conditions
+ * between KFD and KGD during the reset process.
+ */
+ if (suspend_user_queues)
+ amdgpu_amdkfd_suspend(adev, false);
+
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ if (!amdgpu_ring_sched_ready(gfx_ring)) {
+ drm_sched_wqueue_stop(&gfx_ring->sched);
+ gfx_sched_stopped = true;
+ }
+
+ if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
+ drm_sched_wqueue_stop(&page_ring->sched);
+ page_sched_stopped = true;
+ }
+
+ /* Invoke all registered pre_reset callbacks */
+ list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
+ if (funcs->pre_reset) {
+ ret = funcs->pre_reset(adev, instance_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "beforeReset callback failed for instance %u: %d\n",
+ instance_id, ret);
+ goto exit;
+ }
+ }
+ }
+
+ /* Perform the SDMA reset for the specified instance */
+ ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ if (ret) {
+ dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
+ goto exit;
+ }
+
+ /* Invoke all registered post_reset callbacks */
+ list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
+ if (funcs->post_reset) {
+ ret = funcs->post_reset(adev, instance_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "afterReset callback failed for instance %u: %d\n",
+ instance_id, ret);
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (ret) {
+ if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
+ drm_sched_wqueue_start(&gfx_ring->sched);
+ }
+ if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
+ }
+
+ if (suspend_user_queues)
+ amdgpu_amdkfd_resume(adev, false);
+
+ return ret;
+}
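
Each SDMA instance now consumes num_ring consecutive bits in the debugfs scheduler mask: the gfx ring at bit i * num_ring, and the page ring (when present) at bit i * num_ring + 1. A standalone sketch of how such a mask is laid out for two instances with page queues:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int num_instances = 2;
            int has_page_queue = 1;
            int num_ring = has_page_queue ? 2 : 1;
            uint64_t mask = 0;
            int i;

            for (i = 0; i < num_instances; i++) {
                    mask |= 1ULL << (i * num_ring);             /* sdma gfx ring */
                    if (has_page_queue)
                            mask |= 1ULL << (i * num_ring + 1); /* page ring */
            }
            printf("mask = 0x%llx\n", (unsigned long long)mask); /* 0xf */
            return 0;
    }
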
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 5f60736051d1..965169320065 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -98,6 +98,13 @@ struct amdgpu_sdma_ras {
struct amdgpu_ras_block_object ras_block;
};
+struct sdma_on_reset_funcs {
+ int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
+ int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
+ /* Linked list node to store this structure in a list. */
+ struct list_head list;
+};
+
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
@@ -118,6 +125,10 @@ struct amdgpu_sdma {
struct amdgpu_sdma_ras *ras;
uint32_t *ip_dump;
uint32_t supported_reset;
+ struct list_head reset_callback_list;
+ /* track guilty state of GFX and PAGE queues */
+ bool gfx_guilty;
+ bool page_guilty;
};
/*
@@ -157,6 +168,9 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
+void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs);
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, bool suspend_user_queues);
+
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
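
A hedged sketch of how a client such as KFD might hook into the new reset interface; the my_* names are hypothetical, only the struct and the registration call come from this patch:

    /* Hypothetical client of the SDMA reset callback interface. */
    static int my_pre_reset(struct amdgpu_device *adev, uint32_t instance_id)
    {
            /* Save per-queue state for this SDMA instance before the reset. */
            return 0;
    }

    static int my_post_reset(struct amdgpu_device *adev, uint32_t instance_id)
    {
            /* Restore queues on this SDMA instance after the reset. */
            return 0;
    }

    static struct sdma_on_reset_funcs my_reset_funcs = {
            .pre_reset  = my_pre_reset,
            .post_reset = my_post_reset,
    };

    /* During init:
     *   amdgpu_sdma_register_on_reset_callbacks(adev, &my_reset_funcs);
     */
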
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bcb4bcc4ab75..53b71e9d8076 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2295,7 +2295,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
- int r;
+ int r = 0;
if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index a4a7e61817aa..857693bcd8d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -71,6 +71,13 @@
*/
#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
+/*
+ * save the nps value to eeprom_table_record.retired_page[47:40];
+ * the channel index flag above will be retired.
+ */
+#define UMC_NPS_SHIFT 40
+#define UMC_NPS_MASK 0xffULL
+
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
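
The NPS mode is stashed in bits [47:40] of retired_page before the record is written to EEPROM and stripped again on read, replacing the single-bit UMC_CHANNEL_IDX_V2 flag. A standalone sketch of the pack/unpack round trip (illustrative values):

    #include <stdint.h>
    #include <stdio.h>

    #define UMC_NPS_SHIFT 40
    #define UMC_NPS_MASK  0xffULL

    int main(void)
    {
            uint64_t retired_page = 0x12345;
            uint64_t nps = 4; /* illustrative partition-mode value */

            /* Pack the NPS value into bits [47:40] before saving. */
            retired_page |= nps << UMC_NPS_SHIFT;

            /* Unpack and strip on load, as the RAS code above does. */
            uint64_t saved_nps = (retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
            retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);

            printf("nps=%llu page=0x%llx\n",
                   (unsigned long long)saved_nps,
                   (unsigned long long)retired_page); /* nps=4 page=0x12345 */
            return 0;
    }
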
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 83faf6e6788a..8d8b39e6d197 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -93,47 +93,53 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
-int amdgpu_vcn_early_init(struct amdgpu_device *adev)
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
char ucode_prefix[25];
- int r, i;
+ int r;
+ adev->vcn.inst[i].adev = adev;
+ adev->vcn.inst[i].inst = i;
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
- r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_%d.bin", ucode_prefix, i);
- else
- r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+
+ if (i != 0 && adev->vcn.per_inst_fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ if (!adev->vcn.inst[0].fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
- if (r) {
- amdgpu_ucode_release(&adev->vcn.inst[i].fw);
- return r;
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ } else {
+ r = 0;
}
+ adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
}
+
return r;
}
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
unsigned long bo_size;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
- int i, r;
-
- INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
- mutex_init(&adev->vcn.vcn_pg_lock);
- mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
- atomic_set(&adev->vcn.total_submission_cnt, 0);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++)
- atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
+ int r;
+ mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
+ mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
+ INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
+ adev->vcn.inst[i].indirect_sram = true;
/*
* Some Steam Deck's BIOS versions are incompatible with the
@@ -146,18 +152,19 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
- !strncmp("F7A0114", bios_ver, 7))) {
- adev->vcn.indirect_sram = false;
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.inst[i].indirect_sram = false;
dev_info(adev->dev,
- "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
}
}
/* from vcn4 and above, only unified queue is used */
- adev->vcn.using_unified_queue =
+ adev->vcn.inst[i].using_unified_queue =
amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
- hdr = (const struct common_firmware_header *)adev->vcn.inst[0].fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -175,16 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
- DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ dev_info(adev->dev,
+ "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
+ version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -207,80 +215,77 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (amdgpu_vcnfw_log)
bo_size += AMDGPU_VCNFW_LOG_SIZE;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ return r;
+ }
+
+ adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
+ bo_size - fw_shared_size;
+ adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
+ bo_size - fw_shared_size;
+
+ adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
+
+ if (amdgpu_vcnfw_log) {
+ adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.log_offset = log_offset;
+ }
- r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ if (adev->vcn.inst[i].indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].vcpu_bo,
- &adev->vcn.inst[i].gpu_addr,
- &adev->vcn.inst[i].cpu_addr);
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
}
-
- adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
- bo_size - fw_shared_size;
- adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
- bo_size - fw_shared_size;
-
- adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
-
- if (amdgpu_vcnfw_log) {
- adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.log_offset = log_offset;
- }
-
- if (adev->vcn.indirect_sram) {
- r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].dpg_sram_bo,
- &adev->vcn.inst[i].dpg_sram_gpu_addr,
- &adev->vcn.inst[i].dpg_sram_cpu_addr);
- if (r) {
- dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
- return r;
- }
- }
}
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
- int i, j;
+ int j;
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- amdgpu_bo_free_kernel(
- &adev->vcn.inst[j].dpg_sram_bo,
- &adev->vcn.inst[j].dpg_sram_gpu_addr,
- (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+ amdgpu_bo_free_kernel(
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);
- kvfree(adev->vcn.inst[j].saved_bo);
+ kvfree(adev->vcn.inst[i].saved_bo);
- amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
- &adev->vcn.inst[j].gpu_addr,
- (void **)&adev->vcn.inst[j].cpu_addr);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ (void **)&adev->vcn.inst[i].cpu_addr);
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
- amdgpu_ucode_release(&adev->vcn.inst[j].fw);
+ if (adev->vcn.per_inst_fw) {
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ adev->vcn.inst[i].fw = NULL;
}
-
- mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
- mutex_destroy(&adev->vcn.vcn_pg_lock);
+ mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
return 0;
}
@@ -300,179 +305,190 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t
return ret;
}
-int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return 0;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return 0;
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
- if (!adev->vcn.inst[i].saved_bo)
- return -ENOMEM;
+ adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.inst[i].saved_bo)
+ return -ENOMEM;
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
- drm_dev_exit(idx);
- }
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ drm_dev_exit(idx);
}
return 0;
}
-int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+{
+ int ret, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
{
bool in_ras_intr = amdgpu_ras_intr_triggered();
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+
+ cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
/* err_event_athub will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr)
return 0;
- return amdgpu_vcn_save_vcpu_bo(adev);
+ return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
}
-int amdgpu_vcn_resume(struct amdgpu_device *adev)
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return -EINVAL;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
+
+ if (adev->vcn.inst[i].saved_bo != NULL) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
+ kvfree(adev->vcn.inst[i].saved_bo);
+ adev->vcn.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned int offset;
- if (adev->vcn.inst[i].saved_bo != NULL) {
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.inst[i].fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
- kvfree(adev->vcn.inst[i].saved_bo);
- adev->vcn.inst[i].saved_bo = NULL;
- } else {
- const struct common_firmware_header *hdr;
- unsigned int offset;
-
- hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr,
- adev->vcn.inst[i].fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- drm_dev_exit(idx);
- }
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
- }
- memset_io(ptr, 0, size);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
+ memset_io(ptr, 0, size);
}
+
return 0;
}
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
- unsigned int i, j;
+ unsigned int i = vcn_inst->inst, j;
int r = 0;
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
-
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
- struct dpg_pause_state new_state;
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
- if (fence[j] ||
- unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !adev->vcn.inst[i].using_unified_queue) {
+ struct dpg_pause_state new_state;
- adev->vcn.pause_dpg_mode(adev, j, &new_state);
- }
+ if (fence[i] ||
+ unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
- fences += fence[j];
+ adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
}
- if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_GATE);
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
+ fences += fence[i];
+
+ if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
+ false);
if (r)
dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
} else {
- schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
}
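
With the idle worker now per instance, each amdgpu_vcn_inst must carry its own delayed work and bookkeeping for the container_of() above to resolve. A minimal sketch of the per-instance setup this handler relies on, assuming it lives in the (not shown) amdgpu_vcn_sw_init(adev, i) path:

```c
/* Sketch: per-instance state init, assumed to happen in
 * amdgpu_vcn_sw_init(adev, i); the exact call site is not in this hunk. */
adev->vcn.inst[i].adev = adev;
adev->vcn.inst[i].inst = i;
atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work,
		  amdgpu_vcn_idle_work_handler);
```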
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
int r = 0;
- atomic_inc(&adev->vcn.total_submission_cnt);
+ atomic_inc(&vcn_inst->total_submission_cnt);
- if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
+ if (!cancel_delayed_work_sync(&vcn_inst->idle_work)) {
r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
true);
if (r)
dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
}
- mutex_lock(&adev->vcn.vcn_pg_lock);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
+ !vcn_inst->using_unified_queue) {
struct dpg_pause_state new_state;
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
- atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+ atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
} else {
unsigned int fences = 0;
unsigned int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+ for (i = 0; i < vcn_inst->num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);
- if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+ if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
}
- adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
+ vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
- mutex_unlock(&adev->vcn.vcn_pg_lock);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -482,12 +498,13 @@ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
- !adev->vcn.using_unified_queue)
+ !adev->vcn.inst[ring->me].using_unified_queue)
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
- atomic_dec(&ring->adev->vcn.total_submission_cnt);
+ atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
- schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
+ VCN_IDLE_TIMEOUT);
}
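
Submitters do not call begin_use/end_use directly; they are installed as ring hooks and invoked by the core ring code around command submission. A hedged sketch of that wiring (the funcs struct name is illustrative, and the other callbacks are elided):

```c
/* Sketch: VCN ring funcs install these as hooks; the core ring code
 * calls them around submission. Struct name is illustrative. */
static const struct amdgpu_ring_funcs vcn_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	/* ... emit/test callbacks elided ... */
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
};
```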
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
@@ -505,7 +522,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -570,14 +587,14 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
+ ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
ib->ptr[1] = addr;
- ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
+ ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
ib->ptr[3] = addr >> 32;
- ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
+ ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
ib->ptr[5] = 0;
for (i = 6; i < 16; i += 2) {
- ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
+ ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
ib->ptr[i+1] = 0;
}
ib->length_dw = 16;
@@ -740,7 +757,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
uint32_t ib_pack_in_dw;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -753,7 +770,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
ib->length_dw = 0;
/* single queue headers */
- if (adev->vcn.using_unified_queue) {
+ if (adev->vcn.inst[ring->me].using_unified_queue) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -772,7 +789,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -870,7 +887,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -884,7 +901,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -906,7 +923,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -937,7 +954,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -951,7 +968,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -973,7 +990,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -1058,36 +1075,32 @@ enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
}
}
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
- int i;
unsigned int idx;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
- /* currently only support 2 FW instances */
- if (i >= 2) {
- dev_info(adev->dev, "More then 2 VCN FW instances!\n");
- break;
- }
- idx = AMDGPU_UCODE_ID_VCN + i;
- adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(4, 0, 3) ||
- amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(5, 0, 1))
- break;
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1)) &&
+ (i > 0))
+ return;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ /* currently only support 2 FW instances */
+ if (i >= 2) {
+ dev_info(adev->dev, "More then 2 VCN FW instances!\n");
+ return;
}
+ idx = AMDGPU_UCODE_ID_VCN + i;
+ adev->firmware.ucode[idx].ucode_id = idx;
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
}
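
Since amdgpu_vcn_setup_ucode() now handles a single instance, callers are expected to iterate themselves. A hedged sketch of the calling pattern (the actual call sites live in the per-generation VCN code, outside this diff):

```c
/* Sketch: per-instance call pattern; real call sites are in the
 * generation-specific vcn_vX_0.c files, not shown here. */
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
	amdgpu_vcn_setup_ucode(adev, i);
```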
@@ -1390,10 +1403,33 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
struct dentry *root = minor->debugfs_root;
char name[32];
- if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.using_unified_queue)
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
return;
sprintf(name, "amdgpu_vcn_sched_mask");
debugfs_create_file(name, 0600, root, adev,
&amdgpu_debugfs_vcn_sched_mask_fops);
#endif
}
+
+/**
+ * vcn_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ ret |= vinst->set_pg_state(vinst, state);
+ }
+
+ return ret;
+}
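
The new helper fans the request out through each instance's set_pg_state callback, so every generation driver must install one. A sketch under the assumption that the callback tracks cur_state per instance (the function name is hypothetical):

```c
/* Sketch: a hypothetical per-generation callback; only the cur_state
 * bookkeeping is implied by the common code above. */
static int vcn_vX_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state)
{
	if (state == vinst->cur_state)
		return 0;
	/* program power gating for vinst->inst here */
	vinst->cur_state = state;
	return 0;
}
```

During init, each instance would then get `adev->vcn.inst[i].set_pg_state = vcn_vX_0_set_pg_state;`.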
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c92f683ee595..26c9c2d90f45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -295,6 +295,8 @@ struct amdgpu_vcn_fw_shared {
};
struct amdgpu_vcn_inst {
+ struct amdgpu_device *adev;
+ int inst;
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
@@ -316,6 +318,20 @@ struct amdgpu_vcn_inst {
const struct firmware *fw; /* VCN firmware */
uint8_t vcn_config;
uint32_t vcn_codec_disable_mask;
+ atomic_t total_submission_cnt;
+ struct mutex vcn_pg_lock;
+ enum amd_powergating_state cur_state;
+ struct delayed_work idle_work;
+ unsigned fw_version;
+ unsigned num_enc_rings;
+ bool indirect_sram;
+ struct amdgpu_vcn_reg internal;
+ struct mutex vcn1_jpeg1_workaround;
+ int (*pause_dpg_mode)(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
+ int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+ bool using_unified_queue;
};
struct amdgpu_vcn_ras {
@@ -323,35 +339,25 @@ struct amdgpu_vcn_ras {
};
struct amdgpu_vcn {
- unsigned fw_version;
- struct delayed_work idle_work;
- unsigned num_enc_rings;
- enum amd_powergating_state cur_state;
- bool indirect_sram;
-
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- struct amdgpu_vcn_reg internal;
- struct mutex vcn_pg_lock;
- struct mutex vcn1_jpeg1_workaround;
- atomic_t total_submission_cnt;
unsigned harvest_config;
- int (*pause_dpg_mode)(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
struct ras_common_if *ras_if;
struct amdgpu_vcn_ras *ras;
uint16_t inst_mask;
uint8_t num_inst_per_aid;
- bool using_unified_queue;
/* IP reg dump */
uint32_t *ip_dump;
uint32_t supported_reset;
uint32_t caps;
+
+ bool per_inst_fw;
+ unsigned fw_version;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -500,11 +506,11 @@ enum vcn_ring_type {
VCN_UNIFIED_RING,
};
-int amdgpu_vcn_early_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
-int amdgpu_vcn_suspend(struct amdgpu_device *adev);
-int amdgpu_vcn_resume(struct amdgpu_device *adev);
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);
@@ -522,7 +528,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i);
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
@@ -542,4 +548,7 @@ int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e6f0152e5b08..ab7e73d0e7b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -739,7 +739,7 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
}
}
-void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -775,8 +775,17 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+ return reg;
+}
+
+static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
+{
+ bool is_sriov = false;
+
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
+ is_sriov = true;
+
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_FIJI:
@@ -805,10 +814,39 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
amdgpu_virt_request_init_data(adev);
break;
default: /* other chip doesn't support SRIOV */
+ is_sriov = false;
DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
+
+ return is_sriov;
+}
+
+static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
+{
+ ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+
+ ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
+ mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+
+ adev->virt.ras.cper_rptr = 0;
+}
+
+void amdgpu_virt_init(struct amdgpu_device *adev)
+{
+ bool is_sriov = false;
+ uint32_t reg = amdgpu_virt_init_detect_asic(adev);
+
+ is_sriov = amdgpu_virt_init_req_data(adev, reg);
+
+ if (is_sriov)
+ amdgpu_virt_init_ras(adev);
}
static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
@@ -1288,10 +1326,12 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo
* will ignore incoming guest messages. Ratelimit the guest messages to
* prevent guest self DOS.
*/
- if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) {
+ if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
if (!virt->ops->req_ras_err_count(adev))
amdgpu_virt_cache_host_error_counts(adev,
- adev->virt.fw_reserve.ras_telemetry);
+ virt->fw_reserve.ras_telemetry);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
}
return 0;
@@ -1322,6 +1362,98 @@ int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_bl
return 0;
}
+static int
+amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ u32 *more)
+{
+ struct amd_sriov_ras_cper_dump *cper_dump = NULL;
+ struct cper_hdr *entry = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t checksum, used_size, i;
+ int ret = 0;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
+ if (!cper_dump)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0))
+ goto out;
+
+ *more = cper_dump->more;
+
+ if (cper_dump->wptr < adev->virt.ras.cper_rptr) {
+ dev_warn(
+ adev->dev,
+ "guest specified rptr that was too high! guest rptr: 0x%llx, host rptr: 0x%llx\n",
+ adev->virt.ras.cper_rptr, cper_dump->wptr);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+ goto out;
+ }
+
+ entry = (struct cper_hdr *)&cper_dump->buf[0];
+
+ for (i = 0; i < cper_dump->count; i++) {
+ amdgpu_cper_ring_write(ring, entry, entry->record_length);
+ entry = (struct cper_hdr *)((char *)entry +
+ entry->record_length);
+ }
+
+ if (cper_dump->overflow_count)
+ dev_warn(adev->dev,
+ "host reported CPER overflow of 0x%llx entries!\n",
+ cper_dump->overflow_count);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+out:
+ kfree(cper_dump);
+
+ return ret;
+}
+
+static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+ uint32_t more = 0;
+
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
+ do {
+ if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr))
+ ret = amdgpu_virt_write_cpers_to_ring(
+ adev, virt->fw_reserve.ras_telemetry, &more);
+ else
+ ret = 0;
+ } while (more);
+
+ return ret;
+}
+
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+
+ if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ ret = amdgpu_virt_req_ras_cper_dump_internal(adev);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ return ret;
+}
+
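A hedged usage sketch: a non-forced poll lets the embedded rate limiter bound guest-to-host traffic (the polling site is an assumption, not part of this diff):

```c
/* Sketch: opportunistic CPER poll from a VF; rate-limited internally. */
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_ras_cper_en(adev))
	amdgpu_virt_req_ras_cper_dump(adev, false);
```
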
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
{
unsigned long ue_count, ce_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 0f3ccae5c1ab..9f65487e60f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -96,6 +96,7 @@ struct amdgpu_virt_ops {
enum amdgpu_ras_block block);
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
int (*req_ras_err_count)(struct amdgpu_device *adev);
+ int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
};
/*
@@ -140,6 +141,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
+ AMDGIM_FEATURE_RAS_CPER = (1 << 11),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -242,6 +244,13 @@ struct amdgpu_virt_ras_err_handler_data {
int last_reserved;
};
+struct amdgpu_virt_ras {
+ struct ratelimit_state ras_error_cnt_rs;
+ struct ratelimit_state ras_cper_dump_rs;
+ struct mutex ras_telemetry_mutex;
+ uint64_t cper_rptr;
+};
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -284,8 +293,7 @@ struct amdgpu_virt {
union amd_sriov_ras_caps ras_en_caps;
union amd_sriov_ras_caps ras_telemetry_en_caps;
-
- struct ratelimit_state ras_telemetry_rs;
+ struct amdgpu_virt_ras ras;
struct amd_sriov_ras_telemetry_error_count count_cache;
};
@@ -340,6 +348,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))
+#define amdgpu_sriov_ras_cper_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -378,7 +389,7 @@ void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
-void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+void amdgpu_virt_init(struct amdgpu_device *adev);
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
@@ -406,6 +417,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
struct ras_err_data *err_data);
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update);
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 03308261f894..fc6d02d0f047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -627,7 +627,7 @@ static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}
-static bool amdgpu_vkms_is_idle(void *handle)
+static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index c98b6b35cfdf..7fdf30f1161c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -315,6 +315,7 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num);
break;
default:
@@ -818,28 +819,69 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
* num_hops[2:0] = number of hops
*/
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
+ struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
uint8_t num_hops_mask = 0x7;
int i;
+ if (!adev->gmc.xgmi.supported)
+ return 0;
+
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
return top->nodes[i].num_hops & num_hops_mask;
- return -EINVAL;
+
+ dev_err(adev->dev, "Failed to get xgmi hops count for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+
+ return 0;
}
-int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw)
{
- struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
- int i;
+ bool peer_mode = bw_mode == AMDGPU_XGMI_BW_MODE_PER_PEER;
+ int unit_scale = bw_unit == AMDGPU_XGMI_BW_UNIT_MBYTES ? 1000 : 1;
+ int speed = 25, num_lanes = 16, num_links = !peer_mode ? 1 : -1;
- for (i = 0 ; i < top->num_nodes; ++i)
- if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
- return top->nodes[i].num_links;
- return -EINVAL;
+ if (!(min_bw && max_bw))
+ return -EINVAL;
+
+ *min_bw = 0;
+ *max_bw = 0;
+
+ if (!adev->gmc.xgmi.supported)
+ return -ENODATA;
+
+ if (peer_mode && !peer_adev)
+ return -EINVAL;
+
+ if (peer_mode) {
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0 ; i < top->num_nodes; ++i) {
+ if (top->nodes[i].node_id != peer_adev->gmc.xgmi.node_id)
+ continue;
+
+ num_links = top->nodes[i].num_links;
+ break;
+ }
+ }
+
+ if (num_links == -1) {
+ dev_err(adev->dev, "Failed to get number of xgmi links for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+ } else if (num_links) {
+ int per_link_bw = (speed * num_lanes * unit_scale) / BITS_PER_BYTE;
+
+ *min_bw = per_link_bw;
+ *max_bw = num_links * per_link_bw;
+ }
+
+ return 0;
}
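
A hedged usage sketch of the new query, replacing the old amdgpu_xgmi_get_num_links() pattern (the surrounding variables are illustrative):

```c
/* Sketch: query the aggregate bandwidth range to a peer in MB/s. */
uint32_t min_bw, max_bw;

if (!amdgpu_xgmi_get_bandwidth(adev, peer_adev,
			       AMDGPU_XGMI_BW_MODE_PER_PEER,
			       AMDGPU_XGMI_BW_UNIT_MBYTES,
			       &min_bw, &max_bw))
	dev_info(adev->dev, "xgmi bw to peer: %u..%u MB/s\n",
		 min_bw, max_bw);
```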
bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
@@ -1128,8 +1170,8 @@ static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_ban
break;
case ACA_SMU_TYPE_CE:
count = ext_error_code == 6 ? count : 0ULL;
- bank->aca_err_type = ACA_ERROR_TYPE_CE;
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count);
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count);
break;
default:
return -EINVAL;
@@ -1164,6 +1206,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
&xgmi_v6_4_0_aca_info, NULL);
if (r)
@@ -1223,6 +1266,7 @@ static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
pcs_clear_status(adev,
xgmi3x16_pcs_err_status_reg_v6_4[i]);
@@ -1257,6 +1301,7 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_v6_4_0_reset_ras_error_count(adev);
break;
default:
@@ -1282,7 +1327,9 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
IP_VERSION(6, 1, 0) ||
amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
- IP_VERSION(6, 4, 0)) {
+ IP_VERSION(6, 4, 0) ||
+ amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
+ IP_VERSION(6, 4, 1)) {
pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
} else {
@@ -1390,6 +1437,7 @@ static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
@@ -1486,6 +1534,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
{
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
break;
default:
@@ -1673,3 +1722,34 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
return r;
}
+
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (amdgpu_use_xgmi_p2p && adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
+
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ default:
+ break;
+ }
+}
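
The header below also declares amdgpu_xgmi_get_max_bandwidth(), whose body is not part of this diff. A plausible implementation consistent with the max_speed/max_width fields set here would be (an assumption, not the committed code):

```c
/* Assumption: plausible helper derived from the fields above;
 * GT/s * lane count / 8 gives GB/s for a single link. */
uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.supported)
		return 0;

	return adev->gmc.xgmi.max_speed * adev->gmc.xgmi.max_width / 8;
}
```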
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index d1282b4c6348..32dabba4062f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -23,9 +23,14 @@
#define __AMDGPU_XGMI_H__
#include <drm/task_barrier.h>
-#include "amdgpu_psp.h"
#include "amdgpu_ras.h"
+enum amdgpu_xgmi_link_speed {
+ XGMI_SPEED_16GT = 16,
+ XGMI_SPEED_25GT = 25,
+ XGMI_SPEED_32GT = 32
+};
+
struct amdgpu_hive_info {
struct kobject kobj;
uint64_t hive_id;
@@ -55,29 +60,63 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
-extern struct amdgpu_xgmi_ras xgmi_ras;
+/**
+ * Bandwidth range reporting comes in two modes.
+ *
+ * PER_LINK - range for any xgmi link
+ * PER_PEER - range of max of single xgmi link to max of multiple links based on source peer
+ */
+enum amdgpu_xgmi_bw_mode {
+ AMDGPU_XGMI_BW_MODE_PER_LINK = 0,
+ AMDGPU_XGMI_BW_MODE_PER_PEER
+};
+
+enum amdgpu_xgmi_bw_unit {
+ AMDGPU_XGMI_BW_UNIT_GBYTES = 0,
+ AMDGPU_XGMI_BW_UNIT_MBYTES
+};
+
+struct amdgpu_xgmi_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+extern struct amdgpu_xgmi_ras xgmi_ras;
+
+struct amdgpu_xgmi {
+ /* from psp */
+ u64 node_id;
+ u64 hive_id;
+ /* fixed per family */
+ u64 node_segment_size;
+ /* physical node (0-3) */
+ unsigned physical_node_id;
+ /* number of nodes (0-4) */
+ unsigned num_physical_nodes;
+ /* gpu list in the same hive */
+ struct list_head head;
+ bool supported;
+ struct ras_common_if *ras_if;
+ bool connected_to_cpu;
+ struct amdgpu_xgmi_ras *ras;
+ enum amdgpu_xgmi_link_speed max_speed;
+ uint8_t max_width;
+};
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
-int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw);
bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
-static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
- struct amdgpu_device *bo_adev)
-{
- return (amdgpu_use_xgmi_p2p &&
- adev != bo_adev &&
- adev->gmc.xgmi.hive_id &&
- adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
-}
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev);
int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev);
@@ -87,4 +126,7 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
int global_link_num);
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
+uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index b4f9c2f4e92c..d6ac2652f0ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -97,11 +97,12 @@ union amd_sriov_msg_feature_flags {
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
uint32_t av1_support : 1;
- uint32_t vcn_rb_decouple : 1;
+ uint32_t vcn_rb_decouple : 1;
uint32_t mes_info_dump_enable : 1;
uint32_t ras_caps : 1;
uint32_t ras_telemetry : 1;
- uint32_t reserved : 21;
+ uint32_t ras_cper : 1;
+ uint32_t reserved : 20;
} flags;
uint32_t all;
};
@@ -328,21 +329,25 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_READY_TO_RESET = 201,
MB_REQ_MSG_RAS_POISON = 202,
MB_REQ_RAS_ERROR_COUNT = 203,
+ MB_REQ_RAS_CPER_DUMP = 204,
};
/* mailbox message send from host to guest */
enum amd_sriov_mailbox_response_message {
- MB_RES_MSG_CLR_MSG_BUF = 0,
- MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
- MB_RES_MSG_FLR_NOTIFICATION,
- MB_RES_MSG_FLR_NOTIFICATION_COMPLETION,
- MB_RES_MSG_SUCCESS,
- MB_RES_MSG_FAIL,
- MB_RES_MSG_QUERY_ALIVE,
- MB_RES_MSG_GPU_INIT_DATA_READY,
- MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
-
- MB_RES_MSG_TEXT_MESSAGE = 255
+ MB_RES_MSG_CLR_MSG_BUF = 0,
+ MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+ MB_RES_MSG_FLR_NOTIFICATION = 2,
+ MB_RES_MSG_FLR_NOTIFICATION_COMPLETION = 3,
+ MB_RES_MSG_SUCCESS = 4,
+ MB_RES_MSG_FAIL = 5,
+ MB_RES_MSG_QUERY_ALIVE = 6,
+ MB_RES_MSG_GPU_INIT_DATA_READY = 7,
+ MB_RES_MSG_RAS_POISON_READY = 8,
+ MB_RES_MSG_PF_SOFT_FLR_NOTIFICATION = 9,
+ MB_RES_MSG_GPU_RMA = 10,
+ MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
+ MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_TEXT_MESSAGE = 255
};
enum amd_sriov_ras_telemetry_gpu_block {
@@ -386,11 +391,20 @@ struct amd_sriov_ras_telemetry_error_count {
} block[RAS_TELEMETRY_GPU_BLOCK_COUNT];
};
+struct amd_sriov_ras_cper_dump {
+ uint32_t more;
+ uint64_t overflow_count;
+ uint64_t count;
+ uint64_t wptr;
+ uint32_t buf[];
+};
+
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
+ struct amd_sriov_ras_cper_dump cper_dump;
} body;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 08d6787893b3..9cd63b4177bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2148,7 +2148,7 @@ static int cik_common_resume(struct amdgpu_ip_block *ip_block)
return cik_common_hw_init(ip_block);
}
-static bool cik_common_is_idle(void *handle)
+static bool cik_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 444563486769..41f4705bdbbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -345,9 +345,9 @@ static int cik_ih_resume(struct amdgpu_ip_block *ip_block)
return cik_ih_hw_init(ip_block);
}
-static bool cik_ih_is_idle(void *handle)
+static bool cik_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d9bd8f3f17e2..508cea965983 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1025,9 +1025,9 @@ static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
return cik_sdma_hw_init(ip_block);
}
-static bool cik_sdma_is_idle(void *handle)
+static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 06088d52d81c..279288365940 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -51,6 +51,15 @@
#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+/* audio endpt instance offsets */
+#define AUD0_REGISTER_OFFSET (0x1780 - 0x1780)
+#define AUD1_REGISTER_OFFSET (0x1786 - 0x1780)
+#define AUD2_REGISTER_OFFSET (0x178c - 0x1780)
+#define AUD3_REGISTER_OFFSET (0x1792 - 0x1780)
+#define AUD4_REGISTER_OFFSET (0x1798 - 0x1780)
+#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
+#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
+
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 82586b76aeda..2f891fb846d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -341,9 +341,9 @@ static int cz_ih_resume(struct amdgpu_ip_block *ip_block)
return cz_ih_hw_init(ip_block);
}
-static bool cz_ih_is_idle(void *handle)
+static bool cz_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c5e3d2251b18..df401aded662 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2970,7 +2970,7 @@ static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v10_0_is_idle(void *handle)
+static bool dce_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index ea42a4472bf6..80f01c3989cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3108,7 +3108,7 @@ static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v11_0_is_idle(void *handle)
+static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 915804a6a1d7..ac51b7a6e8d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -206,9 +206,9 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
+ /* writing to the low address triggers the update */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)crtc_base);
-
/* post the write */
RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
@@ -218,11 +218,11 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
+
*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
return 0;
-
}
/**
@@ -242,7 +242,8 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
if (hpd >= adev->mode_info.num_hpd)
return connected;
- if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
connected = true;
return connected;
@@ -370,13 +371,41 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
return mmDC_GPIO_HPD_A;
}
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
+ crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
-
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
@@ -419,7 +448,6 @@ void dce_v6_0_disable_dce(struct amdgpu_device *adev)
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -895,8 +923,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_high.dram_channels = dram_channels;
wm_high.num_heads = num_heads;
- if (adev->pm.dpm_enabled) {
/* watermark for low clocks */
+ if (adev->pm.dpm_enabled) {
wm_low.yclk =
amdgpu_dpm_get_mclk(adev, true) * 10;
wm_low.sclk =
@@ -1006,6 +1034,20 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
}
/* watermark setup */
+/**
+ * dce_v6_0_line_buffer_adjust - Set up the line buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ * @other_mode: the display mode of another display controller
+ * that may be sharing the line buffer
+ *
+ * Set up the line buffer allocation for
+ * the selected display controller (SI).
+ * Returns the line buffer size in pixels.
+ */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc,
struct drm_display_mode *mode,
@@ -1386,6 +1428,8 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.pin[i].connected = false;
adev->mode_info.audio.pin[i].offset = pin_offsets[i];
adev->mode_info.audio.pin[i].id = i;
+ /* disable audio. it will be set up later */
+ /* XXX remove once we switch to ip funcs */
dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
}
@@ -2865,14 +2909,35 @@ static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v6_0_is_idle(void *handle)
+static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+ u32 srbm_soft_reset = 0, tmp;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (dce_v6_0_is_display_hung(adev))
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
return 0;
}
@@ -3148,7 +3213,6 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
}
return 0;
-
}
static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
@@ -3281,8 +3345,7 @@ static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -3294,8 +3357,7 @@ static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}
@@ -3366,7 +3428,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
amdgpu_encoder->devices |= supported_device;
return;
}
-
}
/* add a new one */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f2edc0fece5b..07358546581f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1395,13 +1395,13 @@ static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
}
static const u32 pin_offsets[7] = {
- (0x1780 - 0x1780),
- (0x1786 - 0x1780),
- (0x178c - 0x1780),
- (0x1792 - 0x1780),
- (0x1798 - 0x1780),
- (0x179d - 0x1780),
- (0x17a4 - 0x1780),
+ AUD0_REGISTER_OFFSET,
+ AUD1_REGISTER_OFFSET,
+ AUD2_REGISTER_OFFSET,
+ AUD3_REGISTER_OFFSET,
+ AUD4_REGISTER_OFFSET,
+ AUD5_REGISTER_OFFSET,
+ AUD6_REGISTER_OFFSET,
};
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
@@ -2887,7 +2887,7 @@ static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v8_0_is_idle(void *handle)
+static bool dce_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f54617c6c071..6d514efb0a6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7583,9 +7583,9 @@ static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v10_0_hw_init(ip_block);
}
-static bool gfx_v10_0_is_idle(void *handle)
+static bool gfx_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 57f1e2b50e5a..2b3ba404955d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4787,9 +4787,9 @@ static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v11_0_hw_init(ip_block);
}
-static bool gfx_v11_0_is_idle(void *handle)
+static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 47490309045f..926fb536bbff 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3695,9 +3695,9 @@ static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v12_0_hw_init(ip_block);
}
-static bool gfx_v12_0_is_idle(void *handle)
+static bool gfx_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index f26e2cdec07a..2f5cf87ede88 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3167,9 +3167,9 @@ static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v6_0_hw_init(ip_block);
}
-static bool gfx_v6_0_is_idle(void *handle)
+static bool gfx_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
@@ -3183,7 +3183,7 @@ static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v6_0_is_idle(adev))
+ if (gfx_v6_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 84745b2453ab..8181bd0e4f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4515,9 +4515,9 @@ static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v7_0_hw_init(ip_block);
}
-static bool gfx_v7_0_is_idle(void *handle)
+static bool gfx_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6add76ef75e8..d116a2e2f469 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4851,9 +4851,9 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
return r;
}
-static bool gfx_v8_0_is_idle(void *handle)
+static bool gfx_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
|| RREG32(mmGRBM_STATUS2) != 0x8)
@@ -4892,7 +4892,7 @@ static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v8_0_is_idle(adev))
+ if (gfx_v8_0_is_idle(ip_block))
return 0;
udelay(1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e144bce938d5..d345285ea885 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2637,7 +2637,10 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
+ WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ }
gfx_v9_0_tiling_mode_table_init(adev);
@@ -4042,7 +4045,8 @@ static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
+ !amdgpu_sriov_vf(adev))
gfx_v9_4_2_set_power_brake_sequence(adev);
return r;
@@ -4110,9 +4114,9 @@ static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v9_0_hw_init(ip_block);
}
-static bool gfx_v9_0_is_idle(void *handle)
+static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -4127,7 +4131,7 @@ static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_0_is_idle(adev))
+ if (gfx_v9_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c88564de50cd..476542b6e7b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -349,18 +349,7 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
GOLDEN_GB_ADDR_CONFIG);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
- } else {
- /* Golden settings applied by driver for ASIC with rev_id 0 */
- if (adev->rev_id == 0) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
- REDUCE_FIFO_DEPTH_BY_2, 2);
- } else {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
- SPARE, 0x1);
- }
- }
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
}
}
@@ -563,17 +552,6 @@ out:
return err;
}
-static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
-{
- return true;
-}
-
-static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
-{
- if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-}
-
static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
@@ -600,8 +578,6 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
- gfx_v9_4_3_check_if_need_gfxoff(adev);
-
out:
if (err)
amdgpu_ucode_release(&adev->gfx.mec_fw);
@@ -896,9 +872,10 @@ static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
ACA_ERROR_TYPE_UE, 1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
ret = aca_error_cache_log_bank_error(handle, &info,
- ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
+ bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
return -EINVAL;
@@ -939,8 +916,6 @@ static const struct aca_info gfx_v9_4_3_aca_info = {
static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
- u32 gb_addr_config;
-
adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
adev->gfx.ras = &gfx_v9_4_3_ras;
@@ -949,9 +924,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
- gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
-
- adev->gfx.config.gb_addr_config = gb_addr_config;
+ adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
REG_GET_FIELD(
@@ -1362,10 +1335,8 @@ static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
/*
* Rlc save restore list is workable since v2_1.
- * And it's needed by gfxoff feature.
*/
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
+ gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}
static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
@@ -2408,9 +2379,9 @@ static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
return gfx_v9_4_3_hw_init(ip_block);
}
-static bool gfx_v9_4_3_is_idle(void *handle)
+static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
@@ -2428,7 +2399,7 @@ static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_4_3_is_idle(adev))
+ if (gfx_v9_4_3_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 0e3ddea7b8e0..a7bfc9f41d0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Program the AGP BAR */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index edf6cf42f141..95d894a231fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1076,7 +1076,7 @@ static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v10_0_is_idle(void *handle)
+static bool gmc_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v10.*/
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index f86d0650a05e..ad099f136f84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -987,7 +987,7 @@ static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v11_0_is_idle(void *handle)
+static bool gmc_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v11.*/
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index c6d45d0fb9d1..ea7c32d8380b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -984,7 +984,7 @@ static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v12_0_is_idle(void *handle)
+static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v11.*/
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 2245dda92021..a992e79d9581 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -957,9 +957,9 @@ static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v6_0_is_idle(void *handle)
+static bool gmc_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -976,7 +976,7 @@ static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gmc_v6_0_is_idle(adev))
+ if (gmc_v6_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 9aac4b1101e3..83e39f16044a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1142,9 +1142,9 @@ static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v7_0_is_idle(void *handle)
+static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 744081652d42..99ca08e9bdb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1263,9 +1263,9 @@ static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v8_0_is_idle(void *handle)
+static bool gmc_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index a80f3e2bcba8..783e0c3b86b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1607,9 +1607,8 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
/* Mode detected by hardware and supported modes available */
if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
- for (i = AMDGPU_NPS1_PARTITION_MODE;
- supp_modes && i <= AMDGPU_NPS8_PARTITION_MODE; i++) {
- if (supp_modes & BIT(i - 1))
+ while ((i = ffs(supp_modes))) {
+ if (AMDGPU_ALL_NPS_MASK & BIT(i))
adev->gmc.supported_nps_modes |= BIT(i);
supp_modes &= supp_modes - 1;
}
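The rewritten NPS loop uses the classic set-bit walk: ffs() yields the 1-based index of the lowest set bit, and supp_modes &= supp_modes - 1 clears that bit before the next iteration. Assuming, as the old loop bounds imply, that AMDGPU_NPS1_PARTITION_MODE is enum value 1, the 1-based ffs() result lines up directly with the mode enum, so BIT(i) encodes the mode. A self-contained illustration of the idiom (plain userspace C, arbitrary mask value):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int supp_modes = 0x8b;	/* bits 0, 1, 3 and 7 set */
	int i;

	/* ffs() returns the 1-based index of the lowest set bit, 0 if none. */
	while ((i = ffs(supp_modes))) {
		printf("lowest set bit (1-based): %d\n", i);
		supp_modes &= supp_modes - 1;	/* clear the lowest set bit */
	}
	return 0;
}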
@@ -2543,7 +2542,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v9_0_is_idle(void *handle)
+static bool gmc_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v9. */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 8ac3d3282268..1317ede131b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -335,9 +335,9 @@ static int iceland_ih_resume(struct amdgpu_ip_block *ip_block)
return iceland_ih_hw_init(ip_block);
}
-static bool iceland_ih_is_idle(void *handle)
+static bool iceland_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 7198ddfaa8f4..eb4185dcbd1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -652,7 +652,7 @@ static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)
return ih_v6_0_hw_init(ip_block);
}
-static bool ih_v6_0_is_idle(void *handle)
+static bool ih_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 342b166c136d..068ed849dbad 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -631,7 +631,7 @@ static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block)
return ih_v6_1_hw_init(ip_block);
}
-static bool ih_v6_1_is_idle(void *handle)
+static bool ih_v6_1_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 71c1c77035e0..40a3530e0453 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -621,7 +621,7 @@ static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)
return ih_v7_0_hw_init(ip_block);
}
-static bool ih_v7_0_is_idle(void *handle)
+static bool ih_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
index 964c29ef25dc..0027a639c7e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
@@ -50,26 +50,29 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- num_res = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES + MAX_ISP410_INT_SRC;
+ num_res = MAX_ISP410_MEM_RES + MAX_ISP410_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp platform data alloc failed\n", __func__);
goto failure;
}
@@ -88,14 +91,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_res[1].start = isp_base + ISP410_PHY0_OFFSET;
isp->isp_res[1].end = isp_base + ISP410_PHY0_OFFSET + ISP410_PHY0_SIZE;
- isp->isp_res[2].name = "isp_gpio_sensor0_reg";
- isp->isp_res[2].flags = IORESOURCE_MEM;
- isp->isp_res[2].start = isp_base + ISP410_GPIO_SENSOR0_OFFSET;
- isp->isp_res[2].end = isp_base + ISP410_GPIO_SENSOR0_OFFSET +
- ISP410_GPIO_SENSOR0_SIZE;
-
- for (idx = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES, int_idx = 0;
- idx < num_res; idx++, int_idx++) {
+ for (idx = MAX_ISP410_MEM_RES, int_idx = 0; idx < num_res; idx++, int_idx++) {
isp->isp_res[idx].name = "isp_4_1_0_irq";
isp->isp_res[idx].flags = IORESOURCE_IRQ;
isp->isp_res[idx].start =
@@ -110,11 +106,12 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- isp->isp_i2c_res = kcalloc(1, sizeof(struct resource),
- GFP_KERNEL);
+ /* initialize isp i2c platform data */
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
@@ -129,9 +126,31 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].platform_data = isp->isp_pdata;
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
+ /* initialize isp gpiochip platform data */
+ isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_gpio_res) {
+ r = -ENOMEM;
+ drm_err(&adev->ddev,
+ "%s: isp gpio res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_gpio_res[0].name = "isp_gpio_reg";
+ isp->isp_gpio_res[0].flags = IORESOURCE_MEM;
+ isp->isp_gpio_res[0].start = isp_base + ISP410_GPIO_SENSOR_OFFSET;
+ isp->isp_gpio_res[0].end = isp_base + ISP410_GPIO_SENSOR_OFFSET +
+ ISP410_GPIO_SENSOR_SIZE;
+
+ isp->isp_cell[2].name = "amdisp-pinctrl";
+ isp->isp_cell[2].num_resources = 1;
+ isp->isp_cell[2].resources = &isp->isp_gpio_res[0];
+ isp->isp_cell[2].platform_data = isp->isp_pdata;
+ isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
if (r) {
- DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: add mfd hotplug device failed\n", __func__);
goto failure;
}
@@ -143,6 +162,7 @@ failure:
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return r;
}
@@ -155,6 +175,7 @@ static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return 0;
}
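Both ISP variants grow the MFD cell array from two to three entries so the new amdisp-pinctrl child gets its own MMIO window instead of piggybacking on the main register resource. For readers unfamiliar with the MFD pattern, here is a reduced sketch of registering a single memory-backed cell; struct mfd_cell, struct resource and mfd_add_hotplug_devices() are the real kernel APIs, while the device name and offsets are purely illustrative:

#include <linux/mfd/core.h>
#include <linux/ioport.h>
#include <linux/slab.h>

static int register_example_cell(struct device *parent, resource_size_t base)
{
	struct resource *res;
	struct mfd_cell *cell;
	int r = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!res || !cell)
		goto free;

	res->name  = "example_reg";	/* illustrative */
	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end   = base + 0x54;	/* same window size as the new GPIO range */

	cell->name          = "example-child";	/* illustrative */
	cell->num_resources = 1;
	cell->resources     = res;

	/* Each cell becomes a platform device parented under @parent. */
	r = mfd_add_hotplug_devices(parent, cell, 1);
	if (!r)
		return 0;
free:
	kfree(res);	/* kfree(NULL) is a no-op */
	kfree(cell);
	return r;
}

The driver itself keeps all of these allocations until hw_fini and frees them there; the sketch mirrors that by freeing only on failure.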
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
index 7db24c0f1080..4d239198edd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
@@ -42,8 +42,8 @@
#define ISP410_I2C0_OFFSET 0x66400
#define ISP410_I2C0_SIZE 0x100
-#define ISP410_GPIO_SENSOR0_OFFSET 0x6613C
-#define ISP410_GPIO_SENSOR0_SIZE 0x4
+#define ISP410_GPIO_SENSOR_OFFSET 0x6613C
+#define ISP410_GPIO_SENSOR_SIZE 0x54
void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp);
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index b56f27295468..69dd92f6e86d 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -50,27 +50,30 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- num_res = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES + MAX_ISP411_INT_SRC;
+ num_res = MAX_ISP411_MEM_RES + MAX_ISP411_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp platform data alloc failed\n", __func__);
goto failure;
}
@@ -89,14 +92,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_res[1].start = isp_base + ISP411_PHY0_OFFSET;
isp->isp_res[1].end = isp_base + ISP411_PHY0_OFFSET + ISP411_PHY0_SIZE;
- isp->isp_res[2].name = "isp_4_1_1_sensor0_reg";
- isp->isp_res[2].flags = IORESOURCE_MEM;
- isp->isp_res[2].start = isp_base + ISP411_GPIO_SENSOR0_OFFSET;
- isp->isp_res[2].end = isp_base + ISP411_GPIO_SENSOR0_OFFSET +
- ISP411_GPIO_SENSOR0_SIZE;
-
- for (idx = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES, int_idx = 0;
- idx < num_res; idx++, int_idx++) {
+ for (idx = MAX_ISP411_MEM_RES, int_idx = 0; idx < num_res; idx++, int_idx++) {
isp->isp_res[idx].name = "isp_4_1_1_irq";
isp->isp_res[idx].flags = IORESOURCE_IRQ;
isp->isp_res[idx].start =
@@ -111,10 +107,12 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
+ /* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
@@ -129,9 +127,31 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].platform_data = isp->isp_pdata;
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
+ /* initialize isp gpiochip platform data */
+ isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_gpio_res) {
+ r = -ENOMEM;
+ drm_err(&adev->ddev,
+ "%s: isp gpio res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_gpio_res[0].name = "isp_gpio_reg";
+ isp->isp_gpio_res[0].flags = IORESOURCE_MEM;
+ isp->isp_gpio_res[0].start = isp_base + ISP411_GPIO_SENSOR_OFFSET;
+ isp->isp_gpio_res[0].end = isp_base + ISP411_GPIO_SENSOR_OFFSET +
+ ISP411_GPIO_SENSOR_SIZE;
+
+ isp->isp_cell[2].name = "amdisp-pinctrl";
+ isp->isp_cell[2].num_resources = 1;
+ isp->isp_cell[2].resources = &isp->isp_gpio_res[0];
+ isp->isp_cell[2].platform_data = isp->isp_pdata;
+ isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
if (r) {
- DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: add mfd hotplug device failed\n", __func__);
goto failure;
}
@@ -143,6 +163,7 @@ failure:
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return r;
}
@@ -155,6 +176,7 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
index 40887ddeb08c..fe45d70d87f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
@@ -33,7 +33,6 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
#define MAX_ISP411_MEM_RES 2
-#define MAX_ISP411_SENSOR_RES 1
#define MAX_ISP411_INT_SRC 8
#define ISP411_PHY0_OFFSET 0x66700
@@ -42,8 +41,8 @@
#define ISP411_I2C0_OFFSET 0x66400
#define ISP411_I2C0_SIZE 0x100
-#define ISP411_GPIO_SENSOR0_OFFSET 0x6613C
-#define ISP411_GPIO_SENSOR0_SIZE 0x4
+#define ISP411_GPIO_SENSOR_OFFSET 0x6613C
+#define ISP411_GPIO_SENSOR_SIZE 0x54
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 03b8b7cd5229..9e428e669ada 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -604,15 +604,15 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
int cnt = 0;
- mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+ mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
- for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
+ for (cnt = 0; cnt < adev->vcn.inst[0].num_enc_rings; cnt++) {
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 75843a0e3bfb..4cde8a8bcc83 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -680,9 +680,9 @@ void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool jpeg_v2_0_is_idle(void *handle)
+static bool jpeg_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
@@ -707,7 +707,7 @@ static int jpeg_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (!jpeg_v2_0_is_idle(adev))
+ if (!jpeg_v2_0_is_idle(ip_block))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 0a2c1dee2430..8b39e114f3be 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -515,9 +515,9 @@ static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
}
-static bool jpeg_v2_5_is_idle(void *handle)
+static bool jpeg_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -563,7 +563,7 @@ static int jpeg_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
continue;
if (enable) {
- if (!jpeg_v2_5_is_idle(adev))
+ if (!jpeg_v2_5_is_idle(ip_block))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 9faa9c6809df..2f8510c2986b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -470,9 +470,9 @@ static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v3_0_is_idle(void *handle)
+static bool jpeg_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
@@ -498,7 +498,7 @@ static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v3_0_is_idle(adev))
+ if (!jpeg_v3_0_is_idle(ip_block))
return -EBUSY;
jpeg_v3_0_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 292d4a234ea6..f17ec5414fd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -630,9 +630,9 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v4_0_is_idle(void *handle)
+static bool jpeg_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
@@ -658,7 +658,7 @@ static int jpeg_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v4_0_is_idle(adev))
+ if (!jpeg_v4_0_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 0588bb80f41e..5598a35f72af 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -634,12 +634,6 @@ static void jpeg_v4_0_3_stop_inst(struct amdgpu_device *adev, int inst)
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
- WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
- 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
- UVD_PGFSM_STATUS__UVDJ_PWR_OFF <<
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
}
/**
@@ -692,7 +686,7 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
jpeg_v4_0_3_core_reg_offset(ring->pipe));
}
-static void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* JPEG engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through JPEG ring.
@@ -960,9 +954,9 @@ void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool jpeg_v4_0_3_is_idle(void *handle)
+static bool jpeg_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool ret = false;
int i, j;
@@ -1004,7 +998,7 @@ static int jpeg_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (enable) {
- if (!jpeg_v4_0_3_is_idle(adev))
+ if (!jpeg_v4_0_3_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_3_enable_clock_gating(adev, i);
} else {
@@ -1110,24 +1104,20 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
regUVD_JMI0_UVD_JMI_CLIENT_STALL,
reg_offset, 0x1F);
- SOC15_WAIT_ON_RREG(JPEG, jpeg_inst,
- regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
- 0x1F, 0x1F);
+ SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
+ reg_offset, 0x1F, 0x1F);
WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
regUVD_JMI0_JPEG_LMI_DROP,
reg_offset, 0x1F);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regJPEG_CORE_RST_CTRL,
- reg_offset, 1 << ring->pipe);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
regUVD_JMI0_UVD_JMI_CLIENT_STALL,
reg_offset, 0x00);
WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
regUVD_JMI0_JPEG_LMI_DROP,
reg_offset, 0x00);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regJPEG_CORE_RST_CTRL,
- reg_offset, 0x00);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
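The core-stall fix above has two parts: the clean-status poll now goes through SOC15_WAIT_ON_RREG_OFFSET so the per-pipe reg_offset is actually applied, and JPEG_CORE_RST_CTRL is written without an offset, consistent with a register shared across cores that carries one reset bit per pipe. The offset-aware poll expands to a bounded loop of roughly this shape (a hypothetical helper for illustration, not the macro's literal expansion):

/* Hypothetical helper; the driver uses the SOC15_WAIT_ON_RREG_OFFSET macro. */
static int wait_on_reg_offset(struct amdgpu_device *adev, uint32_t reg,
			      uint32_t offset, uint32_t expect, uint32_t mask)
{
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(reg + offset) & mask) == expect)
			return 0;	/* masked value matched */
		udelay(1);
	}
	return -ETIMEDOUT;
}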
static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
@@ -1338,8 +1328,8 @@ static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_ban
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_ERROR_TYPE_CE;
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 747a3e5f6856..a90bf370a002 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -56,6 +56,7 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
unsigned int flags);
void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr);
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index cbba1d9e8367..974030a5c03c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -648,9 +648,9 @@ static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v4_0_5_is_idle(void *handle)
+static bool jpeg_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -693,7 +693,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
continue;
if (enable) {
- if (!jpeg_v4_0_5_is_idle(adev))
+ if (!jpeg_v4_0_5_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 4a55e0cf39e4..31d213ccbe0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -559,9 +559,9 @@ static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v5_0_0_is_idle(void *handle)
+static bool jpeg_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
@@ -587,7 +587,7 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (enable) {
- if (!jpeg_v5_0_0_is_idle(adev))
+ if (!jpeg_v5_0_0_is_idle(ip_block))
return -EBUSY;
jpeg_v5_0_0_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 6e3f522e9133..218e16b68f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -190,6 +190,13 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -209,6 +216,9 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -239,6 +249,9 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
}
return 0;
}
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
jpeg_inst = GET_INST(JPEG, i);
ring = adev->jpeg.inst[i].ring_dec;
@@ -326,11 +339,10 @@ static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx)
+static void jpeg_v5_0_1_init_inst(struct amdgpu_device *adev, int i)
{
- int jpeg_inst;
+ int jpeg_inst = GET_INST(JPEG, i);
- jpeg_inst = GET_INST(JPEG, inst_idx);
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
@@ -339,20 +351,75 @@ static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
- return 0;
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
}
-static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
+static void jpeg_v5_0_1_deinit_inst(struct amdgpu_device *adev, int i)
{
- int jpeg_inst;
+ int jpeg_inst = GET_INST(JPEG, i);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
- jpeg_inst = GET_INST(JPEG, inst_idx);
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+}
- return 0;
+static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 reg, data, mask;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;
+
+ /* enable System Interrupt for JRBC */
+ reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
+ if (ring->pipe < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe;
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe);
+ WREG32_P(reg, data, mask);
+ } else {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe+12);
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe+12));
+ WREG32_P(reg, data, mask);
+ }
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
+ reg_offset);
}
/**
@@ -365,69 +432,13 @@ static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, jpeg_inst, r;
+ int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
-
- /* disable antihang */
- r = jpeg_v5_0_1_disable_antihang(adev, i);
- if (r)
- return r;
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
+ jpeg_v5_0_1_init_inst(adev, i);
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
- u32 reg, data, mask;
-
ring = &adev->jpeg.inst[i].ring_dec[j];
-
- /* enable System Interrupt for JRBC */
- reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
- if (j < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
- data = JPEG_SYS_INT_EN__DJRBC0_MASK << j;
- mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j);
- WREG32_P(reg, data, mask);
- } else {
- data = JPEG_SYS_INT_EN__DJRBC0_MASK << (j+12);
- mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (j+12));
- WREG32_P(reg, data, mask);
- }
-
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_VMID,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_CNTL,
- reg_offset,
- (0x00000001L | 0x00000002L));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- reg_offset, lower_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- reg_offset, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_RPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_WPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_CNTL,
- reg_offset, 0x00000002L);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_SIZE,
- reg_offset, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
- reg_offset);
+ jpeg_v5_0_1_init_jrbc(ring);
}
}
@@ -443,20 +454,10 @@ static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
*/
static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
{
- int i, jpeg_inst, r;
-
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ int i;
- /* enable antihang */
- r = jpeg_v5_0_1_enable_antihang(adev, i);
- if (r)
- return r;
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ jpeg_v5_0_1_deinit_inst(adev, i);
return 0;
}
@@ -516,9 +517,9 @@ static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v5_0_1_is_idle(void *handle)
+static bool jpeg_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool ret = false;
int i, j;
@@ -567,7 +568,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (!jpeg_v5_0_1_is_idle(adev))
+ if (!jpeg_v5_0_1_is_idle(ip_block))
return -EBUSY;
}
@@ -662,6 +663,41 @@ static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x1F);
+ SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
+ reg_offset, 0x1F, 0x1F);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x1F);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x00);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x00);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
+}
+
+static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EOPNOTSUPP;
+
+ jpeg_v5_0_1_core_stall_reset(ring);
+ jpeg_v5_0_1_init_jrbc(ring);
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
.name = "jpeg_v5_0_1",
.early_init = jpeg_v5_0_1_early_init,
@@ -700,6 +736,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v4_0_3_dec_ring_nop,
@@ -711,6 +748,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_1_ring_reset,
};
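With .reset wired into jpeg_v5_0_1_dec_ring_vm_funcs, a hung JPEG queue can be recovered individually: stall and drop the JMI client, pulse the per-pipe core reset, reprogram the JRBC, then run a ring test. How a caller might exercise the hook, sketched with a hypothetical wrapper (the real invocation lives in the amdgpu job-timeout path, which is not shown here):

/* Hypothetical wrapper around the per-ring reset hook. */
static int try_queue_reset(struct amdgpu_ring *ring, unsigned int vmid)
{
	if (!ring->funcs->reset)
		return -EOPNOTSUPP;

	/* jpeg_v5_0_1_ring_reset() itself rejects SR-IOV with -EOPNOTSUPP,
	 * in which case the caller would escalate to a full GPU reset.
	 */
	return ring->funcs->reset(ring, vmid);
}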
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
index 9de3272ef47f..efdab57324e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -26,65 +26,76 @@
extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
-#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
-#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
-#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
-#define regUVD_JRBC0_UVD_JRBC_STATUS_BASE_IDX 1
-#define regUVD_JRBC0_UVD_JRBC_RB_RPTR 0x064a
-#define regUVD_JRBC0_UVD_JRBC_RB_RPTR_BASE_IDX 1
-#define regUVD_JRBC1_UVD_JRBC_RB_WPTR 0x0000
-#define regUVD_JRBC1_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC1_UVD_JRBC_STATUS 0x0009
-#define regUVD_JRBC1_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC1_UVD_JRBC_RB_RPTR 0x000a
-#define regUVD_JRBC1_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC2_UVD_JRBC_RB_WPTR 0x0040
-#define regUVD_JRBC2_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC2_UVD_JRBC_STATUS 0x0049
-#define regUVD_JRBC2_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC2_UVD_JRBC_RB_RPTR 0x004a
-#define regUVD_JRBC2_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC3_UVD_JRBC_RB_WPTR 0x0080
-#define regUVD_JRBC3_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC3_UVD_JRBC_STATUS 0x0089
-#define regUVD_JRBC3_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC3_UVD_JRBC_RB_RPTR 0x008a
-#define regUVD_JRBC3_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC4_UVD_JRBC_RB_WPTR 0x00c0
-#define regUVD_JRBC4_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC4_UVD_JRBC_STATUS 0x00c9
-#define regUVD_JRBC4_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC4_UVD_JRBC_RB_RPTR 0x00ca
-#define regUVD_JRBC4_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC5_UVD_JRBC_RB_WPTR 0x0100
-#define regUVD_JRBC5_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC5_UVD_JRBC_STATUS 0x0109
-#define regUVD_JRBC5_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC5_UVD_JRBC_RB_RPTR 0x010a
-#define regUVD_JRBC5_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC6_UVD_JRBC_RB_WPTR 0x0140
-#define regUVD_JRBC6_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC6_UVD_JRBC_STATUS 0x0149
-#define regUVD_JRBC6_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC6_UVD_JRBC_RB_RPTR 0x014a
-#define regUVD_JRBC6_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC7_UVD_JRBC_RB_WPTR 0x0180
-#define regUVD_JRBC7_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC7_UVD_JRBC_STATUS 0x0189
-#define regUVD_JRBC7_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC7_UVD_JRBC_RB_RPTR 0x018a
-#define regUVD_JRBC7_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC8_UVD_JRBC_RB_WPTR 0x01c0
-#define regUVD_JRBC8_UVD_JRBC_RB_WPTR_BASE_IDX 0
-#define regUVD_JRBC8_UVD_JRBC_STATUS 0x01c9
-#define regUVD_JRBC8_UVD_JRBC_STATUS_BASE_IDX 0
-#define regUVD_JRBC8_UVD_JRBC_RB_RPTR 0x01ca
-#define regUVD_JRBC8_UVD_JRBC_RB_RPTR_BASE_IDX 0
-#define regUVD_JRBC9_UVD_JRBC_RB_WPTR 0x0440
-#define regUVD_JRBC9_UVD_JRBC_RB_WPTR_BASE_IDX 1
-#define regUVD_JRBC9_UVD_JRBC_STATUS 0x0449
-#define regUVD_JRBC9_UVD_JRBC_STATUS_BASE_IDX 1
-#define regUVD_JRBC9_UVD_JRBC_RB_RPTR 0x044a
-#define regUVD_JRBC9_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
+#define regUVD_JRBC0_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR 0x064a
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR 0x0000
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_STATUS 0x0009
+#define regUVD_JRBC1_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR 0x000a
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR 0x0040
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_STATUS 0x0049
+#define regUVD_JRBC2_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR 0x004a
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR 0x0080
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_STATUS 0x0089
+#define regUVD_JRBC3_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR 0x008a
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR 0x00c0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_STATUS 0x00c9
+#define regUVD_JRBC4_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR 0x00ca
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR 0x0100
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_STATUS 0x0109
+#define regUVD_JRBC5_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR 0x010a
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR 0x0140
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_STATUS 0x0149
+#define regUVD_JRBC6_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR 0x014a
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR 0x0180
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_STATUS 0x0189
+#define regUVD_JRBC7_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR 0x018a
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_RB_WPTR 0x01c0
+#define regUVD_JRBC8_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_STATUS 0x01c9
+#define regUVD_JRBC8_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_RB_RPTR 0x01ca
+#define regUVD_JRBC8_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC9_UVD_JRBC_RB_WPTR 0x0440
+#define regUVD_JRBC9_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC9_UVD_JRBC_STATUS 0x0449
+#define regUVD_JRBC9_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC9_UVD_JRBC_RB_RPTR 0x044a
+#define regUVD_JRBC9_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JMI0_JPEG_LMI_DROP 0x0663
+#define regUVD_JMI0_JPEG_LMI_DROP_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL 0x067a
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS 0x067b
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 1
+#define regJPEG_CORE_RST_CTRL 0x072e
+#define regJPEG_CORE_RST_CTRL_BASE_IDX 1
-#endif /* __JPEG_V5_0_0_H__ */
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
+
+#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 747b05d9b3cf..7eee41187b7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -800,7 +800,7 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -835,7 +835,7 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
}
static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -876,7 +876,7 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
}
static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
@@ -974,7 +974,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
/* This function is for backdoor MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe, bool prime_icache)
+ enum amdgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
@@ -1046,7 +1046,7 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
}
static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
u32 *eop;
@@ -1257,7 +1257,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
}
static int mes_v11_0_queue_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
struct amdgpu_ring *ring;
int r;
@@ -1340,7 +1340,7 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
}
static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r, mqd_size = sizeof(struct v11_compute_mqd);
struct amdgpu_ring *ring;
@@ -1647,6 +1647,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
@@ -1670,24 +1674,12 @@ static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = amdgpu_mes_suspend(ip_block->adev);
- if (r)
- return r;
-
return mes_v11_0_hw_fini(ip_block);
}
static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = mes_v11_0_hw_init(ip_block);
- if (r)
- return r;
-
- return amdgpu_mes_resume(ip_block->adev);
+ return mes_v11_0_hw_init(ip_block);
}
static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 8d7cf40030f2..fdc435b62012 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -901,7 +901,7 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -935,7 +935,7 @@ static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
}
static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -969,7 +969,7 @@ static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
}
static void mes_v12_0_free_ucode_buffers(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
@@ -1075,7 +1075,7 @@ static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)
/* This function is for backdoor MES firmware */
static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe, bool prime_icache)
+ enum amdgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
@@ -1139,7 +1139,7 @@ static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
}
static int mes_v12_0_allocate_eop_buf(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
u32 *eop;
@@ -1360,7 +1360,7 @@ static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
}
static int mes_v12_0_queue_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
struct amdgpu_ring *ring;
int r;
@@ -1460,7 +1460,7 @@ static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev)
}
static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r, mqd_size = sizeof(struct v12_compute_mqd);
struct amdgpu_ring *ring;
@@ -1762,6 +1762,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
@@ -1785,24 +1789,12 @@ static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = amdgpu_mes_suspend(ip_block->adev);
- if (r)
- return r;
-
return mes_v12_0_hw_fini(ip_block);
}
static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = mes_v12_0_hw_init(ip_block);
- if (r)
- return r;
-
- return amdgpu_mes_resume(ip_block->adev);
+ return mes_v12_0_hw_init(ip_block);
}
static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 58d22f0d5a68..a54e7b929295 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -751,8 +751,8 @@ static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_ERROR_TYPE_CE;
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 4dcb72d1bdda..5aadf24cb202 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -184,6 +184,9 @@ send_request:
case IDH_REQ_RAS_ERROR_COUNT:
event = IDH_RAS_ERROR_COUNT_READY;
break;
+ case IDH_REQ_RAS_CPER_DUMP:
+ event = IDH_RAS_CPER_DUMP_READY;
+ break;
default:
break;
}
@@ -467,6 +470,16 @@ static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}
+static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
+{
+ uint32_t vf_rptr_hi, vf_rptr_lo;
+
+ vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
+ vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -478,4 +491,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.ras_poison_handler = xgpu_nv_ras_poison_handler,
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
+ .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
};
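xgpu_nv_req_ras_cper_dump passes the 64-bit VF read pointer through two 32-bit mailbox parameters, the usual hi/lo decomposition. A tiny self-contained check of the round trip (plain C, no driver dependencies):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t vf_rptr = 0x123456789abcdef0ULL;	/* arbitrary test value */
	uint32_t hi = (uint32_t)(vf_rptr >> 32);
	uint32_t lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);

	/* Reassemble on the receiving side. */
	uint64_t back = ((uint64_t)hi << 32) | lo;

	assert(back == vf_rptr);
	return 0;
}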
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 9d61d76e1bf9..72c9fceb9d79 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -41,6 +41,7 @@ enum idh_request {
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
+ IDH_REQ_RAS_CPER_DUMP = 204,
};
enum idh_event {
@@ -56,6 +57,7 @@ enum idh_event {
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,
+ IDH_RAS_CPER_DUMP_READY = 14,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 1c727ccd03b1..4cd325149b63 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -625,7 +625,7 @@ static int navi10_ih_resume(struct amdgpu_ip_block *ip_block)
return navi10_ih_hw_init(ip_block);
}
-static bool navi10_ih_is_idle(void *handle)
+static bool navi10_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
index 9900fe5c3bc3..9b4025c39e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -473,52 +473,6 @@ const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs = {
};
-static void nbif_v6_3_1_sriov_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
-{
-}
-
-static void nbif_v6_3_1_sriov_sdma_doorbell_range(struct amdgpu_device *adev,
- int instance, bool use_doorbell,
- int doorbell_index,
- int doorbell_size)
-{
-}
-
-static void nbif_v6_3_1_sriov_vcn_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell,
- int doorbell_index, int instance)
-{
-}
-
-static void nbif_v6_3_1_sriov_gc_doorbell_init(struct amdgpu_device *adev)
-{
-}
-
-const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
- .get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
- .get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
- .get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
- .get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
- .get_rev_id = nbif_v6_3_1_get_rev_id,
- .mc_access_enable = nbif_v6_3_1_mc_access_enable,
- .get_memsize = nbif_v6_3_1_get_memsize,
- .sdma_doorbell_range = nbif_v6_3_1_sriov_sdma_doorbell_range,
- .vcn_doorbell_range = nbif_v6_3_1_sriov_vcn_doorbell_range,
- .gc_doorbell_init = nbif_v6_3_1_sriov_gc_doorbell_init,
- .enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
- .enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
- .ih_doorbell_range = nbif_v6_3_1_sriov_ih_doorbell_range,
- .update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
- .update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
- .get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
- .ih_control = nbif_v6_3_1_ih_control,
- .init_registers = nbif_v6_3_1_init_registers,
- .remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
- .get_rom_offset = nbif_v6_3_1_get_rom_offset,
- .set_reg_remap = nbif_v6_3_1_set_reg_remap,
-};
-
static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
index 9ac4831d39e1..3afec715a9fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
@@ -28,7 +28,6 @@
extern const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs;
-extern const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs;
extern struct amdgpu_nbio_ras nbif_v6_3_1_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 5d4a4e7fd97f..8068f384f56c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -1035,7 +1035,7 @@ static int nv_common_resume(struct amdgpu_ip_block *ip_block)
return nv_common_hw_init(ip_block);
}
-static bool nv_common_is_idle(void *handle)
+static bool nv_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 135c5099bfb8..92ce580647cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -911,9 +911,9 @@ static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
return sdma_v2_4_hw_init(ip_block);
}
-static bool sdma_v2_4_is_idle(void *handle)
+static bool sdma_v2_4_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6e75a4a85f74..1c076bd1cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1200,9 +1200,9 @@ static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v3_0_hw_init(ip_block);
}
-static bool sdma_v3_0_is_idle(void *handle)
+static bool sdma_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index d31ee01383df..33ed2b158fcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2015,9 +2015,9 @@ static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v4_0_hw_init(ip_block);
}
-static bool sdma_v4_0_is_idle(void *handle)
+static bool sdma_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 23a6bb16a0b1..fd34dc138081 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -30,6 +30,7 @@
#include "amdgpu_xcp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
+#include "amdgpu_reset.h"
#include "sdma/sdma_4_4_2_offset.h"
#include "sdma/sdma_4_4_2_sh_mask.h"
@@ -105,6 +106,8 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -669,11 +672,12 @@ static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
* @adev: amdgpu_device pointer
* @i: instance to resume
* @restore: whether to restore the wptr on restart
+ * @guilty: boolean indicating whether this queue is the guilty one (caused the timeout/error)
*
* Set up the gfx DMA ring buffers and enable them.
*/
-static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
+static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore, bool guilty)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -681,6 +685,7 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, b
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
+ u64 rwptr;
wb_offset = (ring->rptr_offs * 4);
@@ -706,12 +711,20 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, b
/* before programing wptr to a less value, need set minor_ptr_update first */
WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
+ /* For the guilty queue, set RPTR to the current wptr to skip bad commands;
+ * if it is not the guilty queue, restore the cached rptr and continue execution.
+ */
+ if (guilty)
+ rwptr = ring->wptr;
+ else
+ rwptr = ring->cached_rptr;
+
/* Initialize the ring buffer's read and write pointers */
if (restore) {
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(rwptr << 2));
} else {
WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
@@ -766,11 +779,12 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, b
* @adev: amdgpu_device pointer
* @i: instance to resume
* @restore: boolean to say restore needed or not
+ * @guilty: whether this queue caused the timeout/error
*
* Set up the page DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
-static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
+static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore, bool guilty)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -778,6 +792,7 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i,
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
+ u64 rwptr;
wb_offset = (ring->rptr_offs * 4);
@@ -785,12 +800,20 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i,
rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+ /* For the guilty queue, set RPTR to the current wptr to skip bad commands;
+ * if it is not the guilty queue, restore the cached rptr and continue execution.
+ */
+ if (guilty)
+ rwptr = ring->wptr;
+ else
+ rwptr = ring->cached_rptr;
+
/* Initialize the ring buffer's read and write pointers */
if (restore) {
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(rwptr << 2));
} else {
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
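Both resume paths now share the same pointer-selection rule: a guilty queue restarts at its write pointer so the bad commands are skipped, while an innocent queue resumes from the read pointer cached before the reset. The page hunk above also fixes the restore path to program the PAGE ring registers instead of the GFX ones. A minimal sketch of the selection rule, assuming only the ring fields visible in these hunks:

#include <stdbool.h>
#include <stdint.h>

/* Select the value programmed into RB_RPTR/RB_WPTR on restore. */
static uint64_t restore_rwptr(uint64_t wptr, uint64_t cached_rptr,
			      bool guilty)
{
	if (guilty)
		return wptr;         /* skip everything still queued */

	return cached_rptr;          /* replay from where we stopped */
}

Note the registers are written with the value shifted left by two, since the ring pointers count dwords while the registers take byte offsets.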
@@ -966,9 +989,9 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
uint32_t temp;
WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
- sdma_v4_4_2_gfx_resume(adev, i, restore);
+ sdma_v4_4_2_gfx_resume(adev, i, restore, adev->sdma.gfx_guilty);
if (adev->sdma.has_page_queue)
- sdma_v4_4_2_page_resume(adev, i, restore);
+ sdma_v4_4_2_page_resume(adev, i, restore, adev->sdma.page_guilty);
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
@@ -1330,6 +1353,7 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
+ sdma_v4_4_2_set_engine_reset_funcs(adev);
return 0;
}
@@ -1351,6 +1375,12 @@ static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_persistent_edc_harvesting_supported(adev))
amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
+ /* The initialization is done in the late_init stage to ensure that SMU
+ * initialization and capability setup are completed before we check the
+ * SDMA reset capability.
+ */
+ sdma_v4_4_2_update_reset_mask(adev);
+
return 0;
}
@@ -1458,7 +1488,6 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
@@ -1477,6 +1506,9 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
return r;
+ /* Initialize guilty flags for GFX and PAGE queues */
+ adev->sdma.gfx_guilty = false;
+ adev->sdma.page_guilty = false;
return r;
}
@@ -1561,9 +1593,9 @@ static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
return sdma_v4_4_2_hw_init(ip_block);
}
-static bool sdma_v4_4_2_is_idle(void *handle)
+static bool sdma_v4_4_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1602,25 +1634,83 @@ static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t instance_id, bool is_page_queue)
+{
+ uint32_t reg_offset = is_page_queue ? regSDMA_PAGE_CONTEXT_STATUS : regSDMA_GFX_CONTEXT_STATUS;
+ uint32_t context_status = RREG32(sdma_v4_4_2_get_reg_offset(adev, instance_id, reg_offset));
+
+ /* Check if the SELECTED bit is set */
+ return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
+}
+
+static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t instance_id = ring->me;
+
+ return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
+}
+
+static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t instance_id = ring->me;
+
+ if (!adev->sdma.has_page_queue)
+ return false;
+
+ return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
+}
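The guilty decision itself is a single status-register test: firmware marks the hung queue's context as SELECTED, and the helpers above read the GFX or PAGE context-status register and test that bit. Illustrative only; the mask value below is a stand-in, not the real SDMA_GFX_CONTEXT_STATUS layout:

#include <stdbool.h>
#include <stdint.h>

#define CONTEXT_STATUS_SELECTED_MASK 0x1u   /* hypothetical bit */

static bool context_selected(uint32_t context_status)
{
	return (context_status & CONTEXT_STATUS_SELECTED_MASK) != 0;
}

Note that the page-queue variant reuses SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK when reading the PAGE register; the SELECTED field presumably sits at the same bit position in both registers.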
+
static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ u32 id = GET_INST(SDMA0, ring->me);
+ return amdgpu_sdma_reset_engine(adev, id, true);
+}
+
+static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id)
+{
u32 inst_mask;
+ uint64_t rptr;
+ struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
+ /* Check if this queue is the guilty one */
+ adev->sdma.gfx_guilty = sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
+ if (adev->sdma.has_page_queue)
+ adev->sdma.page_guilty = sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
+
+ /* Cache the rptr before the reset; after the reset,
+ * all of the registers will be reset to 0.
+ */
+ rptr = amdgpu_ring_get_rptr(ring);
+ ring->cached_rptr = rptr;
+ /* Cache the rptr for the page queue if it exists */
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page_ring = &adev->sdma.instance[instance_id].page;
+ rptr = amdgpu_ring_get_rptr(page_ring);
+ page_ring->cached_rptr = rptr;
+ }
+
/* stop queue */
inst_mask = 1 << ring->me;
sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
if (adev->sdma.has_page_queue)
sdma_v4_4_2_inst_page_stop(adev, inst_mask);
- r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, ring->me));
- if (r)
- return r;
+ return 0;
+}
+
+static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id)
+{
+ int i;
+ u32 inst_mask;
+ struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
+ inst_mask = 1 << ring->me;
udelay(50);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -1638,6 +1728,16 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
return sdma_v4_4_2_inst_start(adev, inst_mask, true);
}
+static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
+ .pre_reset = sdma_v4_4_2_stop_queue,
+ .post_reset = sdma_v4_4_2_restore_queue,
+};
+
+static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
+{
+ amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs);
+}
+
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
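Taken together, stop_queue and restore_queue split the old monolithic reset into pre/post halves around a core engine reset: reset_queue now defers to amdgpu_sdma_reset_engine(), which is assumed to invoke the registered callbacks around the DPM-backed reset that used to be called inline. A hedged sketch of that flow, with simplified stand-in types:

#include <stdint.h>

struct amdgpu_device;

struct sdma_reset_ops {
	int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
	int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
};

static int reset_engine(struct amdgpu_device *adev, uint32_t id,
			const struct sdma_reset_ops *ops,
			int (*core_reset)(struct amdgpu_device *adev,
					  uint32_t id))
{
	int r;

	r = ops->pre_reset(adev, id);     /* mark guilty, cache rptrs, stop */
	if (r)
		return r;

	r = core_reset(adev, id);         /* e.g. the DPM SDMA reset */
	if (r)
		return r;

	return ops->post_reset(adev, id); /* wait for idle, restart rings */
}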
@@ -1683,6 +1783,9 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
case 0:
amdgpu_fence_process(&adev->sdma.instance[i].ring);
break;
+ case 1:
+ amdgpu_fence_process(&adev->sdma.instance[i].page);
+ break;
default:
break;
}
@@ -2029,6 +2132,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
+ .is_guilty = sdma_v4_4_2_ring_is_guilty,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2060,6 +2164,8 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = sdma_v4_4_2_reset_queue,
+ .is_guilty = sdma_v4_4_2_page_ring_is_guilty,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -2231,6 +2337,35 @@ static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
+/**
+ * sdma_v4_4_2_update_reset_mask - update reset mask for SDMA
+ * @adev: Pointer to the AMDGPU device structure
+ *
+ * This function updates the reset mask for SDMA and sets the supported
+ * reset types based on the IP version and firmware versions.
+ *
+ */
+static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
+{
+
+ /*
+ * User-queue SDMA reset relies on both the MEC firmware and the PMFW,
+ * so check both versions here to skip old MEC and PMFW releases.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ case IP_VERSION(9, 5, 0):
+ /* TODO: enable the queue reset flag once firmware supports it */
+ default:
+ break;
+ }
+
+}
+
const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 4,
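The capability check above reduces to OR-ing a per-queue flag into supported_reset once both firmware versions qualify. A sketch under stated assumptions: the 0xb0 MEC threshold is taken from the hunk above, while the flag value is a stand-in for AMDGPU_RESET_TYPE_PER_QUEUE:

#include <stdbool.h>
#include <stdint.h>

#define RESET_TYPE_PER_QUEUE (1u << 2)      /* hypothetical encoding */

static uint32_t update_reset_mask(uint32_t mask, uint32_t mec_fw_version,
				  bool pmfw_reset_supported)
{
	if (mec_fw_version >= 0xb0 && pmfw_reset_supported)
		mask |= RESET_TYPE_PER_QUEUE;

	return mask;
}

Running this in late_init, after SMU bring-up, is what makes the PMFW side of the check trustworthy.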
@@ -2397,8 +2532,8 @@ static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_ban
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_ERROR_TYPE_CE;
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 377efb2b8d0e..0dce59f4f6e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1530,9 +1530,9 @@ static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v5_0_hw_init(ip_block);
}
-static bool sdma_v5_0_is_idle(void *handle)
+static bool sdma_v5_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index ce05d895f977..2b39a03ff0c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1435,9 +1435,9 @@ static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)
return sdma_v5_2_hw_init(ip_block);
}
-static bool sdma_v5_2_is_idle(void *handle)
+static bool sdma_v5_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 373703d1596d..c214c3d2149b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1429,9 +1429,9 @@ static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v6_0_hw_init(ip_block);
}
-static bool sdma_v6_0_is_idle(void *handle)
+static bool sdma_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 9eb8f4f9f302..b2706221df99 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1430,9 +1430,9 @@ static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v7_0_hw_init(ip_block);
}
-static bool sdma_v7_0_is_idle(void *handle)
+static bool sdma_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 77ef7da2e4fe..f90e07375396 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -909,7 +909,7 @@ static const u32 hainan_mgcg_cgcg_init[] =
/* XXX: update when we support VCE */
#if 0
-/* tahiti, pitcarin, verde */
+/* tahiti, pitcairn, verde */
static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
{
{
@@ -940,7 +940,7 @@ static const struct amdgpu_video_codecs hainan_video_codecs_encode =
.codec_array = NULL,
};
-/* tahiti, pitcarin, verde, oland */
+/* tahiti, pitcairn, verde, oland */
static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
{
{
@@ -1888,7 +1888,7 @@ static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
- DRM_ERROR("Timeout setting UVD clocks!\n");
+ DRM_ERROR("Timeout setting VCE clocks!\n");
return -ETIMEDOUT;
}
@@ -2644,7 +2644,7 @@ static int si_common_resume(struct amdgpu_ip_block *ip_block)
return si_common_hw_init(ip_block);
}
-static bool si_common_is_idle(void *handle)
+static bool si_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index dbd78d5345a4..e2089c8da71b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -541,9 +541,9 @@ static int si_dma_resume(struct amdgpu_ip_block *ip_block)
return si_dma_hw_init(ip_block);
}
-static bool si_dma_is_idle(void *handle)
+static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(SRBM_STATUS2);
@@ -559,7 +559,7 @@ static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_dma_is_idle(adev))
+ if (si_dma_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index a32b6243c1f8..5c38e1fb1dca 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -210,9 +210,9 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
return si_ih_hw_init(ip_block);
}
-static bool si_ih_is_idle(void *handle)
+static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(SRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -227,7 +227,7 @@ static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_ih_is_idle(adev))
+ if (si_ih_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 2c6d2099e215..8732f766947e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1360,7 +1360,7 @@ static int soc15_common_resume(struct amdgpu_ip_block *ip_block)
return soc15_common_hw_init(ip_block);
}
-static bool soc15_common_is_idle(void *handle)
+static bool soc15_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 7925cbb61d0d..dd5d04c068f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -952,7 +952,7 @@ static int soc21_common_resume(struct amdgpu_ip_block *ip_block)
return soc21_common_hw_init(ip_block);
}
-static bool soc21_common_is_idle(void *handle)
+static bool soc21_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 4e506c91e978..972b449ab89f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -531,7 +531,7 @@ static int soc24_common_resume(struct amdgpu_ip_block *ip_block)
return soc24_common_hw_init(ip_block);
}
-static bool soc24_common_is_idle(void *handle)
+static bool soc24_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 0968e551f7b5..7d17ae56f901 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -353,9 +353,9 @@ static int tonga_ih_resume(struct amdgpu_ip_block *ip_block)
return tonga_ih_hw_init(ip_block);
}
-static bool tonga_ih_is_idle(void *handle)
+static bool tonga_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5830e799c0a3..5dbaebb592b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -98,7 +98,7 @@ static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
- * uvd_v3_1_ring_emit_fence - emit an fence & trap command
+ * uvd_v3_1_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
@@ -242,7 +242,7 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
uint64_t addr;
uint32_t size;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
@@ -416,7 +416,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
/* Set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
- /* programm the 4GB memory segment for rptr and ring buffer */
+ /* Program the 4GB memory segment for rptr and ring buffer */
WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
(0x7 << 16) | (0x1 << 31));
@@ -758,9 +758,9 @@ static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block)
return uvd_v3_1_hw_init(ip_block);
}
-static bool uvd_v3_1_is_idle(void *handle)
+static bool uvd_v3_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f93079e09215..4b96fd583772 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -302,7 +302,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
- /* disable interupt */
+ /* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
#ifdef __BIG_ENDIAN
@@ -312,6 +312,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL, 0x203108);
@@ -658,9 +659,9 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}
-static bool uvd_v4_2_is_idle(void *handle)
+static bool uvd_v4_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2295c8713d61..71409ad8b7ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,9 +580,9 @@ static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool uvd_v5_0_is_idle(void *handle)
+static bool uvd_v5_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 070a0624c2c5..1c07b701d0e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1143,9 +1143,9 @@ static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, vmid);
}
-static bool uvd_v6_0_is_idle(void *handle)
+static bool uvd_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
@@ -1156,7 +1156,7 @@ static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v6_0_is_idle(adev))
+ if (uvd_v6_0_is_idle(ip_block))
return 0;
}
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c633b7ff2943..8c8c02606d25 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -201,9 +201,9 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
-static bool vce_v2_0_is_idle(void *handle)
+static bool vce_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}
@@ -214,7 +214,7 @@ static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (vce_v2_0_is_idle(adev))
+ if (vce_v2_0_is_idle(ip_block))
return 0;
}
return -ETIMEDOUT;
@@ -280,7 +280,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
if (vce_v2_0_lmi_clean(adev)) {
- DRM_INFO("vce is not idle \n");
+ DRM_INFO("VCE is not idle \n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 01248a3982ba..708123899c41 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -597,9 +597,9 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
-static bool vce_v3_0_is_idle(void *handle)
+static bool vce_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 mask = 0;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
@@ -614,7 +614,7 @@ static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++)
- if (vce_v3_0_is_idle(adev))
+ if (vce_v3_0_is_idle(ip_block))
return 0;
return -ETIMEDOUT;
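For reference, the is_idle conversion leaves the harvest-aware busy mask in vce_v3_0 untouched: only instances that survived harvesting contribute BUSY bits, so a fused-off VCE can never hold the block out of idle. A simplified sketch with stand-in mask values:

#include <stdbool.h>
#include <stdint.h>

#define VCE0_BUSY    (1u << 0)   /* stand-ins for the SRBM_STATUS2 fields */
#define VCE1_BUSY    (1u << 1)
#define HARVEST_VCE0 0x1u
#define HARVEST_VCE1 0x2u

static bool vce_is_idle(uint32_t srbm_status2, uint32_t harvest_config)
{
	uint32_t mask = 0;

	if (!(harvest_config & HARVEST_VCE0))
		mask |= VCE0_BUSY;
	if (!(harvest_config & HARVEST_VCE1))
		mask |= VCE1_BUSY;

	return !(srbm_status2 & mask);
}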
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 5ea96c983517..21b57c29bf7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -81,14 +81,14 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};
-static int vcn_v1_0_stop(struct amdgpu_device *adev);
+static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
@@ -105,7 +105,8 @@ static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->vcn.num_enc_rings = 2;
+ adev->vcn.inst[0].num_enc_rings = 2;
+ adev->vcn.inst[0].set_pg_state = vcn_v1_0_set_pg_state;
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
@@ -113,7 +114,7 @@ static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_early_init(ip_block);
- return amdgpu_vcn_early_init(adev);
+ return amdgpu_vcn_early_init(adev, 0);
}
/**
@@ -138,23 +139,23 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.inst->irq);
if (r)
return r;
}
- r = amdgpu_vcn_sw_init(adev);
+ r = amdgpu_vcn_sw_init(adev, 0);
if (r)
return r;
/* Override the work func */
- adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+ adev->vcn.inst[0].idle_work.work.func = vcn_v1_0_idle_work_handler;
- amdgpu_vcn_setup_ucode(adev);
+ amdgpu_vcn_setup_ucode(adev, 0);
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(adev, 0);
if (r)
return r;
@@ -166,18 +167,18 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
+ adev->vcn.inst[0].internal.scratch9 = adev->vcn.inst->external.scratch9 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
+ adev->vcn.inst[0].internal.data0 = adev->vcn.inst->external.data0 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
+ adev->vcn.inst[0].internal.data1 = adev->vcn.inst->external.data1 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
+ adev->vcn.inst[0].internal.cmd = adev->vcn.inst->external.cmd =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = adev->vcn.inst->external.nop =
+ adev->vcn.inst[0].internal.nop = adev->vcn.inst->external.nop =
SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
@@ -189,7 +190,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
+ adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
@@ -223,13 +224,13 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
int r;
struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
if (r)
return r;
jpeg_v1_0_sw_fini(ip_block);
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
@@ -253,7 +254,7 @@ static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -276,13 +277,14 @@ static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&vinst->idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
- vcn_v1_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
return 0;
@@ -301,7 +303,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
bool idle_work_unexecuted;
- idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, 0);
@@ -311,7 +313,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
return r;
}
@@ -327,7 +329,7 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- r = amdgpu_vcn_resume(ip_block->adev);
+ r = amdgpu_vcn_resume(ip_block->adev, 0);
if (r)
return r;
@@ -339,12 +341,13 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v1_0_mc_resume_spg_mode - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Let the VCN memory controller know it's offsets
*/
-static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -410,8 +413,9 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config);
}
-static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -485,12 +489,13 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
/**
* vcn_v1_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v1_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data;
/* JPEG disable CGC */
@@ -611,12 +616,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
/**
* vcn_v1_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v1_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
/* enable JPEG CGC */
@@ -680,8 +686,10 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
+static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t reg_data = 0;
/* disable JPEG CGC */
@@ -734,8 +742,9 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}
-static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_1_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -779,8 +788,9 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
-static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_1_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -823,12 +833,13 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
/**
* vcn_v1_0_start_spg_mode - start VCN block
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Setup and start the VCN block
*/
-static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -837,13 +848,13 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_1_0_disable_static_power_gating(adev);
+ vcn_1_0_disable_static_power_gating(vinst);
tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
/* disable clock gating */
- vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_disable_clock_gating(vinst);
/* disable interupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@@ -885,7 +896,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- vcn_v1_0_mc_resume_spg_mode(adev);
+ vcn_v1_0_mc_resume_spg_mode(vinst);
WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
@@ -1001,8 +1012,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1010,7 +1022,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_1_0_enable_static_power_gating(adev);
+ vcn_1_0_enable_static_power_gating(vinst);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
@@ -1019,7 +1031,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
/* enable clock gating */
- vcn_v1_0_clock_gating_dpg_mode(adev, 0);
+ vcn_v1_0_clock_gating_dpg_mode(vinst, 0);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1068,7 +1080,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
- vcn_v1_0_mc_resume_dpg_mode(adev);
+ vcn_v1_0_mc_resume_dpg_mode(vinst);
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
@@ -1085,7 +1097,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
- vcn_v1_0_clock_gating_dpg_mode(adev, 1);
+ vcn_v1_0_clock_gating_dpg_mode(vinst, 1);
/* setup mmUVD_LMI_CTRL */
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
@@ -1145,21 +1157,24 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v1_0_start(struct amdgpu_device *adev)
+static int vcn_v1_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+
return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
- vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
+ vcn_v1_0_start_dpg_mode(vinst) : vcn_v1_0_start_spg_mode(vinst);
}
/**
* vcn_v1_0_stop_spg_mode - stop VCN block
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* stop the VCN block
*/
-static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
int tmp;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
@@ -1199,13 +1214,14 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
- vcn_v1_0_enable_clock_gating(adev);
- vcn_1_0_enable_static_power_gating(adev);
+ vcn_v1_0_enable_clock_gating(vinst);
+ vcn_1_0_enable_static_power_gating(vinst);
return 0;
}
-static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
@@ -1237,21 +1253,24 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v1_0_stop(struct amdgpu_device *adev)
+static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
int r;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- r = vcn_v1_0_stop_dpg_mode(adev);
+ r = vcn_v1_0_stop_dpg_mode(vinst);
else
- r = vcn_v1_0_stop_spg_mode(adev);
+ r = vcn_v1_0_stop_spg_mode(vinst);
return r;
}
-static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
int ret_code;
uint32_t reg_data = 0;
uint32_t reg_data2 = 0;
@@ -1377,9 +1396,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
return 0;
}
-static bool vcn_v1_0_is_idle(void *handle)
+static bool vcn_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
@@ -1399,16 +1418,17 @@ static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v1_0_is_idle(adev))
+ if (!vcn_v1_0_is_idle(ip_block))
return -EBUSY;
- vcn_v1_0_enable_clock_gating(adev);
+ vcn_v1_0_enable_clock_gating(vinst);
} else {
/* disable HW gating and enable Sw gating */
- vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_disable_clock_gating(vinst);
}
return 0;
}
@@ -1800,8 +1820,8 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
}
}
-static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
* That's done in the dpm code via the SMC. This
@@ -1811,28 +1831,29 @@ static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = ip_block->adev;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v1_0_stop(adev);
+ ret = vcn_v1_0_stop(vinst);
else
- ret = vcn_v1_0_start(adev);
+ ret = vcn_v1_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
+
return ret;
}
static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1848,7 +1869,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
- adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ adev->vcn.inst->pause_dpg_mode(vcn_inst, &new_state);
}
fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
@@ -1862,16 +1883,16 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
} else {
- schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
}
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
- mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+ mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
@@ -1897,7 +1918,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
struct dpg_pause_state new_state;
unsigned int fences = 0, i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
if (fences)
@@ -1915,14 +1936,14 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
new_state.jpeg = VCN_DPG_STATE__PAUSE;
- adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ adev->vcn.inst->pause_dpg_mode(adev->vcn.inst, &new_state);
}
}
void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
- schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
- mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
+ schedule_delayed_work(&ring->adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
+ mutex_unlock(&ring->adev->vcn.inst[0].vcn1_jpeg1_workaround);
}
static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -1997,7 +2018,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.is_idle = vcn_v1_0_is_idle,
.wait_for_idle = vcn_v1_0_wait_for_idle,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
- .set_powergating_state = vcn_v1_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v1_0_dump_ip_state,
.print_ip_state = vcn_v1_0_print_ip_state,
};
@@ -2056,11 +2077,11 @@ static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
uint32_t reg = amdgpu_ib_get_value(ib, i);
uint32_t val = amdgpu_ib_get_value(ib, i + 1);
- if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ if (reg == PACKET0(p->adev->vcn.inst[0].internal.data0, 0)) {
msg_lo = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.data1, 0)) {
msg_hi = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.cmd, 0)) {
r = vcn_v1_0_validate_bo(p, job,
((u64)msg_hi) << 32 | msg_lo);
if (r)
@@ -2145,7 +2166,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
}
@@ -2156,7 +2177,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+ adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 2;
adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}
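The vcn_v1_0 hunks above all follow one refactor: per-device VCN state (idle_work, cur_state, pause_dpg_mode, num_enc_rings) moves onto each amdgpu_vcn_inst, handlers take a vinst and derive adev from it, and the deferred-work handler recovers its instance with container_of(). A minimal sketch of that ownership pattern, using simplified stand-in types rather than the kernel's:

#include <linux/workqueue.h>

struct amdgpu_device;

struct vcn_inst {                        /* stand-in for amdgpu_vcn_inst */
	struct amdgpu_device *adev;      /* back-pointer set at sw_init  */
	struct delayed_work idle_work;   /* now per instance, not global */
	int cur_state;                   /* per-instance powergate state */
};

static void vcn_idle_work_handler(struct work_struct *work)
{
	struct vcn_inst *vinst =
		container_of(work, struct vcn_inst, idle_work.work);
	struct amdgpu_device *adev = vinst->adev;

	(void)adev;     /* fence counting and pause decisions go here */
}

The same shape repeats in the vcn_v2_0 hunks that follow.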
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e42cfc731ad8..8e7a36f26e9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,10 +92,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers and load microcode
@@ -110,15 +110,16 @@ static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
- adev->vcn.num_enc_rings = 1;
+ adev->vcn.inst[0].num_enc_rings = 1;
else
- adev->vcn.num_enc_rings = 2;
+ adev->vcn.inst[0].num_enc_rings = 2;
+ adev->vcn.inst->set_pg_state = vcn_v2_0_set_pg_state;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ return amdgpu_vcn_early_init(adev, 0);
}
/**
@@ -145,7 +146,7 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.inst->irq);
@@ -153,13 +154,13 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- r = amdgpu_vcn_sw_init(adev);
+ r = amdgpu_vcn_sw_init(adev, 0);
if (r)
return r;
- amdgpu_vcn_setup_ucode(adev);
+ amdgpu_vcn_setup_ucode(adev, 0);
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(adev, 0);
if (r)
return r;
@@ -175,25 +176,25 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
@@ -210,7 +211,7 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -254,11 +255,11 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
@@ -292,7 +293,7 @@ static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
ring->sched.ready = false;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -312,13 +313,14 @@ static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&vinst->idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
- vcn_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
return 0;
}
@@ -338,7 +340,7 @@ static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ r = amdgpu_vcn_suspend(ip_block->adev, 0);
return r;
}
@@ -354,7 +356,7 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- r = amdgpu_vcn_resume(ip_block->adev);
+ r = amdgpu_vcn_resume(ip_block->adev, 0);
if (r)
return r;
@@ -366,12 +368,13 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v2_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Let the VCN memory controller know it's offsets
*/
-static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
+static void vcn_v2_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -426,8 +429,10 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
-static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
+static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -525,12 +530,13 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/**
* vcn_v2_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data;
if (amdgpu_sriov_vf(adev))
@@ -634,9 +640,10 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
+static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
uint8_t sram_sel, uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -685,12 +692,13 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v2_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -743,8 +751,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -792,8 +801,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
-static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -834,13 +844,14 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
}
}
-static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
+static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
- vcn_v2_0_enable_static_power_gating(adev);
+ vcn_v2_0_enable_static_power_gating(vinst);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
@@ -852,7 +863,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
+ vcn_v2_0_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -901,7 +912,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
+ vcn_v2_0_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -969,8 +980,9 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
return 0;
}
-static int vcn_v2_0_start(struct amdgpu_device *adev)
+static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
@@ -981,16 +993,16 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
amdgpu_dpm_enable_vcn(adev, true, 0);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
+ return vcn_v2_0_start_dpg_mode(vinst, adev->vcn.inst->indirect_sram);
- vcn_v2_0_disable_static_power_gating(adev);
+ vcn_v2_0_disable_static_power_gating(vinst);
/* set uvd status busy */
tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
/*SW clock gating */
- vcn_v2_0_disable_clock_gating(adev);
+ vcn_v2_0_disable_clock_gating(vinst);
/* enable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
@@ -1034,7 +1046,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- vcn_v2_0_mc_resume(adev);
+ vcn_v2_0_mc_resume(vinst);
/* release VCPU reset to boot */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
@@ -1142,12 +1154,13 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v2_0_pause_dpg_mode(adev, 0, &state);
+ vcn_v2_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1172,13 +1185,14 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v2_0_stop(struct amdgpu_device *adev)
+static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t tmp;
int r;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_0_stop_dpg_mode(adev);
+ r = vcn_v2_0_stop_dpg_mode(vinst);
if (r)
return r;
goto power_off;
@@ -1230,8 +1244,8 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
/* clear status */
WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
- vcn_v2_0_enable_clock_gating(adev);
- vcn_v2_0_enable_static_power_gating(adev);
+ vcn_v2_0_enable_clock_gating(vinst);
+ vcn_v2_0_enable_static_power_gating(vinst);
power_off:
if (adev->pm.dpm_enabled)
@@ -1240,9 +1254,11 @@ power_off:
return 0;
}
-static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1317,9 +1333,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
return 0;
}
-static bool vcn_v2_0_is_idle(void *handle)
+static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
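vcn_v2_0_is_idle() now receives a typed struct amdgpu_ip_block instead of an untyped void *handle, so the cast disappears and callers such as set_clockgating_state pass the block straight through. A small sketch of the callback-table shape, again with simplified stand-in types rather than the kernel's actual structures:

#include <stdbool.h>
#include <stdio.h>

struct device { unsigned int status; };

/* Stand-in for amdgpu_ip_block: a typed wrapper carrying the device
 * pointer, replacing the old untyped "void *handle" parameter. */
struct ip_block {
        struct device *adev;
};

struct ip_funcs {
        bool (*is_idle)(struct ip_block *ip_block);
};

static bool vcn_is_idle(struct ip_block *ip_block)
{
        struct device *adev = ip_block->adev;   /* no cast needed */

        return adev->status == 0;
}

static const struct ip_funcs funcs = { .is_idle = vcn_is_idle };

int main(void)
{
        struct device dev = { .status = 0 };
        struct ip_block block = { .adev = &dev };

        printf("idle: %d\n", funcs.is_idle(&block));
        return 0;
}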
@@ -1346,12 +1362,12 @@ static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v2_0_is_idle(adev))
+ if (!vcn_v2_0_is_idle(ip_block))
return -EBUSY;
- vcn_v2_0_enable_clock_gating(adev);
+ vcn_v2_0_enable_clock_gating(&adev->vcn.inst[0]);
} else {
/* disable HW gating and enable Sw gating */
- vcn_v2_0_disable_clock_gating(adev);
+ vcn_v2_0_disable_clock_gating(&adev->vcn.inst[0]);
}
return 0;
}
@@ -1421,9 +1437,9 @@ void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}
@@ -1438,7 +1454,7 @@ void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[0].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}
@@ -1458,7 +1474,7 @@ void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.nop, 0));
amdgpu_ring_write(ring, 0);
}
}
@@ -1479,25 +1495,25 @@ void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
struct amdgpu_device *adev = ring->adev;
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.context_id, 0));
amdgpu_ring_write(ring, seq);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}
@@ -1520,14 +1536,14 @@ void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_vmid, 0));
amdgpu_ring_write(ring, vmid);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_low, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_high, 0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_size, 0));
amdgpu_ring_write(ring, ib->length_dw);
}
@@ -1536,16 +1552,16 @@ void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.gp_scratch8, 0));
amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}
@@ -1570,13 +1586,13 @@ void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
@@ -1777,9 +1793,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 4);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -1796,8 +1812,8 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
}
-static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
* That's done in the dpm code via the SMC. This
@@ -1807,23 +1823,24 @@ static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_device *adev = vinst->adev;
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v2_0_stop(adev);
+ ret = vcn_v2_0_stop(vinst);
else
- ret = vcn_v2_0_start(adev);
+ ret = vcn_v2_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
+
return ret;
}
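With this hunk the per-version powergating entry point becomes a per-instance set_pg_state hook, and the amd_ip_funcs table later switches to the common vcn_set_powergating_state(). That common helper's body is not part of this diff; the dispatcher in the sketch below is hypothetical and assumes it simply walks the instances and calls each hook.

#include <stdio.h>

enum pg_state { PG_GATE, PG_UNGATE };

struct vcn_inst {
        enum pg_state cur_state;
        /* per-instance hook, installed by each version's early_init */
        int (*set_pg_state)(struct vcn_inst *vinst, enum pg_state state);
};

#define NUM_INST 2

static struct vcn_inst inst[NUM_INST];

static int v2_x_set_pg_state(struct vcn_inst *vinst, enum pg_state state)
{
        if (state == vinst->cur_state)
                return 0;               /* nothing to do */
        vinst->cur_state = state;       /* start/stop sequencing elided */
        return 0;
}

/* Hypothetical common dispatcher: the real vcn_set_powergating_state()
 * is defined elsewhere and not shown in this patch; iterating every
 * instance is an assumption about its behaviour. */
static int set_powergating_state(enum pg_state state)
{
        int i, ret = 0;

        for (i = 0; i < NUM_INST; i++)
                ret |= inst[i].set_pg_state(&inst[i], state);
        return ret;
}

int main(void)
{
        for (int i = 0; i < NUM_INST; i++) {
                inst[i].cur_state = PG_GATE;
                inst[i].set_pg_state = v2_x_set_pg_state;
        }
        return set_powergating_state(PG_UNGATE);
}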
@@ -1862,7 +1879,7 @@ static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
adev->vcn.inst->ring_dec.wptr_old = 0;
vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
adev->vcn.inst->ring_enc[i].wptr = 0;
adev->vcn.inst->ring_enc[i].wptr_old = 0;
vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
@@ -1988,7 +2005,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
- for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+ for (r = 0; r < adev->vcn.inst[0].num_enc_rings; ++r) {
ring = &adev->vcn.inst->ring_enc[r];
ring->wptr = 0;
MMSCH_V2_0_INSERT_DIRECT_WT(
@@ -2104,7 +2121,7 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.is_idle = vcn_v2_0_is_idle,
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
- .set_powergating_state = vcn_v2_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v2_0_dump_ip_state,
.print_ip_state = vcn_v2_0_print_ip_state,
};
@@ -2177,7 +2194,7 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
}
@@ -2188,7 +2205,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 1;
adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}
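num_enc_rings also moves from the shared adev->vcn into each amdgpu_vcn_inst, so encode-ring and irq-type counts can differ per instance. A toy illustration of the derived irq count (the asymmetric ring counts here are made up for the example, not taken from any real part):

#include <stdio.h>

#define NUM_INST 2

struct vcn_inst {
        int num_enc_rings;      /* now per instance, not device-global */
        int irq_num_types;
};

int main(void)
{
        struct vcn_inst inst[NUM_INST];

        for (int i = 0; i < NUM_INST; i++) {
                /* instances may differ, e.g. under SR-IOV configs */
                inst[i].num_enc_rings = (i == 0) ? 2 : 1;
                /* one irq type per encode ring plus one for decode */
                inst[i].irq_num_types = inst[i].num_enc_rings + 1;
                printf("inst %d: %d enc rings, %d irq types\n",
                       i, inst[i].num_enc_rings, inst[i].irq_num_types);
        }
        return 0;
}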
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 105e59f6132b..dff1a8859036 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -95,10 +95,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
@@ -118,11 +118,13 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ adev->vcn.inst[i].num_enc_rings = 1;
} else {
u32 harvest;
int i;
@@ -131,13 +133,12 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
+ adev->vcn.inst[i].num_enc_rings = 2;
}
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
+ AMDGPU_VCN_HARVEST_VCN1))
/* both instances are harvested, disable the block */
return -ENOENT;
-
- adev->vcn.num_enc_rings = 2;
}
vcn_v2_5_set_dec_ring_funcs(adev);
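Harvesting is tracked as one bit per instance in harvest_config; when every bit is set the whole block is absent and early init fails with -ENOENT, and later loops skip fused-off instances with the same bit test. A compact stand-alone model of that bitmask logic:

#include <stdio.h>

#define HARVEST_VCN0 (1 << 0)
#define HARVEST_VCN1 (1 << 1)
#define NUM_INST     2

int main(void)
{
        unsigned int harvest_config = 0;

        for (int i = 0; i < NUM_INST; i++) {
                /* pretend the fuse register says instance 1 is fused off */
                unsigned int fused_off = (i == 1);

                if (fused_off)
                        harvest_config |= 1 << i;
        }

        /* both instances harvested: the whole IP block is absent */
        if (harvest_config == (HARVEST_VCN0 | HARVEST_VCN1))
                return -1;      /* the kernel returns -ENOENT here */

        /* later loops skip harvested instances with this test */
        for (int i = 0; i < NUM_INST; i++)
                if (!(harvest_config & (1 << i)))
                        printf("instance %d usable\n", i);
        return 0;
}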
@@ -145,7 +146,15 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
vcn_v2_5_set_irq_funcs(adev);
vcn_v2_5_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ adev->vcn.inst[i].set_pg_state = vcn_v2_5_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
/**
@@ -164,6 +173,8 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << j))
continue;
/* VCN DEC TRAP */
@@ -173,7 +184,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
if (r)
@@ -185,39 +196,33 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
if (r)
return r;
- }
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
+ r = amdgpu_vcn_sw_init(adev, j);
+ if (r)
+ return r;
- amdgpu_vcn_setup_ucode(adev);
+ amdgpu_vcn_setup_ucode(adev, j);
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
+ r = amdgpu_vcn_resume(adev, j);
+ if (r)
+ return r;
- for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ adev->vcn.inst[j].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
-
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
@@ -237,7 +242,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst[j].ring_enc[i];
@@ -265,6 +270,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
}
if (amdgpu_sriov_vf(adev)) {
@@ -273,9 +281,6 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
-
r = amdgpu_vcn_ras_sw_init(adev);
if (r)
return r;
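pause_dpg_mode likewise becomes a per-instance hook, installed in sw_init only when AMD_PG_SUPPORT_VCN_DPG is set. The sketch below assumes callers guard against a NULL hook on non-DPG parts; that guard is an assumption, not something shown in this hunk.

#include <stdio.h>

enum dpg_state { DPG_STATE_UNPAUSE, DPG_STATE_PAUSE };

struct dpg_pause_state { enum dpg_state fw_based; };

struct vcn_inst {
        int dpg_supported;
        /* installed in sw_init only when the part supports DPG */
        int (*pause_dpg_mode)(struct vcn_inst *vinst,
                              struct dpg_pause_state *new_state);
};

static int pause_dpg_mode(struct vcn_inst *vinst,
                          struct dpg_pause_state *new_state)
{
        printf("pause -> %d\n", new_state->fw_based);
        return 0;
}

int main(void)
{
        struct vcn_inst vinst = { .dpg_supported = 1 };
        struct dpg_pause_state state = { .fw_based = DPG_STATE_UNPAUSE };

        if (vinst.dpg_supported)
                vinst.pause_dpg_mode = pause_dpg_mode;

        /* assumed guard: the hook stays NULL without DPG support */
        if (vinst.pause_dpg_mode)
                vinst.pause_dpg_mode(&vinst, &state);
        return 0;
}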
@@ -319,15 +324,18 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
-
- r = amdgpu_vcn_sw_fini(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
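Suspend and teardown also turn into per-instance loops that bail on the first failure, replacing the single whole-block calls. A sketch of that control flow, with stubs standing in for amdgpu_vcn_suspend() and amdgpu_vcn_sw_fini() (note the trade-off: a failure leaves later instances untouched):

#include <stdio.h>

#define NUM_INST 2

static int suspend_one(int i) { (void)i; return 0; }    /* stub */
static int fini_one(int i)    { (void)i; return 0; }    /* stub */

static int sw_fini(void)
{
        int i, r;

        for (i = 0; i < NUM_INST; i++) {
                r = suspend_one(i);
                if (r)
                        return r;       /* propagate the first failure */
                r = fini_one(i);
                if (r)
                        return r;
        }
        return 0;
}

int main(void)
{
        printf("sw_fini: %d\n", sw_fini());
        return 0;
}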
/**
@@ -366,7 +374,7 @@ static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -390,19 +398,21 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
- vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
- amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
}
return 0;
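The idle worker moves from the shared vcn struct onto each instance, so hw_fini cancels it per instance, after the harvest check rather than once up front. A simplified model:

#include <stdio.h>

#define NUM_INST 2

struct vcn_inst {
        int harvested;
        int idle_work_pending;          /* stand-in for delayed_work */
};

static void hw_fini(struct vcn_inst *inst, int n)
{
        for (int i = 0; i < n; i++) {
                if (inst[i].harvested)
                        continue;       /* harvested: nothing scheduled */

                /* cancel this instance's worker, not one global worker */
                inst[i].idle_work_pending = 0;
                printf("instance %d quiesced\n", i);
        }
}

int main(void)
{
        struct vcn_inst inst[NUM_INST] = {
                { .harvested = 0, .idle_work_pending = 1 },
                { .harvested = 1, .idle_work_pending = 0 },
        };

        hw_fini(inst, NUM_INST);
        return 0;
}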
@@ -417,15 +427,20 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -437,11 +452,14 @@ static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v2_5_hw_init(ip_block);
@@ -451,13 +469,14 @@ static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v2_5_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @i: instance to resume
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v2_5_mc_resume(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t size;
uint32_t offset;
@@ -510,8 +529,11 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev, int i)
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
-static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
@@ -609,13 +631,14 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/**
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @i: instance to disable clockgating on
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t data;
if (adev->vcn.harvest_config & (1 << i))
@@ -721,9 +744,11 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev, int i)
WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
- uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel, uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -772,13 +797,14 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @i: instance to enable clockgating on
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t data = 0;
if (adev->vcn.harvest_config & (1 << i))
@@ -829,9 +855,11 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev, int i)
WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
+static void vcn_v2_6_enable_ras(struct amdgpu_vcn_inst *vinst,
bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
@@ -856,8 +884,10 @@ static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
tmp, 0, indirect);
}
-static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -875,7 +905,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v2_5_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -924,7 +954,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v2_5_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -935,7 +965,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
- vcn_v2_6_enable_ras(adev, inst_idx, indirect);
+ vcn_v2_6_enable_ras(vinst, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -1000,8 +1030,10 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
return 0;
}
-static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
+static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared =
adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
@@ -1015,7 +1047,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
amdgpu_dpm_enable_vcn(adev, true, i);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
@@ -1029,7 +1061,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
return 0;
/* SW clock gating */
- vcn_v2_5_disable_clock_gating(adev, i);
+ vcn_v2_5_disable_clock_gating(vinst);
/* enable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
@@ -1074,7 +1106,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- vcn_v2_5_mc_resume(adev, i);
+ vcn_v2_5_mc_resume(vinst);
/* VCN global tiling registers */
WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
@@ -1379,8 +1411,10 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
-static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
/* Wait for power status to be 1 */
@@ -1407,20 +1441,25 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
return 0;
}
-static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
+static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t tmp;
int r;
if (adev->vcn.harvest_config & (1 << i))
return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- return vcn_v2_5_stop_dpg_mode(adev, i);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_stop_dpg_mode(vinst);
+ goto done;
+ }
/* wait for vcn idle */
r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
if (r)
- return r;
+ goto done;
tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
UVD_LMI_STATUS__READ_CLEAN_MASK |
@@ -1428,7 +1467,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
if (r)
- return r;
+ goto done;
/* block LMI UMC channel */
tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
@@ -1439,7 +1478,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
if (r)
- return r;
+ goto done;
/* block VCPU register access */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
@@ -1458,22 +1497,25 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
/* clear status */
WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
- vcn_v2_5_enable_clock_gating(adev, i);
+ vcn_v2_5_enable_clock_gating(vinst);
/* enable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code = 0;
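vcn_v2_5_stop() is reworked so the DPG branch and every wait-for-idle failure fall through to a single done: label, guaranteeing the dpm disable runs on all exit paths (the old code returned early and skipped it). A stripped-down sketch of that goto-based cleanup:

#include <stdio.h>

static int dpm_enabled = 1;             /* enabled earlier, in start() */

static int wait_idle(void) { return 0; }        /* stub: 0 on success */

static int stop(void)
{
        int r;

        r = wait_idle();
        if (r)
                goto done;              /* error still reaches cleanup */

        /* ... register teardown elided ... */
        r = 0;
done:
        if (dpm_enabled)
                dpm_enabled = 0;        /* runs on every exit path now */
        printf("stop: r=%d dpm_enabled=%d\n", r, dpm_enabled);
        return r;
}

int main(void)
{
        return stop();
}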
@@ -1743,16 +1785,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
}
}
-static bool vcn_v2_5_is_idle(void *handle)
+static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1793,39 +1835,39 @@ static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (enable) {
- if (!vcn_v2_5_is_idle(adev))
+ if (!vcn_v2_5_is_idle(ip_block))
return -EBUSY;
- vcn_v2_5_enable_clock_gating(adev, i);
+ vcn_v2_5_enable_clock_gating(vinst);
} else {
- vcn_v2_5_disable_clock_gating(adev, i);
+ vcn_v2_5_disable_clock_gating(vinst);
}
}
return 0;
}
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret = 0, i;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret;
if (amdgpu_sriov_vf(adev))
return 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (state == AMD_PG_STATE_GATE)
- ret |= vcn_v2_5_stop(adev, i);
- else
- ret |= vcn_v2_5_start(adev, i);
- }
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v2_5_stop(vinst);
+ else
+ ret = vcn_v2_5_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1902,10 +1944,10 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
- adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
}
}
@@ -1982,7 +2024,7 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
- .set_powergating_state = vcn_v2_5_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
.print_ip_state = vcn_v2_5_print_ip_state,
};
@@ -1999,7 +2041,7 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
- .set_powergating_state = vcn_v2_5_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
.print_ip_state = vcn_v2_5_print_ip_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 63ddd4cca910..22ae1939476f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -105,10 +105,10 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -124,11 +124,13 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
adev->vcn.harvest_config = 0;
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ adev->vcn.inst[i].num_enc_rings = 1;
} else {
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
@@ -136,18 +138,27 @@ static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
/* both instances are harvested, disable the block */
return -ENOENT;
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(3, 0, 33))
- adev->vcn.num_enc_rings = 0;
- else
- adev->vcn.num_enc_rings = 2;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
+ IP_VERSION(3, 0, 33))
+ adev->vcn.inst[i].num_enc_rings = 0;
+ else
+ adev->vcn.inst[i].num_enc_rings = 2;
+ }
}
vcn_v3_0_set_dec_ring_funcs(adev);
vcn_v3_0_set_enc_ring_funcs(adev);
vcn_v3_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+ return 0;
}
/**
@@ -166,16 +177,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/*
* Note: doorbell assignment is fixed for SRIOV multiple VCN engines
* Formula:
@@ -195,22 +196,32 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
+ adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);
/* VCN DEC TRAP */
@@ -224,7 +235,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_dec;
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
- ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1);
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
@@ -236,7 +247,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);
/* VCN ENC TRAP */
@@ -248,7 +259,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[j];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
- ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j;
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
@@ -274,6 +285,9 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
}
if (amdgpu_sriov_vf(adev)) {
@@ -281,8 +295,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
/* Allocate memory for VCN IP Dump buffer */
ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
@@ -325,14 +337,18 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -370,7 +386,7 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
ring->sched.ready = true;
}
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
ring->sched.ready = false;
@@ -398,7 +414,7 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -422,17 +438,19 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
- vcn_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -449,15 +467,20 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -469,11 +492,14 @@ static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v3_0_hw_init(ip_block);
@@ -483,13 +509,14 @@ static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v3_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
uint32_t offset;
@@ -538,8 +565,11 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
-static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
@@ -634,8 +664,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
-static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -685,8 +717,10 @@ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int
WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}
-static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -733,13 +767,14 @@ static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v3_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: Pointer to the VCN instance structure
*
* Disable clock gating for VCN block
*/
-static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
/* VCN disable CGC */
@@ -866,9 +901,12 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
- uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -917,13 +955,14 @@ static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v3_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: Pointer to the VCN instance structure
*
* Enable clock gating for VCN block
*/
-static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
/* enable VCN CGC */
@@ -982,8 +1021,10 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
-static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -1001,7 +1042,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v3_0_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1050,7 +1091,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v3_0_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -1134,192 +1175,188 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
return 0;
}
-static int vcn_v3_0_start(struct amdgpu_device *adev)
+static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v3_0_start_dpg_mode(vinst, vinst->indirect_sram);
- /* disable VCN power gating */
- vcn_v3_0_disable_static_power_gating(adev, i);
+ /* disable VCN power gating */
+ vcn_v3_0_disable_static_power_gating(vinst);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
- /*SW clock gating */
- vcn_v3_0_disable_clock_gating(adev, i);
+ /* SW clock gating */
+ vcn_v3_0_disable_clock_gating(vinst);
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
-
- /* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v3_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
- for (j = 0; j < 10; ++j) {
- uint32_t status;
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ /* setup mmUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup mmUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup mmUVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v3_0_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
+ if (status & 2)
+ break;
mdelay(10);
- r = -1;
}
+ r = 0;
+ if (status & 2)
+ break;
- if (r) {
- DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
- return r;
- }
+ DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ mdelay(10);
+ r = -1;
+ }
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ if (r) {
+ DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
+ return r;
+ }
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- ring = &adev->vcn.inst[i].ring_dec;
- /* force RBC into idle state */
- rb_bufsz = order_base_2(ring->ring_size);
- tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
- /* programm the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
+ ring = &adev->vcn.inst[i].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
- ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
- lower_32_bits(ring->wptr));
- fw_shared->rb.wptr = lower_32_bits(ring->wptr);
- fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
- IP_VERSION(3, 0, 33)) {
- fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
- fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
- fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
- fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
- }
+ /* program the RB_BASE for the ring buffer */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+
+ WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+ fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
+ IP_VERSION(3, 0, 33)) {
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ ring = &adev->vcn.inst[i].ring_enc[1];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
}
return 0;
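The boot-poll loop restored above (the j/k nesting, the `status & 2` test, the BLK_RST pulse) is easier to follow outside the register soup. Below is a minimal userspace model of the same retry-with-reset pattern; the fake register, the poll counts, and the firmware "coming up on the third poll" are illustrative stand-ins, not the driver's API.

/*
 * Model of the VCPU boot poll: poll a status register for the "running"
 * bit (bit 1, i.e. status & 2); if it never appears within the inner
 * window, pulse the block reset and try again, giving up after a fixed
 * number of attempts.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_status;                 /* stands in for mmUVD_STATUS */

static uint32_t read_status(void)            /* models RREG32_SOC15(...) */
{
	static int polls;
	if (++polls >= 3)                    /* pretend FW boots on poll 3 */
		fake_status |= 2;
	return fake_status;
}

static void pulse_block_reset(void)          /* models the BLK_RST toggle */
{
	fake_status &= ~2u;
}

static int wait_for_vcpu_boot(void)
{
	uint32_t status = 0;
	int j, k, r = -1;

	for (j = 0; j < 10; ++j) {           /* outer: reset attempts */
		for (k = 0; k < 100; ++k) {  /* inner: poll window */
			status = read_status();
			if (status & 2)
				break;
			/* mdelay(10) in the driver; elided here */
		}
		if (status & 2) {
			r = 0;
			break;
		}
		pulse_block_reset();         /* assert + deassert BLK_RST */
	}
	return r;
}

int main(void)
{
	printf("boot poll result: %d\n", wait_for_vcpu_boot());
	return 0;
}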
@@ -1434,7 +1471,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
ring->wptr = 0;
rb_addr = ring->gpu_addr;
@@ -1534,12 +1571,14 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
return 0;
}
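Most of the writes in these hunks go through WREG32_P(reg, val, mask). Judging by the call sites, which pass a field mask as `val` and its complement as `mask`, the macro preserves the bits selected by `mask` and ORs in `val` for the rest. The standalone sketch below assumes those read-modify-write semantics, with fake register storage in place of MMIO.

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[1];                       /* one fake register */

static uint32_t rreg(int r) { return regs[r]; }
static void wreg(int r, uint32_t v) { regs[r] = v; }

/*
 * Read-modify-write: 'mask' selects the bits to keep, so callers pass
 * ~FIELD_MASK to clear a field and 'val' to set it, mirroring calls like
 * WREG32_P(reg, CLK_EN_MASK, ~CLK_EN_MASK).
 */
static void wreg_p(int r, uint32_t val, uint32_t mask)
{
	uint32_t tmp = rreg(r);

	tmp &= mask;
	tmp |= (val & ~mask);
	wreg(r, tmp);
}

int main(void)
{
	regs[0] = 0xf0;
	wreg_p(0, 0x1, ~0x1u);                 /* set bit 0, keep the rest */
	printf("reg = 0x%x\n", regs[0]);       /* 0xf1 */
	wreg_p(0, 0, ~0xf0u);                  /* clear bits 7:4 */
	printf("reg = 0x%x\n", regs[0]);       /* 0x01 */
	return 0;
}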
-static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v3_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
@@ -1565,86 +1604,87 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
return 0;
}
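The signature change here is the theme of the whole series: helpers that took (adev, inst_idx) now take a single amdgpu_vcn_inst pointer and recover both values inside. A stripped-down sketch of that shape, with placeholder types standing in for the driver structs:

#include <stdio.h>

struct dev;                                  /* stands in for amdgpu_device */

struct vcn_inst {                            /* stands in for amdgpu_vcn_inst */
	struct dev *adev;                    /* back-pointer to the device */
	int inst;                            /* this instance's index */
};

struct dev {
	struct vcn_inst inst[4];
	int num_inst;
};

/* old shape: every helper took the device plus an index */
static void stop_old(struct dev *adev, int inst_idx)
{
	(void)adev;
	printf("stop old: inst %d\n", inst_idx);
}

/* new shape: one argument; device and index recovered inside */
static void stop_new(struct vcn_inst *vinst)
{
	struct dev *adev = vinst->adev;
	int inst_idx = vinst->inst;

	(void)adev;
	printf("stop new: inst %d\n", inst_idx);
}

int main(void)
{
	struct dev d = { .num_inst = 2 };

	for (int i = 0; i < d.num_inst; i++) {
		d.inst[i].adev = &d;
		d.inst[i].inst = i;
		stop_old(&d, i);
		stop_new(&d.inst[i]);
	}
	return 0;
}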
-static int vcn_v3_0_stop(struct amdgpu_device *adev)
+static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t tmp;
- int i, r = 0;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ int r = 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v3_0_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v3_0_stop_dpg_mode(vinst);
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
- /* clear status */
- WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
+ /* clear status */
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v3_0_enable_clock_gating(adev, i);
+ /* apply HW clock gating */
+ vcn_v3_0_enable_clock_gating(vinst);
- /* enable VCN power gating */
- vcn_v3_0_enable_static_power_gating(adev, i);
- }
+ /* enable VCN power gating */
+ vcn_v3_0_enable_static_power_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
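Note how the per-instance rewrite turns every early `return r` of the old loop body into `goto done`, so the DPM disable at the bottom runs for this instance on both the success and failure paths. A compact model of that single-exit shape, with the teardown steps reduced to stubs:

#include <stdio.h>

static int step(int which, int fail_at)      /* stub teardown step */
{
	return which == fail_at ? -1 : 0;
}

static int stop_one(int inst, int fail_at)
{
	int r;

	r = step(1, fail_at);                /* wait for idle */
	if (r)
		goto done;
	r = step(2, fail_at);                /* drain LMI */
	if (r)
		goto done;
	r = step(3, fail_at);                /* reset VCPU, gate clocks */
done:
	/* always runs: models amdgpu_dpm_enable_vcn(adev, false, i) */
	printf("inst %d: dpm off (r=%d)\n", inst, r);
	return r;
}

int main(void)
{
	stop_one(0, 0);                      /* clean path */
	stop_one(1, 2);                      /* failure mid-sequence */
	return 0;
}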
-static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
@@ -1928,11 +1968,11 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
uint32_t reg = amdgpu_ib_get_value(ib, i);
uint32_t val = amdgpu_ib_get_value(ib, i + 1);
- if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) {
msg_lo = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) {
msg_hi = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+ } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) &&
val == 0) {
r = vcn_v3_0_dec_msg(p, job,
((u64)msg_hi) << 32 | msg_lo);
@@ -2096,16 +2136,16 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[j].me = i;
}
}
}
-static bool vcn_v3_0_is_idle(void *handle)
+static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2144,46 +2184,47 @@ static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v3_0_enable_clock_gating(adev, i);
+ vcn_v3_0_enable_clock_gating(vinst);
} else {
- vcn_v3_0_disable_clock_gating(adev, i);
+ vcn_v3_0_disable_clock_gating(vinst);
}
}
return 0;
}
-static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v3_0_stop(adev);
+ ret = vcn_v3_0_stop(vinst);
else
- ret = vcn_v3_0_start(adev);
+ ret = vcn_v3_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -2248,7 +2289,7 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
}
}
@@ -2326,7 +2367,7 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.is_idle = vcn_v3_0_is_idle,
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
- .set_powergating_state = vcn_v3_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v3_0_dump_ip_state,
.print_ip_state = vcn_v3_0_print_ip_state,
};
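The ip_funcs table now points at a shared vcn_set_powergating_state() while each instance carries its own set_pg_state callback. That common wrapper is not part of this hunk; the sketch below shows one plausible shape for the fan-out, with simplified types, and is not the driver's actual implementation.

#include <stdio.h>

enum pg_state { PG_GATE, PG_UNGATE };

struct vcn_inst {
	enum pg_state cur_state;
	int (*set_pg_state)(struct vcn_inst *vinst, enum pg_state state);
};

/* version-specific callback, installed during early init */
static int v3_set_pg_state(struct vcn_inst *vinst, enum pg_state state)
{
	vinst->cur_state = state;
	return 0;
}

/* shared entry point fanning out to whatever each instance registered */
static int set_powergating_state(struct vcn_inst *inst, int n,
				 enum pg_state state)
{
	for (int i = 0; i < n; i++) {
		int r = inst[i].set_pg_state(&inst[i], state);

		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct vcn_inst inst[2] = {
		{ PG_UNGATE, v3_set_pg_state },
		{ PG_UNGATE, v3_set_pg_state },
	};

	set_powergating_state(inst, 2, PG_GATE);
	printf("inst0 gated: %d\n", inst[0].cur_state == PG_GATE);
	return 0;
}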
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 00551d6f0370..c6f6392c1c20 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -96,10 +96,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
@@ -114,7 +114,7 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int i;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
@@ -126,14 +126,23 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
}
}
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_set_unified_ring_funcs(adev);
vcn_v4_0_set_irq_funcs(adev);
vcn_v4_0_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
@@ -176,20 +185,20 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
uint32_t *ptr;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
if (i == 0)
atomic_set(&adev->vcn.inst[i].sched_score, 1);
@@ -211,7 +220,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i *
+ (adev->vcn.inst[i].num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -223,6 +233,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
vcn_v4_0_fw_shared_init(adev, i);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
}
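The two doorbell formulas in this hunk (SR-IOV vs. bare metal) are easier to check with numbers plugged in. The base index and instance count below are made up for illustration; only the arithmetic matches the hunk.

#include <stdio.h>

int main(void)
{
	/* hypothetical adev->doorbell_index.vcn.vcn_ring0_1 */
	unsigned int base = 124;
	unsigned int num_enc_rings = 1;      /* unified ring */

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int sriov = (base << 1) + i * (num_enc_rings + 1) + 1;
		unsigned int bare  = (base << 1) + 2 + 8 * i;

		printf("inst %u: sriov doorbell %u, bare-metal doorbell %u\n",
		       i, sriov, bare);
	}
	return 0;
}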
/* TODO: Add queue reset mask when FW fully supports it */
@@ -235,8 +248,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
r = amdgpu_vcn_ras_sw_init(adev);
if (r)
@@ -288,16 +299,23 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -359,20 +377,23 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
- amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
}
return 0;
@@ -387,15 +408,20 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -407,11 +433,14 @@ static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_hw_init(ip_block);
@@ -421,13 +450,14 @@ static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -481,14 +511,16 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
@@ -588,13 +620,14 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/**
* vcn_v4_0_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -653,13 +686,14 @@ static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v4_0_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -708,13 +742,14 @@ static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v4_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -819,16 +854,18 @@ static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -876,13 +913,14 @@ static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, u
/**
* vcn_v4_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -932,9 +970,11 @@ static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
+static void vcn_v4_0_enable_ras(struct amdgpu_vcn_inst *vinst,
bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
@@ -957,14 +997,15 @@ static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
/**
* vcn_v4_0_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -982,7 +1023,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v4_0_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1030,7 +1071,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -1042,7 +1083,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
- vcn_v4_0_enable_ras(adev, inst_idx, indirect);
+ vcn_v4_0_enable_ras(vinst, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -1086,183 +1127,179 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
/**
* vcn_v4_0_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_start(struct amdgpu_device *adev)
+static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v4_0_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v4_0_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* SW clock gating */
+ vcn_v4_0_disable_clock_gating(vinst);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
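The tail of vcn_v4_0_start() brackets the ring programming with FW_QUEUE_RING_RESET in the firmware-shared page: program base/size, take the ring offline, flag the reset, zero both pointers, re-enable the ring, then clear the flag (together with DPG_HOLD_OFF) so firmware treats the pointers as coherent again. A minimal model follows; the bit values and struct layout are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define FW_QUEUE_RING_RESET    (1u << 0)     /* illustrative bit values */
#define FW_QUEUE_DPG_HOLD_OFF  (1u << 1)

struct fw_shared { uint32_t queue_mode; };   /* models the shared sq struct */
struct ring { uint64_t base; uint32_t size, rptr, wptr; int enabled; };

static void program_ring(struct fw_shared *fws, struct ring *r,
			 uint64_t base, uint32_t size)
{
	r->base = base;                          /* base/size first */
	r->size = size;
	r->enabled = 0;                          /* take ring offline */
	fws->queue_mode |= FW_QUEUE_RING_RESET;  /* tell FW: in reset */
	r->rptr = 0;
	r->wptr = 0;
	r->enabled = 1;                          /* back online */
	fws->queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
}

int main(void)
{
	struct fw_shared fws = { FW_QUEUE_DPG_HOLD_OFF };
	struct ring r;

	program_ring(&fws, &r, 0x100000, 4096);
	printf("queue_mode=%u rptr=%u wptr=%u enabled=%d\n",
	       fws.queue_mode, r.rptr, r.wptr, r.enabled);
	return 0;
}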
@@ -1518,17 +1555,18 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
/**
* vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v4_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1548,87 +1586,87 @@ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ int r = 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_stop_dpg_mode(adev, i);
- continue;
- }
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v4_0_enable_clock_gating(adev, i);
+ /* apply HW clock gating */
+ vcn_v4_0_enable_clock_gating(vinst);
- /* enable VCN power gating */
- vcn_v4_0_enable_static_power_gating(adev, i);
- }
+ /* enable VCN power gating */
+ vcn_v4_0_enable_static_power_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
return 0;
}
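The drain sequence leans on SOC15_WAIT_ON_RREG: poll a status register until the masked bits match an expected value, or give up after a bounded wait. A userspace model of that helper, with an invented retry count and a fake register whose "clean" bits appear after a few reads:

#include <stdio.h>
#include <stdint.h>

static uint32_t lmi_status;                  /* fake UVD_LMI_STATUS */
static int ticks;

static uint32_t rreg(void)
{
	if (++ticks >= 5)                    /* clean bits appear later */
		lmi_status |= 0xf;
	return lmi_status;
}

/* poll until (reg & mask) == expect, or time out */
static int wait_on_reg(uint32_t expect, uint32_t mask)
{
	for (int i = 0; i < 1000; i++) {
		if ((rreg() & mask) == expect)
			return 0;
		/* udelay() in the driver; elided here */
	}
	return -110;                         /* -ETIMEDOUT */
}

int main(void)
{
	uint32_t clean = 0xf;                /* stand-in for the *_CLEAN masks */

	printf("drain: %d\n", wait_on_reg(clean, clean));
	return 0;
}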
@@ -1636,15 +1674,16 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
/**
* vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1964,13 +2003,13 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_is_idle(void *handle)
+static bool vcn_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2024,54 +2063,48 @@ static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_enable_clock_gating(adev, i);
+ vcn_v4_0_enable_clock_gating(vinst);
} else {
- vcn_v4_0_disable_clock_gating(adev, i);
+ vcn_v4_0_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_stop(adev);
+ ret = vcn_v4_0_stop(vinst);
else
- ret = vcn_v4_0_start(adev);
+ ret = vcn_v4_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
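vcn_v4_0_set_pg_state() is a small idempotent state machine: SR-IOV guests never gate (MMSCH firmware owns PG/CG), a request matching cur_state is a no-op, and cur_state only advances when the start/stop actually succeeded. The same control flow in isolation, with stubbed start/stop:

#include <stdio.h>

enum pg_state { PG_GATE, PG_UNGATE };

struct vcn_inst { enum pg_state cur_state; int sriov; };

static int vcn_stop(struct vcn_inst *v)  { (void)v; return 0; }
static int vcn_start(struct vcn_inst *v) { (void)v; return 0; }

static int set_pg_state(struct vcn_inst *vinst, enum pg_state state)
{
	int ret;

	if (vinst->sriov) {
		vinst->cur_state = PG_UNGATE;    /* MMSCH FW owns gating */
		return 0;
	}
	if (state == vinst->cur_state)           /* no-op request */
		return 0;

	ret = (state == PG_GATE) ? vcn_stop(vinst) : vcn_start(vinst);
	if (!ret)                                /* advance only on success */
		vinst->cur_state = state;
	return ret;
}

int main(void)
{
	struct vcn_inst v = { PG_UNGATE, 0 };

	printf("gate: %d, now gated: %d\n",
	       set_pg_state(&v, PG_GATE), v.cur_state == PG_GATE);
	return 0;
}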
@@ -2163,10 +2196,10 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
- adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
}
}
@@ -2244,7 +2277,7 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.is_idle = vcn_v4_0_is_idle,
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v4_0_dump_ip_state,
.print_ip_state = vcn_v4_0_print_ip_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 75211366f8f6..7446ecc55714 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -31,6 +31,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
#include "mmsch_v4_0_3.h"
#include "vcn/vcn_4_0_3_offset.h"
@@ -87,10 +88,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
@@ -111,15 +112,25 @@ static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_3_set_unified_ring_funcs(adev);
vcn_v4_0_3_set_irq_funcs(adev);
vcn_v4_0_3_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_3_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
@@ -151,16 +162,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
uint32_t *ptr;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
@@ -168,6 +169,17 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -191,6 +203,9 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
vcn_v4_0_3_fw_shared_init(adev, i);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
}
/* TODO: Add queue reset mask when FW fully supports it */
@@ -203,9 +218,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
-
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
r = amdgpu_vcn_ras_sw_init(adev);
if (r) {
@@ -256,16 +268,23 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -349,11 +368,16 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
- if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
- vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ cancel_delayed_work_sync(&vinst->idle_work);
+
+ if (vinst->cur_state != AMD_PG_STATE_GATE)
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
+ }
return 0;
}
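The hw_fini rework moves cancel_delayed_work_sync() inside the per-instance loop, so each instance's idle worker is stopped before its gate is forced; otherwise the worker could race teardown and flip the power state back. A sketch of that ordering, with the delayed work reduced to a flag:

#include <stdio.h>

enum pg_state { PG_GATE, PG_UNGATE };

struct vcn_inst {
	enum pg_state cur_state;
	int idle_work_pending;               /* models the delayed work item */
	void (*set_pg_state)(struct vcn_inst *v, enum pg_state s);
};

static void gate(struct vcn_inst *v, enum pg_state s) { v->cur_state = s; }

static void hw_fini_one(struct vcn_inst *v)
{
	v->idle_work_pending = 0;            /* cancel_delayed_work_sync() */

	if (v->cur_state != PG_GATE)         /* then force the gate */
		v->set_pg_state(v, PG_GATE);
}

int main(void)
{
	struct vcn_inst v = { PG_UNGATE, 1, gate };

	hw_fini_one(&v);
	printf("pending=%d gated=%d\n", v.idle_work_pending,
	       v.cur_state == PG_GATE);
	return 0;
}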
@@ -367,15 +391,20 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -387,11 +416,14 @@ static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_3_hw_init(ip_block);
@@ -401,13 +433,14 @@ static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_3_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
@@ -475,14 +508,16 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -589,13 +624,14 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t data;
int vcn_inst;
@@ -682,16 +718,18 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -733,13 +771,14 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t data;
int vcn_inst;
@@ -784,14 +823,16 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_
/**
* vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
@@ -819,7 +860,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
}
/* enable clock gating */
- vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_3_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -869,7 +910,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_3_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -1116,186 +1157,185 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
/**
* vcn_v4_0_3_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_3_start(struct amdgpu_device *adev)
+static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
- int i, j, k, r, vcn_inst;
+ int j, k, r, vcn_inst;
uint32_t tmp;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- vcn_inst = GET_INST(VCN, i);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
- UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_3_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK,
- ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
- tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_3_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ vcn_inst = GET_INST(VCN, i);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
+ UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
- for (j = 0; j < 10; ++j) {
- uint32_t status;
+ /* SW clock gating */
+ vcn_v4_0_3_disable_clock_gating(vinst);
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, vcn_inst,
- regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK,
+ ~UVD_VCPU_CNTL__CLK_EN_MASK);
- DRM_DEV_ERROR(adev->dev,
- "VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
- regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
- regUVD_VCPU_CNTL),
- 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
+ tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_3_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst,
+ regUVD_STATUS);
+ if (status & 2)
+ break;
mdelay(10);
- r = -1;
}
+ r = 0;
+ if (status & 2)
+ break;
- if (r) {
- DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
- return r;
- }
+ DRM_DEV_ERROR(adev->dev,
+ "VCN decode not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ mdelay(10);
+ r = -1;
+ }
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
+ return r;
+ }
- ring = &adev->vcn.inst[i].ring_enc[0];
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- /* program the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
- upper_32_bits(ring->gpu_addr));
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
+ upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
- ring->ring_size / sizeof(uint32_t));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
+ ring->ring_size / sizeof(uint32_t));
- /* resetting ring, fw should not check RB ring */
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* resetting ring, fw should not check RB ring */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB_EN_MASK;
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
- fw_shared->sq.queue_mode &=
- cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ fw_shared->sq.queue_mode &=
+ cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
- }
return 0;
}
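The boot-wait above is a two-level retry: up to 100 polls of regUVD_STATUS per attempt, and up to 10 VCPU reset-and-retry attempts before giving up. Below is a minimal userspace model of that shape, with read_status() standing in for RREG32_SOC15(); all names are illustrative, not amdgpu API.

/*
 * Minimal model of the boot-wait: up to 10 VCPU reset-and-retry
 * attempts, each polling the status register up to 100 times.
 */
#include <stdint.h>
#include <stdio.h>

#define UVD_STATUS_BOOTED 0x2	/* models the "status & 2" check */

static uint32_t read_status(void)
{
	static int polls;

	/* pretend the VCPU reports ready after a few polls */
	return (++polls > 3) ? UVD_STATUS_BOOTED : 0;
}

static int wait_for_vcpu_boot(void)
{
	uint32_t status = 0;
	int j, k, r = -1;

	for (j = 0; j < 10; ++j) {
		for (k = 0; k < 100; ++k) {
			status = read_status();
			if (status & UVD_STATUS_BOOTED)
				break;
			/* the real loop does mdelay(10) here */
		}
		if (status & UVD_STATUS_BOOTED) {
			r = 0;
			break;
		}
		/* the real code pulses UVD_VCPU_CNTL__BLK_RST here, then retries */
	}
	return r;
}

int main(void)
{
	printf("boot %s\n", wait_for_vcpu_boot() ? "failed" : "ok");
	return 0;
}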
/**
* vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
@@ -1321,82 +1361,83 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_3_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- int i, r = 0, vcn_inst;
+ int r = 0, vcn_inst;
uint32_t tmp;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
+ vcn_inst = GET_INST(VCN, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_3_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_3_stop_dpg_mode(vinst);
+ goto Done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
- UVD_STATUS__IDLE, 0x7);
- if (r)
- goto Done;
-
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
- tmp);
- if (r)
- goto Done;
-
- /* stall UMC channel */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
- tmp);
- if (r)
- goto Done;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
+ UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto Done;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
+
+ /* stall UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
- /* Unblock VCPU Register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* Unblock VCPU Register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* reset LMI UMC/LMI/VCPU */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ /* reset LMI UMC/LMI/VCPU */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- /* clear VCN status */
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ /* clear VCN status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+
+ /* apply HW clock gating */
+ vcn_v4_0_3_enable_clock_gating(vinst);
- /* apply HW clock gating */
- vcn_v4_0_3_enable_clock_gating(adev, i);
- }
Done:
return 0;
}
@@ -1404,14 +1445,13 @@ Done:
/**
* vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
return 0;
@@ -1455,8 +1495,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
regUVD_RB_WPTR);
}
-static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1468,7 +1508,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1479,8 +1520,8 @@ static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vmid, uint64_t pd_addr)
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
@@ -1492,7 +1533,7 @@ static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
lower_32_bits(pd_addr), 0xffffffff);
}
-static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through VCN ring.
@@ -1575,13 +1616,13 @@ static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_3_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_3_is_idle(void *handle)
+static bool vcn_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1629,51 +1670,45 @@ static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (enable) {
if (RREG32_SOC15(VCN, GET_INST(VCN, i),
regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_3_enable_clock_gating(adev, i);
+ vcn_v4_0_3_enable_clock_gating(vinst);
} else {
- vcn_v4_0_3_disable_clock_gating(adev, i);
+ vcn_v4_0_3_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_3_stop(adev);
+ ret = vcn_v4_0_3_stop(vinst);
else
- ret = vcn_v4_0_3_start(adev);
+ ret = vcn_v4_0_3_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
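With set_pg_state now a per-instance callback, the common vcn_set_powergating_state entry point (wired into the IP funcs table below) can fan a gating request out instance by instance. That common helper is not part of this hunk; the following is a self-contained sketch of the dispatch shape, with simplified stand-in types rather than the real amdgpu structures.

enum pg_state { PG_STATE_UNGATE, PG_STATE_GATE };

struct vcn_inst {
	int inst;
	enum pg_state cur_state;
	int (*set_pg_state)(struct vcn_inst *vinst, enum pg_state state);
};

struct vcn_dev {
	int num_inst;
	struct vcn_inst inst[4];
};

/* per-version callback: touches only its own instance */
static int v4_0_3_set_pg_state(struct vcn_inst *vinst, enum pg_state state)
{
	if (state == vinst->cur_state)
		return 0;
	/* a real version would start or stop just this instance here */
	vinst->cur_state = state;
	return 0;
}

/* common IP-level entry point: fans the request out to every instance */
static int vcn_set_pg_all(struct vcn_dev *vcn, enum pg_state state)
{
	int i, r;

	for (i = 0; i < vcn->num_inst; ++i) {
		r = vcn->inst[i].set_pg_state(&vcn->inst[i], state);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct vcn_dev vcn = { .num_inst = 2 };
	int i;

	for (i = 0; i < vcn.num_inst; ++i) {
		vcn.inst[i].inst = i;
		vcn.inst[i].cur_state = PG_STATE_UNGATE;
		vcn.inst[i].set_pg_state = v4_0_3_set_pg_state;
	}
	return vcn_set_pg_all(&vcn, PG_STATE_GATE);
}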
@@ -1835,7 +1870,7 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.is_idle = vcn_v4_0_3_is_idle,
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_3_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v4_0_3_dump_ip_state,
.print_ip_state = vcn_v4_0_3_print_ip_state,
};
@@ -1930,8 +1965,8 @@ static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank
1ULL);
break;
case ACA_SMU_TYPE_CE:
- bank->aca_err_type = ACA_ERROR_TYPE_CE;
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 0b046114373a..03572a1d0c9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -26,4 +26,13 @@
extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val);
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr);
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
+
#endif /* __VCN_V4_0_3_H__ */
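Dropping static from the emit helpers and declaring them in vcn_v4_0_3.h lets a sibling VCN version point its ring-funcs table at the same implementations. Below is a compilable single-file sketch of that linkage pattern; the file roles in the comments are illustrative only, not the actual driver layout.

#include <stdio.h>

/* role of vcn_v4_0_3.h: the shared declaration */
void emit_wreg(unsigned int reg, unsigned int val);

/* role of vcn_v4_0_3.c: the one definition, no longer static */
void emit_wreg(unsigned int reg, unsigned int val)
{
	printf("wreg %#x <- %#x\n", reg, val);
}

/* role of a sibling VCN version: reuse via its ring-funcs table */
struct ring_funcs {
	void (*emit_wreg)(unsigned int reg, unsigned int val);
};

static const struct ring_funcs funcs = { .emit_wreg = emit_wreg };

int main(void)
{
	funcs.emit_wreg(0x10, 0xff);
	return 0;
}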
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 5d757e7de9db..ba603b2246e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -95,10 +95,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -112,13 +112,26 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
+
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
+ adev->vcn.per_inst_fw = true;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_5_set_unified_ring_funcs(adev);
vcn_v4_0_5_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_5_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
/**
@@ -136,15 +149,6 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
uint32_t *ptr;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -152,6 +156,16 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
@@ -170,7 +184,7 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- i * (adev->vcn.num_enc_rings + 1) + 1;
+ i * (adev->vcn.inst[i].num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
2 + 8 * i;
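The SR-IOV branch above now strides doorbells by the per-instance ring count. With an assumed base slot of 4 and one unified enc ring per instance, the arithmetic works out as in this worked example.

#include <stdio.h>

int main(void)
{
	int vcn_ring0_1 = 4;	/* assumed base doorbell slot */
	int num_enc_rings = 1;	/* unified ring per instance */
	int i;

	for (i = 0; i < 2; i++) {
		int db = (vcn_ring0_1 << 1) + i * (num_enc_rings + 1) + 1;
		printf("inst %d -> doorbell %d\n", i, db);
	}
	/* prints 9 and 11: a two-slot stride per instance */
	return 0;
}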
@@ -195,6 +209,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
}
if (amdgpu_sriov_vf(adev)) {
@@ -203,9 +220,6 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
-
/* Allocate memory for VCN IP Dump buffer */
ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
if (!ptr) {
@@ -247,15 +261,19 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -300,16 +318,19 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -326,13 +347,18 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -346,11 +372,14 @@ static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_5_hw_init(ip_block);
@@ -360,13 +389,14 @@ static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_5_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -420,14 +450,16 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_5_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -534,13 +566,14 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v4_0_5_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -593,13 +626,14 @@ static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, i
/**
* vcn_v4_0_5_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -635,13 +669,14 @@ static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, in
/**
* vcn_v4_0_5_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -746,16 +781,18 @@ static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v4_0_5_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -803,13 +840,14 @@ static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v4_0_5_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -862,14 +900,16 @@ static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_5_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -888,7 +928,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v4_0_5_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_5_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -936,7 +976,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_5_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -989,184 +1029,180 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/**
* vcn_v4_0_5_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_5_start(struct amdgpu_device *adev)
+static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v4_0_5_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_5_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_5_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v4_0_5_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* SW clock gating */
+ vcn_v4_0_5_disable_clock_gating(vinst);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_5_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
@@ -1174,13 +1210,14 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
/**
* vcn_v4_0_5_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
/* Wait for power status to be 1 */
@@ -1202,103 +1239,104 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_5_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
+ int r = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_5_stop_dpg_mode(adev, i);
- continue;
- }
-
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_5_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v4_0_5_enable_clock_gating(adev, i);
+ /* apply HW clock gating */
+ vcn_v4_0_5_enable_clock_gating(vinst);
- /* enable VCN power gating */
- vcn_v4_0_5_enable_static_power_gating(adev, i);
- }
+ /* enable VCN power gating */
+ vcn_v4_0_5_enable_static_power_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
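The new done: label gives vcn_v4_0_5_stop() a single exit, so amdgpu_dpm_enable_vcn(adev, false, i) runs whether the stop sequence succeeds, bails early on a wait timeout, or takes the DPG path. A generic sketch of that goto-cleanup idiom follows; it is the shape of the control flow, not the driver code itself.

#include <stdio.h>

static int wait_idle(void)	{ return 0; }	/* stand-in for SOC15_WAIT_ON_RREG */
static void disable_power(void)	{ printf("dpm off\n"); }

static int stop_block(void)
{
	int r;

	r = wait_idle();
	if (r)
		goto done;

	/* register teardown happens only on the success path */

done:
	/* runs on every path, like amdgpu_dpm_enable_vcn(adev, false, i) */
	disable_power();
	return r;
}

int main(void)
{
	return stop_block();
}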
/**
* vcn_v4_0_5_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1452,13 +1490,13 @@ static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_5_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_5_is_idle(void *handle)
+static bool vcn_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1512,45 +1550,38 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_5_enable_clock_gating(adev, i);
+ vcn_v4_0_5_enable_clock_gating(vinst);
} else {
- vcn_v4_0_5_disable_clock_gating(adev, i);
+ vcn_v4_0_5_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_5_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_5_stop(adev);
+ ret = vcn_v4_0_5_stop(vinst);
else
- ret = vcn_v4_0_5_start(adev);
+ ret = vcn_v4_0_5_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1618,7 +1649,7 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_5_irq_funcs;
}
}
@@ -1696,7 +1727,7 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.is_idle = vcn_v4_0_5_is_idle,
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_5_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v4_0_5_dump_ip_state,
.print_ip_state = vcn_v4_0_5_print_ip_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index b6d78381ebfb..d99d05f42f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -78,10 +78,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -95,14 +95,24 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v5_0_0_set_unified_ring_funcs(adev);
vcn_v5_0_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v5_0_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
@@ -133,22 +143,22 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
@@ -181,15 +191,15 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
/* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
-
vcn_v5_0_0_alloc_ip_dump(adev);
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
@@ -226,16 +236,23 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -280,16 +297,19 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -306,13 +326,18 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -326,11 +351,14 @@ static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v5_0_0_hw_init(ip_block);
@@ -340,13 +368,14 @@ static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -400,14 +429,16 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -510,13 +541,14 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -577,13 +609,14 @@ static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, i
/**
* vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -623,12 +656,11 @@ static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, in
/**
* vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
return;
}
@@ -637,15 +669,15 @@ static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
return;
}
@@ -654,12 +686,11 @@ static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
return;
}
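
The signature changes in this run of hunks all follow one idiom: instead of passing (adev, inst) pairs, each helper takes a single struct amdgpu_vcn_inst * that carries a back-pointer to the device plus its own index. A stand-alone sketch of that container pattern (types simplified, not the kernel structures):

#include <stdio.h>

struct device {
	const char *name;
};

struct vcn_inst {
	struct device *adev;	/* back-pointer to the owning device */
	int inst;		/* this instance's index */
};

/* the callee recovers both values from one argument, as the new helpers do */
static void mc_resume(struct vcn_inst *vinst)
{
	printf("%s: program MC for instance %d\n",
	       vinst->adev->name, vinst->inst);
}

int main(void)
{
	struct device dev = { .name = "gpu0" };
	struct vcn_inst v = { .adev = &dev, .inst = 1 };

	mc_resume(&v);
	return 0;
}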
@@ -667,14 +698,16 @@ static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -714,7 +747,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
- vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v5_0_0_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -766,155 +799,151 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/**
* vcn_v5_0_0_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v5_0_0_start(struct amdgpu_device *adev)
+static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v5_0_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v5_0_0_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- vcn_v5_0_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v5_0_0_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_0_mc_resume(vinst);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
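
The start path keeps the original boot handshake even after the de-looping: up to 10 reset attempts, each polling the status register up to 100 times for the VCPU-ready bit (bit 1) before pulsing BLK_RST and retrying. A stand-alone model of that nested retry loop (the status source is faked so the sketch runs anywhere):

#include <stdio.h>

/* stand-in for RREG32_SOC15(VCN, i, regUVD_STATUS): ready on 2nd attempt */
static unsigned int read_status(int attempt, int poll)
{
	return (attempt >= 1 && poll >= 50) ? 2u : 0u;
}

int main(void)
{
	unsigned int status = 0;
	int j, k, r = -1;

	for (j = 0; j < 10; ++j) {
		for (k = 0; k < 100; ++k) {
			status = read_status(j, k);
			if (status & 2)		/* VCPU reported ready */
				break;
			/* mdelay(10) in the real driver */
		}
		if (status & 2) {
			r = 0;
			break;
		}
		/* the driver pulses UVD_VCPU_CNTL__BLK_RST here, then retries */
	}
	printf("boot %s (attempt %d)\n", r ? "failed" : "ok", j);
	return r ? 1 : 0;
}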
@@ -922,17 +951,18 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
/**
* vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v5_0_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
@@ -952,100 +982,101 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v5_0_0_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
+static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
+ int r = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v5_0_0_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_0_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
-
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
-
- /* enable VCN power gating */
- vcn_v5_0_0_enable_static_power_gating(adev, i);
- }
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- return 0;
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+
+ /* enable VCN power gating */
+ vcn_v5_0_0_enable_static_power_gating(vinst);
+
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+
+ return r;
}
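
The rewritten stop path replaces early returns with goto done so that amdgpu_dpm_enable_vcn(adev, false, i) runs on every exit, including the DPG shortcut and the wait-for-idle failures. A compact sketch of that single-exit idiom:

#include <stdio.h>

static int wait_idle(void) { return 0; }	/* stand-in for the register wait */

static int stop_instance(int dpg_mode)
{
	int r = 0;

	if (dpg_mode)
		goto done;	/* DPG stop skips the full teardown */

	r = wait_idle();
	if (r)
		goto done;	/* errors still reach the cleanup below */

	/* ... register teardown elided ... */

done:
	printf("dpm disable runs on every path, r=%d\n", r);
	return r;
}

int main(void)
{
	return stop_instance(1);
}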
/**
* vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1192,13 +1223,13 @@ static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v5_0_0_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v5_0_0_is_idle(void *handle)
+static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1252,45 +1283,38 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v5_0_0_enable_clock_gating(adev, i);
+ vcn_v5_0_0_enable_clock_gating(vinst);
} else {
- vcn_v5_0_0_disable_clock_gating(adev, i);
+ vcn_v5_0_0_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v5_0_0_stop(adev);
+ ret = vcn_v5_0_0_stop(vinst);
else
- ret = vcn_v5_0_0_start(adev);
+ ret = vcn_v5_0_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
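
set_pg_state is now a per-instance callback with simple transition rules: do nothing if the requested state already matches, stop on gate, start on ungate, and record the new state only when the transition succeeded. A minimal model of that logic (enum values and helpers invented):

#include <stdio.h>

enum pg_state { PG_STATE_GATE, PG_STATE_UNGATE };

struct inst {
	enum pg_state cur_state;
};

static int inst_stop(struct inst *v)  { (void)v; return 0; }
static int inst_start(struct inst *v) { (void)v; return 0; }

static int set_pg_state(struct inst *v, enum pg_state state)
{
	int ret;

	if (state == v->cur_state)
		return 0;		/* already there, nothing to do */

	ret = (state == PG_STATE_GATE) ? inst_stop(v) : inst_start(v);
	if (!ret)
		v->cur_state = state;	/* commit only on success */
	return ret;
}

int main(void)
{
	struct inst v = { .cur_state = PG_STATE_GATE };

	printf("ungate -> %d\n", set_pg_state(&v, PG_STATE_UNGATE));
	return 0;
}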
@@ -1358,7 +1382,7 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
}
}
@@ -1436,7 +1460,7 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.is_idle = vcn_v5_0_0_is_idle,
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
- .set_powergating_state = vcn_v5_0_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v5_0_0_dump_ip_state,
.print_ip_state = vcn_v5_0_0_print_ip_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 288a77179036..581d8629b9d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -40,8 +41,8 @@
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
+static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -55,14 +56,40 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v5_0_1_set_unified_ring_funcs(adev);
vcn_v5_0_1_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+
+ if (fw_shared->sq.is_enabled)
+ return;
+ fw_shared->present_flag_0 =
+ cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
}
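
vcn_v5_0_1_fw_shared_init guards on sq.is_enabled so it can be called from sw_init and again from hw_init after a reset without redoing work. A tiny model of that idempotent-init guard (field layout simplified):

#include <stdio.h>

struct fw_shared {
	unsigned int present_flag_0;
	int is_enabled;
};

static void fw_shared_init(struct fw_shared *fw)
{
	if (fw->is_enabled)
		return;			/* already set up; safe to call again */
	fw->present_flag_0 = 1u << 0;	/* stand-in for the unified-queue flag */
	fw->is_enabled = 1;
}

int main(void)
{
	struct fw_shared fw = { 0 };

	fw_shared_init(&fw);
	fw_shared_init(&fw);	/* second call is a no-op, as after reset */
	printf("enabled=%d flags=%#x\n", fw.is_enabled, fw.present_flag_0);
	return 0;
}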
/**
@@ -78,16 +105,6 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
@@ -95,10 +112,18 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
-
vcn_inst = GET_INST(VCN, i);
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
@@ -111,12 +136,7 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = true;
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v5_0_1_fw_shared_init(adev, i);
}
/* TODO: Add queue reset mask when FW fully supports it */
@@ -142,7 +162,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -152,17 +172,23 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
- r = amdgpu_vcn_sw_fini(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -178,6 +204,8 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -188,6 +216,9 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
9 * vcn_inst),
adev->vcn.inst[i].aid_id);
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -206,8 +237,15 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
+ if (vinst->cur_state != AMD_PG_STATE_GATE)
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
+ }
return 0;
}
@@ -222,13 +260,17 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int r;
+ int r, i;
r = vcn_v5_0_1_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -243,11 +285,18 @@ static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int r;
+ int r, i;
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ if (amdgpu_in_reset(adev))
+ vinst->cur_state = AMD_PG_STATE_GATE;
+
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v5_0_1_hw_init(ip_block);
@@ -257,13 +306,14 @@ static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_1_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
@@ -313,20 +363,22 @@ static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}
/**
* vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -421,7 +473,7 @@ static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
@@ -431,39 +483,39 @@ static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}
/**
* vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}
/**
* vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
int vcn_inst;
@@ -510,7 +562,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
- vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -564,150 +616,148 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/**
* vcn_v5_0_1_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v5_0_1_start(struct amdgpu_device *adev)
+static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r, vcn_inst;
+ int j, k, r, vcn_inst;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- vcn_inst = GET_INST(VCN, i);
+ vcn_inst = GET_INST(VCN, i);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- vcn_v5_0_1_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(100);
- if (amdgpu_emu_mode == 1)
- msleep(20);
- }
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_1_mc_resume(vinst);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(100);
+ if (amdgpu_emu_mode == 1)
+ msleep(20);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
+ ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
@@ -715,13 +765,14 @@ static int vcn_v5_0_1_start(struct amdgpu_device *adev)
/**
* vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
@@ -743,75 +794,75 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v5_0_1_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
+static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0, vcn_inst;
+ int r = 0, vcn_inst;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
+ vcn_inst = GET_INST(VCN, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v5_0_1_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_1_stop_dpg_mode(vinst);
+ return 0;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
-
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* clear status */
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
- }
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
return 0;
}
@@ -883,16 +934,17 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
- 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
- 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
- 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 +
+ 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
.emit_ib = vcn_v2_0_enc_ring_emit_ib,
.emit_fence = vcn_v2_0_enc_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
+ .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_vcn_enc_ring_test_ring,
.test_ib = amdgpu_vcn_unified_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -900,8 +952,8 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
@@ -927,13 +979,13 @@ static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v5_0_1_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v5_0_1_is_idle(void *handle)
+static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
@@ -980,42 +1032,35 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (enable) {
if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v5_0_1_enable_clock_gating(adev, i);
+ vcn_v5_0_1_enable_clock_gating(vinst);
} else {
- vcn_v5_0_1_disable_clock_gating(adev, i);
+ vcn_v5_0_1_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v5_0_1_stop(adev);
+ ret = vcn_v5_0_1_stop(vinst);
else
- ret = vcn_v5_0_1_start(adev);
+ ret = vcn_v5_0_1_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1098,7 +1143,7 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
- .set_powergating_state = vcn_v5_0_1_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v5_0_0_dump_ip_state,
.print_ip_state = vcn_v5_0_0_print_ip_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
index 82ac709f44bf..8fd90bd10807 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -24,6 +24,9 @@
#ifndef __VCN_v5_0_1_H__
#define __VCN_v5_0_1_H__
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
+
extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
#endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 98fc6941159e..eb16916c6473 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -555,7 +555,7 @@ static int vega10_ih_resume(struct amdgpu_ip_block *ip_block)
return vega10_ih_hw_init(ip_block);
}
-static bool vega10_ih_is_idle(void *handle)
+static bool vega10_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index e9e3b2ed4b7b..faa0dd75dd6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -651,7 +651,7 @@ static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)
return vega20_ih_hw_init(ip_block);
}
-static bool vega20_ih_is_idle(void *handle)
+static bool vega20_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 12fe571787f4..3bbbb75242d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1736,7 +1736,7 @@ static int vi_common_resume(struct amdgpu_ip_block *ip_block)
return vi_common_hw_init(ip_block);
}
-static bool vi_common_is_idle(void *handle)
+static bool vi_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 0d3d8972240d..0ce08113c9f0 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -27,7 +27,6 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_device.o \
$(AMDKFD_PATH)/kfd_chardev.o \
$(AMDKFD_PATH)/kfd_topology.o \
- $(AMDKFD_PATH)/kfd_pasid.o \
$(AMDKFD_PATH)/kfd_doorbell.o \
$(AMDKFD_PATH)/kfd_flat_memory.o \
$(AMDKFD_PATH)/kfd_process.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 433de9e9a77e..1e9dd00620bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -212,6 +212,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EINVAL;
}
+ if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
+ args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
+ pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
+ }
+
if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access read pointer\n");
@@ -461,6 +466,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return -EINVAL;
}
+ if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
+ args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
+ pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
+ }
+
properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size;
properties.queue_percent = args->queue_percentage & 0xFF;
@@ -596,7 +606,8 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
default_policy,
alternate_policy,
(void __user *)args->alternate_aperture_base,
- args->alternate_aperture_size))
+ args->alternate_aperture_size,
+ args->misc_process_flag))
err = -EINVAL;
out:
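
Both chardev hunks apply the same fix: user-supplied ring sizes below the minimum are silently raised to KFD_MIN_QUEUE_RING_SIZE instead of being rejected. A one-function sketch of the clamp (the constant's value here is invented for illustration):

#include <stdio.h>

#define KFD_MIN_QUEUE_RING_SIZE 1024u	/* illustrative value only */

static unsigned int clamp_ring_size(unsigned int size)
{
	return (size < KFD_MIN_QUEUE_RING_SIZE) ? KFD_MIN_QUEUE_RING_SIZE : size;
}

int main(void)
{
	printf("%u -> %u\n", 64u, clamp_ring_size(64u));
	return 0;
}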
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 70b3ae0b74fe..4a7180b46b71 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -2133,9 +2133,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3);
int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT :
KFD_CRAT_INTRA_SOCKET_WEIGHT;
- uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
- kdev->adev, NULL, true) : mem_bw;
-
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
@@ -2144,8 +2141,16 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->weight_xgmi = weight;
- sub_type_hdr->minimum_bandwidth_mbs = bandwidth;
- sub_type_hdr->maximum_bandwidth_mbs = bandwidth;
+ if (ext_cpu) {
+ amdgpu_xgmi_get_bandwidth(kdev->adev, NULL,
+ AMDGPU_XGMI_BW_MODE_PER_LINK,
+ AMDGPU_XGMI_BW_UNIT_MBYTES,
+ &sub_type_hdr->minimum_bandwidth_mbs,
+ &sub_type_hdr->maximum_bandwidth_mbs);
+ } else {
+ sub_type_hdr->minimum_bandwidth_mbs = mem_bw;
+ sub_type_hdr->maximum_bandwidth_mbs = mem_bw;
+ }
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
@@ -2198,12 +2203,12 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
if (use_ta_info) {
sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT *
- amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
- sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev,
- peer_kdev->adev, false);
- sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
+ amdgpu_xgmi_get_hops_count(kdev->adev, peer_kdev->adev);
+ amdgpu_xgmi_get_bandwidth(kdev->adev, peer_kdev->adev,
+ AMDGPU_XGMI_BW_MODE_PER_PEER,
+ AMDGPU_XGMI_BW_UNIT_MBYTES,
+ &sub_type_hdr->minimum_bandwidth_mbs,
+ &sub_type_hdr->maximum_bandwidth_mbs);
} else {
bool is_single_hop = kdev->kfd == peer_kdev->kfd;
int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT :
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9d20e169ec4a..b9c82be6ce13 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -590,9 +590,13 @@ static int kfd_gws_init(struct kfd_node *node)
&& kfd->mec2_fw_version >= 0x6b) ||
(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
- && mes_rev >= 68))))
+ && mes_rev >= 68) ||
+ (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))))) {
+ if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
+ node->adev->gds.gws_size = 64;
ret = amdgpu_amdkfd_alloc_gws(node->adev,
node->adev->gds.gws_size, &node->gws);
+ }
return ret;
}
@@ -1600,6 +1604,11 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev)
return -EINVAL;
}
+ if (dev->kfd->shared_resources.enable_mes) {
+ dev_err(dev->adev->dev, "Inducing MES hang is not supported\n");
+ return -EINVAL;
+ }
+
return dqm_debugfs_hang_hws(dev->dqm);
}
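
The kfd_gws_init() change above extends GWS support to GFX12 and pins its GWS pool to 64 entries before allocation. A standalone sketch of the simplified eligibility predicate; the firmware-revision threshold is taken from the hunk context, and version codes are reduced to a single major number for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for the KFD_GC_VERSION/mes_rev checks above */
	static bool gws_supported(int gc_major, int mes_rev)
	{
		if (gc_major >= 12)
			return true;		/* newly allowed by this change */
		if (gc_major == 11)
			return mes_rev >= 68;	/* MES firmware gate, unchanged */
		return false;			/* older parts use CP/MEC fw gates */
	}

	int main(void)
	{
		printf("gfx12:          %d\n", gws_supported(12, 0));	/* 1 */
		printf("gfx11, old MES: %d\n", gws_supported(11, 60));	/* 0 */
		return 0;
	}
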
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 195085079eb2..2afcc1b4856a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -36,6 +36,7 @@
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"
+#include "amdgpu_sdma.h"
#include "mes_v11_api_def.h"
#include "kfd_debug.h"
@@ -66,7 +67,8 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q, const uint32_t *restore_sdma_id);
-static void kfd_process_hw_exception(struct work_struct *work);
+
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma);
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
@@ -170,7 +172,7 @@ static void kfd_hws_hang(struct device_queue_manager *dqm)
/*
* Issue a GPU reset if HWS is unresponsive
*/
- schedule_work(&dqm->hw_exception_work);
+ amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
static int convert_to_mes_queue_type(int queue_type)
@@ -207,21 +209,6 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
- if (!pdd->proc_ctx_cpu_ptr) {
- r = amdgpu_amdkfd_alloc_gtt_mem(adev,
- AMDGPU_MES_PROC_CTX_SIZE,
- &pdd->proc_ctx_bo,
- &pdd->proc_ctx_gpu_addr,
- &pdd->proc_ctx_cpu_ptr,
- false);
- if (r) {
- dev_err(adev->dev,
- "failed to allocate process context bo\n");
- return r;
- }
- memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
- }
-
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
queue_input.process_id = pdd->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;
@@ -1755,7 +1742,6 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
- INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
dqm->trap_debug_vmid = 0;
init_sdma_bitmaps(dqm);
@@ -2222,8 +2208,7 @@ static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uin
return NULL;
}
-/* only for compute queue */
-static int reset_queues_on_hws_hang(struct device_queue_manager *dqm)
+static int reset_hung_queues(struct device_queue_manager *dqm)
{
int r = 0, reset_count = 0, i;
@@ -2276,6 +2261,104 @@ reset_fail:
return r;
}
+static bool sdma_has_hang(struct device_queue_manager *dqm)
+{
+ int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ int engine_end = engine_start + get_num_all_sdma_engines(dqm);
+ int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
+ int i, j;
+
+ for (i = engine_start; i < engine_end; i++) {
+ for (j = 0; j < num_queues_per_eng; j++) {
+ if (!dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j))
+ continue;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool set_sdma_queue_as_reset(struct device_queue_manager *dqm,
+ uint32_t doorbell_off)
+{
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if ((q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) &&
+ q->properties.doorbell_off == doorbell_off) {
+ set_queue_as_reset(dqm, q, qpd);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int reset_hung_queues_sdma(struct device_queue_manager *dqm)
+{
+ int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ int engine_end = engine_start + get_num_all_sdma_engines(dqm);
+ int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
+ int r = 0, i, j;
+
+ if (dqm->is_hws_hang)
+ return -EIO;
+
+ /* Scan for hung HW queues and reset engine. */
+ dqm->detect_hang_count = 0;
+ for (i = engine_start; i < engine_end; i++) {
+ for (j = 0; j < num_queues_per_eng; j++) {
+ uint32_t doorbell_off =
+ dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j);
+
+ if (!doorbell_off)
+ continue;
+
+ /* Reset engine and check. */
+ if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) ||
+ dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) ||
+ !set_sdma_queue_as_reset(dqm, doorbell_off)) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ /* Should only expect one queue active per engine */
+ dqm->detect_hang_count++;
+ break;
+ }
+ }
+
+ /* Signal process reset */
+ if (dqm->detect_hang_count)
+ kfd_signal_reset_event(dqm->dev);
+ else
+ r = -ENOTRECOVERABLE;
+
+reset_fail:
+ dqm->detect_hang_count = 0;
+
+ return r;
+}
+
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma)
+{
+ while (halt_if_hws_hang)
+ schedule();
+
+ if (!amdgpu_gpu_recovery)
+ return -ENOTRECOVERABLE;
+
+ return is_sdma ? reset_hung_queues_sdma(dqm) : reset_hung_queues(dqm);
+}
+
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
@@ -2326,16 +2409,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
* check those fields
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
- if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
- while (halt_if_hws_hang)
- schedule();
- if (reset_queues_on_hws_hang(dqm)) {
- dqm->is_hws_hang = true;
- kfd_hws_hang(dqm);
- retval = -ETIME;
- goto out;
- }
- }
+ if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd) &&
+ reset_queues_on_hws_hang(dqm, false))
+ goto reset_fail;
+
+ /* Check for SDMA hang and attempt SDMA reset */
+ if (sdma_has_hang(dqm) && reset_queues_on_hws_hang(dqm, true))
+ goto reset_fail;
/* We need to reset the grace period value for this device */
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
@@ -2346,10 +2426,15 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;
-
out:
up_read(&dqm->dev->adev->reset_domain->sem);
return retval;
+
+reset_fail:
+ dqm->is_hws_hang = true;
+ kfd_hws_hang(dqm);
+ up_read(&dqm->dev->adev->reset_domain->sem);
+ return -ETIME;
}
/* only for compute queue */
@@ -2506,20 +2591,13 @@ failed_try_destroy_debugged_queue:
return retval;
}
-/*
- * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
- * stay in user mode.
- */
-#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
-/* APE1 limit is inclusive and 64K aligned. */
-#define APE1_LIMIT_ALIGNMENT 0xFFFF
-
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
bool retval = true;
@@ -2528,41 +2606,17 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
dqm_lock(dqm);
- if (alternate_aperture_size == 0) {
- /* base > limit disables APE1 */
- qpd->sh_mem_ape1_base = 1;
- qpd->sh_mem_ape1_limit = 0;
- } else {
- /*
- * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
- * SH_MEM_APE1_BASE[31:0], 0x0000 }
- * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
- * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
- * Verify that the base and size parameters can be
- * represented in this format and convert them.
- * Additionally restrict APE1 to user-mode addresses.
- */
-
- uint64_t base = (uintptr_t)alternate_aperture_base;
- uint64_t limit = base + alternate_aperture_size - 1;
-
- if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
- (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
- retval = false;
- goto out;
- }
-
- qpd->sh_mem_ape1_base = base >> 16;
- qpd->sh_mem_ape1_limit = limit >> 16;
- }
-
retval = dqm->asic_ops.set_cache_memory_policy(
dqm,
qpd,
default_policy,
alternate_policy,
alternate_aperture_base,
- alternate_aperture_size);
+ alternate_aperture_size,
+ misc_process_properties);
+
+ if (retval)
+ goto out;
if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
@@ -3095,13 +3149,6 @@ int kfd_evict_process_device(struct kfd_process_device *pdd)
return ret;
}
-static void kfd_process_hw_exception(struct work_struct *work)
-{
- struct device_queue_manager *dqm = container_of(work,
- struct device_queue_manager, hw_exception_work);
- amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
-}
-
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
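
Taken together, the device_queue_manager.c hunks above drop the deferred hw_exception worker in favor of a direct reset call and split hang recovery by engine type. A kernel-style sketch of the resulting control flow in unmap_queues_cpsch(); preemption_failed stands in for the mqd_mgr->check_preemption_failed() result, so this is a summary, not the literal code:

	if (preemption_failed && reset_queues_on_hws_hang(dqm, false))
		goto reset_fail;	/* compute-queue reset failed */

	if (sdma_has_hang(dqm) && reset_queues_on_hws_hang(dqm, true))
		goto reset_fail;	/* per-engine SDMA reset failed */

	return retval;

reset_fail:
	dqm->is_hws_hang = true;
	kfd_hws_hang(dqm);	/* now calls amdgpu_amdkfd_gpu_reset() directly */
	return -ETIME;
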
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 09ab36f8e8c6..122eb745e9c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -174,7 +174,8 @@ struct device_queue_manager_ops {
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
int (*process_termination)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
@@ -210,7 +211,8 @@ struct device_queue_manager_asic_ops {
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void (*init_sdma_vm)(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
@@ -269,7 +271,6 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
bool is_resetting;
- struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
bool sched_halt;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index d4d95c7f2e5d..0508ef5a41d7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -27,12 +27,21 @@
#include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
@@ -80,10 +89,41 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
@@ -97,37 +137,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
| DEFAULT_MTYPE(default_mtype)
| APE1_MTYPE(ape1_mtype);
-
- return true;
-}
-
-static int update_qpd_cik(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
- DEFAULT_MTYPE(MTYPE_NONCACHED) |
- APE1_MTYPE(MTYPE_NONCACHED);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+
+static int update_qpd_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
	return 0;
}
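
The APE1 validation deleted from the common DQM code reappears verbatim in the CIK backend above. Since the SH_MEM_APE1 registers hold 64K-page numbers, the check is pure bit arithmetic; a runnable example with the mask values copied from the hunk:

	#include <stdint.h>
	#include <stdio.h>

	#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
	#define APE1_LIMIT_ALIGNMENT 0xFFFF

	int main(void)
	{
		/* A 64K-aligned, 128K-sized user-mode aperture passes the check */
		uint64_t base = 0x10000;		/* low 16 bits zero */
		uint64_t size = 0x20000;
		uint64_t limit = base + size - 1;	/* 0x2FFFF: low 16 bits all ones */

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		    (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			puts("rejected");
			return 1;
		}
		/* The registers store the 64K-page numbers */
		printf("sh_mem_ape1_base=0x%llx sh_mem_ape1_limit=0x%llx\n",
		       (unsigned long long)(base >> 16),
		       (unsigned long long)(limit >> 16));
		return 0;
	}
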
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
index 245a90dfc2f6..ba6e3d747ccd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
@@ -31,10 +31,18 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v10(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v10;
asic_ops->update_qpd = update_qpd_v10;
asic_ops->init_sdma_vm = init_sdma_vm_v10;
asic_ops->mqd_manager_init = mqd_manager_init_v10;
@@ -49,27 +57,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v10(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+
+static int update_qpd_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
index 2e129da7acb4..8b447d04558f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
@@ -30,10 +30,18 @@ static int update_qpd_v11(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v11(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v11;
asic_ops->update_qpd = update_qpd_v11;
asic_ops->init_sdma_vm = init_sdma_vm_v11;
asic_ops->mqd_manager_init = mqd_manager_init_v11;
@@ -48,28 +56,29 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v11(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+
+static int update_qpd_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
index 4f3295b29dfb..3550da3a46f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
@@ -30,10 +30,18 @@ static int update_qpd_v12(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v12(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v12;
asic_ops->update_qpd = update_qpd_v12;
asic_ops->init_sdma_vm = init_sdma_vm_v12;
asic_ops->mqd_manager_init = mqd_manager_init_v12;
@@ -48,28 +56,29 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v12(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+
+static int update_qpd_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 67137e674f1d..d794c8172b40 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -30,10 +30,18 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v9(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v9;
asic_ops->update_qpd = update_qpd_v9;
asic_ops->init_sdma_vm = init_sdma_vm_v9;
asic_ops->mqd_manager_init = mqd_manager_init_v9;
@@ -48,10 +56,43 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
+{
+ qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+
+ if (dqm->dev->kfd->noretry)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
+ qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
+
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0)) {
+ if (misc_process_properties & KFD_PROC_FLAG_MFMA_HIGH_PRECISION)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRECISION_MODE__SHIFT;
+ }
+
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
+
+ pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
+ qpd->sh_mem_config);
+ return true;
+}
+
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- struct kfd_process_device *pdd;
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
-	pdd = qpd_to_pdd(qpd);
@@ -64,8 +105,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
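
set_cache_memory_policy_v9() above now builds the per-process SH_MEM_CONFIG in one place, including the F8 mode bit for GC 9.4.3/9.4.4/9.5.0 and the new per-process precision-mode opt-in on GC 9.5.0. A standalone sketch of the bit composition; the shift positions and the flag bit below are placeholders for illustration (the real values come from the gc_*_sh_mask.h register headers and the KFD uAPI):

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder shift positions, illustration only */
	#define ALIGNMENT_MODE_SHIFT		3
	#define RETRY_DISABLE_SHIFT		12
	#define F8_MODE_SHIFT			8
	#define PRECISION_MODE_SHIFT		20
	#define ALIGNMENT_MODE_UNALIGNED	3
	#define PROC_FLAG_MFMA_HIGH_PRECISION	0x1	/* assumed flag bit */

	int main(void)
	{
		uint32_t cfg = ALIGNMENT_MODE_UNALIGNED << ALIGNMENT_MODE_SHIFT;
		int noretry = 1, is_gc_9_5_0 = 1;
		uint32_t misc_flags = PROC_FLAG_MFMA_HIGH_PRECISION;

		if (noretry)
			cfg |= 1 << RETRY_DISABLE_SHIFT;
		cfg |= 1 << F8_MODE_SHIFT;		/* GC 9.4.3 / 9.4.4 / 9.5.0 */
		if (is_gc_9_5_0 && (misc_flags & PROC_FLAG_MFMA_HIGH_PRECISION))
			cfg |= 1 << PRECISION_MODE_SHIFT; /* new per-process opt-in */

		printf("sh_mem_config = 0x%x\n", cfg);
		return 0;
	}
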
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index b291ee0fab94..dad83356e976 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -27,12 +27,21 @@
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
@@ -81,10 +90,41 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_UC :
@@ -100,40 +140,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
- return true;
-}
-
-static int update_qpd_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+
+static int update_qpd_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 2eff37aaf827..1695dd78ede8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 68dbc0399c87..3c0ae28c5923 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index 2b72d5b4949b..565858b9044d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index ff417d5361c4..3014925d95ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
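
Across the MQD managers above, RPTR_BLOCK_SIZE and UNORD_DISPATCH move to init_mqd() while update_mqd() now only rewrites the queue-size field, clearing QUEUE_SIZE_MASK first so resizes do not accumulate stale bits. The encoded field is log2 of the ring size in dwords, minus one; a runnable check of the gfx10/11/12 formula (gfx9's order_base_2(q->queue_size / 4) - 1 yields the same value for power-of-two sizes):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int queue_size = 4096;	/* ring size in bytes */

		/* ffs(dwords) - 1 is log2 of the dword count; HW wants log2 - 1 */
		unsigned int field = ffs(queue_size / sizeof(unsigned int)) - 1 - 1;

		printf("QUEUE_SIZE field for %u bytes: %u\n", queue_size, field); /* 9 */
		return 0;
	}
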
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
deleted file mode 100644
index 8896426e0556..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Copyright 2014-2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/types.h>
-#include "kfd_priv.h"
-#include "amdgpu_ids.h"
-
-static unsigned int pasid_bits = 16;
-static bool pasids_allocated; /* = false */
-
-u32 kfd_pasid_alloc(void)
-{
- int r = amdgpu_pasid_alloc(pasid_bits);
-
- if (r > 0) {
- pasids_allocated = true;
- return r;
- }
-
- return 0;
-}
-
-void kfd_pasid_free(u32 pasid)
-{
- amdgpu_pasid_free(pasid);
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 59619f794b6b..bb09c873a9a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -289,7 +289,6 @@ struct kfd_node {
/* Global GWS resource shared between processes */
void *gws;
- bool gws_debug_workaround;
/* Clients watching SMI events */
struct list_head smi_clients;
@@ -1364,8 +1363,6 @@ int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
-struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
- unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1067afdb456e..7c0c24732481 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
+#include "amdgpu_reset.h"
struct mm_struct;
@@ -838,6 +839,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
return ERR_PTR(-EINVAL);
}
+ /* If the process just called exec(3), it is possible that the
+ * cleanup of the kfd_process (following the release of the mm
+ * of the old process image) is still in the cleanup work queue.
+ * Make sure to drain any job before trying to recreate any
+ * resource for this process.
+ */
+ flush_workqueue(kfd_process_wq);
+
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@@ -858,14 +867,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
if (process) {
pr_debug("Process already found\n");
} else {
- /* If the process just called exec(3), it is possible that the
- * cleanup of the kfd_process (following the release of the mm
- * of the old process image) is still in the cleanup work queue.
- * Make sure to drain any job before trying to recreate any
- * resource for this process.
- */
- flush_workqueue(kfd_process_wq);
-
process = create_process(thread);
if (IS_ERR(process))
goto out;
@@ -1133,6 +1134,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p)
p->kobj = NULL;
}
+/*
+ * If any GPU is ongoing reset, wait for reset complete.
+ */
+static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
+{
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++)
+ flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
+}
+
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
@@ -1147,6 +1159,11 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
+ /*
+	 * If the GPU is in reset, user queues may still be running; wait for the reset to complete.
+ */
+ kfd_process_wait_gpu_reset_complete(p);
+
/* Signal the eviction fence after user mode queues are
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
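
Two ordering fixes in kfd_process.c above: kfd_create_process() now drains kfd_process_wq before the lookup, so a process that re-execs cannot observe a half-destroyed kfd_process from its old image, and release now waits on each device's reset domain before signaling eviction fences. A kernel-style sketch of the creation-side ordering (find_process follows the naming in the surrounding file; treat it as context, not new API):

	/* Drain deferred teardown first: after exec(3) the old image's
	 * cleanup may still sit on kfd_process_wq. */
	flush_workqueue(kfd_process_wq);

	mutex_lock(&kfd_processes_mutex);
	process = find_process(thread, false);	/* stale entries are gone now */
	if (!process)
		process = create_process(thread);
	mutex_unlock(&kfd_processes_mutex);
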
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index d7947311cbbd..7eb370b68159 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -363,10 +363,26 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (retval != 0)
return retval;
+ /* Register process if this is the first queue */
if (list_empty(&pdd->qpd.queues_list) &&
list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
+ /* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */
+ if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
+ retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (retval) {
+ dev_err(dev->adev->dev, "failed to allocate process context bo\n");
+ return retval;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
retval = -ENOMEM;
@@ -532,7 +548,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pdd->pasid,
pqn->q->properties.queue_id, retval);
- if (retval != -ETIME)
+ if (retval != -ETIME && retval != -EIO)
goto err_destroy_queue;
}
kfd_procfs_del_queue(pqn->q);
@@ -652,19 +668,6 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
return 0;
}
-struct kernel_queue *pqm_get_kernel_queue(
- struct process_queue_manager *pqm,
- unsigned int qid)
-{
- struct process_queue_node *pqn;
-
- pqn = get_queue_by_qid(pqm, qid);
- if (pqn && pqn->kq)
- return pqn->kq;
-
- return NULL;
-}
-
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
unsigned int qid)
{
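
The pqm_create_queue() hunk above moves the MES process-context BO out of add_queue_mes() into first-queue creation, a lazy first-use allocation: the BO exists before any queue is added and is no longer allocated while the reset-domain semaphore is held. The shape of the pattern, reduced to its essentials (names as in the hunk):

	/* Allocate once per process-device, only on the MES path */
	if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						     AMDGPU_MES_PROC_CTX_SIZE,
						     &pdd->proc_ctx_bo,
						     &pdd->proc_ctx_gpu_addr,
						     &pdd->proc_ctx_cpu_ptr,
						     false);
		if (retval)
			return retval;
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}
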
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 62c635e9d1aa..4afff7094caf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -276,8 +276,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
/* EOP buffer is not required for all ASICs */
if (properties->eop_ring_buffer_address) {
if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
- pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
- properties->eop_buf_bo->tbo.base.size,
+ pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
+ properties->eop_ring_buffer_size,
topo_dev->node_props.eop_buffer_size);
err = -EINVAL;
goto out_err_unreserve;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index db3034b00dac..1a38ac75abbd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3011,7 +3011,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
/* check if this page fault time stamp is before svms->checkpoint_ts */
if (svms->checkpoint_ts[gpuidx] != 0) {
- if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) {
+ if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
r = 0;
goto out;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index dbc5595e999a..27e7356eed6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -519,6 +519,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kfd->mec_fw_version);
sysfs_show_32bit_prop(buffer, offs, "capability",
dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, offs, "capability2",
+ dev->node_props.capability2);
sysfs_show_64bit_prop(buffer, offs, "debug_prop",
dev->node_props.debug_prop);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
@@ -1981,6 +1983,9 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
+ if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4))
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index f06c9db7ddde..3de8ec0043bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -51,6 +51,7 @@ struct kfd_node_properties {
uint32_t cpu_core_id_base;
uint32_t simd_id_base;
uint32_t capability;
+ uint32_t capability2;
uint64_t debug_prop;
uint32_t max_waves_per_simd;
uint32_t lds_size_in_kb;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 16e4eb474eec..6a54f1cfa125 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -321,7 +321,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return 0;
}
-static bool dm_is_idle(void *handle)
+static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return true;
@@ -1625,75 +1625,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
{
- const struct dmi_system_id *dmi_id;
+ int dmi_id;
+ struct drm_device *dev = dm->ddev;
dm->aux_hpd_discon_quirk = false;
+ init_data->flags.support_edp0_on_dp1 = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
- dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
- if (dmi_id) {
+ if (!dmi_id)
+ return;
+
+ if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
- DRM_INFO("aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ init_data->flags.support_edp0_on_dp1 = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
}
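
The quirk table above switches from dmi_first_match() to dmi_check_system() with per-entry callbacks, so one scan can latch several independent quirks. A kernel-style sketch of the pattern; dmi_check_system() runs every matching entry's callback and returns the match count (demo names are made up for illustration):

	#include <linux/dmi.h>
	#include <linux/printk.h>

	static bool quirk_a, quirk_b;

	static int set_quirk_a(const struct dmi_system_id *id)
	{
		quirk_a = true;
		return 0;
	}

	static int set_quirk_b(const struct dmi_system_id *id)
	{
		quirk_b = true;
		return 0;
	}

	static const struct dmi_system_id demo_table[] = {
		{
			.callback = set_quirk_a,
			.matches = { DMI_MATCH(DMI_SYS_VENDOR, "VendorA") },
		},
		{
			.callback = set_quirk_b,
			.matches = { DMI_MATCH(DMI_SYS_VENDOR, "VendorB") },
		},
		{}
	};

	static void demo_apply_quirks(void)
	{
		/* Callbacks fire per match; the return value is the match count */
		if (dmi_check_system(demo_table))
			pr_info("quirks: a=%d b=%d\n", quirk_a, quirk_b);
	}
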
@@ -2002,7 +2057,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm);
+ retrieve_dmi_info(&adev->dm, &init_data);
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -3031,10 +3086,11 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
}
+DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T))
+
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
- struct dc_state *context = NULL;
- enum dc_status res = DC_ERROR_UNEXPECTED;
+ struct dc_state *context __free(state_release) = NULL;
int i;
struct dc_stream_state *del_streams[MAX_PIPES];
int del_streams_count = 0;
@@ -3044,7 +3100,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
context = dc_state_create_current_copy(dc);
if (context == NULL)
- goto context_alloc_fail;
+ return DC_ERROR_UNEXPECTED;
/* First remove from context all streams */
for (i = 0; i < context->stream_count; i++) {
@@ -3055,25 +3111,20 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
/* Remove all planes for removed streams and then remove the streams */
for (i = 0; i < del_streams_count; i++) {
- if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
- res = DC_FAIL_DETACH_SURFACES;
- goto fail;
- }
+ enum dc_status res;
+
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context))
+ return DC_FAIL_DETACH_SURFACES;
res = dc_state_remove_stream(dc, context, del_streams[i]);
if (res != DC_OK)
- goto fail;
+ return res;
}
params.streams = context->streams;
params.stream_count = context->stream_count;
- res = dc_commit_streams(dc, &params);
-
-fail:
- dc_state_release(context);
-context_alloc_fail:
- return res;
+ return dc_commit_streams(dc, &params);
}
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
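
amdgpu_dm_commit_zero_streams() above is converted to scope-based cleanup from <linux/cleanup.h>: DEFINE_FREE() declares a named cleanup routine and __free() binds it to a local, so every return path releases the object without goto-unwind labels. A minimal kernel-style sketch of the idiom (kvfree_buf is a made-up name for illustration):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	DEFINE_FREE(kvfree_buf, void *, if (_T) kvfree(_T))

	static int demo(size_t n)
	{
		void *buf __free(kvfree_buf) = kvzalloc(n, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		if (n < 16)
			return -EINVAL;	/* buf is kvfree()d automatically here */

		/* use buf */
		return 0;		/* and released here too */
	}
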
@@ -3090,9 +3141,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_display_manager *dm = &adev->dm;
- int ret = 0;
if (amdgpu_in_reset(adev)) {
+ enum dc_status res;
+
mutex_lock(&dm->dc_lock);
dc_allow_idle_optimizations(adev->dm.dc, false);
@@ -3102,13 +3154,17 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
if (dm->cached_dc_state)
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
- amdgpu_dm_commit_zero_streams(dm->dc);
+ res = amdgpu_dm_commit_zero_streams(dm->dc);
+ if (res != DC_OK) {
+ drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res);
+ return -EINVAL;
+ }
amdgpu_dm_irq_suspend(adev);
hpd_rx_irq_work_suspend(dm);
- return ret;
+ return 0;
}
WARN_ON(adev->dm.cached_state);
@@ -3245,14 +3301,14 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } *bundle;
+ } *bundle __free(kfree);
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
if (!bundle) {
drm_err(dm->ddev, "Failed to allocate update bundle\n");
- goto cleanup;
+ return;
}
for (k = 0; k < dc_state->stream_count; k++) {
@@ -3272,9 +3328,24 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
&bundle->stream_update,
bundle->surface_updates);
}
+}
-cleanup:
- kfree(bundle);
+static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
+ struct dc_sink *sink)
+{
+ struct dc_panel_patch *ppatch = NULL;
+
+ if (!sink)
+ return;
+
+ ppatch = &sink->edid_caps.panel_patch;
+ if (ppatch->wait_after_dpcd_poweroff_ms) {
+ msleep(ppatch->wait_after_dpcd_poweroff_ms);
+ drm_dbg_driver(adev_to_drm(adev),
+ "%s: adding a %ds delay as w/a for panel\n",
+ __func__,
+ ppatch->wait_after_dpcd_poweroff_ms / 1000);
+ }
}
static int dm_resume(struct amdgpu_ip_block *ip_block)
@@ -3323,7 +3394,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3363,6 +3434,11 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
+
+ /* leave display off for S4 sequence */
+ if (adev->in_s4)
+ return 0;
+
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -3398,6 +3474,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
/* Do detection*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+		bool ret;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3414,17 +3491,20 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
if (aconnector->mst_root)
continue;
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
} else {
- mutex_lock(&dm->dc_lock);
+ guard(mutex)(&dm->dc_lock);
dc_exit_ips_for_hw_access(dm->dc);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
- mutex_unlock(&dm->dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
+ if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ }
}
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
@@ -3434,7 +3514,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
- mutex_unlock(&aconnector->hpd_lock);
}
drm_connector_list_iter_end(&iter);
@@ -3603,12 +3682,14 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->min_input_signal = min_input_signal_override;
}
+DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
+
void amdgpu_dm_update_connector_after_detect(
struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_sink *sink __free(sink_release) = NULL;
struct drm_device *dev = connector->dev;
- struct dc_sink *sink;
/* MST handled by drm_mst framework */
if (aconnector->mst_mgr.mst_state == true)
@@ -3630,7 +3711,7 @@ void amdgpu_dm_update_connector_after_detect(
* For S3 resume with headless use eml_sink to fake stream
* because on resume connector->sink is set to NULL
*/
- mutex_lock(&dev->mode_config.mutex);
+ guard(mutex)(&dev->mode_config.mutex);
if (sink) {
if (aconnector->dc_sink) {
@@ -3655,10 +3736,6 @@ void amdgpu_dm_update_connector_after_detect(
}
}
- mutex_unlock(&dev->mode_config.mutex);
-
- if (sink)
- dc_sink_release(sink);
return;
}
@@ -3666,10 +3743,8 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: temporary guard to look for proper fix
* if this sink is MST sink, we should not do anything
*/
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- dc_sink_release(sink);
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
return;
- }
if (aconnector->dc_sink == sink) {
/*
@@ -3678,15 +3753,13 @@ void amdgpu_dm_update_connector_after_detect(
*/
drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n",
aconnector->connector_id);
- if (sink)
- dc_sink_release(sink);
return;
}
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- mutex_lock(&dev->mode_config.mutex);
+ guard(mutex)(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3748,12 +3821,7 @@ void amdgpu_dm_update_connector_after_detect(
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
}
- mutex_unlock(&dev->mode_config.mutex);
-
update_subconnector_property(aconnector);
-
- if (sink)
- dc_sink_release(sink);
}
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -3773,7 +3841,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
*/
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (adev->dm.hdcp_workqueue) {
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
@@ -3785,7 +3853,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
aconnector->timing_changed = false;
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3797,11 +3865,13 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
} else {
- mutex_lock(&adev->dm.dc_lock);
- dc_exit_ips_for_hw_access(dc);
- ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
- mutex_unlock(&adev->dm.dc_lock);
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ dc_exit_ips_for_hw_access(dc);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
@@ -3812,8 +3882,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
}
}
- mutex_unlock(&aconnector->hpd_lock);
-
}
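
For reference, the hunks above replace paired mutex_lock()/mutex_unlock() and the per-return-path dc_sink_release() calls with the scope-based cleanup helpers from <linux/cleanup.h>, which is why every early-return release disappears. A minimal sketch of the semantics, with hypothetical names (example_free, the two example mutexes) and nothing beyond the documented cleanup.h behavior:

/* sketch only, not part of the patch */
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/slab.h>

DEFINE_FREE(example_free, char *, if (_T) kfree(_T))

static int example(struct mutex *a, struct mutex *b)
{
	/* freed automatically when buf goes out of scope */
	char *buf __free(example_free) = kzalloc(16, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	guard(mutex)(a);	/* mutex_unlock(a) runs on every exit path */

	if (buf[0])		/* early return: a unlocked, buf freed */
		return -EINVAL;

	scoped_guard(mutex, b) {
		/* b is held only within this block */
	}
	return 0;
}

Cleanup runs at scope exit in reverse declaration order, which is why the early returns in amdgpu_dm_update_connector_after_detect() no longer need their own unlock/release sequences.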
static void handle_hpd_irq(void *param)
@@ -4653,48 +4721,40 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
int bl_idx)
{
-#if defined(CONFIG_ACPI)
- struct amdgpu_dm_backlight_caps caps;
+ struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx];
- memset(&caps, 0, sizeof(caps));
-
- if (dm->backlight_caps[bl_idx].caps_valid)
+ if (caps->caps_valid)
return;
- amdgpu_acpi_get_backlight_caps(&caps);
+#if defined(CONFIG_ACPI)
+ amdgpu_acpi_get_backlight_caps(caps);
/* validate the firmware value is sane */
- if (caps.caps_valid) {
- int spread = caps.max_input_signal - caps.min_input_signal;
+ if (caps->caps_valid) {
+ int spread = caps->max_input_signal - caps->min_input_signal;
- if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
- caps.min_input_signal < 0 ||
+ if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ caps->min_input_signal < 0 ||
spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
spread < AMDGPU_DM_MIN_SPREAD) {
DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
- caps.min_input_signal, caps.max_input_signal);
- caps.caps_valid = false;
+ caps->min_input_signal, caps->max_input_signal);
+ caps->caps_valid = false;
}
}
- if (caps.caps_valid) {
- dm->backlight_caps[bl_idx].caps_valid = true;
- if (caps.aux_support)
- return;
- dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
- dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
- } else {
- dm->backlight_caps[bl_idx].min_input_signal =
- AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal =
- AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ if (!caps->caps_valid) {
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
}
#else
- if (dm->backlight_caps[bl_idx].aux_support)
+ if (caps->aux_support)
return;
- dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
#endif
}
@@ -4720,10 +4780,38 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
uint32_t brightness)
{
unsigned int min, max;
+ u8 prev_signal = 0, prev_lum = 0;
if (!get_brightness_range(caps, &min, &max))
return brightness;
+ for (int i = 0; i < caps->data_points; i++) {
+ u8 signal, lum;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ break;
+
+ signal = caps->luminance_data[i].input_signal;
+ lum = caps->luminance_data[i].luminance;
+
+ /*
+ * brightness == signal: luminance is percent numerator
+ * brightness < signal: interpolate between previous and current luminance numerator
+ * brightness > signal: find next data point
+ */
+ if (brightness < signal)
+ lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
+ (brightness - prev_signal),
+ signal - prev_signal);
+ else if (brightness > signal) {
+ prev_signal = signal;
+ prev_lum = lum;
+ continue;
+ }
+ brightness = DIV_ROUND_CLOSEST(lum * brightness, 101);
+ break;
+ }
+
// Rescale 0..255 to min..max
return min + DIV_ROUND_CLOSEST((max - min) * brightness,
AMDGPU_MAX_BL_LEVEL);
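
The loop above implements the custom brightness curve: it walks the sorted (input_signal, luminance) pairs, linearly interpolates the luminance-percent numerator when the requested brightness falls between two data points, and then scales brightness by lum/101 before the usual min..max rescale. A worked sketch with hypothetical data points {64 -> 20%, 128 -> 45%} and a user brightness of 96:

/* sketch only, not part of the patch */
#include <linux/math.h>		/* DIV_ROUND_CLOSEST() */
#include <linux/types.h>

static u32 example_curve(void)
{
	u8 prev_signal = 64, prev_lum = 20;	/* last point below 96 */
	u8 signal = 128, lum = 45;		/* first point above 96 */
	u32 brightness = 96;

	/* 20 + round(25 * 32 / 64) = 33: interpolated luminance percent */
	lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
					   (brightness - prev_signal),
					   signal - prev_signal);

	/* round(33 * 96 / 101) = 31: curve-corrected value, still 0..255 */
	return DIV_ROUND_CLOSEST(lum * brightness, 101);
}

So a user-space 96 becomes 31 before the final rescale into the firmware min..max range.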
@@ -4748,19 +4836,19 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
- struct amdgpu_dm_backlight_caps caps;
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link;
u32 brightness;
bool rc, reallow_idle = false;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- caps = dm->backlight_caps[bl_idx];
+ caps = &dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
/* update scratch register */
if (bl_idx == 0)
amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
- brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
/* Change brightness based on AUX property */
@@ -4770,7 +4858,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
- if (caps.aux_support) {
+ if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc)
@@ -4887,6 +4975,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = AMDGPU_MAX_BL_LEVEL;
+ if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ drm_info(drm, "Using custom brightness curve\n");
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
@@ -7295,8 +7385,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
- drm_edid = drm_edid_read(connector);
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -7341,14 +7437,21 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
- drm_edid = drm_edid_read(connector);
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -12655,3 +12758,10 @@ bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
{
return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}
+
+void dm_acpi_process_phy_transition_interlock(
+ const struct dc_context *ctx,
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params)
+{
+ // Not yet implemented
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index acead14ab45d..385faaca6e26 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -151,6 +151,18 @@ struct idle_workqueue {
bool running;
};
+#define MAX_LUMINANCE_DATA_POINTS 99
+
+/**
+ * struct amdgpu_dm_luminance_data - Custom luminance data
+ * @luminance: Luminance in percent
+ * @input_signal: Input signal in range 0-255
+ */
+struct amdgpu_dm_luminance_data {
+ u8 luminance;
+ u8 input_signal;
+} __packed;
+
/**
* struct amdgpu_dm_backlight_caps - Information about backlight
*
@@ -195,6 +207,14 @@ struct amdgpu_dm_backlight_caps {
* @dc_level: the default brightness if booted on DC
*/
u8 dc_level;
+ /**
+ * @data_points: the number of custom luminance data points
+ */
+ u8 data_points;
+ /**
+ * @luminance_data: custom luminance data
+ */
+ struct amdgpu_dm_luminance_data luminance_data[MAX_LUMINANCE_DATA_POINTS];
};
/**
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fbd80d8545a8..2cd35392e2da 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -55,16 +55,21 @@ static u32 edid_extract_panel_id(struct edid *edid)
(u32)EDID_PRODUCT_ID(edid);
}
-static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps)
{
uint32_t panel_id = edid_extract_panel_id(edid);
switch (panel_id) {
+ /* Workaround for monitors that need a delay after detecting the link */
+ case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215):
+ drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id);
+ edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000;
+ break;
/* Workaround for some monitors which do not work well with FAMS */
case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
- DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_fams = true;
break;
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
@@ -73,11 +78,11 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
- DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
- DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
default:
@@ -101,6 +106,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
{
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
struct cea_sad *sads;
int sad_count = -1;
@@ -130,7 +136,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
- apply_edid_quirks(edid_buf, edid_caps);
+ apply_edid_quirks(dev, edid_buf, edid_caps);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
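
The new 'GBT' 0x3215 entry uses the same keying as the existing quirks: a 32-bit panel ID packing the EDID PNP vendor letters with the 16-bit product code. Assuming drm_edid_encode_panel_id() keeps its upstream bit layout (5 bits per vendor letter above the product code), the key expands roughly to:

/* sketch only, not part of the patch */
#include <linux/types.h>

static const u32 example_panel_id =
	(((u32)'G' - '@') & 0x1f) << 26 |	/* 1st vendor letter */
	(((u32)'B' - '@') & 0x1f) << 21 |	/* 2nd vendor letter */
	(((u32)'T' - '@') & 0x1f) << 16 |	/* 3rd vendor letter */
	(0x3215 & 0xffff);			/* product code */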
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 3390f0d8420a..2b63cbab0e87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -473,7 +473,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
unregister_all_irq_handlers(adev);
}
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h;
@@ -511,10 +511,9 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
}
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h, *hnd_list_l;
@@ -522,7 +521,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n");
/* re-enable short pulse interrupts HW interrupt */
for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
@@ -533,11 +532,9 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-
- return 0;
}
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h, *hnd_list_l;
@@ -545,7 +542,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n");
/**
* Re-enable HW interrupt for HPD and only since FLIP and VBLANK
@@ -559,7 +556,6 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
}
/*
@@ -894,6 +890,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -920,6 +917,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
+ }
}
/**
@@ -935,6 +938,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -960,4 +964,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index 2349238a626b..ba17c23b2706 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -90,14 +90,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
* amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
*
*/
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
* amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
*
*/
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 056b17198956..7ceedf626d23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1632,6 +1632,9 @@ int pre_validate_dsc(struct drm_atomic_state *state,
connector =
amdgpu_dm_find_first_crtc_matching_connector(state,
state->crtcs[ind].ptr);
+ if (!connector)
+ continue;
+
drm_new_conn_state =
drm_atomic_get_new_connector_state(state,
connector);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index fcb0e900a38a..0090e08d5057 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -277,8 +277,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
if (!dcc->enable)
return 0;
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
- !dc->cap_funcs.get_dcc_compression_cap)
+ if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+ format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return -EINVAL;
+
+ if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
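
Splitting the check narrows the rejection: video-format surfaces are now refused DCC only on families before GC 12.0.0, while the get_dcc_compression_cap requirement still applies unconditionally, implying GC 12 parts can validate DCC on video surfaces.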
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 104f03868266..e140b7a04d72 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -30,23 +30,6 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
-static bool is_specific_oled_panel(struct dc_link *link)
-{
- if (!link->dpcd_sink_ext_caps.bits.oled)
- return false;
-
- /* Disable PSR-SU for some OLED panels to avoid glitches */
- if (link->dpcd_caps.sink_dev_id == 0xBA4159) {
- uint8_t sink_dev_id_str1[] = {'4', '0', 'C', 'U', '1'};
-
- if (!memcmp(link->dpcd_caps.sink_dev_id_str, sink_dev_id_str1,
- sizeof(sink_dev_id_str1)))
- return true;
- }
-
- return false;
-}
-
static bool link_supports_psrsu(struct dc_link *link)
{
struct dc *dc = link->ctx->dc;
@@ -57,9 +40,6 @@ static bool link_supports_psrsu(struct dc_link *link)
if (dc->ctx->dce_version < DCN_VERSION_3_1)
return false;
- if (is_specific_oled_panel(link))
- return false;
-
if (!is_psr_su_specific_panel(link))
return false;
@@ -74,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;
- return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ /* Temporarily disable PSR-SU to avoid glitches */
+ return false;
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 7d18f372ce7a..2c645dffec18 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -101,7 +101,6 @@ static void init_dig_encoder_control(struct bios_parser *bp)
bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
break;
default:
- dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
break;
}
@@ -210,6 +209,7 @@ static enum bp_result encoder_control_fallback(
******************************************************************************
*****************************************************************************/
+
static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl);
@@ -238,7 +238,6 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_7;
break;
default:
- dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
break;
}
@@ -325,6 +324,21 @@ static void transmitter_control_dmcub_v1_7(
dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static struct dc_link *get_link_by_phy_id(struct dc *p_dc, uint32_t phy_id)
+{
+ struct dc_link *link = NULL;
+
+ // Get Transition Bitmask from dc_link structure associated with PHY
+ for (uint8_t link_id = 0; link_id < MAX_LINKS; link_id++) {
+ if (phy_id == p_dc->links[link_id]->link_enc->transmitter) {
+ link = p_dc->links[link_id];
+ break;
+ }
+ }
+
+ return link;
+}
+
static enum bp_result transmitter_control_v1_7(
struct bios_parser *bp,
struct bp_transmitter_control *cntl)
@@ -363,7 +377,37 @@ static enum bp_result transmitter_control_v1_7(
if (bp->base.ctx->dc->ctx->dmub_srv &&
bp->base.ctx->dc->debug.dmub_command_table) {
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params = {0};
+ struct dc_link *link = get_link_by_phy_id(bp->base.ctx->dc, dig_v1_7.phyid);
+ bool is_phy_transition_interlock_allowed = false;
+ uint8_t action = dig_v1_7.action;
+
+ if (link) {
+ if (link->phy_transition_bitmask &&
+ (action == TRANSMITTER_CONTROL_ENABLE || action == TRANSMITTER_CONTROL_DISABLE)) {
+ is_phy_transition_interlock_allowed = true;
+
+ // Prepare input parameters for processing ACPI retimers
+ process_phy_transition_init_params.action = action;
+ process_phy_transition_init_params.display_port_lanes_count = cntl->lanes_number;
+ process_phy_transition_init_params.phy_id = dig_v1_7.phyid;
+ process_phy_transition_init_params.signal = cntl->signal;
+ process_phy_transition_init_params.sym_clock_10khz = dig_v1_7.symclk_units.symclk_10khz;
+ process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate;
+ process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask;
+ }
+ }
+
+ // Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock
+ if (is_phy_transition_interlock_allowed && action == TRANSMITTER_CONTROL_ENABLE)
+ dm_acpi_process_phy_transition_interlock(bp->base.ctx, process_phy_transition_init_params);
+
transmitter_control_dmcub_v1_7(bp->base.ctx->dmub_srv, &dig_v1_7);
+
+ // Handle POST_ON_TO_OFF: Process ACPI PHY Transition Interlock
+ if (is_phy_transition_interlock_allowed && action == TRANSMITTER_CONTROL_DISABLE)
+ dm_acpi_process_phy_transition_interlock(bp->base.ctx, process_phy_transition_init_params);
+
return BP_RESULT_OK;
}
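
For reference, the interlock brackets the DMUB transmitter command: on TRANSMITTER_CONTROL_ENABLE the ACPI hook runs before transmitter_control_dmcub_v1_7() (the PRE_OFF_TO_ON case), and on TRANSMITTER_CONTROL_DISABLE it runs after it (POST_ON_TO_OFF), so platform retimers can be sequenced around the PHY state change. dm_acpi_process_phy_transition_interlock() is added earlier in this series as an empty stub ("Not yet implemented"), so the hook is inert until the DM-side ACPI handling lands.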
@@ -408,8 +452,6 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
- dm_output_to_console("Don't have set_pixel_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
break;
}
@@ -554,7 +596,6 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
- dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -671,8 +712,6 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
- dm_output_to_console("Don't have enable_crtc for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -864,8 +903,6 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
- dm_output_to_console("Don't have set_dce_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
@@ -1046,3 +1083,4 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
init_enable_lvtma_control(bp);
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index e317a3615147..91bc8a06e2cf 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -293,3 +293,107 @@ uint8_t dal_cmd_table_helper_encoder_id_to_atom(
return ENCODER_OBJECT_ID_NONE;
}
}
+
+uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
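
This hunk hoists phy_id_to_atom(), clock_source_id_to_atom_phy_clk_src_id() and engine_bp_to_atom() into the shared helper; the per-DCE copies deleted from the dce110/dce112/dce60/dce80 files below were byte-for-byte duplicates. The final >> 2 extracts the refclk-source selector, assuming the conventional ATOM_TRANSMITTER_CONFIG_V5_* encoding with a 2-bit field at bits 3:2 (the EX_* names below are stand-ins, not the real atombios defines):

/* sketch only, not part of the patch */
#define EX_P0PLL          0x00	/* >> 2 == 0 */
#define EX_P1PLL          0x04	/* >> 2 == 1 */
#define EX_P2PLL          0x08	/* >> 2 == 2 */
#define EX_REFCLK_SRC_EXT 0x0c	/* >> 2 == 3 */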
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
index dfd30aaf4032..547700e119a6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -59,4 +59,12 @@ uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
uint8_t dal_cmd_table_helper_encoder_id_to_atom(
enum encoder_id id);
+
+uint8_t phy_id_to_atom(enum transmitter t);
+
+uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id);
+
+bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index bad4e4711b4f..268e2414b34f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -88,8 +88,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
default:
- /* Unsupported DCE */
- BREAK_TO_DEBUGGER();
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
return false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
index 11bf247bb180..3099128223df 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -31,39 +31,6 @@
#include "../command_table_helper.h"
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -94,32 +61,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -207,51 +148,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
index 755b6e33140a..349f0e5d5856 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -29,40 +29,9 @@
#include "include/bios_parser_types.h"
-#include "../command_table_helper2.h"
-
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
+#include "../command_table_helper.h"
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
+#include "../command_table_helper2.h"
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
@@ -91,32 +60,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -209,51 +152,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
index 06b4f7fa4a50..1a5fefcde8af 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -31,39 +31,6 @@
#include "../command_table_helper.h"
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
@@ -91,32 +58,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -209,51 +150,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
index 710221b4f5c5..01ccc803040c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
@@ -58,51 +58,6 @@ static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
return atom_action;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static bool clock_source_id_to_atom(
enum clock_source_id id,
uint32_t *atom_pll_id)
@@ -149,32 +104,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -270,39 +199,6 @@ static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
return atom_dig_encoder_sel;
}
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t disp_power_gating_action_to_atom(
enum bp_pipe_control_action action)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
index 8b30b558cf1f..2ec5264536c7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
@@ -58,51 +58,6 @@ static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
return atom_action;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static bool clock_source_id_to_atom(
enum clock_source_id id,
uint32_t *atom_pll_id)
@@ -149,32 +104,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -270,39 +199,6 @@ static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
return atom_dig_encoder_sel;
}
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t disp_power_gating_action_to_atom(
enum bp_pipe_control_action action)
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index df29d28d89c9..af722519a1fa 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -201,16 +201,26 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
+ struct link_encoder *new_pipe_link_enc = new_pipe->link_res.dio_link_enc;
+ struct link_encoder *pipe_link_enc = pipe->link_res.dio_link_enc;
bool stream_changed_otg_dig_on = false;
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
+
+ if (!dc->config.unify_link_enc_assignment) {
+ if (new_pipe->stream)
+ new_pipe_link_enc = new_pipe->stream->link_enc;
+ if (pipe->stream)
+ pipe_link_enc = pipe->stream->link_enc;
+ }
+
stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream &&
old_pipe->stream != new_pipe->stream &&
old_pipe->stream_res.tg == new_pipe->stream_res.tg &&
- new_pipe->stream->link_enc && !new_pipe->stream->dpms_off &&
- new_pipe->stream->link_enc->funcs->is_dig_enabled &&
- new_pipe->stream->link_enc->funcs->is_dig_enabled(
- new_pipe->stream->link_enc) &&
+ new_pipe_link_enc && !new_pipe->stream->dpms_off &&
+ new_pipe_link_enc->funcs->is_dig_enabled &&
+ new_pipe_link_enc->funcs->is_dig_enabled(
+ new_pipe_link_enc) &&
new_pipe->stream_res.stream_enc &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
@@ -226,7 +236,7 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
(pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
- !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {
+ !pipe_link_enc) && !stream_changed_otg_dig_on)) {
/* This w/a should not trigger when we have a dig active */
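
The dio_link_enc fallback mirrors the other unify_link_enc_assignment changes in this series: with the unified assignment disabled, the per-pipe link_res.dio_link_enc pointers are not populated, so the workaround keeps deriving the encoder state from stream->link_enc as before.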
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 05ad7a9af4ff..e71ea21401f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -905,7 +905,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
static void dc_destruct(struct dc *dc)
{
// reset link encoder assignment table on destruct
- if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
+ if (dc->res_pool && dc->res_pool->funcs->link_encs_assign &&
+ !dc->config.unify_link_enc_assignment)
link_enc_cfg_init(dc, dc->current_state);
if (dc->current_state) {
@@ -1201,6 +1202,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC)
+ get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color));
else {
if (dc->ctx->dce_version < DCN_VERSION_2_0)
color_space_to_black_color(
@@ -3955,6 +3958,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
+ if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
+ dc->res_pool->funcs->prepare_mcache_programming(dc, context);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -4013,9 +4019,6 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
- dc->res_pool->funcs->prepare_mcache_programming(dc, context);
-
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
@@ -4937,7 +4940,8 @@ static bool full_update_required(struct dc *dc,
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
stream_update->crtc_timing_adjust ||
- stream_update->scaler_sharpener_update))
+ stream_update->scaler_sharpener_update ||
+ stream_update->hw_cursor_req))
return true;
if (stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index af1ea5792560..650e89825968 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -51,126 +51,6 @@
DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
} while (0)
-void pre_surface_trace(
- struct dc *dc,
- const struct dc_plane_state *const *plane_states,
- int surface_count)
-{
- int i;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- for (i = 0; i < surface_count; i++) {
- const struct dc_plane_state *plane_state = plane_states[i];
-
- SURFACE_TRACE("Planes %d:\n", i);
-
- SURFACE_TRACE(
- "plane_state->visible = %d;\n"
- "plane_state->flip_immediate = %d;\n"
- "plane_state->address.type = %d;\n"
- "plane_state->address.grph.addr.quad_part = 0x%llX;\n"
- "plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n"
- "plane_state->scaling_quality.h_taps = %d;\n"
- "plane_state->scaling_quality.v_taps = %d;\n"
- "plane_state->scaling_quality.h_taps_c = %d;\n"
- "plane_state->scaling_quality.v_taps_c = %d;\n",
- plane_state->visible,
- plane_state->flip_immediate,
- plane_state->address.type,
- plane_state->address.grph.addr.quad_part,
- plane_state->address.grph.meta_addr.quad_part,
- plane_state->scaling_quality.h_taps,
- plane_state->scaling_quality.v_taps,
- plane_state->scaling_quality.h_taps_c,
- plane_state->scaling_quality.v_taps_c);
-
- SURFACE_TRACE(
- "plane_state->src_rect.x = %d;\n"
- "plane_state->src_rect.y = %d;\n"
- "plane_state->src_rect.width = %d;\n"
- "plane_state->src_rect.height = %d;\n"
- "plane_state->dst_rect.x = %d;\n"
- "plane_state->dst_rect.y = %d;\n"
- "plane_state->dst_rect.width = %d;\n"
- "plane_state->dst_rect.height = %d;\n"
- "plane_state->clip_rect.x = %d;\n"
- "plane_state->clip_rect.y = %d;\n"
- "plane_state->clip_rect.width = %d;\n"
- "plane_state->clip_rect.height = %d;\n",
- plane_state->src_rect.x,
- plane_state->src_rect.y,
- plane_state->src_rect.width,
- plane_state->src_rect.height,
- plane_state->dst_rect.x,
- plane_state->dst_rect.y,
- plane_state->dst_rect.width,
- plane_state->dst_rect.height,
- plane_state->clip_rect.x,
- plane_state->clip_rect.y,
- plane_state->clip_rect.width,
- plane_state->clip_rect.height);
-
- SURFACE_TRACE(
- "plane_state->plane_size.surface_size.x = %d;\n"
- "plane_state->plane_size.surface_size.y = %d;\n"
- "plane_state->plane_size.surface_size.width = %d;\n"
- "plane_state->plane_size.surface_size.height = %d;\n"
- "plane_state->plane_size.surface_pitch = %d;\n",
- plane_state->plane_size.surface_size.x,
- plane_state->plane_size.surface_size.y,
- plane_state->plane_size.surface_size.width,
- plane_state->plane_size.surface_size.height,
- plane_state->plane_size.surface_pitch);
-
-
- SURFACE_TRACE(
- "plane_state->tiling_info.gfx8.num_banks = %d;\n"
- "plane_state->tiling_info.gfx8.bank_width = %d;\n"
- "plane_state->tiling_info.gfx8.bank_width_c = %d;\n"
- "plane_state->tiling_info.gfx8.bank_height = %d;\n"
- "plane_state->tiling_info.gfx8.bank_height_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_aspect = %d;\n"
- "plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_split = %d;\n"
- "plane_state->tiling_info.gfx8.tile_split_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_mode = %d;\n"
- "plane_state->tiling_info.gfx8.tile_mode_c = %d;\n",
- plane_state->tiling_info.gfx8.num_banks,
- plane_state->tiling_info.gfx8.bank_width,
- plane_state->tiling_info.gfx8.bank_width_c,
- plane_state->tiling_info.gfx8.bank_height,
- plane_state->tiling_info.gfx8.bank_height_c,
- plane_state->tiling_info.gfx8.tile_aspect,
- plane_state->tiling_info.gfx8.tile_aspect_c,
- plane_state->tiling_info.gfx8.tile_split,
- plane_state->tiling_info.gfx8.tile_split_c,
- plane_state->tiling_info.gfx8.tile_mode,
- plane_state->tiling_info.gfx8.tile_mode_c);
-
- SURFACE_TRACE(
- "plane_state->tiling_info.gfx8.pipe_config = %d;\n"
- "plane_state->tiling_info.gfx8.array_mode = %d;\n"
- "plane_state->color_space = %d;\n"
- "plane_state->dcc.enable = %d;\n"
- "plane_state->format = %d;\n"
- "plane_state->rotation = %d;\n"
- "plane_state->stereo_format = %d;\n",
- plane_state->tiling_info.gfx8.pipe_config,
- plane_state->tiling_info.gfx8.array_mode,
- plane_state->color_space,
- plane_state->dcc.enable,
- plane_state->format,
- plane_state->rotation,
- plane_state->stereo_format);
-
- SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n",
- plane_state->tiling_info.gfx9.swizzle);
-
- SURFACE_TRACE("\n");
- }
- SURFACE_TRACE("\n");
-}
-
void update_surface_trace(
struct dc *dc,
const struct dc_surface_update *updates,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 6b514fd03f16..e0277728268a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -34,6 +34,7 @@
#include "dc_state_priv.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+#define MAX_NUM_MCACHE 8
/* used as index in array of black_color_format */
enum black_color_format {
@@ -553,6 +554,53 @@ void get_cursor_visual_confirm_color(
}
}
+void get_dcc_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
+
+ if (!pipe_ctx->plane_state->dcc.enable) {
+ color->color_r_cr = 0; /* black - DCC disabled */
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ return;
+ }
+
+ if (dc->ctx->dce_version < DCN_VERSION_4_01) {
+ color->color_r_cr = MAX_TG_COLOR_VALUE; /* red - DCC enabled */
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ return;
+ }
+
+ uint32_t first_id = pipe_ctx->mcache_regs.main.p0.mcache_id_first;
+ uint32_t second_id = pipe_ctx->mcache_regs.main.p0.mcache_id_second;
+
+ if (first_id != MCACHE_ID_UNASSIGNED && second_id != MCACHE_ID_UNASSIGNED && first_id != second_id) {
+ color->color_r_cr = MAX_TG_COLOR_VALUE/2; /* grey - 2 mcache */
+ color->color_g_y = MAX_TG_COLOR_VALUE/2;
+ color->color_b_cb = MAX_TG_COLOR_VALUE/2;
+ }
+
+ else if (first_id != MCACHE_ID_UNASSIGNED || second_id != MCACHE_ID_UNASSIGNED) {
+ const struct tg_color id_colors[MAX_NUM_MCACHE] = {
+ {0, MAX_TG_COLOR_VALUE, 0}, /* green */
+ {0, 0, MAX_TG_COLOR_VALUE}, /* blue */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* magenta */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* white */
+ {MAX_TG_COLOR_VALUE/2, 0, 0}, /* dark red */
+ {0, MAX_TG_COLOR_VALUE/2, 0}, /* dark green */
+ };
+
+ uint32_t assigned_id = (first_id != MCACHE_ID_UNASSIGNED) ? first_id : second_id;
+ *color = id_colors[assigned_id];
+ }
+}
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
@@ -564,6 +612,7 @@ void set_p_state_switch_method(
if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba)
return;
+ pipe_ctx->p_state_type = P_STATE_UNKNOWN;
if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
dm_dram_clock_change_unsupported) {
/* MCLK switching is supported */
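
get_dcc_visual_confirm_color() above maps pipe state onto the confirm color: black for DCC disabled, solid red for DCC enabled on pre-DCN 4.01 hardware, mid grey when two distinct mcache IDs are assigned, and one of the eight id_colors entries when a single mcache ID is in use. Assuming the existing amdgpu.visualconfirm module parameter feeds dc->debug.visual_confirm, the new mode is selected with value 22, matching VISUAL_CONFIRM_DCC added to dc.h below.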
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7eb91612b60d..ea404435c9b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1472,7 +1472,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
- if (!plane_state->dst_rect.width ||
+ if (!plane_state ||
+ !plane_state->dst_rect.width ||
!plane_state->dst_rect.height ||
!plane_state->src_rect.width ||
!plane_state->src_rect.height) {
@@ -4925,7 +4926,10 @@ bool pipe_need_reprogram(
return true;
/* DIG link encoder resource assignment for stream changed. */
- if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
+ if (pipe_ctx_old->stream->ctx->dc->config.unify_link_enc_assignment) {
+ if (pipe_ctx_old->link_res.dio_link_enc != pipe_ctx->link_res.dio_link_enc)
+ return true;
+ } else if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
bool need_reprogram = false;
struct dc *dc = pipe_ctx_old->stream->ctx->dc;
struct link_encoder *link_enc_prev =
@@ -5191,7 +5195,7 @@ void get_audio_check(struct audio_info *aud_modes,
}
}
-static struct link_encoder *get_temp_dio_link_enc(
+struct link_encoder *get_temp_dio_link_enc(
const struct resource_context *res_ctx,
const struct resource_pool *const pool,
const struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index e8134c47fe0d..0478dd856d8c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -201,7 +201,8 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
- if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
+ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign &&
+ !new_stream->ctx->dc->config.unify_link_enc_assignment)
new_stream->link_enc = NULL;
kref_init(&new_stream->refcount);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5e96913bcab1..a62c4893e5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -53,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.321"
+#define DC_VER "3.2.323"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -495,6 +495,7 @@ enum visual_confirm {
VISUAL_CONFIRM_FAMS2 = 19,
VISUAL_CONFIRM_HW_CURSOR = 20,
VISUAL_CONFIRM_VABC = 21,
+ VISUAL_CONFIRM_DCC = 22,
};
enum dc_psr_power_opts {
@@ -1083,6 +1084,7 @@ struct dc_debug_options {
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
+ uint32_t acpi_transition_bitmasks[MAX_PIPES];
};
@@ -1806,6 +1808,7 @@ struct dc_link {
struct dc_panel_config panel_config;
struct phy_state phy_state;
+ uint32_t phy_transition_bitmask;
// BW ALLOCATION USB4 ONLY
struct dc_dpia_bw_alloc dpia_bw_alloc_config;
bool skip_implict_edp_power_control;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index ae6e2d8552ac..1f4f11adc491 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -300,6 +300,19 @@ union lane_align_status_updated {
uint8_t raw;
};
+union link_service_irq_vector_esi0 {
+ struct {
+ uint8_t DP_LINK_RX_CAP_CHANGED:1;
+ uint8_t DP_LINK_STATUS_CHANGED:1;
+ uint8_t DP_LINK_STREAM_STATUS_CHANGED:1;
+ uint8_t DP_LINK_HDMI_LINK_STATUS_CHANGED:1;
+ uint8_t DP_LINK_CONNECTED_OFF_ENTRY_REQUESTED:1;
+ uint8_t DP_LINK_TUNNELING_IRQ:1;
+ uint8_t reserved:2;
+ } bits;
+ uint8_t raw;
+};
+
union lane_adjust {
struct {
uint8_t VOLTAGE_SWING_LANE:2;
@@ -410,14 +423,6 @@ union dwnstream_port_caps_byte3_hdmi {
uint8_t raw;
};
-union hdmi_sink_encoded_link_bw_support {
- struct {
- uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
- uint8_t RESERVED:5;
- } bits;
- uint8_t raw;
-};
-
union hdmi_encoded_link_bw {
struct {
uint8_t FRL_MODE:1; // Bit 0
@@ -470,8 +475,10 @@ union sink_status {
uint8_t raw;
};
-/*6-byte structure corresponding to 6 registers (200h-205h)
-read during handling of HPD-IRQ*/
+/* 7-byte structure corresponding to 6 registers (200h-205h)
+ * and LINK_SERVICE_IRQ_ESI0 (2005h) for tunneling IRQ
+ * read during handling of HPD-IRQ
+ */
union hpd_irq_data {
struct {
union sink_count sink_cnt;/* 200h */
@@ -479,9 +486,10 @@ union hpd_irq_data {
union lane_status lane01_status;/* 202h */
union lane_status lane23_status;/* 203h */
union lane_align_status_updated lane_status_updated;/* 204h */
- union sink_status sink_status;
+ union sink_status sink_status;/* 205h */
+ union link_service_irq_vector_esi0 link_service_irq_esi0;/* 2005h */
} bytes;
- uint8_t raw[6];
+ uint8_t raw[7];
};
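Widening raw[] to seven bytes means the union can no longer be filled by a single burst read: bytes 0-5 map onto the contiguous 200h-205h registers, while byte 6 mirrors LINK_SERVICE_IRQ_VECTOR_ESI0 at 2005h. A hedged sketch of the two-read fill using the generic DRM AUX helper (DC itself goes through its own core_link_read_dpcd wrapper, and read_hpd_irq_data here is our name):

#include <drm/display/drm_dp_helper.h>

static int read_hpd_irq_data(struct drm_dp_aux *aux, union hpd_irq_data *data)
{
	/* burst read of DP_SINK_COUNT..DP_SINK_STATUS (200h-205h) */
	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT, data->raw, 6) != 6)
		return -EIO;

	/* non-contiguous tunneling IRQ byte at 2005h */
	if (drm_dp_dpcd_read(aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
			     &data->raw[6], 1) != 1)
		return -EIO;

	return 0;
}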
union down_stream_port_count {
@@ -1128,6 +1136,8 @@ struct dc_lttpr_caps {
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+ uint8_t lttpr_ieee_oui[3];
+ uint8_t lttpr_device_id[6];
};
struct dc_dongle_dfp_cap_ext {
@@ -1218,6 +1228,8 @@ struct dpcd_caps {
struct replay_info pr_info;
uint16_t edp_oled_emission_rate;
union dp_receive_port0_cap receive_port0_cap;
+ /* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
+ uint8_t mso_cap_sst_links_supported;
};
union dpcd_sink_ext_caps {
@@ -1391,6 +1403,12 @@ struct dp_trace {
#ifndef DP_BRANCH_VENDOR_SPECIFIC_START
#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
#endif
+#ifndef DP_LTTPR_IEEE_OUI
+#define DP_LTTPR_IEEE_OUI 0xF003D
+#endif
+#ifndef DP_LTTPR_DEVICE_ID
+#define DP_LTTPR_DEVICE_ID 0xF0040
+#endif
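The fallback defines cover kernels whose DRM headers predate these addresses; the OUI occupies three bytes starting at F003Dh and the device ID six bytes starting at F0040h, matching the lttpr_ieee_oui and lttpr_device_id fields added to dc_lttpr_caps above. A minimal sketch of populating them with the generic AUX helper (DC's own retrieval uses core_link_read_dpcd; the function name is ours):

#include <drm/display/drm_dp_helper.h>

static int read_lttpr_identity(struct drm_dp_aux *aux,
			       struct dc_lttpr_caps *caps)
{
	/* 3-byte IEEE OUI at F003Dh */
	if (drm_dp_dpcd_read(aux, DP_LTTPR_IEEE_OUI,
			     caps->lttpr_ieee_oui, 3) != 3)
		return -EIO;

	/* 6-byte device ID at F0040h */
	if (drm_dp_dpcd_read(aux, DP_LTTPR_DEVICE_ID,
			     caps->lttpr_device_id, 6) != 6)
		return -EIO;

	return 0;
}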
/** USB4 DPCD BW Allocation Registers Chapter 10.7 **/
#ifndef DP_TUNNELING_CAPABILITIES
#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
@@ -1428,4 +1446,20 @@ struct dp_trace {
#ifndef REQUESTED_BW
#define REQUESTED_BW 0xE0031 /* 1.4a */
#endif
+#ifndef DP_TUNNELING_BW_ALLOC_BITS_MASK
+#define DP_TUNNELING_BW_ALLOC_BITS_MASK (0x0F << 0)
+#endif
+#ifndef DP_TUNNELING_BW_REQUEST_FAILED
+#define DP_TUNNELING_BW_REQUEST_FAILED (1 << 0)
+#endif
+#ifndef DP_TUNNELING_BW_REQUEST_SUCCEEDED
+#define DP_TUNNELING_BW_REQUEST_SUCCEEDED (1 << 1)
+#endif
+#ifndef DP_TUNNELING_ESTIMATED_BW_CHANGED
+#define DP_TUNNELING_ESTIMATED_BW_CHANGED (1 << 2)
+#endif
+#ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
+#define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
+#endif
+
#endif /* DC_DP_TYPES_H */
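The four tunneling masks all decode one status byte, so an IRQ handler can branch on them after a single DPCD read. A schematic decode (the handler name and the actions in the comments are illustrative, not from this patch):

#include <stdint.h>

static void handle_tunneling_bw_status(uint8_t status)
{
	if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
		/* fall back to the previously granted allocation */
	}
	if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
		/* commit the newly granted bandwidth */
	}
	if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
		/* re-read estimated BW and revalidate active streams */
	}
	if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) {
		/* re-query whether BW allocation is still supported */
	}
}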
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index e60898c2df01..acd3b373a18e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -181,6 +181,7 @@ struct dc_panel_patch {
uint8_t blankstream_before_otg_off;
bool oled_optimize_display_on;
unsigned int force_mst_blocked_discovery;
+ unsigned int wait_after_dpcd_poweroff_ms;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index 160c299419b7..a9b88f5e0c04 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -379,53 +379,55 @@ struct dccg_mask {
DCCG401_REG_FIELD_LIST(uint32_t)
};
+#define DCCG_REG_VARIABLE_LIST \
+ uint32_t DPPCLK_DTO_CTRL; \
+ uint32_t DPPCLK_DTO_PARAM[6]; \
+ uint32_t REFCLK_CNTL; \
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL; \
+ uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES]; \
+ uint32_t HDMICHARCLK_CLOCK_CNTL[6]; \
+ uint32_t PHYASYMCLK_CLOCK_CNTL; \
+ uint32_t PHYBSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYCSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYDSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYESYMCLK_CLOCK_CNTL; \
+ uint32_t DTBCLK_DTO_MODULO[MAX_PIPES]; \
+ uint32_t DTBCLK_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO; \
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE; \
+ uint32_t DCCG_AUDIO_DTO_SOURCE; \
+ uint32_t DPSTREAMCLK_CNTL; \
+ uint32_t HDMISTREAMCLK_CNTL; \
+ uint32_t SYMCLK32_SE_CNTL; \
+ uint32_t SYMCLK32_LE_CNTL; \
+ uint32_t DENTIST_DISPCLK_CNTL; \
+ uint32_t DSCCLK_DTO_CTRL; \
+ uint32_t DSCCLK0_DTO_PARAM; \
+ uint32_t DSCCLK1_DTO_PARAM; \
+ uint32_t DSCCLK2_DTO_PARAM; \
+ uint32_t DSCCLK3_DTO_PARAM; \
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE; \
+ uint32_t DPSTREAMCLK_GATE_DISABLE; \
+ uint32_t DCCG_GATE_DISABLE_CNTL; \
+ uint32_t DCCG_GATE_DISABLE_CNTL2; \
+ uint32_t DCCG_GATE_DISABLE_CNTL3; \
+ uint32_t HDMISTREAMCLK0_DTO_PARAM; \
+ uint32_t DCCG_GATE_DISABLE_CNTL4; \
+ uint32_t OTG_PIXEL_RATE_DIV; \
+ uint32_t DTBCLK_P_CNTL; \
+ uint32_t DPPCLK_CTRL; \
+ uint32_t DCCG_GATE_DISABLE_CNTL5; \
+ uint32_t DCCG_GATE_DISABLE_CNTL6; \
+ uint32_t DCCG_GLOBAL_FGCG_REP_CNTL; \
+ uint32_t SYMCLKA_CLOCK_ENABLE; \
+ uint32_t SYMCLKB_CLOCK_ENABLE; \
+ uint32_t SYMCLKC_CLOCK_ENABLE; \
+ uint32_t SYMCLKD_CLOCK_ENABLE; \
+ uint32_t SYMCLKE_CLOCK_ENABLE; \
+ uint32_t DP_DTO_MODULO[MAX_PIPES]; \
+ uint32_t DP_DTO_PHASE[MAX_PIPES]
struct dccg_registers {
- uint32_t DPPCLK_DTO_CTRL;
- uint32_t DPPCLK_DTO_PARAM[6];
- uint32_t REFCLK_CNTL;
- uint32_t DISPCLK_FREQ_CHANGE_CNTL;
- uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES];
- uint32_t HDMICHARCLK_CLOCK_CNTL[6];
- uint32_t PHYASYMCLK_CLOCK_CNTL;
- uint32_t PHYBSYMCLK_CLOCK_CNTL;
- uint32_t PHYCSYMCLK_CLOCK_CNTL;
- uint32_t PHYDSYMCLK_CLOCK_CNTL;
- uint32_t PHYESYMCLK_CLOCK_CNTL;
- uint32_t DTBCLK_DTO_MODULO[MAX_PIPES];
- uint32_t DTBCLK_DTO_PHASE[MAX_PIPES];
- uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO;
- uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE;
- uint32_t DCCG_AUDIO_DTO_SOURCE;
- uint32_t DPSTREAMCLK_CNTL;
- uint32_t HDMISTREAMCLK_CNTL;
- uint32_t SYMCLK32_SE_CNTL;
- uint32_t SYMCLK32_LE_CNTL;
- uint32_t DENTIST_DISPCLK_CNTL;
- uint32_t DSCCLK_DTO_CTRL;
- uint32_t DSCCLK0_DTO_PARAM;
- uint32_t DSCCLK1_DTO_PARAM;
- uint32_t DSCCLK2_DTO_PARAM;
- uint32_t DSCCLK3_DTO_PARAM;
- uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
- uint32_t DPSTREAMCLK_GATE_DISABLE;
- uint32_t DCCG_GATE_DISABLE_CNTL;
- uint32_t DCCG_GATE_DISABLE_CNTL2;
- uint32_t DCCG_GATE_DISABLE_CNTL3;
- uint32_t HDMISTREAMCLK0_DTO_PARAM;
- uint32_t DCCG_GATE_DISABLE_CNTL4;
- uint32_t OTG_PIXEL_RATE_DIV;
- uint32_t DTBCLK_P_CNTL;
- uint32_t DPPCLK_CTRL;
- uint32_t DCCG_GATE_DISABLE_CNTL5;
- uint32_t DCCG_GATE_DISABLE_CNTL6;
- uint32_t DCCG_GLOBAL_FGCG_REP_CNTL;
- uint32_t SYMCLKA_CLOCK_ENABLE;
- uint32_t SYMCLKB_CLOCK_ENABLE;
- uint32_t SYMCLKC_CLOCK_ENABLE;
- uint32_t SYMCLKD_CLOCK_ENABLE;
- uint32_t SYMCLKE_CLOCK_ENABLE;
- uint32_t DP_DTO_MODULO[MAX_PIPES];
- uint32_t DP_DTO_PHASE[MAX_PIPES];
+ DCCG_REG_VARIABLE_LIST;
};
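Converting the field list to a macro trades a plain struct body for reuse: a future DCCG variant can splice in the common registers and append its own, keeping the shared prefix layout-identical across generations. Note the macro deliberately omits the trailing semicolon so the invocation site supplies it, which is why the struct body reads DCCG_REG_VARIABLE_LIST;. A sketch of the intended reuse (dccg99 is a made-up generation):

struct dccg99_registers {
	DCCG_REG_VARIABLE_LIST;        /* all common DCCG registers */
	uint32_t NEW_PIXEL_RATE_CNTL;  /* hypothetical new register */
};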
struct dcn_dccg {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 332094ad2b05..ffd172231fdf 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -531,7 +531,7 @@ static void dccg401_enable_dpstreamclk(struct dccg *dccg, int otg_inst, int dp_h
DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
}
-static void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
+void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index b9905c73e754..55e8718aad22 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -208,6 +208,8 @@ void dccg401_enable_symclk32_le(
void dccg401_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
+void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
void dccg401_set_ref_dscclk(struct dccg *dccg,
uint32_t dsc_inst);
void dccg401_set_src_sel(
@@ -228,14 +230,11 @@ void dccg401_set_dp_dto(
const struct dp_dto_params *params);
void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
-
void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
void dccg401_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
-
-
struct dccg *dccg401_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index e5fb0e8333e4..e691a1cf3356 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
dce60_timing_generator_enable_advanced_request,
.configure_crc = dce60_configure_crc,
.get_crc = dce110_get_crc,
+ .is_two_pixels_per_container = dce110_is_two_pixels_per_container,
};
void dce60_timing_generator_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
index ea0c9a9d0bd6..9972911330b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
@@ -137,9 +137,9 @@ static const struct link_encoder_funcs dcn35_link_enc_funcs = {
.hw_init = dcn35_link_encoder_init,
.setup = dcn35_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
- .enable_dp_output = dcn31_link_encoder_enable_dp_output,
- .enable_dp_mst_output = dcn31_link_encoder_enable_dp_mst_output,
- .disable_output = dcn31_link_encoder_disable_output,
+ .enable_dp_output = dcn35_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn35_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn35_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
@@ -297,6 +297,50 @@ static void link_encoder_disable(struct dcn10_link_encoder *enc10)
REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
}
+void dcn35_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ }
+}
+
+void dcn35_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+ }
+}
+
+void dcn35_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_disable_output(enc, signal);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_disable_output(enc, signal);
+ }
+}
+
void dcn35_link_encoder_enable_dpia_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
index f9d4221f4b43..5712e6553fab 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
@@ -145,6 +145,29 @@ enum signal_type dcn35_get_dig_mode(struct link_encoder *enc);
void dcn35_link_encoder_setup(struct link_encoder *enc, enum signal_type signal);
/*
+ * Enable DP transmitter and its encoder.
+ */
+void dcn35_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Enable DP transmitter and its encoder in MST mode.
+ */
+void dcn35_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Disable transmitter and its encoder.
+ */
+void dcn35_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
+/*
* Enable DP transmitter and its encoder for dpia port.
*/
void dcn35_link_encoder_enable_dpia_output(
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index 4bab180e1938..d5fa551dd3c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -100,7 +100,7 @@ void enc401_stream_encoder_dvi_set_stream_attribute(
}
/* setup stream encoder in hdmi mode */
-static void enc401_stream_encoder_hdmi_set_stream_attribute(
+void enc401_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
int actual_pix_clk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
index 25cc8f72d8d3..d6b00cd246b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
@@ -232,4 +232,9 @@ void enc401_stream_encoder_map_to_link(
uint32_t stream_enc_inst,
uint32_t link_enc_inst);
void enc401_read_state(struct stream_encoder *enc, struct enc_state *s);
+void enc401_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio);
#endif /* __DC_DIO_STREAM_ENCODER_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index f81e5a4e1d6d..7b9c22c45453 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -291,6 +291,13 @@ bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, e
bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
/*
+ * ACPI Interfaces
+ */
+void dm_acpi_process_phy_transition_interlock(
+ const struct dc_context *ctx,
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params);
+
+/*
* Debug and verification hooks
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index facf269c4326..bf63da266a18 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -275,4 +275,30 @@ enum dm_dmub_wait_type {
DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY,
};
+enum dm_acpi_transition_link_type {
+ hdmi_tmds,
+ hdmi_frl,
+ dp_8b_10b,
+ dp_128b_132b,
+ none
+};
+
+struct dm_process_phy_transition_init_params {
+ uint32_t phy_id;
+ uint8_t action;
+ uint32_t sym_clock_10khz;
+ enum signal_type signal;
+ enum dc_lane_count display_port_lanes_count;
+ enum dc_link_rate display_port_link_rate;
+ uint32_t transition_bitmask;
+ uint8_t hdmi_frl_num_lanes;
+};
+
+struct dm_process_phy_transition_input_params {
+ uint32_t phy_id;
+ uint32_t transition_id;
+ uint32_t phy_configuration;
+ uint32_t data_rate;
+};
+
#endif
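A DM-side caller fills dm_process_phy_transition_init_params from link state and hands it to the dm_acpi_process_phy_transition_interlock() hook declared in dm_services.h earlier in this series. A hedged sketch; the action code and clock values are placeholders, and sourcing phy_id from link_enc_hw_inst is our assumption:

static void notify_phy_transition(struct dc *dc, struct dc_link *link)
{
	struct dm_process_phy_transition_init_params params = {
		.phy_id = link->link_enc_hw_inst,          /* assumed PHY id source */
		.action = 1,                               /* illustrative action code */
		.sym_clock_10khz = 27000,                  /* 270 MHz symbol clock */
		.signal = SIGNAL_TYPE_DISPLAY_PORT,
		.display_port_lanes_count = LANE_COUNT_FOUR,
		.display_port_link_rate = LINK_RATE_HIGH3, /* HBR3 */
		.transition_bitmask = link->phy_transition_bitmask,
		.hdmi_frl_num_lanes = 0,                   /* DP, not HDMI FRL */
	};

	dm_acpi_process_phy_transition_interlock(dc->ctx, params);
}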
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index aac0a0ae2966..88789987bdbc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -178,82 +178,6 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
};
-void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- double vtotal_min, vtotal_max;
- double ratio, modulo, phase;
- uint32_t vblank_start;
- uint32_t v_total_mask_value = 0;
-
- dc_assert_fp_enabled();
-
- /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
- * VTOTAL_MAX - VTOTAL_MIN = 1
- */
- v_total_mask_value = 16;
- vtotal_min = dcn_bw_floor(vtotal_avg);
- vtotal_max = dcn_bw_ceil(vtotal_avg);
-
- /* Check that bottom VBLANK is at least 2 lines tall when running with
- * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
- * of lines in a frame - 1'.
- */
- REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
- &vblank_start);
- ASSERT(vtotal_min >= vblank_start + 1);
-
- /* Special case where the average frame rate can be achieved
- * without using the DTO
- */
- if (vtotal_min == vtotal_max) {
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
-
- optc->funcs->set_vtotal_min_max(optc, 0, 0);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
- REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
- return;
- }
-
- ratio = vtotal_max - vtotal_avg;
- modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
- phase = ratio * modulo;
-
- /* Special cases where the DTO phase gets rounded to 0 or
- * to DTO modulo
- */
- if (phase <= 0 || phase >= modulo) {
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
- phase <= 0 ?
- (uint32_t)vtotal_max : (uint32_t)vtotal_min);
- REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
- REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
- REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
- return;
- }
- REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 1,
- OTG_V_TOTAL_MAX_SEL, 1,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
- OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
- OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
- OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
- optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
-}
-
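For reference, the deleted helper's DTO math dithers OTG_V_TOTAL between floor and ceil of the requested average: phase/modulo is the fraction of frames run at VTOTAL_MIN. A standalone arithmetic check of that relationship (userspace C, no register access):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double vtotal_avg = 1125.25;
	double vtotal_min = floor(vtotal_avg);
	double vtotal_max = ceil(vtotal_avg);
	double modulo = 65536.0 * 65536.0 - 1.0;  /* 2^32 - 1, as in the helper */
	double phase = (vtotal_max - vtotal_avg) * modulo;
	double frac_min = phase / modulo;         /* share of frames at VTOTAL_MIN */
	double achieved = frac_min * vtotal_min + (1.0 - frac_min) * vtotal_max;

	printf("%.6f\n", achieved);               /* prints 1125.250000 */
	return 0;
}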
void dcn30_fpu_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
index cab864095ce7..e3b6ad6a8784 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
@@ -29,9 +29,6 @@
#include "core_types.h"
#include "dcn20/dcn20_optc.h"
-void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg);
-
void dcn30_fpu_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index cee1b351e105..f1fe49401bc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -281,10 +281,10 @@ static void CalculateDynamicMetadataParameters(
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
- long HTotal,
- long VBlank,
- long DynamicMetadataTransmittedBytes,
- long DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int HTotal,
+ unsigned int VBlank,
+ unsigned int DynamicMetadataTransmittedBytes,
+ int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup,
@@ -3265,8 +3265,8 @@ static double CalculateWriteBackDelay(
static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK,
- double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes,
- long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
+ double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes,
+ int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks)
{
double TotalRepeaterDelayTime = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6f490d8d7038..56dda686e299 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -626,6 +626,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
+ !pipe->stream->hw_cursor_req &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 412e75eb4704..12ff65b6a7e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -122,17 +122,6 @@ void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const stru
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param)
-{
- dml_print("DML_RQ_DLG_CALC: =====================================\n");
- dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
- dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l);
- dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c);
- dml_print("DML_RQ_DLG_CALC: =====================================\n");
-}
-
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index ebcd717744e5..2bc64c4081dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -35,7 +35,6 @@ void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dp
void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing);
void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param);
void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param);
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param);
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param);
void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 3664980d1574..bb863c8c6b39 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -44,7 +44,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_cnvc_formatter = 28,
.dppclk_delay_cnvc_cursor = 6,
- .cursor_buffer_size = 42,
+ .cursor_buffer_size = 24,
.cursor_chunk_size = 2,
.dispclk_delay_subtotal = 125,
.max_inter_dcn_tile_repeaters = 8,
@@ -141,9 +141,8 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us;
core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines;
} else {
- memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
+ memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps);
-
core->clean_me_up.mode_lib.ip.imall_supported = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 78c93a502518..4c33d99ca7e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -5058,7 +5058,7 @@ static void CalculateExtraLatency(
double HostVMInefficiencyFactorPrefetch,
unsigned int HostVMMinPageSize,
enum dml2_qos_param_type qos_type,
- bool max_oustanding_when_urgent_expected,
+ bool max_outstanding_when_urgent_expected,
unsigned int max_outstanding_requests,
unsigned int request_size_bytes_luma[],
unsigned int request_size_bytes_chroma[],
@@ -5106,7 +5106,7 @@ static void CalculateExtraLatency(
if (qos_type == dml2_qos_param_type_dcn4x) {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
*ExtraLatency = *ExtraLatency_sr;
- if (max_oustanding_when_urgent_expected)
+ if (max_outstanding_when_urgent_expected)
*ExtraLatency = *ExtraLatency + (ROBBufferSizeInKByte * 1024 - max_outstanding_requests * max_request_size_bytes) / ReturnBW;
} else {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK + RoundTripPingLatencyCycles / FabricClock + ReorderingBytes / ReturnBW;
@@ -5121,7 +5121,7 @@ static void CalculateExtraLatency(
dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
+ dml2_printf("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index dfe54112a9c6..4e502f0a6d20 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -1571,7 +1571,7 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
unsigned int *DSTYAfterScaler;
bool UnboundedRequestEnabled;
unsigned int CompressedBufferSizeInkByte;
- bool max_oustanding_when_urgent_expected;
+ bool max_outstanding_when_urgent_expected;
unsigned int max_outstanding_requests;
unsigned int max_request_size_bytes;
unsigned int *meta_row_height_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 1ed21c1b86a5..a966abd40788 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -532,26 +532,6 @@ static void calculate_odm_slices(const struct dc_stream_state *stream, unsigned
odm_slice_end_x[odm_factor - 1] = stream->src.width - 1;
}
-static bool is_plane_in_odm_slice(const struct dc_plane_state *plane, unsigned int slice_index, unsigned int *odm_slice_end_x, unsigned int num_slices)
-{
- unsigned int slice_start_x, slice_end_x;
-
- if (slice_index == 0)
- slice_start_x = 0;
- else
- slice_start_x = odm_slice_end_x[slice_index - 1] + 1;
-
- slice_end_x = odm_slice_end_x[slice_index];
-
- if (plane->clip_rect.x + plane->clip_rect.width < slice_start_x)
- return false;
-
- if (plane->clip_rect.x > slice_end_x)
- return false;
-
- return true;
-}
-
static void add_odm_slice_to_odm_tree(struct dml2_context *ctx,
struct dc_state *state,
struct dc_pipe_mapping_scratch *scratch,
@@ -791,12 +771,6 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
sort_pipes_for_splitting(&scratch->pipe_pool);
for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) {
- // We build the tree for one ODM slice at a time.
- // Each ODM slice shares a common OPP
- if (!is_plane_in_odm_slice(plane, odm_slice_index, scratch->odm_info.odm_slice_end_x, scratch->odm_info.odm_factor)) {
- continue;
- }
-
// Now we have a list of all pipes to be used for this plane/stream, now setup the tree.
scratch->odm_info.next_higher_pipe_for_odm_slice[odm_slice_index] = add_plane_to_blend_tree(ctx, state,
plane,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index f829d5ac7c8e..2061d43b92e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -557,6 +557,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn36 ||
dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0,
max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0, max_socclk_mhz = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 45584e2f5dfe..939ee0708bd2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -33,7 +33,6 @@
#include "dml2_dc_resource_mgmt.h"
#include "dml21_wrapper.h"
-
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
if (dml2->config.use_native_soc_bb_construction)
@@ -792,7 +791,7 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
if ((in_dc->debug.using_dml21)
&& (in_dc->ctx->dce_version == DCN_VERSION_4_01
- ))
+ ))
return dml21_create(in_dc, dml2, config);
// Allocate Mode Lib Ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 40acebd13e46..abf439e743f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -425,11 +425,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
@@ -467,6 +462,12 @@ bool dpp3_get_optimal_number_of_taps(
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
+	// Avoid null data in the scl data with this early return; proceed with the non-adaptive calculation first
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
/*Ensure we can support the requested number of vtaps*/
min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
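The functional point of this hunk is ordering: the max-downscale early return now fires only after the default taps are written, so a failed validation no longer leaves zeroed tap counts in scl_data. The default rule itself is the one quoted in the context, taps = min(ceil(2 * ratio), 8) for downscaling. A plain-double illustration (the 4-tap upscale fallback is our assumption for the sketch; the driver uses dc_fixpt arithmetic):

#include <math.h>
#include <stdio.h>

static int default_taps(double ratio)
{
	if (ratio <= 1.0)
		return 4;                          /* assumed upscale default */
	return (int)fmin(ceil(2.0 * ratio), 8.0); /* programming-guide rule */
}

int main(void)
{
	printf("%d %d %d\n",
	       default_taps(0.5),   /* upscaling: 4 */
	       default_taps(1.8),   /* ceil(3.6) = 4 */
	       default_taps(4.5));  /* ceil(9.0) = 9, capped at 8 */
	return 0;
}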
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index 4bc85aaf17da..ecaa976e1f52 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -567,80 +567,82 @@
type ISHARP_NLDELTA_SCLIP_PIVOT_N; \
type ISHARP_NLDELTA_SCLIP_SLOPE_N
+#define DPP_REG_VARIABLE_LIST_DCN401 \
+ DPP_DCN3_REG_VARIABLE_LIST_COMMON; \
+ uint32_t CURSOR0_FP_SCALE_BIAS_G_Y; \
+ uint32_t CURSOR0_FP_SCALE_BIAS_RB_CRCB; \
+ uint32_t CUR0_MATRIX_MODE; \
+ uint32_t CUR0_MATRIX_C11_C12_A; \
+ uint32_t CUR0_MATRIX_C13_C14_A; \
+ uint32_t CUR0_MATRIX_C21_C22_A; \
+ uint32_t CUR0_MATRIX_C23_C24_A; \
+ uint32_t CUR0_MATRIX_C31_C32_A; \
+ uint32_t CUR0_MATRIX_C33_C34_A; \
+ uint32_t CUR0_MATRIX_C11_C12_B; \
+ uint32_t CUR0_MATRIX_C13_C14_B; \
+ uint32_t CUR0_MATRIX_C21_C22_B; \
+ uint32_t CUR0_MATRIX_C23_C24_B; \
+ uint32_t CUR0_MATRIX_C31_C32_B; \
+ uint32_t CUR0_MATRIX_C33_C34_B; \
+ uint32_t DSCL_SC_MODE; \
+ uint32_t DSCL_EASF_H_MODE; \
+ uint32_t DSCL_EASF_H_BF_CNTL; \
+ uint32_t DSCL_EASF_H_RINGEST_EVENTAP_REDUCE; \
+ uint32_t DSCL_EASF_H_RINGEST_EVENTAP_GAIN; \
+ uint32_t DSCL_EASF_H_BF_FINAL_MAX_MIN; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG0; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG1; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG2; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG3; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG4; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG5; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG6; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG7; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG0; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG1; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG2; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG3; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG4; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG5; \
+ uint32_t DSCL_EASF_V_MODE; \
+ uint32_t DSCL_EASF_V_BF_CNTL; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL1; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL2; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL3; \
+ uint32_t DSCL_EASF_V_RINGEST_EVENTAP_REDUCE; \
+ uint32_t DSCL_EASF_V_RINGEST_EVENTAP_GAIN; \
+ uint32_t DSCL_EASF_V_BF_FINAL_MAX_MIN; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG0; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG1; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG2; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG3; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG4; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG5; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG6; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG7; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG0; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG1; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG2; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG3; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG4; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG5; \
+ uint32_t DSCL_SC_MATRIX_C0C1; \
+ uint32_t DSCL_SC_MATRIX_C2C3; \
+ uint32_t ISHARP_MODE; \
+ uint32_t ISHARP_NOISEDET_THRESHOLD; \
+ uint32_t ISHARP_NOISE_GAIN_PWL; \
+ uint32_t ISHARP_LBA_PWL_SEG0; \
+ uint32_t ISHARP_LBA_PWL_SEG1; \
+ uint32_t ISHARP_LBA_PWL_SEG2; \
+ uint32_t ISHARP_LBA_PWL_SEG3; \
+ uint32_t ISHARP_LBA_PWL_SEG4; \
+ uint32_t ISHARP_LBA_PWL_SEG5; \
+ uint32_t ISHARP_DELTA_CTRL; \
+ uint32_t ISHARP_DELTA_DATA; \
+ uint32_t ISHARP_DELTA_INDEX; \
+ uint32_t ISHARP_NLDELTA_SOFT_CLIP
struct dcn401_dpp_registers {
- DPP_DCN3_REG_VARIABLE_LIST_COMMON;
- uint32_t CURSOR0_FP_SCALE_BIAS_G_Y;
- uint32_t CURSOR0_FP_SCALE_BIAS_RB_CRCB;
- uint32_t CUR0_MATRIX_MODE;
- uint32_t CUR0_MATRIX_C11_C12_A;
- uint32_t CUR0_MATRIX_C13_C14_A;
- uint32_t CUR0_MATRIX_C21_C22_A;
- uint32_t CUR0_MATRIX_C23_C24_A;
- uint32_t CUR0_MATRIX_C31_C32_A;
- uint32_t CUR0_MATRIX_C33_C34_A;
- uint32_t CUR0_MATRIX_C11_C12_B;
- uint32_t CUR0_MATRIX_C13_C14_B;
- uint32_t CUR0_MATRIX_C21_C22_B;
- uint32_t CUR0_MATRIX_C23_C24_B;
- uint32_t CUR0_MATRIX_C31_C32_B;
- uint32_t CUR0_MATRIX_C33_C34_B;
- uint32_t DSCL_SC_MODE;
- uint32_t DSCL_EASF_H_MODE;
- uint32_t DSCL_EASF_H_BF_CNTL;
- uint32_t DSCL_EASF_H_RINGEST_EVENTAP_REDUCE;
- uint32_t DSCL_EASF_H_RINGEST_EVENTAP_GAIN;
- uint32_t DSCL_EASF_H_BF_FINAL_MAX_MIN;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG0;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG1;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG2;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG3;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG4;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG5;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG6;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG7;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG0;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG1;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG2;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG3;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG4;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG5;
- uint32_t DSCL_EASF_V_MODE;
- uint32_t DSCL_EASF_V_BF_CNTL;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL1;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL2;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL3;
- uint32_t DSCL_EASF_V_RINGEST_EVENTAP_REDUCE;
- uint32_t DSCL_EASF_V_RINGEST_EVENTAP_GAIN;
- uint32_t DSCL_EASF_V_BF_FINAL_MAX_MIN;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG0;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG1;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG2;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG3;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG4;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG5;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG6;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG7;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG0;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG1;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG2;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG3;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG4;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG5;
- uint32_t DSCL_SC_MATRIX_C0C1;
- uint32_t DSCL_SC_MATRIX_C2C3;
- uint32_t ISHARP_MODE;
- uint32_t ISHARP_NOISEDET_THRESHOLD;
- uint32_t ISHARP_NOISE_GAIN_PWL;
- uint32_t ISHARP_LBA_PWL_SEG0;
- uint32_t ISHARP_LBA_PWL_SEG1;
- uint32_t ISHARP_LBA_PWL_SEG2;
- uint32_t ISHARP_LBA_PWL_SEG3;
- uint32_t ISHARP_LBA_PWL_SEG4;
- uint32_t ISHARP_LBA_PWL_SEG5;
- uint32_t ISHARP_DELTA_CTRL;
- uint32_t ISHARP_DELTA_DATA;
- uint32_t ISHARP_DELTA_INDEX;
- uint32_t ISHARP_NLDELTA_SOFT_CLIP;
+ DPP_REG_VARIABLE_LIST_DCN401;
};
struct dcn401_dpp_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
index 03b4ac2f1991..0d2ae21abbdd 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -262,7 +262,7 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern(
}
}
-static void fill_stream_allocation_row_info(
+void dcn31_fill_stream_allocation_row_info(
const struct link_mst_stream_allocation *stream_allocation,
uint32_t *src,
uint32_t *slots)
@@ -296,7 +296,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
/* we should clean-up table each time */
if (table->stream_count >= 1) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[0],
&src,
&slots);
@@ -310,7 +310,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 2) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[1],
&src,
&slots);
@@ -324,7 +324,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 3) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[2],
&src,
&slots);
@@ -338,7 +338,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 4) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[3],
&src,
&slots);
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
index 51f5781325e8..40859660e4dc 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -226,4 +226,10 @@ void dcn31_hpo_dp_link_enc_set_ffe(
const struct dc_link_settings *link_settings,
uint8_t ffe_preset);
+
+void dcn31_fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots);
+
#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
index 48ef3d29b370..bea4e1a8ff90 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -62,4 +62,7 @@ void hpo_dp_link_encoder32_construct(struct dcn31_hpo_dp_link_encoder *enc31,
const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask);
+bool dcn32_hpo_dp_link_enc_is_in_alt_mode(
+ struct hpo_dp_link_encoder *enc);
+
#endif // __DAL_DCN32_HPO_DP_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
index 09049aa3c4f3..f66a38f43a09 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
@@ -1244,6 +1244,7 @@ struct dce_hwseq_registers {
type DOMAIN24_PGFSM_PWR_STATUS; \
type DOMAIN25_PGFSM_PWR_STATUS; \
type DOMAIN_DESIRED_PWR_STATE;
+
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 8280e3652171..9c9947fc5d44 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1153,9 +1153,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct dtbclk_dto_params dto_params = {0};
int dp_hpo_inst;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index b158eb1045a1..a5a3e0823e21 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -3020,9 +3020,12 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk;
int dp_hpo_inst;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index f698062f1e90..288e9dd9205d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -621,7 +621,8 @@ void dcn31_reset_hw_ctx_wrap(
}
/* New dc_state in the process of being applied to hardware. */
- link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
}
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index c4a37a95e812..39668d8cc13a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -927,9 +927,12 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
int dp_hpo_inst = 0;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
&tmds_div, &early_control);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 599fa41fd75f..2b1a2a00648a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -517,6 +517,11 @@ void get_cursor_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_dcc_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 042e04f924a2..9458187b834d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -647,4 +647,9 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master);
bool resource_is_hpo_acquired(struct dc_state *context);
+
+struct link_encoder *get_temp_dio_link_enc(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *const pool,
+ const struct dc_link *link);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 06faa461067b..b68bcc9fca0a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -48,9 +48,16 @@ void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+
link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -71,9 +78,16 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+
if (!stream_enc)
return;
@@ -142,7 +156,14 @@ void enable_dio_dp_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
if (dc_is_dp_sst_signal(signal))
link_enc->funcs->enable_dp_output(
@@ -162,11 +183,16 @@ void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
- if (link_enc != NULL)
- link_enc->funcs->disable_output(link_enc, signal);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+ link_enc->funcs->disable_output(link_enc, signal);
link->dc->link_srv->dp_trace_source_sequence(link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
@@ -175,7 +201,14 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
@@ -186,7 +219,14 @@ void set_dio_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}
@@ -195,9 +235,15 @@ void update_dio_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
- ASSERT(link_enc);
link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
}
@@ -282,7 +328,10 @@ static const struct link_hwss dio_link_hwss = {
bool can_use_dio_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
- return link->link_enc != NULL;
+ if (!link->dc->config.unify_link_enc_assignment)
+ return link->link_enc != NULL;
+ else
+ return link_res->dio_link_enc != NULL;
}
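Every hunk in this file applies the same three-step resolution, which is worth stating once: start from the encoder pinned in link_res, fall back to the dynamic link_enc_cfg lookup when unified assignment is off, and assert-and-bail on NULL. A distilled sketch of that pattern (the helper name is ours; the patch inlines it at each call site):

static struct link_encoder *resolve_dio_link_enc(const struct dc_link *link,
						 const struct link_resource *link_res)
{
	struct link_encoder *link_enc = link_res->dio_link_enc;

	if (!link->dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	ASSERT(link_enc);   /* callers still need their own NULL check */
	return link_enc;
}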
/**
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index a6d1d7641ab4..e1dff4e3f446 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -127,7 +127,10 @@ static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(
link, link_res, tp_params, get_dio_link_hwss())) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 36adf95744fe..81bf3c5e1fdf 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -35,12 +35,15 @@ static void update_dpia_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
static enum dc_status status;
uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
int i;
DC_LOGGER_INIT(link->ctx->logger);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
for (i = 0; i < table->stream_count; i++)
mst_alloc_slots += table->stream_allocations[i].slot_count;
@@ -61,7 +64,10 @@ static void set_dio_dpia_link_test_pattern(struct dc_link *link,
if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_VIDEO_MODE)
return;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (!link_enc)
return;
@@ -83,31 +89,28 @@ static void enable_dpia_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc != NULL) {
- if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->enable_dpia_output) {
+ if (link->dc->config.enable_dpia_pre_training || link->dc->config.unify_link_enc_assignment) {
uint8_t fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
- link_enc->funcs->enable_dpia_output(
- link_enc,
- link_settings,
- link->ddc_hw_inst,
- digmode,
- fec_rdy);
- } else {
- if (dc_is_dp_sst_signal(signal))
- link_enc->funcs->enable_dp_output(
+ if (link_enc->funcs->enable_dpia_output)
+ link_enc->funcs->enable_dpia_output(
link_enc,
link_settings,
- clock_source);
+ link->ddc_hw_inst,
+ digmode,
+ fec_rdy);
else
- link_enc->funcs->enable_dp_mst_output(
- link_enc,
- link_settings,
- clock_source);
- }
+ DC_LOG_ERROR("%s: link encoder does not support enable_dpia_output\n", __func__);
+ } else
+ enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings);
}
@@ -119,13 +122,20 @@ static void disable_dpia_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc != NULL) {
- if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->disable_dpia_output) {
+ if (link->dc->config.enable_dpia_pre_training || link->dc->config.unify_link_enc_assignment) {
uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
- link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ if (link_enc->funcs->disable_dpia_output)
+ link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ else
+ DC_LOG_ERROR("%s: link encoder does not support disable_dpia_output\n", __func__);
} else
link_enc->funcs->disable_output(link_enc, signal);
}
@@ -154,8 +164,10 @@ static const struct link_hwss dpia_link_hwss = {
bool can_use_dpia_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
- return link->is_dig_mapping_flexible &&
- link->dc->res_pool->funcs->link_encs_assign;
+ if (!link->dc->config.unify_link_enc_assignment)
+ return link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign;
+ else
+ return link->is_dig_mapping_flexible && link_res->dio_link_enc != NULL;
}
const struct link_hwss *get_dpia_link_hwss(void)
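Editor's note: for context, the can_use_*() predicates feed the hwss selector roughly as sketched below; the real get_link_hwss() also considers the HPO DP path, and the fallback name here is illustrative:

const struct link_hwss *get_link_hwss_sketch(const struct dc_link *link,
		const struct link_resource *link_res)
{
	if (can_use_dpia_link_hwss(link, link_res))
		return get_dpia_link_hwss();
	if (can_use_dio_link_hwss(link, link_res))
		return get_dio_link_hwss();
	return get_virtual_link_hwss(); /* illustrative fallback */
}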
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 550e1a098fa2..cc9191a5c9e6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -816,7 +816,10 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
{
bool destrictive = false;
struct dc_link_settings max_link_cap;
- bool is_link_enc_unavailable = link->link_enc &&
+ bool is_link_enc_unavailable = false;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ is_link_enc_unavailable = link->link_enc &&
link->dc->res_pool->funcs->link_encs_assign &&
!link_enc_cfg_is_link_enc_avail(
link->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index ec7de9c01fab..321fd1785370 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -652,15 +652,15 @@ static void write_i2c_redriver_setting(
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct cp_psp_stream_config config = {0};
enum dp_panel_mode panel_mode =
dp_get_panel_mode(pipe_ctx->stream->link);
if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
return;
-
- link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
ASSERT(link_enc);
if (link_enc == NULL)
return;
@@ -1924,7 +1924,7 @@ static void disable_link_dp(struct dc_link *link,
if (link_dp_get_encoding_format(&link_settings) ==
DP_8b_10b_ENCODING) {
- dp_set_fec_enable(link, false);
+ dp_set_fec_enable(link, link_res, false);
dp_set_fec_ready(link, link_res, false);
}
}
@@ -2122,7 +2122,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
fec_enable = true;
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- dp_set_fec_enable(link, fec_enable);
+ dp_set_fec_enable(link, &pipe_ctx->link_res, fec_enable);
// during mode set we do DP_SET_POWER off then on, aux writes are lost
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -2461,7 +2461,7 @@ void link_set_dpms_on(
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
enum dc_status status;
- struct link_encoder *link_enc;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -2486,7 +2486,8 @@ void link_set_dpms_on(
}
}
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index e3e7fcb07f19..a77410122636 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -250,21 +250,21 @@ static uint32_t intersect_frl_link_bw_support(
{
uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
- if (hdmi_encoded_link_bw.bits.FRL_MODE) {
- if (hdmi_encoded_link_bw.bits.BW_48Gbps)
- supported_bw_in_kbps = 48000000;
- else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
- supported_bw_in_kbps = 40000000;
- else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
- supported_bw_in_kbps = 32000000;
- else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
- supported_bw_in_kbps = 24000000;
- else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
- supported_bw_in_kbps = 18000000;
- else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
- supported_bw_in_kbps = 9000000;
- }
+ /* Skip checking the FRL_MODE bit, as certain PCONs clear it
+ * despite supporting the link BW indicated in the other bits.
+ */
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
return supported_bw_in_kbps;
}
@@ -330,9 +330,12 @@ bool dp_is_fec_supported(const struct dc_link *link)
/* TODO - use asic cap instead of link_enc->features
* we no longer know which link enc to use for this link before commit
*/
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
return (dc_is_dp_signal(link->connector_signal) && link_enc &&
@@ -1572,10 +1575,18 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = dp_is_lttpr_present(link);
- if (is_lttpr_present)
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+
+ if (is_lttpr_present) {
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+ core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: ");
+
+ core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: ");
+ }
+
return status;
}
@@ -2089,18 +2100,32 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_SINK_EMISSION_RATE,
(uint8_t *)&link->dpcd_caps.edp_oled_emission_rate,
sizeof(link->dpcd_caps.edp_oled_emission_rate));
+
+ /*
+ * Read Multi-SST (Single Stream Transport) capability
+ * for eDP version 1.4 or higher.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14)
+ core_link_read_dpcd(
+ link,
+ DP_EDP_MSO_LINK_CAPABILITIES,
+ (uint8_t *)&link->dpcd_caps.mso_cap_sst_links_supported,
+ sizeof(link->dpcd_caps.mso_cap_sst_links_supported));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
if (!max_link_enc_cap) {
DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
return false;
}
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (link_enc && link_enc->funcs->get_max_link_cap) {
@@ -2128,10 +2153,13 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
struct dc_link_settings max_link_cap = {0};
enum dc_link_rate lttpr_max_link_rate;
enum dc_link_rate cable_max_link_rate;
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
bool is_uhbr13_5_supported = true;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
/* get max link encoder capability */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 8f0ce97f2362..0ce0af3ddbeb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -67,6 +67,7 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx);
/* Initialize output parameter lt_settings. */
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 0f1c411523a2..a5541b8fc95b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -356,6 +356,32 @@ out:
return ret;
}
+/*
+ * Handle DP BW allocation status register
+ *
+ * @link: pointer to the dc_link struct instance
+ * @status: content of DP tunneling status DPCD register
+ *
+ * return: none
+ */
+void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
+{
+ if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
+ DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
+ __func__, link->link_index);
+ } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
+ DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
+ } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
+ }
+
+ core_link_write_dpcd(
+ link, DP_TUNNELING_STATUS,
+ &status, sizeof(status));
+}
+
void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
{
int bw_needed = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 3b6d8494f9d5..1b240a2f6ce0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -108,4 +108,14 @@ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned
*/
int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+/*
+ * Handle DP BW allocation status register
+ *
+ * @link: pointer to the dc_link struct instance
+ * @status: content of DP tunneling status DPCD register
+ *
+ * return: none
+ */
+void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status);
+
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index a08403c022ea..5be00e4ce10b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -37,6 +37,7 @@
#include "link/accessories/link_dp_trace.h"
#include "link/link_dpms.h"
#include "dm_helpers.h"
+#include "link_dp_dpia_bw.h"
#define DC_LOGGER \
link->ctx->logger
@@ -286,6 +287,30 @@ void dp_handle_link_loss(struct dc_link *link)
}
}
+static void dp_handle_tunneling_irq(struct dc_link *link)
+{
+ enum dc_status retval;
+ uint8_t tunneling_status = 0;
+
+ retval = core_link_read_dpcd(
+ link, DP_TUNNELING_STATUS,
+ &tunneling_status,
+ sizeof(tunneling_status));
+
+ if (retval == DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: Got DP tunneling status on link %d status=0x%x",
+ __func__, link->link_index, tunneling_status);
+
+ if (tunneling_status & DP_TUNNELING_BW_ALLOC_BITS_MASK)
+ link_dp_dpia_handle_bw_alloc_status(link, tunneling_status);
+ }
+
+ tunneling_status = DP_TUNNELING_IRQ;
+ core_link_write_dpcd(
+ link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &tunneling_status, 1);
+}
+
static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
{
enum dc_status retval;
@@ -319,13 +344,19 @@ enum dc_status dp_read_hpd_rx_irq_data(
*
* For DP 1.4 we need to read those from 2002h range.
*/
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) {
retval = core_link_read_dpcd(
link,
DP_SINK_COUNT,
irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
+ DP_SINK_STATUS - DP_SINK_COUNT + 1);
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ retval = core_link_read_dpcd(
+ link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &irq_data->bytes.link_service_irq_esi0.raw, 1);
+ }
+ } else {
/* Read 14 bytes in a single read and then copy only the required fields.
* This is more efficient than doing it in two separate AUX reads. */
@@ -346,6 +377,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.link_service_irq_esi0.raw = tmp[DP_LINK_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
/*
* This display doesn't have correct values in DPCD200Eh.
@@ -488,6 +520,11 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
+ dp_handle_tunneling_irq(link);
+ }
+
if (link->type == dc_connection_sst_branch &&
hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
!= link->dpcd_sink_count)
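Editor's note: taken together, the hunks above add a second dispatch stage to HPD-RX IRQ handling for USB4 DPIA links. A condensed view of the resulting flow (names from the patch, ordering simplified):

/* 1. dp_read_hpd_rx_irq_data() additionally reads
 *    DP_LINK_SERVICE_IRQ_VECTOR_ESI0 for DPIA links.
 * 2. dp_handle_hpd_rx_irq() checks DP_LINK_TUNNELING_IRQ and calls
 *    dp_handle_tunneling_irq().
 * 3. dp_handle_tunneling_irq() reads DP_TUNNELING_STATUS, forwards any
 *    bits in DP_TUNNELING_BW_ALLOC_BITS_MASK to
 *    link_dp_dpia_handle_bw_alloc_status(), then acks by writing
 *    DP_TUNNELING_IRQ back to the ESI0 vector.
 */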
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index c27ffec5d84f..49521ac4b0e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -142,11 +142,12 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
* if the sink supports it and leave it enabled on link.
* If FEC is not supported, disable it.
*/
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
enum dc_status status = DC_OK;
uint8_t fec_config = 0;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (link_enc->funcs->fec_set_ready == NULL)
return DC_NOT_SUPPORTED;
@@ -176,13 +177,14 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
return status;
}
-void dp_set_fec_enable(struct dc_link *link, bool enable)
+void dp_set_fec_enable(struct dc_link *link, const struct link_resource *link_res, bool enable)
{
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- if (link_enc->funcs->fec_set_enable == NULL)
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc == NULL || link_enc->funcs == NULL || link_enc->funcs->fec_set_enable == NULL)
return;
if (enable && dp_should_enable_fec(link)) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index 1eb0619d6710..ab1c1f8f1f8b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -52,7 +52,8 @@ void dp_set_drive_settings(
enum dc_status dp_set_fec_ready(struct dc_link *link,
const struct link_resource *link_res, bool ready);
-void dp_set_fec_enable(struct dc_link *link, bool enable);
+void dp_set_fec_enable(struct dc_link *link,
+ const struct link_resource *link_res, bool enable);
void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 751c18e592ea..613298d21d03 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -801,19 +801,23 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
}
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
struct encoder_feature_support *enc_caps;
struct dpcd_caps *rx_caps = &link->dpcd_caps;
enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- enc_caps = &link_enc->features;
-
switch (link_dp_get_encoding_format(link_settings)) {
case DP_8b_10b_ENCODING:
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (!link_enc)
+ break;
+
+ enc_caps = &link_enc->features;
if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
@@ -886,13 +890,14 @@ void dp_decide_lane_settings(
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings)
{
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- decide_8b_10b_training_settings(link, link_settings, lt_settings);
+ decide_8b_10b_training_settings(link, link_res, link_settings, lt_settings);
else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING)
- decide_128b_132b_training_settings(link, link_settings, lt_settings);
+ decide_128b_132b_training_settings(link, link_res, link_settings, lt_settings);
}
@@ -1556,6 +1561,7 @@ enum link_training_result dp_perform_link_training(
/* decide training settings */
dp_decide_training_settings(
link,
+ link_res,
link_settings,
&lt_settings);
@@ -1569,7 +1575,8 @@ enum link_training_result dp_perform_link_training(
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, &lt_settings);
- dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
dpcd_configure_channel_coding(link, &lt_settings);
/* enter training mode:
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 0b18aa35c33c..574b083e0936 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -104,6 +104,7 @@ void start_clock_recovery_pattern_early(struct dc_link *link,
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings);
@@ -117,6 +118,7 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings);
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings);
enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
index db87cfe37b5c..11565f187ac7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -204,6 +204,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
struct link_training_settings legacy_settings;
decide_8b_10b_training_settings(link,
+ link_res,
&lt_settings->link_settings,
&legacy_settings);
return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
@@ -227,6 +228,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
}
void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings)
{
@@ -238,7 +240,7 @@ void decide_128b_132b_training_settings(struct dc_link *link,
LINK_SPREAD_05_DOWNSPREAD_30KHZ;
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_res, link_settings);
lt_settings->eq_pattern_time = 2500;
lt_settings->eq_wait_time_limit = 400000;
lt_settings->eq_loop_count_limit = 20;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
index 2147f24efc8b..901a42edafa1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
@@ -34,6 +34,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
struct link_training_settings *lt_settings);
void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index ae95ec48e572..34d2e097ca2e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -93,7 +93,8 @@ static uint32_t get_eq_training_aux_rd_interval(
}
void decide_8b_10b_training_settings(
- struct dc_link *link,
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings)
{
@@ -115,7 +116,7 @@ void decide_8b_10b_training_settings(
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_res, link_setting);
lt_settings->enhanced_framing = 1;
lt_settings->should_set_fec_ready = true;
lt_settings->disallow_per_lane_settings = true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
index d26de15ce954..ea0de701d83f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
@@ -54,7 +54,8 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
void decide_8b_10b_training_settings(
- struct dc_link *link,
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
index 4c6b886a9da8..f99d26290bc0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
@@ -39,6 +39,7 @@ bool dp_perform_link_training_skip_aux(
dp_decide_training_settings(
link,
+ link_res,
link_setting,
&lt_settings);
override_training_settings(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 39e4b7dc9588..603537ffd128 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -110,6 +110,7 @@ static enum link_training_result dpia_configure_link(
dp_decide_training_settings(
link,
+ link_res,
link_setting,
lt_settings);
@@ -129,11 +130,14 @@ static enum link_training_result dpia_configure_link(
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
- if (link->preferred_training_settings.fec_enable != NULL)
- fec_enable = *link->preferred_training_settings.fec_enable;
- else
- fec_enable = true;
- status = dp_set_fec_ready(link, link_res, fec_enable);
+ if (link_dp_get_encoding_format(link_setting) == DP_8b_10b_ENCODING) {
+ if (link->preferred_training_settings.fec_enable != NULL)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+ status = dp_set_fec_ready(link, link_res, fec_enable);
+ }
+
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index ccf8096dde29..ce174ce5579c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -270,7 +270,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
rate = get_dpcd_link_rate(&lt_settings->link_settings);
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) {
+ // Only perform toggle if FIXED_VS LTTPR reports no IEEE OUI
+ if (memcmp("\x00\x00\x00", &link->dpcd_caps.lttpr_caps.lttpr_ieee_oui[0], 3) == 0) {
/* Vendor specific: Toggle link rate */
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index 9267cdf88e9a..ce6fbcf14d7a 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -63,8 +63,7 @@
uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_B[MAX_MPCC]; \
uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \
uint32_t MPCC_MCM_3DLUT_FAST_LOAD_SELECT[MAX_MPCC]; \
- uint32_t MPCC_MCM_3DLUT_FAST_LOAD_STATUS[MAX_MPCC]; \
- uint32_t MPCC_CONTROL2[MAX_MPCC]
+ uint32_t MPCC_MCM_3DLUT_FAST_LOAD_STATUS[MAX_MPCC];
#define MPC_COMMON_MASK_SH_LIST_DCN4_01(mask_sh) \
MPC_COMMON_MASK_SH_LIST_DCN32(mask_sh), \
@@ -184,7 +183,7 @@ struct dcn401_mpc_mask {
};
struct dcn401_mpc_registers {
- MPC_REG_VARIABLE_LIST_DCN4_01;
+ MPC_REG_VARIABLE_LIST_DCN4_01
};
struct dcn401_mpc {
@@ -236,7 +235,17 @@ void mpc401_get_gamut_remap(
struct mpc *mpc,
int mpcc_id,
struct mpc_grph_gamut_adjustment *adjust);
-void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
+
+void mpc401_update_3dlut_fast_load_select(
+ struct mpc *mpc,
+ int mpcc_id,
+ int hubp_idx);
+
+void mpc401_get_3dlut_fast_load_status(
+ struct mpc *mpc,
+ int mpcc_id,
+ uint32_t *done,
+ uint32_t *soft_underflow,
+ uint32_t *hard_underflow);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index a6d4dbe82c13..8b2a8455eb56 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -104,120 +104,115 @@
SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst)
+#define OPTC_REG_VARIABLE_LIST_DCN \
+ uint32_t OTG_GLOBAL_CONTROL1; \
+ uint32_t OTG_GLOBAL_CONTROL2; \
+ uint32_t OTG_VERT_SYNC_CONTROL; \
+ uint32_t OTG_MASTER_UPDATE_MODE; \
+ uint32_t OTG_GSL_CONTROL; \
+ uint32_t OTG_VSTARTUP_PARAM; \
+ uint32_t OTG_VUPDATE_PARAM; \
+ uint32_t OTG_VREADY_PARAM; \
+ uint32_t OTG_BLANK_CONTROL; \
+ uint32_t OTG_MASTER_UPDATE_LOCK; \
+ uint32_t OTG_GLOBAL_CONTROL0; \
+ uint32_t OTG_DOUBLE_BUFFER_CONTROL; \
+ uint32_t OTG_H_TOTAL; \
+ uint32_t OTG_H_BLANK_START_END; \
+ uint32_t OTG_H_SYNC_A; \
+ uint32_t OTG_H_SYNC_A_CNTL; \
+ uint32_t OTG_H_TIMING_CNTL; \
+ uint32_t OTG_V_TOTAL; \
+ uint32_t OTG_V_BLANK_START_END; \
+ uint32_t OTG_V_SYNC_A; \
+ uint32_t OTG_V_SYNC_A_CNTL; \
+ uint32_t OTG_INTERLACE_CONTROL; \
+ uint32_t OTG_CONTROL; \
+ uint32_t OTG_STEREO_CONTROL; \
+ uint32_t OTG_3D_STRUCTURE_CONTROL; \
+ uint32_t OTG_STEREO_STATUS; \
+ uint32_t OTG_V_TOTAL_MAX; \
+ uint32_t OTG_V_TOTAL_MID; \
+ uint32_t OTG_V_TOTAL_MIN; \
+ uint32_t OTG_V_TOTAL_CONTROL; \
+ uint32_t OTG_V_COUNT_STOP_CONTROL; \
+ uint32_t OTG_V_COUNT_STOP_CONTROL2; \
+ uint32_t OTG_TRIGA_CNTL; \
+ uint32_t OTG_TRIGA_MANUAL_TRIG; \
+ uint32_t OTG_MANUAL_FLOW_CONTROL; \
+ uint32_t OTG_FORCE_COUNT_NOW_CNTL; \
+ uint32_t OTG_STATIC_SCREEN_CONTROL; \
+ uint32_t OTG_STATUS_FRAME_COUNT; \
+ uint32_t OTG_STATUS; \
+ uint32_t OTG_STATUS_POSITION; \
+ uint32_t OTG_NOM_VERT_POSITION; \
+ uint32_t OTG_BLACK_COLOR; \
+ uint32_t OTG_TEST_PATTERN_PARAMETERS; \
+ uint32_t OTG_TEST_PATTERN_CONTROL; \
+ uint32_t OTG_TEST_PATTERN_COLOR; \
+ uint32_t OTG_CLOCK_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT0_POSITION; \
+ uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT1_POSITION; \
+ uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; \
+ uint32_t OPTC_INPUT_CLOCK_CONTROL; \
+ uint32_t OPTC_DATA_SOURCE_SELECT; \
+ uint32_t OPTC_MEMORY_CONFIG; \
+ uint32_t OPTC_INPUT_GLOBAL_CONTROL; \
+ uint32_t CONTROL; \
+ uint32_t OTG_GSL_WINDOW_X; \
+ uint32_t OTG_GSL_WINDOW_Y; \
+ uint32_t OTG_VUPDATE_KEEPOUT; \
+ uint32_t OTG_CRC_CNTL; \
+ uint32_t OTG_CRC_CNTL2; \
+ uint32_t OTG_CRC0_DATA_RG; \
+ uint32_t OTG_CRC0_DATA_B; \
+ uint32_t OTG_CRC1_DATA_B; \
+ uint32_t OTG_CRC2_DATA_B; \
+ uint32_t OTG_CRC3_DATA_B; \
+ uint32_t OTG_CRC1_DATA_RG; \
+ uint32_t OTG_CRC2_DATA_RG; \
+ uint32_t OTG_CRC3_DATA_RG; \
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWA_X_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWA_Y_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWB_X_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWB_Y_CONTROL; \
+ uint32_t GSL_SOURCE_SELECT; \
+ uint32_t DWB_SOURCE_SELECT; \
+ uint32_t OTG_DSC_START_POSITION; \
+ uint32_t OPTC_DATA_FORMAT_CONTROL; \
+ uint32_t OPTC_BYTES_PER_PIXEL; \
+ uint32_t OPTC_WIDTH_CONTROL; \
+ uint32_t OTG_DRR_CONTROL; \
+ uint32_t OTG_BLANK_DATA_COLOR; \
+ uint32_t OTG_BLANK_DATA_COLOR_EXT; \
+ uint32_t OTG_DRR_TRIGGER_WINDOW; \
+ uint32_t OTG_M_CONST_DTO0; \
+ uint32_t OTG_M_CONST_DTO1; \
+ uint32_t OTG_DRR_V_TOTAL_CHANGE; \
+ uint32_t OTG_GLOBAL_CONTROL4; \
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK; \
+ uint32_t OPTC_CLOCK_CONTROL; \
+ uint32_t OPTC_WIDTH_CONTROL2; \
+ uint32_t OTG_PSTATE_REGISTER; \
+ uint32_t OTG_PIPE_UPDATE_STATUS; \
+ uint32_t INTERRUPT_DEST
+
struct dcn_optc_registers {
- uint32_t OTG_GLOBAL_CONTROL1;
- uint32_t OTG_GLOBAL_CONTROL2;
- uint32_t OTG_VERT_SYNC_CONTROL;
- uint32_t OTG_MASTER_UPDATE_MODE;
- uint32_t OTG_GSL_CONTROL;
- uint32_t OTG_VSTARTUP_PARAM;
- uint32_t OTG_VUPDATE_PARAM;
- uint32_t OTG_VREADY_PARAM;
- uint32_t OTG_BLANK_CONTROL;
- uint32_t OTG_MASTER_UPDATE_LOCK;
- uint32_t OTG_GLOBAL_CONTROL0;
- uint32_t OTG_DOUBLE_BUFFER_CONTROL;
- uint32_t OTG_H_TOTAL;
- uint32_t OTG_H_BLANK_START_END;
- uint32_t OTG_H_SYNC_A;
- uint32_t OTG_H_SYNC_A_CNTL;
- uint32_t OTG_H_TIMING_CNTL;
- uint32_t OTG_V_TOTAL;
- uint32_t OTG_V_BLANK_START_END;
- uint32_t OTG_V_SYNC_A;
- uint32_t OTG_V_SYNC_A_CNTL;
- uint32_t OTG_INTERLACE_CONTROL;
- uint32_t OTG_CONTROL;
- uint32_t OTG_STEREO_CONTROL;
- uint32_t OTG_3D_STRUCTURE_CONTROL;
- uint32_t OTG_STEREO_STATUS;
- uint32_t OTG_V_TOTAL_MAX;
- uint32_t OTG_V_TOTAL_MID;
- uint32_t OTG_V_TOTAL_MIN;
- uint32_t OTG_V_TOTAL_CONTROL;
- uint32_t OTG_V_COUNT_STOP_CONTROL;
- uint32_t OTG_V_COUNT_STOP_CONTROL2;
- uint32_t OTG_TRIGA_CNTL;
- uint32_t OTG_TRIGA_MANUAL_TRIG;
- uint32_t OTG_MANUAL_FLOW_CONTROL;
- uint32_t OTG_FORCE_COUNT_NOW_CNTL;
- uint32_t OTG_STATIC_SCREEN_CONTROL;
- uint32_t OTG_STATUS_FRAME_COUNT;
- uint32_t OTG_STATUS;
- uint32_t OTG_STATUS_POSITION;
- uint32_t OTG_NOM_VERT_POSITION;
- uint32_t OTG_BLACK_COLOR;
- uint32_t OTG_TEST_PATTERN_PARAMETERS;
- uint32_t OTG_TEST_PATTERN_CONTROL;
- uint32_t OTG_TEST_PATTERN_COLOR;
- uint32_t OTG_CLOCK_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
- uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT1_POSITION;
- uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
- uint32_t OPTC_INPUT_CLOCK_CONTROL;
- uint32_t OPTC_DATA_SOURCE_SELECT;
- uint32_t OPTC_MEMORY_CONFIG;
- uint32_t OPTC_INPUT_GLOBAL_CONTROL;
- uint32_t CONTROL;
- uint32_t OTG_GSL_WINDOW_X;
- uint32_t OTG_GSL_WINDOW_Y;
- uint32_t OTG_VUPDATE_KEEPOUT;
- uint32_t OTG_CRC_CNTL;
- uint32_t OTG_CRC_CNTL2;
- uint32_t OTG_CRC0_DATA_RG;
- uint32_t OTG_CRC1_DATA_RG;
- uint32_t OTG_CRC2_DATA_RG;
- uint32_t OTG_CRC3_DATA_RG;
- uint32_t OTG_CRC0_DATA_B;
- uint32_t OTG_CRC1_DATA_B;
- uint32_t OTG_CRC2_DATA_B;
- uint32_t OTG_CRC3_DATA_B;
- uint32_t OTG_CRC0_DATA_R;
- uint32_t OTG_CRC1_DATA_R;
- uint32_t OTG_CRC2_DATA_R;
- uint32_t OTG_CRC3_DATA_R;
- uint32_t OTG_CRC0_DATA_G;
- uint32_t OTG_CRC1_DATA_G;
- uint32_t OTG_CRC2_DATA_G;
- uint32_t OTG_CRC3_DATA_G;
- uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
- uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
- uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
- uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
- uint32_t OTG_CRC1_WINDOWA_X_CONTROL;
- uint32_t OTG_CRC1_WINDOWA_Y_CONTROL;
- uint32_t OTG_CRC1_WINDOWB_X_CONTROL;
- uint32_t OTG_CRC1_WINDOWB_Y_CONTROL;
- uint32_t GSL_SOURCE_SELECT;
- uint32_t DWB_SOURCE_SELECT;
- uint32_t OTG_DSC_START_POSITION;
- uint32_t OPTC_DATA_FORMAT_CONTROL;
- uint32_t OPTC_BYTES_PER_PIXEL;
- uint32_t OPTC_WIDTH_CONTROL;
- uint32_t OTG_DRR_CONTROL;
- uint32_t OTG_BLANK_DATA_COLOR;
- uint32_t OTG_BLANK_DATA_COLOR_EXT;
- uint32_t OTG_DRR_TRIGGER_WINDOW;
- uint32_t OTG_M_CONST_DTO0;
- uint32_t OTG_M_CONST_DTO1;
- uint32_t OTG_DRR_V_TOTAL_CHANGE;
- uint32_t OTG_GLOBAL_CONTROL4;
- uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK;
- uint32_t OPTC_CLOCK_CONTROL;
- uint32_t OPTC_WIDTH_CONTROL2;
- uint32_t OTG_PSTATE_REGISTER;
- uint32_t OTG_PIPE_UPDATE_STATUS;
- uint32_t INTERRUPT_DEST;
+ OPTC_REG_VARIABLE_LIST_DCN;
};
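Editor's note: hoisting the fields into OPTC_REG_VARIABLE_LIST_DCN lets later register structs extend one shared list instead of copying it. A sketch of how a hypothetical per-generation struct could build on the macro (the DCN30 names below are illustrative, not from the patch):

#define OPTC_REG_VARIABLE_LIST_DCN30 \
	OPTC_REG_VARIABLE_LIST_DCN; \
	uint32_t OTG_SOME_DCN30_ONLY_REG

struct dcn30_optc_registers {
	OPTC_REG_VARIABLE_LIST_DCN30;
};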
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 5c6dc710e96c..e4eca3e32c1b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -1220,7 +1220,7 @@ static void get_pixel_clock_parameters(
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
struct dc_link *link = stream->link;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dce_hwseq *hws = dc->hwseq;
@@ -1229,7 +1229,8 @@ static void get_pixel_clock_parameters(
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
pixel_clk_params->encoder_object_id = link_enc->id;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 911bd60d4fbc..3c42ba8566cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -890,7 +890,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true,
.enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.using_dml2 = false,
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 4e842f29d4c4..7436dfbdf927 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1666,12 +1666,13 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct pixel_clk_params *pixel_clk_params = &pipe_ctx->stream_res.pix_clk_params;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
pixel_clk_params->encoder_object_id = link_enc->id;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 31495c9978b0..28348734d900 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -3,12 +3,11 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl.h"
-#include "dc_spl_scl_filters.h"
#include "dc_spl_scl_easf_filters.h"
#include "dc_spl_isharp_filters.h"
#include "spl_debug.h"
-#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (spl_fixpt_u3d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
static bool spl_is_yuv420(enum spl_pixel_format format)
@@ -76,6 +75,21 @@ static struct spl_rect shift_rec(const struct spl_rect *rec_in, int x, int y)
return rec_out;
}
+static void spl_opp_adjust_rect(struct spl_rect *rec, const struct spl_opp_adjust *adjust)
+{
+ if ((rec->x + adjust->x) >= 0)
+ rec->x += adjust->x;
+
+ if ((rec->y + adjust->y) >= 0)
+ rec->y += adjust->y;
+
+ if ((rec->width + adjust->width) >= 1)
+ rec->width += adjust->width;
+
+ if ((rec->height + adjust->height) >= 1)
+ rec->height += adjust->height;
+}
+
static struct spl_rect calculate_plane_rec_in_timing_active(
struct spl_in *spl_in,
const struct spl_rect *rec_in)
@@ -723,13 +737,15 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
}
}
-static void spl_clamp_viewport(struct spl_rect *viewport)
+static void spl_clamp_viewport(struct spl_rect *viewport, int min_viewport_size)
{
+ if (min_viewport_size == 0)
+ min_viewport_size = MIN_VIEWPORT_SIZE;
/* Clamp minimum viewport size */
- if (viewport->height < MIN_VIEWPORT_SIZE)
- viewport->height = MIN_VIEWPORT_SIZE;
- if (viewport->width < MIN_VIEWPORT_SIZE)
- viewport->width = MIN_VIEWPORT_SIZE;
+ if (viewport->height < min_viewport_size)
+ viewport->height = min_viewport_size;
+ if (viewport->width < min_viewport_size)
+ viewport->width = min_viewport_size;
}
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
@@ -870,6 +886,8 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
static void spl_get_taps_non_adaptive_scaler(
struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
{
+ bool check_max_downscale = false;
+
if (in_taps->h_taps == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
@@ -909,6 +927,23 @@ static void spl_get_taps_non_adaptive_scaler(
else
spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+
+ /*
+ * Max downscale supported is 6.0x. Add an ASSERT to catch if we go beyond that.
+ */
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
spl_scratch->scl_data.taps.h_taps = 1;
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
@@ -927,8 +962,8 @@ static bool spl_get_optimal_number_of_taps(
bool *enable_isharp)
{
int num_part_y, num_part_c;
- int max_taps_y, max_taps_c;
- int min_taps_y, min_taps_c;
+ unsigned int max_taps_y, max_taps_c;
+ unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
bool skip_easf = false;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
@@ -990,12 +1025,18 @@ static bool spl_get_optimal_number_of_taps(
lb_config, &num_part_y, &num_part_c);
/* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2)
- max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
+ if ((spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2) > num_part_y)
+ max_taps_y = 0;
+ else
+ max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
else
max_taps_y = num_part_y;
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 2)
- max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
+ if ((spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2) > num_part_c)
+ max_taps_c = 0;
+ else
+ max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
else
max_taps_c = num_part_c;
@@ -1764,6 +1805,8 @@ static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scrat
spl_calculate_recout(spl_in, spl_scratch, spl_out);
/* depends on pixel format */
spl_calculate_scaling_ratios(spl_in, spl_scratch, spl_out);
+ /* Adjust recout for opp if needed */
+ spl_opp_adjust_rect(&spl_scratch->scl_data.recout, &spl_in->basic_in.opp_recout_adjust);
/* depends on scaling ratios and recout, does not calculate offset yet */
spl_calculate_viewport_size(spl_in, spl_scratch);
@@ -1775,7 +1818,7 @@ static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scrat
}
/* Calculate scaler parameters */
-bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
+bool SPL_NAMESPACE(spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out))
{
bool res = false;
bool enable_easf_v = false;
@@ -1800,7 +1843,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
// Handle 3d recout
spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout);
// Clamp
- spl_clamp_viewport(&spl_scratch.scl_data.viewport);
+ spl_clamp_viewport(&spl_scratch.scl_data.viewport, spl_in->min_viewport_size);
// Save all calculated parameters in dscl_prog_data structure to program hw registers
spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
@@ -1840,7 +1883,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
}
/* External interface to get number of taps only */
-bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out)
+bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out))
{
bool res = false;
bool enable_easf_v = false;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
index 02a2d6725ed5..145961803a92 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
@@ -9,10 +9,22 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
+#ifndef SPL_PFX_
+#define SPL_PFX_
+#endif
+
+#define SPL_EXPAND2(a, b) a##b
+#define SPL_EXPAND(a, b) SPL_EXPAND2(a, b)
+#define SPL_NAMESPACE(symbol) SPL_EXPAND(SPL_PFX_, symbol)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* SPL interfaces */
-bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out);
+bool SPL_NAMESPACE(spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out));
-bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out);
+bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out));
#endif /* __DC_SPL_H__ */
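Editor's note: the SPL_PFX_/SPL_NAMESPACE machinery token-pastes an optional prefix onto the SPL entry points so an out-of-tree consumer can rename them without editing this header. A sketch, where the offline_ prefix is a made-up example:

#define SPL_PFX_ offline_
#include "dc_spl.h"

/* SPL_NAMESPACE(spl_calculate_scaler_params(...)) now declares
 * offline_spl_calculate_scaler_params(...). With SPL_PFX_ left empty
 * (the default above), the names are unchanged. */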
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 467af9dd90de..1c3949b24611 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -427,6 +427,14 @@ struct spl_out {
// SPL inputs
+// opp extra adjustment for rect
+struct spl_opp_adjust {
+ int x;
+ int y;
+ int width;
+ int height;
+};
+
// Basic input information
struct basic_in {
enum spl_pixel_format format; // Pixel Format
@@ -444,6 +452,7 @@ struct basic_in {
} num_slices_recout_width;
} num_h_slices_recout_width_align;
int mpc_h_slice_index; // previous mpc_combine_v - split_idx
+ struct spl_opp_adjust opp_recout_adjust;
// Inputs for adaptive scaler - TODO
enum spl_transfer_func_type tf_type; /* Transfer function type */
enum spl_transfer_func_predefined tf_predefined_type; /* Transfer function predefined type */
@@ -484,7 +493,7 @@ struct spl_sharpness_range {
};
struct adaptive_sharpness {
bool enable;
- int sharpness_level;
+ unsigned int sharpness_level;
struct spl_sharpness_range sharpness_range;
};
enum linear_light_scaling { // convert it in translation logic
@@ -535,6 +544,7 @@ struct spl_in {
bool is_hdr_on;
int h_active;
int v_active;
+ int min_viewport_size;
int sdr_white_level_nits;
enum sharpen_policy sharpen_policy;
};
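Editor's note: a worked example of the spl_opp_adjust_rect() clamping added to dc_spl.c above — each delta is applied only if the adjusted origin stays non-negative and the adjusted extent stays at least 1. The values are illustrative:

struct spl_rect recout = { .x = 0, .y = 10, .width = 1920, .height = 1080 };
struct spl_opp_adjust adjust = { .x = -4, .y = 2, .width = 0, .height = -8 };

spl_opp_adjust_rect(&recout, &adjust);
/* x stays 0 (0 + -4 < 0, so the delta is skipped), y becomes 12,
 * width stays 1920, height becomes 1072. */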
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
index 131f1e3949d3..52d97918a3bd 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
@@ -346,7 +346,7 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
if (m > 0)
return spl_fixpt_shl(
spl_fixed31_32_exp_from_taylor_series(r),
- (unsigned char)m);
+ (unsigned int)m);
else
return spl_fixpt_div_int(
spl_fixed31_32_exp_from_taylor_series(r),
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h
index ed2647f9a099..9f349ffe9148 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h
@@ -189,7 +189,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp(
* @brief
* result = arg << shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned int shift)
{
SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
@@ -203,7 +203,7 @@ static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, uns
* @brief
* result = arg >> shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned int shift)
{
bool negative = arg.value < 0;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 8cf89aed024b..f84bbc033e64 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -161,6 +161,13 @@
#endif
/**
+ * OS/FW agnostic memcmp
+ */
+#ifndef dmub_memcmp
+#define dmub_memcmp(lhs, rhs, bytes) memcmp((lhs), (rhs), (bytes))
+#endif
+
+/**
* OS/FW agnostic udelay
*/
#ifndef dmub_udelay
@@ -1460,6 +1467,11 @@ enum dmub_cmd_type {
*/
DMUB_CMD__PSP = 88,
+ /**
+ * Command type used for all Fused IO commands.
+ */
+ DMUB_CMD__FUSED_IO = 89,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1491,6 +1503,10 @@ enum dmub_out_cmd_type {
* Command type used for HPD redetect notification
*/
DMUB_OUT_CMD__HPD_SENSE_NOTIFY = 6,
+ /**
+ * Command type used for Fused IO notification
+ */
+ DMUB_OUT_CMD__FUSED_IO = 7,
};
/* DMUB_CMD__DPIA command sub-types. */
@@ -5325,6 +5341,63 @@ struct dmub_rb_cmd_get_usbc_cable_id {
} data;
};
+enum dmub_cmd_fused_io_sub_type {
+ DMUB_CMD__FUSED_IO_EXECUTE = 0,
+ DMUB_CMD__FUSED_IO_ABORT = 1,
+};
+
+enum dmub_cmd_fused_request_type {
+ FUSED_REQUEST_READ,
+ FUSED_REQUEST_WRITE,
+ FUSED_REQUEST_POLL,
+};
+
+enum dmub_cmd_fused_request_status {
+ FUSED_REQUEST_STATUS_SUCCESS,
+ FUSED_REQUEST_STATUS_BEGIN,
+ FUSED_REQUEST_STATUS_SUBMIT,
+ FUSED_REQUEST_STATUS_REPLY,
+ FUSED_REQUEST_STATUS_POLL,
+ FUSED_REQUEST_STATUS_ABORTED,
+ FUSED_REQUEST_STATUS_FAILED = 0x80,
+ FUSED_REQUEST_STATUS_INVALID,
+ FUSED_REQUEST_STATUS_BUSY,
+ FUSED_REQUEST_STATUS_TIMEOUT,
+ FUSED_REQUEST_STATUS_POLL_TIMEOUT,
+};
+
+struct dmub_cmd_fused_request {
+ uint8_t status;
+ uint8_t type : 2;
+ uint8_t _reserved0 : 3;
+ uint8_t poll_mask_msb : 3; // Number of MSBs to zero out of the last byte before comparing
+ uint8_t identifier;
+ uint8_t _reserved1;
+ uint32_t timeout_us;
+ union dmub_cmd_fused_request_location {
+ struct dmub_cmd_fused_request_location_i2c {
+ uint8_t is_aux : 1; // False
+ uint8_t ddc_line : 3;
+ uint8_t _reserved0 : 4;
+ uint8_t address;
+ uint8_t offset;
+ uint8_t length;
+ } i2c;
+ struct dmub_cmd_fused_request_location_aux {
+ uint32_t is_aux : 1; // True
+ uint32_t ddc_line : 3;
+ uint32_t address : 20;
+ uint32_t length : 8; // Automatically split into 16B transactions
+ } aux;
+ } u;
+ uint8_t buffer[0x30]; // Read: out, write: in, poll: expected
+};
+
+struct dmub_rb_cmd_fused_io {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_fused_request request;
+};
+
/**
* Command type of a DMUB_CMD__SECURE_DISPLAY command
*/
@@ -5738,6 +5811,8 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
+
+ struct dmub_rb_cmd_fused_io fused_io;
};
/**
@@ -5768,6 +5843,7 @@ union dmub_rb_out_cmd {
* HPD sense notification command.
*/
struct dmub_rb_cmd_hpd_sense_notify hpd_sense_notify;
+ struct dmub_rb_cmd_fused_io fused_io;
};
#pragma pack(pop)
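Editor's note: a sketch of how a caller might populate the new fused-IO command for an AUX read, using only fields introduced above; the header type/sub_type assignments assume the existing dmub_cmd_header layout, the identifier/timeout/address values are illustrative, and submission through the DMUB ringbuffer is omitted:

union dmub_rb_cmd cmd = { 0 };

cmd.fused_io.header.type = DMUB_CMD__FUSED_IO;
cmd.fused_io.header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE;
cmd.fused_io.request.type = FUSED_REQUEST_READ;
cmd.fused_io.request.identifier = 1; /* caller-chosen tag */
cmd.fused_io.request.timeout_us = 10000;
cmd.fused_io.request.u.aux.is_aux = 1;
cmd.fused_io.request.u.aux.ddc_line = 0;
cmd.fused_io.request.u.aux.address = 0x2000; /* illustrative DPCD address */
cmd.fused_io.request.u.aux.length = 16;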
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 3d0bba602b53..9796077885c9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -83,8 +83,8 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
- uint32_t in_reset, scratch, i, pwait_mode;
+ const uint32_t timeout = 100000;
+ uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -108,7 +108,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -125,9 +125,14 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+
+ if (is_enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index e5e77bd3c31e..01d013a12b94 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -88,7 +88,7 @@ static inline void dmub_dcn35_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn35_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
+ const uint32_t timeout = 100000;
uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -113,7 +113,7 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index 39a8cb6d7523..e1c4fe1c6e3e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -63,8 +63,10 @@ static inline void dmub_dcn401_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn401_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t in_reset, scratch, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -75,32 +77,35 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
- for (i = 0; i < timeout; ++i) {
+ for (i = 0; i < timeout_us; i++) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
+
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
+ for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(poll_delay_us);
}
- /* Force reset in case we timed out, DMCUB is likely hung. */
+ for (; i < timeout_us; i++) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
+ break;
+
+ udelay(poll_delay_us);
+ }
+ }
+
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
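Note that the three polls above share one time budget: i is not reset between loops, so the ACK wait, the STOP_FW response wait, and the PWAIT_MODE_STATUS check together consume at most timeout_us microseconds. The pattern in isolation (cond_a(), cond_b() and handle_timeout() are placeholders):

    uint32_t i = 0;

    for (; i < timeout_us && !cond_a(); i++)
        udelay(1);
    for (; i < timeout_us && !cond_b(); i++)
        udelay(1);
    if (i >= timeout_us)
        handle_timeout();    /* the stages together overran the shared budget */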
@@ -131,7 +136,10 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
dmub_dcn401_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -151,6 +159,7 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release only the DMCUB reset to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
@@ -161,7 +170,10 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
@@ -181,6 +193,7 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release only the DMCUB reset to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index 4c8843b79695..31f95b27e227 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -169,7 +169,8 @@ struct dmub_srv;
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_STAT) \
- DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN)
+ DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn401_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 058f882d5bdd..4c01514b926c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,11 +40,6 @@ struct dc_state;
*
*/
-void pre_surface_trace(
- struct dc *dc,
- const struct dc_plane_state *const *plane_states,
- int surface_count);
-
void update_surface_trace(
struct dc *dc,
const struct dc_surface_update *updates,
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 2d089d30518f..06badbf0c5b9 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -61,7 +61,7 @@ struct atif_qbtc_arguments {
struct atif_qbtc_data_point {
u8 luminance; /* luminance in percent */
- u8 ipnut_signal; /* input signal in range 0-255 */
+ u8 input_signal; /* input signal in range 0-255 */
} __packed;
struct atif_qbtc_output {
@@ -75,6 +75,8 @@ struct atif_qbtc_output {
u8 number_of_points; /* number of data points */
struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
} __packed;
+static_assert(ATIF_QBTC_MAX_DATA_POINTS == MAX_LUMINANCE_DATA_POINTS);
+static_assert(sizeof(struct atif_qbtc_data_point) == sizeof(struct amdgpu_dm_luminance_data));
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
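The two static_asserts pin the packed ACPI wire layout to the display-manager structures it is copied into, so any drift in either definition breaks the build instead of silently corrupting the brightness curve. The same guard in miniature (hypothetical names):

    struct wire_point { u8 luminance; u8 input_signal; } __packed;
    struct host_point { u8 luminance; u8 input_signal; };

    static_assert(sizeof(struct wire_point) == sizeof(struct host_point));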
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 3e86865563dc..485b713cfad0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -354,6 +354,10 @@ enum DC_DEBUG_MASK {
* @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
*/
DC_DISABLE_SUBVP = 0x20000,
+ /**
+ * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
+ */
+ DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000,
};
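Like the other DC_DEBUG_MASK bits, this one is intended to be set via the amdgpu.dcdebugmask module parameter; a hedged sketch of how a call site might consult it (the actual user is not part of this hunk):

    if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
        return false;    /* user opted out of custom brightness curves */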
enum amd_dpm_forced_level;
@@ -405,7 +409,7 @@ struct amd_ip_funcs {
int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
int (*suspend)(struct amdgpu_ip_block *ip_block);
int (*resume)(struct amdgpu_ip_block *ip_block);
- bool (*is_idle)(void *handle);
+ bool (*is_idle)(struct amdgpu_ip_block *ip_block);
int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
index abdb8728156e..d6c02cf815be 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
@@ -9478,6 +9478,8 @@
#define regRLC_GFX_IMU_CMD_BASE_IDX 1
#define regGFX_IMU_RLC_STATUS 0x4054
#define regGFX_IMU_RLC_STATUS_BASE_IDX 1
+#define regGFX_IMU_STATUS 0x4055
+#define regGFX_IMU_STATUS_BASE_IDX 1
#define regGFX_IMU_SOC_DATA 0x4059
#define regGFX_IMU_SOC_DATA_BASE_IDX 1
#define regGFX_IMU_SOC_ADDR 0x405a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
index 2bd9f3f1026f..0122a21c50cf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
@@ -2261,11 +2261,13 @@
#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
#define SH_MEM_CONFIG__F8_MODE__SHIFT 0x8
+#define SH_MEM_CONFIG__PRECISION_MODE__SHIFT 0x9
#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 0xc
#define SH_MEM_CONFIG__PRIVATE_NV__SHIFT 0xd
#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x00000018L
#define SH_MEM_CONFIG__F8_MODE_MASK 0x00000100L
+#define SH_MEM_CONFIG__PRECISION_MODE_MASK 0x00000200L
#define SH_MEM_CONFIG__RETRY_DISABLE_MASK 0x00001000L
#define SH_MEM_CONFIG__PRIVATE_NV_MASK 0x00002000L
//SP_MFMA_PORTD_RD_CONFIG
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index e3e635a31b8a..1e8dfa6c0dc8 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -330,6 +330,8 @@ struct kfd2kgd_calls {
uint64_t (*hqd_reset)(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t inst, unsigned int utimeout);
+ uint32_t (*hqd_sdma_get_doorbell)(struct amdgpu_device *adev,
+ int engine, int queue);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index faae9bf48aa4..81e9b443ca0a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -716,8 +716,32 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
- if (amdgpu_cper_generate_bp_threshold_record(adev))
- dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+ if (adev->cper.enabled)
+ if (amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
+ return ret;
+}
+
+/**
+ * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
+ * @adev: amdgpu_device pointer
+ *
+ * This function checks if the SMU supports resetting the SDMA engine.
+ * It returns false if the device does not use the software SMU path or
+ * if the firmware does not support the feature.
+ */
+bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_sdma_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
return ret;
}
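A typical caller would gate the reset on this query (hedged sketch; the instance selection is illustrative only):

    if (amdgpu_dpm_reset_sdma_is_supported(adev)) {
        r = amdgpu_dpm_reset_sdma(adev, BIT(instance));
        if (r)
            dev_err(adev->dev, "SDMA reset failed: %d\n", r);
    }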
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 1f5ac7e0230d..9fb26b5c8ae7 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -603,5 +603,6 @@ int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 67a8e22b1126..59fae668dc3f 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -3066,35 +3069,45 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+ return ret;
}
-static bool kv_dpm_is_idle(void *handle)
+static bool kv_dpm_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
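The ordering in kv_dpm_suspend() above is deliberate: the thermal work handler now takes pm.mutex itself (see the legacy_dpm.c hunk below), so cancel_work_sync() must run before the lock is acquired, or suspend could deadlock waiting for a handler that is waiting for the lock. The discipline in sketch form:

    cancel_work_sync(&adev->pm.dpm.thermal.work); /* may block on a running handler */
    mutex_lock(&adev->pm.mutex);
    adev->pm.dpm_enabled = false;                 /* later invocations bail out early */
    /* ... hardware teardown ... */
    mutex_unlock(&adev->pm.mutex);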
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index e861355ebd75..c7518b13e787 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
- if (!adev->pm.dpm_enabled)
- return;
+ mutex_lock(&adev->pm.mutex);
+ if (!adev->pm.dpm_enabled) {
+ mutex_unlock(&adev->pm.mutex);
+ return;
+ }
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index a87dcf0974bc..1c25f3023e93 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7786,6 +7786,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@@ -7793,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
return ret;
}
@@ -7810,35 +7812,47 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
+
return 0;
}
static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+
+ return ret;
}
-static bool si_dpm_is_idle(void *handle)
+static bool si_dpm_is_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX */
return true;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 686345f75f26..b48a031cbba0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
+ if (!hwmgr->device) {
+ kfree(hwmgr);
+ return -ENOMEM;
+ }
+
mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
@@ -239,7 +244,7 @@ static void pp_late_fini(struct amdgpu_ip_block *ip_block)
}
-static bool pp_is_idle(void *handle)
+static bool pp_is_idle(struct amdgpu_ip_block *ip_block)
{
return false;
}
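With the signature change, is_idle implementations no longer cast an opaque handle; the device is reached through the IP block. A hypothetical implementation shape:

    static bool foo_is_idle(struct amdgpu_ip_block *ip_block)
    {
        struct amdgpu_device *adev = ip_block->adev;

        return !!(RREG32(regFOO_STATUS) & FOO_IDLE_MASK); /* hypothetical register and bit */
    }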
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 90452b66e107..a59677cf8dfc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -149,16 +149,6 @@ int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
return 0;
}
-int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
- return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
- return 0;
-}
-
-
int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
index 82d540334318..6120f14caab0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
@@ -158,84 +158,6 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
return result;
}
-
-static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- const void *table_address;
- uint16_t idx;
-
- idx = GetIndexIntoMasterDataTable(gpio_pin_lut);
- table_address = smu_atom_get_data_table(hwmgr->adev,
- idx, NULL, NULL, NULL);
- PP_ASSERT_WITH_CODE(table_address,
- "Error retrieving BIOS Table Address!",
- return NULL);
-
- return (struct atom_gpio_pin_lut_v2_1 *)table_address;
-}
-
-static bool pp_atomfwctrl_lookup_gpio_pin(
- struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table,
- const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
-{
- unsigned int size = le16_to_cpu(
- gpio_lookup_table->table_header.structuresize);
- unsigned int offset =
- offsetof(struct atom_gpio_pin_lut_v2_1, gpio_pin[0]);
- unsigned long start = (unsigned long)gpio_lookup_table;
-
- while (offset < size) {
- const struct atom_gpio_pin_assignment *pin_assignment =
- (const struct atom_gpio_pin_assignment *)(start + offset);
-
- if (pin_id == pin_assignment->gpio_id) {
- gpio_pin_assignment->uc_gpio_pin_bit_shift =
- pin_assignment->gpio_bitshift;
- gpio_pin_assignment->us_gpio_pin_aindex =
- le16_to_cpu(pin_assignment->data_a_reg_index);
- return true;
- }
- offset += offsetof(struct atom_gpio_pin_assignment, gpio_id) + 1;
- }
- return false;
-}
-
-/*
- * Returns TRUE if the given pin id find in lookup table.
- */
-bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr,
- const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
-{
- bool ret = false;
- struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table =
- pp_atomfwctrl_get_gpio_lookup_table(hwmgr);
-
- /* If we cannot find the table do NOT try to control this voltage. */
- PP_ASSERT_WITH_CODE(gpio_lookup_table,
- "Could not find GPIO lookup Table in BIOS.",
- return false);
-
- ret = pp_atomfwctrl_lookup_gpio_pin(gpio_lookup_table,
- pin_id, gpio_pin_assignment);
-
- return ret;
-}
-
-/*
- * Enter to SelfRefresh mode.
- * @param hwmgr
- */
-int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr)
-{
- /* 0 - no action
- * 1 - leave power to video memory always on
- */
- return 0;
-}
-
/** pp_atomfwctrl_get_gpu_pll_dividers_vega10().
*
* @param hwmgr input parameter: pointer to HwMgr
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
index e86e05c786d9..0d62903d5676 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
@@ -217,9 +217,6 @@ struct pp_atomfwctrl_smc_dpm_parameters {
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers);
-int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr);
-bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment);
int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint8_t voltage_mode, struct pp_atomfwctrl_voltage_table *voltage_table);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index a8c732e07006..9a821563bc8e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1642,7 +1642,6 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,
- .powerdown_uvd = NULL,
.powergate_uvd = smu10_powergate_vcn,
.powergate_vce = NULL,
.get_mclk = smu10_dpm_get_mclk,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
index f2bda3bcbbde..5e4c80f7b20a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
@@ -55,7 +55,7 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
}
-int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
index fc8f8a6acc72..e56abbadc78b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
@@ -28,7 +28,6 @@
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 632a25957477..8da882c51856 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5754,7 +5754,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.patch_boot_state = smu7_dpm_patch_boot_state,
.get_pp_table_entry = smu7_get_pp_table_entry,
.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
- .powerdown_uvd = smu7_powerdown_uvd,
.powergate_uvd = smu7_powergate_uvd,
.powergate_vce = smu7_powergate_vce,
.disable_clock_power_gating = smu7_disable_clock_power_gating,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 7e1197420873..9d3b33446adc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -2044,7 +2044,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.apply_state_adjust_rules = smu8_apply_state_adjust_rules,
.force_dpm_level = smu8_dpm_force_dpm_level,
.get_power_state_size = smu8_get_power_state_size,
- .powerdown_uvd = smu8_dpm_powerdown_uvd,
.powergate_uvd = smu8_dpm_powergate_uvd,
.powergate_vce = smu8_dpm_powergate_vce,
.powergate_acp = smu8_dpm_powergate_acp,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index f4f9a104d170..915f1b8e4dba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
@@ -396,7 +396,6 @@ struct phm_odn_clock_levels {
};
extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
-extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 227bf0e84a13..c661185753b4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -257,7 +257,6 @@ struct pp_hwmgr_func {
int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr,
unsigned long, struct pp_power_state *);
int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
- int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 0b32c6cf6924..8cfb07549f54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2041,15 +2041,15 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
smu_dpm_set_vcn_enable(smu, false, i);
+ adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
+ }
smu_dpm_set_jpeg_enable(smu, false);
+ adev->jpeg.cur_state = AMD_PG_STATE_GATE;
smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
- adev->vcn.cur_state = AMD_PG_STATE_GATE;
- adev->jpeg.cur_state = AMD_PG_STATE_GATE;
-
if (!smu->pm_enabled)
return 0;
@@ -2315,7 +2315,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
- dev_err(smu->adev->dev, "Failed to set performance level!");
+ if (ret == -EOPNOTSUPP)
+ dev_info(smu->adev->dev, "set performance level %d not supported",
+ level);
+ else
+ dev_err(smu->adev->dev, "Failed to set performance level %d",
+ level);
return ret;
}
@@ -3907,6 +3912,23 @@ int smu_send_rma_reason(struct smu_context *smu)
return ret;
}
+/**
+ * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
+ * @smu: smu_context pointer
+ *
+ * This function checks if the SMU supports resetting the SDMA engine.
+ * It returns true if supported, false otherwise.
+ */
+bool smu_reset_sdma_is_supported(struct smu_context *smu)
+{
+ bool ret = false;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
+ ret = smu->ppt_funcs->reset_sdma_is_supported(smu);
+
+ return ret;
+}
+
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3630593bce61..3ba169639f54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1376,6 +1376,10 @@ struct pptable_funcs {
* @reset_sdma: message SMU to soft reset sdma instance.
*/
int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
+ /**
+ * @reset_sdma_is_supported: Check whether resetting the SDMA engine is supported.
+ */
+ bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
* @get_ecc_table: message SMU to get ECC INFO table.
@@ -1637,6 +1641,7 @@ int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index 4a1256d29d62..d7505cfc433a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -38,6 +38,13 @@
#define NUM_SOC_P2S_TABLES 6
#define NUM_GFX_P2S_TABLES 8
#define NUM_PSM_DIDT_THRESHOLDS 3
+#define NUM_XVMIN_VMIN_THRESHOLDS 3
+
+#define PRODUCT_MODEL_NUMBER_LEN 20
+#define PRODUCT_NAME_LEN 64
+#define PRODUCT_SERIAL_LEN 20
+#define PRODUCT_MANUFACTURER_NAME_LEN 32
+#define PRODUCT_FRU_ID_LEN 32
typedef enum {
/*0*/ FEATURE_DATA_CALCULATION = 0,
@@ -85,11 +92,11 @@ typedef enum {
//enum for MPIO PCIe gen speed msgs
typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_RESERVED,
PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
- PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM,
PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
PCIE_LINK_SPEED_INDEX_TABLE_COUNT
} PCIE_LINK_SPEED_INDEX_TABLE_e;
@@ -126,13 +133,149 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_VF_METRICS_TABLE_VERSION 0x3
+#define SMU_METRICS_TABLE_VERSION 0x12
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+
+ //FREQUENCY RANGE
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //XGMI data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[40];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitPptAcc[8];
+ uint64_t GfxclkBelowHostLimitThmAcc[8];
+ uint64_t GfxclkBelowHostLimitTotalAcc[8];
+ uint64_t GfxclkLowUtilizationAcc[8];
+} MetricsTable_t;
+
+#define SMU_VF_METRICS_TABLE_MASK (1 << 31)
+#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK)
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
uint64_t AccGfxclk_TargFreq;
uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimitPpt;
+ uint64_t AccGfxclkBelowHostLimitThm;
+ uint64_t AccGfxclkBelowHostLimitTotal;
+ uint64_t AccGfxclkLowUtilization;
} VfMetricsTable_t;
+/* FRU product information */
+typedef struct __attribute__((packed, aligned(4))) {
+ uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN];
+ uint8_t Name[PRODUCT_NAME_LEN];
+ uint8_t Serial[PRODUCT_SERIAL_LEN];
+ uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN];
+ uint8_t FruId[PRODUCT_FRU_ID_LEN];
+} FRUProductInfo_t;
+
+#pragma pack(push, 4)
+typedef struct {
+ //FRU PRODUCT INFO
+ FRUProductInfo_t ProductInfo;
+
+ //POWER
+ uint32_t MaxSocketPowerLimit;
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
new file mode 100644
index 000000000000..e1f490b6ce64
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_12_PPSMC_H
+#define SMU_13_0_12_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_DisableAllSmuFeatures 0x6
+#define PPSMC_MSG_RequestI2cTransaction 0x7
+#define PPSMC_MSG_GetMetricsVersion 0x8
+#define PPSMC_MSG_GetMetricsTable 0x9
+#define PPSMC_MSG_GetEccInfoTable 0xA
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrLow 0xE
+#define PPSMC_MSG_SetToolsDramAddrHigh 0xF
+#define PPSMC_MSG_SetToolsDramAddrLow 0x10
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12
+#define PPSMC_MSG_SetSoftMinByFreq 0x13
+#define PPSMC_MSG_SetSoftMaxByFreq 0x14
+#define PPSMC_MSG_GetMinDpmFreq 0x15
+#define PPSMC_MSG_GetMaxDpmFreq 0x16
+#define PPSMC_MSG_GetDpmFreqByIndex 0x17
+#define PPSMC_MSG_SetPptLimit 0x18
+#define PPSMC_MSG_GetPptLimit 0x19
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B
+#define PPSMC_MSG_DramLogSetDramSize 0x1C
+#define PPSMC_MSG_GetDebugData 0x1D
+#define PPSMC_MSG_HeavySBR 0x1E
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F
+#define PPSMC_MSG_DFCstateControl 0x20
+#define PPSMC_MSG_GetGmiPwrDnHyst 0x21
+#define PPSMC_MSG_SetGmiPwrDnHyst 0x22
+#define PPSMC_MSG_GmiPwrDnControl 0x23
+#define PPSMC_MSG_EnterGfxoff 0x24
+#define PPSMC_MSG_ExitGfxoff 0x25
+#define PPSMC_MSG_EnableDeterminism 0x26
+#define PPSMC_MSG_DisableDeterminism 0x27
+#define PPSMC_MSG_DumpSTBtoDram 0x28
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D
+#define PPSMC_MSG_GfxDriverResetRecovery 0x2E
+#define PPSMC_MSG_TriggerVFFLR 0x2F
+#define PPSMC_MSG_SetSoftMinGfxClk 0x30
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x31
+#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
+#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
+#define PPSMC_MSG_PrepareForDriverUnload 0x34
+#define PPSMC_MSG_ReadThrottlerLimit 0x35
+#define PPSMC_MSG_QueryValidMcaCount 0x36
+#define PPSMC_MSG_McaBankDumpDW 0x37
+#define PPSMC_MSG_GetCTFLimit 0x38
+#define PPSMC_MSG_ClearMcaOnRead 0x39
+#define PPSMC_MSG_QueryValidMcaCeCount 0x3A
+#define PPSMC_MSG_McaBankCeDumpDW 0x3B
+#define PPSMC_MSG_SelectPLPDMode 0x40
+#define PPSMC_MSG_PmLogReadSample 0x41
+#define PPSMC_MSG_PmLogGetTableVersion 0x42
+#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
+#define PPSMC_MSG_SetThrottlingPolicy 0x44
+#define PPSMC_MSG_SetPhaseDetectCSBWThreshold 0x45
+#define PPSMC_MSG_SetPhaseDetectFreqHigh 0x46
+#define PPSMC_MSG_SetPhaseDetectFreqLow 0x47
+#define PPSMC_MSG_SetPhaseDetectDownHysterisis 0x48
+#define PPSMC_MSG_SetPhaseDetectAlphaX1e6 0x49
+#define PPSMC_MSG_SetPhaseDetectOnOff 0x4A
+#define PPSMC_MSG_GetPhaseDetectResidency 0x4B
+#define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C
+#define PPSMC_MSG_ResetSDMA 0x4D
+#define PPSMC_MSG_GetRasTableVersion 0x4E
+#define PPSMC_MSG_GetRmaStatus 0x4F
+#define PPSMC_MSG_GetErrorCount 0x50
+#define PPSMC_MSG_GetBadPageCount 0x51
+#define PPSMC_MSG_GetBadPageInfo 0x52
+#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53
+#define PPSMC_MSG_SetTimestampLoHi 0x54
+#define PPSMC_MSG_GetTimestampLoHi 0x55
+#define PPSMC_MSG_GetRasPolicy 0x56
+#define PPSMC_MSG_DumpErrorRecord 0x57
+#define PPSMC_MSG_EraseRasTable 0x58
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_Message_Count 0x5A
+
+//PPSMC Reset Types for driver msg argument
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+//PPSMC Throttling Limit Types for driver msg argument
+#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1
+#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2
+
+//CTF/Throttle Limit types
+#define PPSMC_AID_THM_TYPE 0x1
+#define PPSMC_CCD_THM_TYPE 0x2
+#define PPSMC_XCD_THM_TYPE 0x3
+#define PPSMC_HBM_THM_TYPE 0x4
+
+//PLPD modes
+#define PPSMC_PLPD_MODE_DEFAULT 0x1
+#define PPSMC_PLPD_MODE_OPTIMIZED 0x2
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 9ccd5a1986d3..9c8468fb203a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -276,7 +276,8 @@
__SMU_DUMMY_MAP(SetThrottlingPolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
- __SMU_DUMMY_MAP(ResetSDMA),
+ __SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(GetStaticMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 31166974746f..cd03caffe317 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -53,6 +53,10 @@
#define SMU_13_VCLK_SHIFT 16
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+
extern const int pmfw_decoded_link_speed[5];
extern const int pmfw_decoded_link_width[7];
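The SMUQ10_* helpers decode unsigned Q10 fixed point: the low 10 bits are the fraction, and SMUQ10_ROUND rounds to nearest by adding one when the fraction reaches half (0x200). Worked values:

    SMUQ10_TO_UINT(1800);   /* 1800 >> 10 -> 1 (truncating; 1800/1024 ~= 1.76) */
    SMUQ10_FRAC(1800);      /* 1800 & 0x3ff -> 776 */
    SMUQ10_ROUND(1800);     /* 776 >= 0x200 -> 2 */
    SMUQ10_ROUND(1234);     /* frac 210 < 0x200 -> 1 */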
@@ -307,6 +311,13 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
void smu_v13_0_interrupt_work(struct smu_context *smu);
bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 985355bf78b2..898487ad6cd2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -3234,4 +3234,9 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_0_workload_map;
smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION;
smu_v13_0_0_set_smu_mailbox_registers(smu);
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(smu->adev))
+ smu->adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 86852e738837..285dbfe10303 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -28,8 +28,10 @@
#include "amdgpu_smu.h"
#include "smu_v13_0_12_pmfw.h"
#include "smu_v13_0_6_ppt.h"
+#include "smu_v13_0_12_ppsmc.h"
#include "smu_v13_0.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_fru_eeprom.h"
#include <linux/pci.h>
#include "smu_cmn.h"
@@ -54,6 +56,10 @@
(FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK))
+#define NUM_JPEG_RINGS_FW 10
+#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \
+ (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4)
+
const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = {
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
@@ -72,6 +78,63 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] =
SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
};
+// clang-format off
+const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
+ MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
+ MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
+ MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+ MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
+ MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
+ MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
+ MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
+ MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
+ MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+ MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
+ MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
+ MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
+ MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
+ MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+};
+
static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
uint64_t *feature_mask)
{
@@ -87,6 +150,114 @@ static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_12_fru_get_product_info(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct amdgpu_fru_info *fru_info;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->fru_info) {
+ adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
+ if (!adev->fru_info)
+ return -ENOMEM;
+ }
+
+ fru_info = adev->fru_info;
+ strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber,
+ sizeof(fru_info->product_number));
+ strscpy(fru_info->product_name, static_metrics->ProductInfo.Name,
+ sizeof(fru_info->product_name));
+ strscpy(fru_info->serial, static_metrics->ProductInfo.Serial,
+ sizeof(fru_info->serial));
+ strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName,
+ sizeof(fru_info->manufacturer_name));
+ strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId,
+ sizeof(fru_info->fru_id));
+
+ return 0;
+}
+
+int smu_v13_0_12_get_max_metrics_size(void)
+{
+ return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
+}
+
+static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t table_version;
+ int ret, i;
+
+ if (!pptable->Init) {
+ ret = smu_v13_0_12_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+ &table_version);
+ if (ret)
+ return ret;
+ smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+ table_version;
+
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MinGfxclkFrequency);
+
+ for (i = 0; i < 4; ++i) {
+ pptable->FclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->FclkFrequencyTable[i]);
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]);
+ pptable->SocclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->SocclkFrequencyTable[i]);
+ pptable->VclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->VclkFrequencyTable[i]);
+ pptable->DclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->DclkFrequencyTable[i]);
+ pptable->LclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->LclkFrequencyTable[i]);
+ }
+
+ /* use AID0 serial number by default */
+ pptable->PublicSerialNumber_AID =
+ static_metrics->PublicSerialNumber_AID[0];
+ ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics);
+ if (ret)
+ return ret;
+
+ pptable->Init = true;
+ }
+
+ return 0;
+}
+
bool smu_v13_0_12_is_dpm_running(struct smu_context *smu)
{
int ret;
@@ -99,3 +270,216 @@ bool smu_v13_0_12_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ int xcc_id;
+
+ /* For clocks with multiple instances, only report the first one */
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ case METRICS_AVERAGE_GFXCLK:
+ xcc_id = GET_INST(GC, 0);
+ *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ break;
+ case METRICS_CURR_SOCCLK:
+ case METRICS_AVERAGE_SOCCLK:
+ *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+ break;
+ case METRICS_CURR_UCLK:
+ case METRICS_AVERAGE_UCLK:
+ *value = SMUQ10_ROUND(metrics->UclkFrequency);
+ break;
+ case METRICS_CURR_VCLK:
+ *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+ break;
+ case METRICS_CURR_DCLK:
+ *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+ break;
+ case METRICS_CURR_FCLK:
+ *value = SMUQ10_ROUND(metrics->FclkFrequency);
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ break;
+ case METRICS_CURR_SOCKETPOWER:
+ *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ /* This is the max of all VRs and not just SOC VR.
+ * No need to define another data type for the same.
+ */
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_7 *gpu_metrics =
+ (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ int ret = 0, xcc_id, inst, i, j, k, idx;
+ struct amdgpu_device *adev = smu->adev;
+ u8 num_jpeg_rings_gpu_metrics;
+ MetricsTable_t *metrics;
+ struct amdgpu_xcp *xcp;
+ u32 inst_mask;
+
+ metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!metrics)
+ return -ENOMEM;
+ memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+
+ gpu_metrics->temperature_hotspot =
+ SMUQ10_ROUND(metrics->MaxSocketTemperature);
+ /* Individual HBM stack temperature is not reported */
+ gpu_metrics->temperature_mem =
+ SMUQ10_ROUND(metrics->MaxHbmTemperature);
+ /* Reports max temperature of all voltage rails */
+ gpu_metrics->temperature_vrsoc =
+ SMUQ10_ROUND(metrics->MaxVrTemperature);
+
+ gpu_metrics->average_gfx_activity =
+ SMUQ10_ROUND(metrics->SocketGfxBusy);
+ gpu_metrics->average_umc_activity =
+ SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(metrics->MaxDramBandwidth);
+
+ gpu_metrics->curr_socket_power =
+ SMUQ10_ROUND(metrics->SocketPower);
+ /* Energy counter reported in 15.259uJ (2^-16) units */
+ gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+ for (i = 0; i < MAX_GFX_CLKS; i++) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0)
+ gpu_metrics->current_gfxclk[i] =
+ SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+
+ if (i < MAX_CLKS) {
+ gpu_metrics->current_socclk[i] =
+ SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ inst = GET_INST(VCN, i);
+ if (inst >= 0) {
+ gpu_metrics->current_vclk0[i] =
+ SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ gpu_metrics->current_dclk0[i] =
+ SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ }
+ }
+ }
+
+ gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+
+ /* Total accumulated cycle counter */
+ gpu_metrics->accumulation_counter = metrics->AccumulationCounter;
+
+ /* Accumulated throttler residencies */
+ gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc;
+ gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc;
+ gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc;
+ gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc;
+ gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc;
+
+ /* Clock Lock Status. Each bit corresponds to one GFXCLK instance */
+ gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+
+ gpu_metrics->pcie_link_width = metrics->PCIeLinkWidth;
+ gpu_metrics->pcie_link_speed =
+ pcie_gen_to_speed(metrics->PCIeLinkSpeed);
+ gpu_metrics->pcie_bandwidth_acc =
+ SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+ gpu_metrics->pcie_bandwidth_inst =
+ SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+ gpu_metrics->pcie_l0_to_recov_count_acc = metrics->PCIeL0ToRecoveryCountAcc;
+ gpu_metrics->pcie_replay_count_acc = metrics->PCIenReplayAAcc;
+ gpu_metrics->pcie_replay_rover_count_acc =
+ metrics->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc = metrics->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc = metrics->PCIeNAKReceivedCountAcc;
+ gpu_metrics->pcie_lc_perf_other_end_recovery = metrics->PCIeOtherEndRecoveryAcc;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+
+ for (i = 0; i < NUM_XGMI_LINKS; i++) {
+ gpu_metrics->xgmi_read_data_acc[i] =
+ SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+ gpu_metrics->xgmi_write_data_acc[i] =
+ SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+ ret = amdgpu_get_xgmi_link_status(adev, i);
+ if (ret >= 0)
+ gpu_metrics->xgmi_link_status[i] = ret;
+ }
+
+ gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
+
+ num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics);
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) {
+ gpu_metrics->xcp_stats[i].jpeg_busy
+ [(idx * num_jpeg_rings_gpu_metrics) + j] =
+ SMUQ10_ROUND(metrics->JpegBusy
+ [(inst * NUM_JPEG_RINGS_FW) + j]);
+ }
+ gpu_metrics->xcp_stats[i].vcn_busy[idx] =
+ SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ idx++;
+ }
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
+ SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ idx++;
+ }
+ }
+
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(metrics->XgmiWidth);
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(metrics->XgmiBitrate);
+
+ gpu_metrics->firmware_timestamp = metrics->Timestamp;
+
+ *table = (void *)gpu_metrics;
+ kfree(metrics);
+
+ return sizeof(*gpu_metrics);
+}
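
The clock and activity fields above arrive from the SMU in Q10 fixed point and are rounded with SMUQ10_ROUND(); the macro bodies (which a later hunk in this series removes from smu_v13_0_6_ppt.c) make the rounding rule easy to check in isolation. A minimal stand-alone sketch:

	#define SMUQ10_TO_UINT(x)	((x) >> 10)
	#define SMUQ10_FRAC(x)		((x) & 0x3ff)
	#define SMUQ10_ROUND(x)		((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

	/* 0x1a33 is 6707/1024 ~= 6.55; fraction 0x233 >= 0x200, so it rounds up */
	unsigned int v = SMUQ10_ROUND(0x1a33);	/* v == 7 */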
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 9f2de69f53b2..1e1d8989c77a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -116,6 +116,7 @@ enum smu_v13_0_6_caps {
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(SDMA_RESET),
+ SMU_CAP(STATIC_METRICS),
SMU_CAP(ALL),
};
@@ -252,25 +253,6 @@ static const uint8_t smu_v13_0_6_throttler_map[] = {
[THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};
-struct PPTable_t {
- uint32_t MaxSocketPowerLimit;
- uint32_t MaxGfxclkFrequency;
- uint32_t MinGfxclkFrequency;
- uint32_t FclkFrequencyTable[4];
- uint32_t UclkFrequencyTable[4];
- uint32_t SocclkFrequencyTable[4];
- uint32_t VclkFrequencyTable[4];
- uint32_t DclkFrequencyTable[4];
- uint32_t LclkFrequencyTable[4];
- uint32_t MaxLclkDpmRange;
- uint32_t MinLclkDpmRange;
- uint64_t PublicSerialNumber_AID;
- bool Init;
-};
-
-#define SMUQ10_TO_UINT(x) ((x) >> 10)
-#define SMUQ10_FRAC(x) ((x) & 0x3ff)
-#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\
(metrics_v0->field) : (metrics_v2->field))
#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\
@@ -368,6 +350,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00561700)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if (fw_ver >= 0x00561E00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -523,13 +508,15 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
struct amdgpu_device *adev = smu->adev;
+ int gpu_metrics_size = METRICS_TABLE_SIZE;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
- METRICS_TABLE_SIZE,
+ max(gpu_metrics_size,
+ smu_v13_0_12_get_max_metrics_size()),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -776,6 +763,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int ret, i, retry = 100;
uint32_t table_version;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_setup_driver_pptable(smu);
+
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
while (--retry) {
@@ -1156,6 +1146,9 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
+
/* For clocks with multiple instances, only report the first one */
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -1947,7 +1940,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
break;
}
- return -EINVAL;
+ return -EOPNOTSUPP;
}
static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
@@ -2518,6 +2511,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return ret;
}
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_gpu_metrics(smu, table);
+
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2902,11 +2898,31 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
return ret;
}
+/**
+ * smu_v13_0_6_reset_sdma_is_supported - Check if SDMA reset is supported
+ * @smu: smu_context pointer
+ *
+ * This function checks whether the SMU supports resetting the SDMA engine.
+ *
+ * Return: true if SDMA reset is supported, false otherwise.
+ */
+static bool smu_v13_0_6_reset_sdma_is_supported(struct smu_context *smu)
+{
+ bool ret = true;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) {
+ dev_info(smu->adev->dev,
+ "SDMA reset capability is not supported\n");
+ ret = false;
+ }
+
+ return ret;
+}
+
static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
- if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
+ if (!smu_v13_0_6_reset_sdma_is_supported(smu))
return -EOPNOTSUPP;
ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -3590,12 +3606,14 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
+ .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
- smu->message_map = smu_v13_0_6_message_map;
+ smu->message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+ smu_v13_0_12_message_map : smu_v13_0_6_message_map;
smu->clock_map = smu_v13_0_6_clk_map;
smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 717fe669882e..83745909e564 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -35,6 +35,22 @@ typedef enum {
/*3*/ NUM_METRICS = 3
} METRICS_LIST_e;
+struct PPTable_t {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+ uint64_t PublicSerialNumber_AID;
+ bool Init;
+};
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index adbb6332376e..76c1adda83db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -950,6 +950,14 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev,
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
+ /*
+ * ctxid is used to distinguish between different
+ * events for the SMCToHost interrupt.
+ */
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t data;
+ uint32_t high;
+
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
@@ -964,6 +972,50 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev,
src_id);
break;
}
+ } else if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
+
+ switch (ctxid) {
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
+ high = smu->thermal_range.software_shutdown_temp +
+ smu->thermal_range.software_shutdown_temp_offset;
+ high = min_t(typeof(high),
+ SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ high);
+ dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
+ high,
+ smu->thermal_range.software_shutdown_temp_offset);
+
+ data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+ data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+ DIG_THERM_INTH,
+ (high & 0xff));
+ data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+ break;
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
+ high = min_t(typeof(high),
+ SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ smu->thermal_range.software_shutdown_temp);
+ dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
+
+ data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+ data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+ DIG_THERM_INTH,
+ (high & 0xff));
+ data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+ break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
+ }
+ }
}
return 0;
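
The SMCToHost ACK above uses amdgpu's RREG32_SOC15()/REG_SET_FIELD()/WREG32_SOC15() read-modify-write idiom. A rough sketch of what it boils down to, with illustrative (not real) mask/shift names:

	u32 v = readl(mp1_ih_sw_int_ctrl);	/* RREG32_SOC15(MP1, 0, ...) */
	v = (v & ~INT_ACK_MASK) | ((1u << INT_ACK_SHIFT) & INT_ACK_MASK);
						/* REG_SET_FIELD(v, ..., INT_ACK, 1) */
	writel(v, mp1_ih_sw_int_ctrl);		/* WREG32_SOC15(MP1, 0, ...) */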
@@ -1897,16 +1949,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
NULL);
}
-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
-{
- int ret = 0;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
- ret = smu_v14_0_allow_ih_interrupt(smu);
-
- return ret;
-}
-
int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
@@ -1918,7 +1960,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
if (ret)
return ret;
- return smu_v14_0_process_pending_interrupt(smu);
+ return smu_v14_0_allow_ih_interrupt(smu);
}
int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 397e677a691c..46094cca2974 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -144,11 +144,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(drm->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
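
The same platform_get_resource() + devm_ioremap_resource() collapse recurs in the MediaTek hunks further down. For reference, the helper is roughly equivalent to this hedged paraphrase of drivers/base/platform.c:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}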
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 2100a687096e..914a2609a685 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -887,7 +887,7 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
lanes[0] = 0;
} else if (num_lanes < 0) {
dev_err(dev,
- "%s: Error gettin \"sil,i2s-data-lanes\": %d\n",
+ "%s: Error getting \"sil,i2s-data-lanes\": %d\n",
__func__, num_lanes);
return num_lanes;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 1d39015f1533..6166f197e37b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -36,6 +36,88 @@
#define SCRAMB_POLL_DELAY_MS 3000
+/*
+ * Unless otherwise noted, entries in this table are purely an optimization.
+ * Values can be obtained from dw_hdmi_qp_compute_n() but that function is
+ * slow so we pre-compute values we expect to see.
+ *
+ * The values for TMDS 25175, 25200, 27000, 54000, 74250 and 148500 kHz are
+ * the recommended N values specified in the Audio chapter of the HDMI
+ * specification.
+ */
+static const struct dw_hdmi_audio_tmds_n {
+ unsigned long tmds;
+ unsigned int n_32k;
+ unsigned int n_44k1;
+ unsigned int n_48k;
+} common_tmds_n_table[] = {
+ { .tmds = 25175000, .n_32k = 4576, .n_44k1 = 7007, .n_48k = 6864, },
+ { .tmds = 25200000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 27000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 28320000, .n_32k = 4096, .n_44k1 = 5586, .n_48k = 6144, },
+ { .tmds = 30240000, .n_32k = 4096, .n_44k1 = 5642, .n_48k = 6144, },
+ { .tmds = 31500000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 32000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 33750000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 36000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 40000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 49500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 50000000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 54000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 65000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 68250000, .n_32k = 4096, .n_44k1 = 5376, .n_48k = 6144, },
+ { .tmds = 71000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 72000000, .n_32k = 4096, .n_44k1 = 5635, .n_48k = 6144, },
+ { .tmds = 73250000, .n_32k = 11648, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 74250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 75000000, .n_32k = 4096, .n_44k1 = 5880, .n_48k = 6144, },
+ { .tmds = 78750000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 78800000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 79500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 83500000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 85500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 88750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 97750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 101000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 106500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 108000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 115500000, .n_32k = 4096, .n_44k1 = 5712, .n_48k = 6144, },
+ { .tmds = 119000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 135000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 146250000, .n_32k = 11648, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 148500000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 154000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 162000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+
+ /* For 297 MHz+ the HDMI spec has different rules for setting N */
+ { .tmds = 297000000, .n_32k = 3073, .n_44k1 = 4704, .n_48k = 5120, },
+ { .tmds = 594000000, .n_32k = 3073, .n_44k1 = 9408, .n_48k = 10240,},
+
+ /* End of table */
+ { .tmds = 0, .n_32k = 0, .n_44k1 = 0, .n_48k = 0, },
+};
+
+/*
+ * These are the CTS values as recommended in the Audio chapter of the HDMI
+ * specification.
+ */
+static const struct dw_hdmi_audio_tmds_cts {
+ unsigned long tmds;
+ unsigned int cts_32k;
+ unsigned int cts_44k1;
+ unsigned int cts_48k;
+} common_tmds_cts_table[] = {
+ { .tmds = 25175000, .cts_32k = 28125, .cts_44k1 = 31250, .cts_48k = 28125, },
+ { .tmds = 25200000, .cts_32k = 25200, .cts_44k1 = 28000, .cts_48k = 25200, },
+ { .tmds = 27000000, .cts_32k = 27000, .cts_44k1 = 30000, .cts_48k = 27000, },
+ { .tmds = 54000000, .cts_32k = 54000, .cts_44k1 = 60000, .cts_48k = 54000, },
+ { .tmds = 74250000, .cts_32k = 74250, .cts_44k1 = 82500, .cts_48k = 74250, },
+ { .tmds = 148500000, .cts_32k = 148500, .cts_44k1 = 165000, .cts_48k = 148500, },
+
+ /* End of table */
+ { .tmds = 0, .cts_32k = 0, .cts_44k1 = 0, .cts_48k = 0, },
+};
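
Both tables encode the HDMI Audio Clock Regeneration relationship 128 * fs = f_TMDS * N / CTS. The rows can be cross-checked with a small sketch (hdmi_acr_cts() is a hypothetical helper written for illustration, assuming the kernel's div_u64()/mul_u32_u32()):

	static unsigned int hdmi_acr_cts(unsigned long tmds, unsigned int n,
					 unsigned int fs)
	{
		return div_u64(mul_u32_u32(tmds, n), 128u * fs);
	}

	/* hdmi_acr_cts(148500000, 6144, 48000) == 148500, matching the row above */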
+
struct dw_hdmi_qp_i2c {
struct i2c_adapter adap;
@@ -60,6 +142,8 @@ struct dw_hdmi_qp {
} phy;
struct regmap *regm;
+
+ unsigned long tmds_char_rate;
};
static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val,
@@ -83,6 +167,346 @@ static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data,
regmap_update_bits(hdmi->regm, reg, mask, data);
}
+static struct dw_hdmi_qp *dw_hdmi_qp_from_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_hdmi_qp, bridge);
+}
+
+static void dw_hdmi_qp_set_cts_n(struct dw_hdmi_qp *hdmi, unsigned int cts,
+ unsigned int n)
+{
+ /* Set N */
+ dw_hdmi_qp_mod(hdmi, n, AUDPKT_ACR_N_VALUE, AUDPKT_ACR_CONTROL0);
+
+ /* Set CTS */
+ if (cts)
+ dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_EN, AUDPKT_ACR_CTS_OVR_EN_MSK,
+ AUDPKT_ACR_CONTROL1);
+ else
+ dw_hdmi_qp_mod(hdmi, 0, AUDPKT_ACR_CTS_OVR_EN_MSK,
+ AUDPKT_ACR_CONTROL1);
+
+ dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_VAL(cts), AUDPKT_ACR_CTS_OVR_VAL_MSK,
+ AUDPKT_ACR_CONTROL1);
+}
+
+static int dw_hdmi_qp_match_tmds_n_table(struct dw_hdmi_qp *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
+{
+ const struct dw_hdmi_audio_tmds_n *tmds_n = NULL;
+ int i;
+
+ for (i = 0; common_tmds_n_table[i].tmds != 0; i++) {
+ if (pixel_clk == common_tmds_n_table[i].tmds) {
+ tmds_n = &common_tmds_n_table[i];
+ break;
+ }
+ }
+
+ if (!tmds_n)
+ return -ENOENT;
+
+ switch (freq) {
+ case 32000:
+ return tmds_n->n_32k;
+ case 44100:
+ case 88200:
+ case 176400:
+ return (freq / 44100) * tmds_n->n_44k1;
+ case 48000:
+ case 96000:
+ case 192000:
+ return (freq / 48000) * tmds_n->n_48k;
+ default:
+ return -ENOENT;
+ }
+}
+
+static u32 dw_hdmi_qp_audio_math_diff(unsigned int freq, unsigned int n,
+ unsigned int pixel_clk)
+{
+ u64 cts = mul_u32_u32(pixel_clk, n);
+
+ return do_div(cts, 128 * freq);
+}
+
+static unsigned int dw_hdmi_qp_compute_n(struct dw_hdmi_qp *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
+{
+ unsigned int min_n = DIV_ROUND_UP((128 * freq), 1500);
+ unsigned int max_n = (128 * freq) / 300;
+ unsigned int ideal_n = (128 * freq) / 1000;
+ unsigned int best_n_distance = ideal_n;
+ unsigned int best_n = 0;
+ u64 best_diff = U64_MAX;
+ int n;
+
+ /* If the ideal N satisfies the audio math, just take it */
+ if (dw_hdmi_qp_audio_math_diff(freq, ideal_n, pixel_clk) == 0)
+ return ideal_n;
+
+ for (n = min_n; n <= max_n; n++) {
+ u64 diff = dw_hdmi_qp_audio_math_diff(freq, n, pixel_clk);
+
+ if (diff < best_diff ||
+ (diff == best_diff && abs(n - ideal_n) < best_n_distance)) {
+ best_n = n;
+ best_diff = diff;
+ best_n_distance = abs(best_n - ideal_n);
+ }
+
+ /*
+ * The best N already satisfies the audio math and is the
+ * closest value to the ideal N, so stop the search early.
+ */
+ if (best_diff == 0 && (abs(n - ideal_n) > best_n_distance))
+ break;
+ }
+
+ return best_n;
+}
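
The bounds in dw_hdmi_qp_compute_n() follow the HDMI spec's window of 128*fs/1500 <= N <= 128*fs/300 with an ideal of 128*fs/1000. Worked through for fs = 48 kHz:

	/*
	 * min_n   = DIV_ROUND_UP(128 * 48000, 1500) =  4096
	 * max_n   = (128 * 48000) / 300             = 20480
	 * ideal_n = (128 * 48000) / 1000            =  6144
	 */

which is why 6144 dominates the n_48k column of the table above.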
+
+static unsigned int dw_hdmi_qp_find_n(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk,
+ unsigned long sample_rate)
+{
+ int n = dw_hdmi_qp_match_tmds_n_table(hdmi, pixel_clk, sample_rate);
+
+ if (n > 0)
+ return n;
+
+ dev_warn(hdmi->dev, "Rate %lu missing; computing N dynamically\n",
+ pixel_clk);
+
+ return dw_hdmi_qp_compute_n(hdmi, pixel_clk, sample_rate);
+}
+
+static unsigned int dw_hdmi_qp_find_cts(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk,
+ unsigned long sample_rate)
+{
+ const struct dw_hdmi_audio_tmds_cts *tmds_cts = NULL;
+ int i;
+
+ for (i = 0; common_tmds_cts_table[i].tmds != 0; i++) {
+ if (pixel_clk == common_tmds_cts_table[i].tmds) {
+ tmds_cts = &common_tmds_cts_table[i];
+ break;
+ }
+ }
+
+ if (!tmds_cts)
+ return 0;
+
+ switch (sample_rate) {
+ case 32000:
+ return tmds_cts->cts_32k;
+ case 44100:
+ case 88200:
+ case 176400:
+ return tmds_cts->cts_44k1;
+ case 48000:
+ case 96000:
+ case 192000:
+ return tmds_cts->cts_48k;
+ default:
+ return -ENOENT;
+ }
+}
+
+static void dw_hdmi_qp_set_audio_interface(struct dw_hdmi_qp *hdmi,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ u32 conf0 = 0;
+
+ /* Reset the audio data path of the AVP */
+ dw_hdmi_qp_write(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWINIT_P, GLOBAL_SWRESET_REQUEST);
+
+ /* Disable AUDS, ACR, AUDI */
+ dw_hdmi_qp_mod(hdmi, 0,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDS_TX_EN | PKTSCHED_AUDI_TX_EN,
+ PKTSCHED_PKT_EN);
+
+ /* Clear the audio FIFO */
+ dw_hdmi_qp_write(hdmi, AUDIO_FIFO_CLR_P, AUDIO_INTERFACE_CONTROL0);
+
+ /* Select I2S interface as the audio source */
+ dw_hdmi_qp_mod(hdmi, AUD_IF_I2S, AUD_IF_SEL_MSK, AUDIO_INTERFACE_CONFIG0);
+
+ /* Enable the active i2s lanes */
+ switch (hparms->channels) {
+ case 7 ... 8:
+ conf0 |= I2S_LINES_EN(3);
+ fallthrough;
+ case 5 ... 6:
+ conf0 |= I2S_LINES_EN(2);
+ fallthrough;
+ case 3 ... 4:
+ conf0 |= I2S_LINES_EN(1);
+ fallthrough;
+ default:
+ conf0 |= I2S_LINES_EN(0);
+ break;
+ }
+
+ dw_hdmi_qp_mod(hdmi, conf0, I2S_LINES_EN_MSK, AUDIO_INTERFACE_CONFIG0);
+
+ /*
+ * Use BPCUV bits generated internally for L-PCM, or received
+ * from the stream for NLPCM/HBR.
+ */
+ switch (fmt->bit_fmt) {
+ case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
+ conf0 = (hparms->channels == 8) ? AUD_HBR : AUD_ASP;
+ conf0 |= I2S_BPCUV_RCV_EN;
+ break;
+ default:
+ conf0 = AUD_ASP | I2S_BPCUV_RCV_DIS;
+ break;
+ }
+
+ dw_hdmi_qp_mod(hdmi, conf0, I2S_BPCUV_RCV_MSK | AUD_FORMAT_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+
+ /* Enable audio FIFO auto clear when overflow */
+ dw_hdmi_qp_mod(hdmi, AUD_FIFO_INIT_ON_OVF_EN, AUD_FIFO_INIT_ON_OVF_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+}
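
Each I2S lane carries two audio channels, so the fallthrough switch above effectively enables DIV_ROUND_UP(channels, 2) lanes:

	/*
	 * 1-2 ch -> lane 0
	 * 3-4 ch -> lanes 0-1
	 * 5-6 ch -> lanes 0-2
	 * 7-8 ch -> lanes 0-3
	 */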
+
+/*
+ * When transmitting IEC60958 linear PCM audio, these registers allow
+ * configuring the channel status information of all the channel status
+ * bits in the IEC60958 frame. For the moment this configuration is only
+ * used when the I2S audio interface, the General Purpose Audio (GPA)
+ * interface, or the AHB audio DMA (AHBAUDDMA) interface is active
+ * (for the S/PDIF interface this information comes from the stream).
+ */
+static void dw_hdmi_qp_set_channel_status(struct dw_hdmi_qp *hdmi,
+ u8 *channel_status, bool ref2stream)
+{
+ /*
+ * AUDPKT_CHSTATUS_OVR0: { RSV, RSV, CS1, CS0 }
+ * AUDPKT_CHSTATUS_OVR1: { CS6, CS5, CS4, CS3 }
+ *
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * CS0: | Mode | d | c | b | a |
+ * CS1: | Category Code |
+ * CS2: | Channel Number | Source Number |
+ * CS3: | Clock Accuracy | Sample Freq |
+ * CS4: | Ori Sample Freq | Word Length |
+ * CS5: | | CGMS-A |
+ * CS6~CS23: Reserved
+ *
+ * a: use of channel status block
+ * b: linear PCM identification: 0 for lpcm, 1 for nlpcm
+ * c: copyright information
+ * d: additional format information
+ */
+
+ if (ref2stream)
+ channel_status[0] |= IEC958_AES0_NONAUDIO;
+
+ if ((dw_hdmi_qp_read(hdmi, AUDIO_INTERFACE_CONFIG0) & GENMASK(25, 24)) == AUD_HBR) {
+ /* fixup cs for HBR */
+ channel_status[3] = (channel_status[3] & 0xf0) | IEC958_AES3_CON_FS_768000;
+ channel_status[4] = (channel_status[4] & 0x0f) | IEC958_AES4_CON_ORIGFS_NOTID;
+ }
+
+ dw_hdmi_qp_write(hdmi, channel_status[0] | (channel_status[1] << 8),
+ AUDPKT_CHSTATUS_OVR0);
+
+ regmap_bulk_write(hdmi->regm, AUDPKT_CHSTATUS_OVR1, &channel_status[3], 1);
+
+ if (ref2stream)
+ dw_hdmi_qp_mod(hdmi, 0,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+ else
+ dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+}
+
+static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long long tmds_char_rate,
+ unsigned int sample_rate)
+{
+ unsigned int n, cts;
+
+ n = dw_hdmi_qp_find_n(hdmi, tmds_char_rate, sample_rate);
+ cts = dw_hdmi_qp_find_cts(hdmi, tmds_char_rate, sample_rate);
+
+ dw_hdmi_qp_set_cts_n(hdmi, cts, n);
+}
+
+static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+
+ if (hdmi->tmds_char_rate)
+ dw_hdmi_qp_mod(hdmi, 0, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
+
+ return 0;
+}
+
+static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ bool ref2stream = false;
+
+ if (!hdmi->tmds_char_rate)
+ return -ENODEV;
+
+ if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
+ dev_err(hdmi->dev, "unsupported clock settings\n");
+ return -EINVAL;
+ }
+
+ if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ ref2stream = true;
+
+ dw_hdmi_qp_set_audio_interface(hdmi, fmt, hparms);
+ dw_hdmi_qp_set_sample_rate(hdmi, hdmi->tmds_char_rate, hparms->sample_rate);
+ dw_hdmi_qp_set_channel_status(hdmi, hparms->iec.status, ref2stream);
+ drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector, &hparms->cea);
+
+ return 0;
+}
+
+static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi)
+{
+ /*
+ * Keep the ACR, AUDI and AUDS packets always on to keep the sink
+ * device active, for better compatibility and user experience.
+ *
+ * This also fixes audible pops on some sink devices when they
+ * wake up from suspend.
+ */
+ dw_hdmi_qp_mod(hdmi, I2S_BPCUV_RCV_DIS, I2S_BPCUV_RCV_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+ dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+
+ dw_hdmi_qp_mod(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE,
+ AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
+}
+
+static void dw_hdmi_qp_audio_disable(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
+
+ if (hdmi->tmds_char_rate)
+ dw_hdmi_qp_audio_disable_regs(hdmi);
+}
+
static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi,
unsigned char *buf, unsigned int length)
{
@@ -361,6 +785,51 @@ static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
return 0;
}
+/*
+ * Static values documented in the TRM
+ * Different values are only used for debug purposes
+ */
+#define DW_HDMI_QP_AUDIO_INFOFRAME_HB1 0x1
+#define DW_HDMI_QP_AUDIO_INFOFRAME_HB2 0xa
+
+static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi,
+ const u8 *buffer, size_t len)
+{
+ /*
+ * AUDI_CONTENTS0: { RSV, HB2, HB1, RSV }
+ * AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 }
+ * AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 }
+ *
+ * PB0: CheckSum
+ * PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 |
+ * PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 |
+ * PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 |
+ * PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 |
+ * PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 |
+ * PB6~PB10: Reserved
+ *
+ * The AUDI_CONTENTS0 default value is defined by the HDMI specification
+ * and shall only be changed for debug purposes.
+ */
+ u32 header_bytes = (DW_HDMI_QP_AUDIO_INFOFRAME_HB1 << 8) |
+ (DW_HDMI_QP_AUDIO_INFOFRAME_HB2 << 16);
+
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1);
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1);
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1);
+
+ /* Enable ACR, AUDI, AMD */
+ dw_hdmi_qp_mod(hdmi,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
+ PKTSCHED_PKT_EN);
+
+ /* Enable AUDS */
+ dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN);
+
+ return 0;
+}
+
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
@@ -381,6 +850,7 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n",
__func__, conn_state->hdmi.tmds_char_rate);
op_mode = 0;
+ hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate;
} else {
dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__);
op_mode = OPMODE_DVI;
@@ -399,6 +869,8 @@ static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
+ hdmi->tmds_char_rate = 0;
+
hdmi->phy.ops->disable(hdmi, hdmi->phy.data);
}
@@ -454,6 +926,13 @@ static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge,
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);
break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ dw_hdmi_qp_mod(hdmi, 0,
+ PKTSCHED_ACR_TX_EN |
+ PKTSCHED_AUDS_TX_EN |
+ PKTSCHED_AUDI_TX_EN,
+ PKTSCHED_PKT_EN);
+ break;
default:
dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
}
@@ -476,6 +955,9 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge,
case HDMI_INFOFRAME_TYPE_DRM:
return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return dw_hdmi_qp_config_audio_infoframe(hdmi, buffer, len);
+
default:
dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
return 0;
@@ -493,6 +975,9 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
.hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
+ .hdmi_audio_startup = dw_hdmi_qp_audio_enable,
+ .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable,
+ .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare,
};
static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id)
@@ -602,6 +1087,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
if (IS_ERR(hdmi->bridge.ddc))
return ERR_CAST(hdmi->bridge.ddc);
+ hdmi->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ hdmi->bridge.hdmi_audio_dev = dev;
+ hdmi->bridge.hdmi_audio_dai_port = 1;
+
ret = devm_drm_bridge_add(dev, &hdmi->bridge);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 54ad462d17ef..95563aa1b450 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -561,6 +561,8 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
REG_LVDS_FMT_HS_NEG_POLARITY : 0) |
(mode->flags & DRM_MODE_FLAG_NVSYNC ?
REG_LVDS_FMT_VS_NEG_POLARITY : 0);
+ val |= bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW ?
+ REG_LVDS_FMT_DE_NEG_POLARITY : 0;
/* Set up bits-per-pixel, 18bpp or 24bpp. */
if (lvds_format_24bpp) {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index ae34585e05b3..01d456b955ab 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -195,7 +195,7 @@ struct ti_sn65dsi86 {
struct gpio_chip gchip;
DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
#endif
-#if defined(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
struct pwm_chip *pchip;
bool pwm_enabled;
atomic_t pwm_pin_busy;
@@ -1362,7 +1362,7 @@ static struct auxiliary_driver ti_sn_bridge_driver = {
/* -----------------------------------------------------------------------------
* PWM Controller
*/
-#if defined(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
static int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata)
{
return atomic_xchg(&pdata->pwm_pin_busy, 1) ? -EBUSY : 0;
@@ -1956,7 +1956,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return ret;
}
- if (IS_ENABLED(CONFIG_PWM)) {
+ if (IS_REACHABLE(CONFIG_PWM)) {
ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->pwm_aux, "pwm");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index f5c596234729..dbce1c3f4969 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2876,6 +2876,67 @@ int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
EXPORT_SYMBOL(drm_dp_lttpr_max_link_rate);
/**
+ * drm_dp_lttpr_set_transparent_mode() - set the LTTPR in transparent mode
+ * @aux: DisplayPort AUX channel
+ * @enable: Enable or disable transparent mode
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable)
+{
+ u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
+ DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+ int ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE, val);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret == 1) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(drm_dp_lttpr_set_transparent_mode);
+
+/**
+ * drm_dp_lttpr_init() - initialize LTTPR transparency mode per the DP standard
+ * @aux: DisplayPort AUX channel
+ * @lttpr_count: Number of LTTPRs, between 0 and 8 per the DP standard, or a
+ * negative error code for any invalid number; see drm_dp_lttpr_count().
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count)
+{
+ int ret;
+
+ if (!lttpr_count)
+ return 0;
+
+ /*
+ * See DP Standard v2.0 3.6.6.1 about the explicit disabling of
+ * non-transparent mode and the disable->enable non-transparent mode
+ * sequence.
+ */
+ ret = drm_dp_lttpr_set_transparent_mode(aux, true);
+ if (ret)
+ return ret;
+
+ if (lttpr_count < 0)
+ return -ENODEV;
+
+ if (drm_dp_lttpr_set_transparent_mode(aux, false)) {
+ /*
+ * Roll back to transparent mode if switching to
+ * non-transparent mode failed
+ */
+ drm_dp_lttpr_set_transparent_mode(aux, true);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_lttpr_init);
+
+/**
* drm_dp_lttpr_max_lane_count - get the maximum lane count supported by all LTTPRs
* @caps: LTTPR common capabilities
*
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7a25e70694ba..5302ab324898 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3409,6 +3409,10 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
* This implies a reset of all active components available between the CRTC and
* connectors.
*
+ * NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
+ * For drivers which optimize out unnecessary modesets this will result in
+ * a no-op commit, achieving nothing.
+ *
* Returns:
* 0 on success or a negative error code on failure.
*/
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index e1d61a65210b..2c4dc7ebc0c3 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -178,7 +178,7 @@ int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
/* Buffer should be accessible from the CPU */
- if (dma_obj->base.import_attach)
+ if (drm_gem_is_imported(&dma_obj->base))
return -ENODEV;
/* Buffer should be already mapped to CPU */
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index ecb278b63e8c..01d3ab307ac3 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -702,6 +702,57 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
+static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ u8 *dbuf8 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ /* write red-green-blue to output in little endianness */
+ *dbuf8++ = (pix & 0x00ff0000) >> 16;
+ *dbuf8++ = (pix & 0x0000ff00) >> 8;
+ *dbuf8++ = (pix & 0x000000ff) >> 0;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer
+ * @dst: Array of BGR888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGR888 devices that don't natively
+ * support XRGB8888.
+ */
+void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 3,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgr888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
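
To make the byte order concrete: an XRGB8888 pixel 0x00112233 (R=0x11, G=0x22, B=0x33) comes out as the bytes 0x11, 0x22, 0x33 at ascending addresses, which is the little-endian layout of DRM_FORMAT_BGR888 ([23:0] B:G:R):

	/* pix == 0x00112233 */
	dbuf8[0] = (pix & 0x00ff0000) >> 16;	/* 0x11 (R) */
	dbuf8[1] = (pix & 0x0000ff00) >> 8;	/* 0x22 (G) */
	dbuf8[2] = (pix & 0x000000ff) >> 0;	/* 0x33 (B) */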
+
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
@@ -1104,6 +1155,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
} else if (dst_format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
return 0;
+ } else if (dst_format == DRM_FORMAT_BGR888) {
+ drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state);
+ return 0;
} else if (dst_format == DRM_FORMAT_ARGB8888) {
drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
return 0;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee811764c3df..c6240bab3fa5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -348,7 +348,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return -ENOENT;
/* Don't allow imported objects to be mapped */
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
goto out;
}
@@ -1178,7 +1178,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_vma_node_start(&obj->vma_node));
drm_printf_indent(p, indent, "size=%zu\n", obj->size);
drm_printf_indent(p, indent, "imported=%s\n",
- str_yes_no(obj->import_attach));
+ str_yes_no(drm_gem_is_imported(obj)));
if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
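
The import_attach checks replaced here and in the following hunks all funnel through the same helper; per include/drm/drm_gem.h it is roughly (hedged paraphrase):

	static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
	{
		/* The dma-buf's priv field points to the original GEM object. */
		return obj->dma_buf && (obj->dma_buf->priv != obj);
	}

which also explains why the vmap paths below can use obj->dma_buf directly in place of obj->import_attach->dmabuf.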
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 16988d316a6d..b7f033d4352a 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -228,9 +228,9 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
struct drm_gem_object *gem_obj = &dma_obj->base;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
- if (gem_obj->import_attach) {
+ if (drm_gem_is_imported(gem_obj)) {
if (dma_obj->vaddr)
- dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 185534f56bab..0fbeb686e561 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -419,7 +419,6 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap);
static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
unsigned int num_planes)
{
- struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
@@ -428,10 +427,9 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
- import_attach = obj->import_attach;
- if (!import_attach)
+ if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
+ ret = dma_buf_end_cpu_access(obj->dma_buf, dir);
if (ret)
drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
ret, num_planes, dir);
@@ -454,7 +452,6 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
*/
int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
- struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
unsigned int i;
int ret;
@@ -465,10 +462,9 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
ret = -EINVAL;
goto err___drm_gem_fb_end_cpu_access;
}
- import_attach = obj->import_attach;
- if (!import_attach)
+ if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
+ ret = dma_buf_begin_cpu_access(obj->dma_buf, dir);
if (ret)
goto err___drm_gem_fb_end_cpu_access;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5ab351409312..d99dee67353a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -160,7 +160,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
dma_resv_lock(shmem->base.resv, NULL);
@@ -255,7 +255,7 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
- drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+ drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
ret = drm_gem_shmem_get_pages(shmem);
@@ -286,7 +286,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
int ret;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
@@ -309,7 +309,7 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
@@ -338,11 +338,11 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- if (obj->import_attach) {
- ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ ret = dma_buf_vmap(obj->dma_buf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ dma_buf_vunmap(obj->dma_buf, map);
return -EIO;
}
}
@@ -378,7 +378,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
return 0;
err_put_pages:
- if (!obj->import_attach)
+ if (!drm_gem_is_imported(obj))
drm_gem_shmem_put_pages(shmem);
err_zero_use:
shmem->vmap_use_count = 0;
@@ -404,8 +404,8 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ dma_buf_vunmap(obj->dma_buf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
@@ -566,7 +566,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
dma_resv_lock(shmem->base.resv, NULL);
@@ -618,7 +618,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
struct drm_gem_object *obj = &shmem->base;
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
* doesn't set those fields.
@@ -663,7 +663,7 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
- if (shmem->base.import_attach)
+ if (drm_gem_is_imported(&shmem->base))
return;
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
@@ -690,7 +690,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
@@ -705,7 +705,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt)
return shmem->sgt;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 34bca7567576..89e05a5bed1d 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -218,7 +218,7 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach,
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !drm_gem_is_imported(gem),
fmtcnv_state);
else
drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5e5c5f84daac..dfa595556320 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -162,13 +162,13 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
u32 reg;
if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) {
- drm_err(host, "modalias failure on %pOF\n", node);
+ dev_err(host->dev, "modalias failure on %pOF\n", node);
return ERR_PTR(-EINVAL);
}
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
- drm_err(host, "device node %pOF has no valid reg property: %d\n",
+ dev_err(host->dev, "device node %pOF has no valid reg property: %d\n",
node, ret);
return ERR_PTR(-EINVAL);
}
@@ -206,18 +206,18 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
int ret;
if (!info) {
- drm_err(host, "invalid mipi_dsi_device_info pointer\n");
+ dev_err(host->dev, "invalid mipi_dsi_device_info pointer\n");
return ERR_PTR(-EINVAL);
}
if (info->channel > 3) {
- drm_err(host, "invalid virtual channel: %u\n", info->channel);
+ dev_err(host->dev, "invalid virtual channel: %u\n", info->channel);
return ERR_PTR(-EINVAL);
}
dsi = mipi_dsi_device_alloc(host);
if (IS_ERR(dsi)) {
- drm_err(host, "failed to allocate DSI device %ld\n",
+ dev_err(host->dev, "failed to allocate DSI device %ld\n",
PTR_ERR(dsi));
return dsi;
}
@@ -228,7 +228,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
ret = mipi_dsi_device_add(dsi);
if (ret) {
- drm_err(host, "failed to add DSI device %d\n", ret);
+ dev_err(host->dev, "failed to add DSI device %d\n", ret);
kfree(dsi);
return ERR_PTR(ret);
}
@@ -1266,25 +1266,6 @@ int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
/**
- * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
- * output signal on the TE signal line
- * @dsi: DSI peripheral device
- *
- * Return: 0 on success or a negative error code on failure
- */
-int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
-{
- ssize_t err;
-
- err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
-
-/**
* mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
* output signal on the TE signal line.
* @dsi: DSI peripheral device
@@ -1714,6 +1695,29 @@ void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx)
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral_multi);
/**
+ * mipi_dsi_dcs_set_tear_off_multi() - turn off the display module's Tearing Effect
+ * output signal on the TE signal line
+ * @ctx: Context for multiple DSI transactions
+ */
+void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ ssize_t err;
+
+ if (ctx->accum_err)
+ return;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
+ if (err < 0) {
+ ctx->accum_err = err;
+ dev_err(dev, "Failed to set tear off: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off_multi);
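
Caller-side sketch for the multi-transaction variant, following the pattern of the other *_multi() helpers in this file (surrounding panel-driver context assumed):

	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_set_tear_off_multi(&ctx);
	/* further *_multi() calls become no-ops once ctx.accum_err is set */
	return ctx.accum_err;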
+
+/**
* mipi_dsi_dcs_soft_reset_multi() - perform a software reset of the display module
* @ctx: Context for multiple DSI transactions
*
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 32a8781cfd67..bdb51c8f262e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -453,13 +453,7 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
}
mutex_lock(&dev->object_name_lock);
- /* re-export the original imported object */
- if (obj->import_attach) {
- dmabuf = obj->import_attach->dmabuf;
- get_dma_buf(dmabuf);
- goto out_have_obj;
- }
-
+ /* re-export the original imported/exported object */
if (obj->dma_buf) {
get_dma_buf(obj->dma_buf);
dmabuf = obj->dma_buf;
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index f139b49af4c9..edbeab88ff2b 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -213,7 +213,7 @@ static void delete_writeback_properties(struct drm_device *dev)
}
/**
- * drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
+ * __drm_writeback_connector_init - Initialize a writeback connector with
* a custom encoder
*
* @dev: DRM device
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3e83299113e3..718d45891fc7 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -215,7 +215,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
- CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
+ CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
}
/**
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 53990d27c39f..c85143792019 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -855,8 +855,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
- strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
- intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ strscpy(intel_dp->adapter.name, name);
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = connector->base.kdev;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 11953b03bb6a..66fcd90f0028 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -119,9 +119,6 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
- if (drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) != 1)
- return false;
-
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val;
@@ -146,6 +143,7 @@ bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
int lttpr_count;
+ int ret;
if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
return 0;
@@ -172,22 +170,8 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
return lttpr_count;
}
- /*
- * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
- * non-transparent mode and the disable->enable non-transparent mode
- * sequence.
- */
- intel_dp_set_lttpr_transparent_mode(intel_dp, true);
-
- /*
- * In case of unsupported number of LTTPRs or failing to switch to
- * non-transparent mode fall-back to transparent link training mode,
- * still taking into account any LTTPR common lane- rate/count limits.
- */
- if (lttpr_count < 0)
- goto out_reset_lttpr_count;
-
- if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
+ ret = drm_dp_lttpr_init(&intel_dp->aux, lttpr_count);
+ if (ret) {
lt_dbg(intel_dp, DP_PHY_DPRX,
"Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
@@ -196,6 +180,8 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
goto out_reset_lttpr_count;
}
+ intel_dp_set_lttpr_transparent_mode(intel_dp, false);
+
return lttpr_count;
out_reset_lttpr_count:
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index dd8433a38282..39c7de4cdcc1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -96,7 +96,6 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_color *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -108,8 +107,7 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get color clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap color\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index b17b11d93846..8afd15006df2 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -256,7 +256,6 @@ static int mtk_disp_gamma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_gamma *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -268,8 +267,7 @@ static int mtk_disp_gamma_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get gamma clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap gamma\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index 563b1b248fbb..b174dda091d3 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -306,7 +306,6 @@ static const struct component_ops mtk_disp_merge_component_ops = {
static int mtk_disp_merge_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct mtk_disp_merge *priv;
int ret;
@@ -314,8 +313,7 @@ static int mtk_disp_merge_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap merge\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index df82cea4bb79..d0581c4e3c99 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -604,7 +604,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ovl *priv;
- struct resource *res;
int irq;
int ret;
@@ -621,8 +620,7 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get ovl clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap ovl\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index bf47790e4d6b..c9d41d75e7f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -313,7 +313,6 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_rdma *priv;
- struct resource *res;
int irq;
int ret;
@@ -330,8 +329,7 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get rdma clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap rdma\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b50dc9a013ac..0683c2b3ca5b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1192,7 +1192,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
- struct resource *regs;
int irq_num;
int ret;
@@ -1217,8 +1216,7 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->hs_clk))
return dev_err_probe(dev, PTR_ERR(dsi->hs_clk), "Failed to get hs clock\n");
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(dev, regs);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs))
return dev_err_probe(dev, PTR_ERR(dsi->regs), "Failed to ioremap memory\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ac5e40c27617..d4ab098e1174 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1424,7 +1424,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
struct regmap *regmap;
- struct resource *mem;
int ret;
ret = mtk_hdmi_get_all_clk(hdmi, np);
@@ -1470,8 +1469,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
}
hdmi->sys_regmap = regmap;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(dev, mem);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs)) {
ret = PTR_ERR(hdmi->regs);
goto put_device;
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index fc69ee38ce7d..7982788ae9df 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -291,7 +291,6 @@ static const struct component_ops mtk_mdp_rdma_component_ops = {
static int mtk_mdp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct mtk_mdp_rdma *priv;
int ret = 0;
@@ -299,8 +298,7 @@ static int mtk_mdp_rdma_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap rdma\n");
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7ec833b6d829..974bc7c0ea76 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -170,6 +170,8 @@ config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
default y
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Compile in support for HDMI output in the MSM DRM driver. It can
be a primary or a secondary display on the device. Note that this is used
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index edffb7737a97..53e2ff4406d8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -880,6 +880,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 137, 1 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020300),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a623_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a690_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ },
+ { /* sentinel */ },
+ },
+ },
+ .address_space_size = SZ_16G,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
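The new A623 entry ends its bcms table with an empty sentinel instead of carrying a count, so a consumer can size it by walking to the first entry without a name. A sketch under that assumption:

	static int a6xx_bcm_count(const struct a6xx_bcm *bcms)
	{
		int n = 0;

		/* the table is terminated by an all-zero sentinel entry */
		while (bcms[n].name)
			n++;

		return n;
	}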
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 699b0dd34b18..38c94915d4c9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1169,49 +1169,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
+ int ret;
/*
- * The GMU may still be in slumber unless the GPU started so check and
- * skip putting it back into slumber if so
+ * The GMU firmware's internal power state gets confused if we send a "prepare_slumber"
+ * HFI when no oob_gpu handshake was done after the last wake up, so do a dummy
+ * handshake here when required.
*/
- val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ if (adreno_gpu->base.needs_hw_init) {
+ if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
+ goto force_off;
- if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(gmu);
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ }
- /* If the GMU isn't responding assume it is hung */
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ ret = a6xx_gmu_wait_for_idle(gmu);
- a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret)
+ goto force_off;
- /* tell the GMU we want to slumber */
- ret = a6xx_gmu_notify_slumber(gmu);
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
- ret = gmu_poll_timeout(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
- !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
- 100, 10000);
+ /* tell the GMU we want to slumber */
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret)
+ goto force_off;
- /*
- * Let the user know we failed to slumber but don't worry too
- * much because we are powering down anyway
- */
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
- if (ret)
- DRM_DEV_ERROR(gmu->dev,
- "Unable to slumber GMU: status = 0%x/0%x\n",
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
- }
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
/* Turn off HFI */
a6xx_hfi_stop(gmu);
@@ -1221,6 +1222,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+
+ return;
+
+force_off:
+ a6xx_gmu_force_off(gmu);
}
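The rework above funnels the three earlier a6xx_gmu_force_off() call sites into one force_off label, the usual kernel idiom for a consolidated failure path. Reduced to a sketch (the handshake helper stands in for the OOB set/clear pair):

	static void gmu_shutdown_shape(struct a6xx_gmu *gmu)
	{
		if (needs_hw_init && dummy_oob_handshake(gmu))
			goto force_off;

		if (a6xx_gmu_wait_for_idle(gmu))
			goto force_off;

		if (a6xx_gmu_notify_slumber(gmu))
			goto force_off;

		/* ... HFI stop, RSCC stop, RPMh power-off ... */
		return;

	force_off:
		a6xx_gmu_force_off(gmu);	/* single consolidated error path */
	}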
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0ae29a7c8a4d..1820c167fcee 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -616,6 +616,14 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.uavflagprd_inv = 2;
}
+ if (adreno_is_a623(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 16;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 0fcae53c0b14..341a72a67401 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1214,12 +1214,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
- 3, sizeof(*a6xx_state->gmu_registers));
+ 4, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
- a6xx_state->nr_gmu_registers = 3;
+ a6xx_state->nr_gmu_registers = 4;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -1227,6 +1227,13 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
&a6xx_state->gmu_registers[1], true);
+ if (adreno_is_a621(adreno_gpu) || adreno_is_a623(adreno_gpu))
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a621_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+ else
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return;
@@ -1234,7 +1241,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
- &a6xx_state->gmu_registers[2], false);
+ &a6xx_state->gmu_registers[3], false);
}
static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
@@ -1507,6 +1514,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
}
static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
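Two snapshot fixes land in this file: the GMU register capture grows from three to four sets so the GPU CC block gets its own list (with an A621/A623-specific table), and a6xx_get_indexed_registers() now records the captured count before returning. The per-chip selection reduces to:

	const struct a6xx_registers *gpucc =
		(adreno_is_a621(adreno_gpu) || adreno_is_a623(adreno_gpu)) ?
			&a621_gpucc_reg : &a6xx_gpucc_reg;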
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index dd4c28a8d923..e545106c70be 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -363,6 +363,9 @@ static const u32 a6xx_gmu_cx_registers[] = {
0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
/* GMU AO */
0x9300, 0x9316, 0x9400, 0x9400,
+};
+
+static const u32 a6xx_gmu_gpucc_registers[] = {
/* GPU CC */
0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
@@ -373,6 +376,17 @@ static const u32 a6xx_gmu_cx_registers[] = {
0xbc00, 0xbc16, 0xbc20, 0xbc27,
};
+static const u32 a621_gmu_gpucc_registers[] = {
+ /* GPU CC */
+ 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404,
+ 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30,
+ 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a,
+ 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5,
+ 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc,
+ 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16,
+ 0xbe20, 0xbe2d,
+};
+
static const u32 a6xx_gmu_cx_rscc_registers[] = {
/* GPU RSCC */
0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
@@ -386,6 +400,9 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_gx_registers, 0, 0),
};
+static const struct a6xx_registers a6xx_gpucc_reg = REGS(a6xx_gmu_gpucc_registers, 0, 0);
+static const struct a6xx_registers a621_gpucc_reg = REGS(a621_gmu_gpucc_registers, 0, 0);
+
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu);
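These register tables are consumed as consecutive {first, last} pairs naming inclusive ranges (single registers appear as pairs like 0xbc32, 0xbc32). A minimal walker under that pair convention, with read_reg() as a hypothetical accessor:

	static void dump_reg_ranges(const u32 *table, size_t npairs)
	{
		size_t i;
		u32 addr;

		for (i = 0; i < npairs; i++) {
			/* each pair is an inclusive [first, last] range */
			for (addr = table[2 * i]; addr <= table[2 * i + 1]; addr++)
				pr_info("0x%04x: 0x%08x\n", addr, read_reg(addr));
		}
	}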
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 1238f3265978..7156cda07b03 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -883,6 +883,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
drm_printf(p, " - type=%s\n", info->type);
drm_printf(p, " - source=%s\n", info->block);
+
+ /* Information extracted from what we think are the current
+ * pgtables. Hopefully the TTBR0 matches what we've extracted
+ * from the SMMU registers in smmu_info!
+ */
+ drm_puts(p, "pgtable-fault-info:\n");
+ drm_printf(p, " - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0);
+ drm_printf(p, " - asid: %d\n", info->asid);
+ drm_printf(p, " - ptes: %.16llx %.16llx %.16llx %.16llx\n",
+ info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
}
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index dcf454629ce0..92caba3584da 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -442,6 +442,11 @@ static inline int adreno_is_a621(const struct adreno_gpu *gpu)
return gpu->info->chip_ids[0] == 0x06020100;
}
+static inline int adreno_is_a623(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020300;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
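The 0x06020300 comparison follows the usual Adreno chip-id packing, one byte per field: core 6, major 2, minor 3, patch 0, i.e. A623. Sketched as macros (assuming that byte layout):

	#define ADRENO_CORE(id)		(((id) >> 24) & 0xff)	/* 0x06 */
	#define ADRENO_MAJOR(id)	(((id) >> 16) & 0xff)	/* 0x02 */
	#define ADRENO_MINOR(id)	(((id) >> 8) & 0xff)	/* 0x03 */
	#define ADRENO_PATCHID(id)	((id) & 0xff)		/* 0x00 */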
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index bcb39807fe61..6ac97c378056 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -343,8 +343,8 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -452,6 +452,7 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.mdss_ver = &sm8650_mdss_ver,
.caps = &sm8650_dpu_caps,
.mdp = &sm8650_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8650_ctl),
.ctl = sm8650_ctl,
.sspp_count = ARRAY_SIZE(sm8650_sspp),
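Switching wb_2 to wb2_formats_rgb_yuv pairs with registering a CDM in the same catalog entries: the encoder requests a CDM whenever the writeback framebuffer is YUV, since the CDM performs the RGB-to-YUV conversion. The gating check, as it appears in the dpu_encoder hunk later in this series:

	fb = conn_state->writeback_job->fb;
	if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
		topology->num_cdm++;	/* YUV writeback needs a CDM for CSC */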
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ab3dfb0b374e..1f32807bb5e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -190,6 +190,7 @@ const struct dpu_mdss_cfg dpu_msm8937_cfg = {
.mdss_ver = &msm8937_mdss_ver,
.caps = &msm8937_dpu_caps,
.mdp = msm8937_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8937_ctl),
.ctl = msm8937_ctl,
.sspp_count = ARRAY_SIZE(msm8937_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 6bdaecca6761..42131959ff22 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -167,6 +167,7 @@ const struct dpu_mdss_cfg dpu_msm8917_cfg = {
.mdss_ver = &msm8917_mdss_ver,
.caps = &msm8917_dpu_caps,
.mdp = msm8917_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8917_ctl),
.ctl = msm8917_ctl,
.sspp_count = ARRAY_SIZE(msm8917_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 14f36ea6ad0e..2b4723a5c676 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -198,6 +198,7 @@ const struct dpu_mdss_cfg dpu_msm8953_cfg = {
.mdss_ver = &msm8953_mdss_ver,
.caps = &msm8953_dpu_caps,
.mdp = msm8953_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8953_ctl),
.ctl = msm8953_ctl,
.sspp_count = ARRAY_SIZE(msm8953_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 491f6f5827d1..5cf19de71f06 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -316,6 +316,7 @@ const struct dpu_mdss_cfg dpu_msm8996_cfg = {
.mdss_ver = &msm8996_mdss_ver,
.caps = &msm8996_dpu_caps,
.mdp = msm8996_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8996_ctl),
.ctl = msm8996_ctl,
.sspp_count = ARRAY_SIZE(msm8996_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 64c94e919a69..746474679ef5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -302,6 +302,7 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = {
.mdss_ver = &msm8998_mdss_ver,
.caps = &msm8998_dpu_caps,
.mdp = &msm8998_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8998_ctl),
.ctl = msm8998_ctl,
.sspp_count = ARRAY_SIZE(msm8998_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index 424815e7fb7d..4f2f68b07f20 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -269,6 +269,7 @@ const struct dpu_mdss_cfg dpu_sdm660_cfg = {
.mdss_ver = &sdm660_mdss_ver,
.caps = &sdm660_dpu_caps,
.mdp = &sdm660_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm660_ctl),
.ctl = sdm660_ctl,
.sspp_count = ARRAY_SIZE(sdm660_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index df01227fc364..c70bef025ac4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -205,6 +205,7 @@ const struct dpu_mdss_cfg dpu_sdm630_cfg = {
.mdss_ver = &sdm630_mdss_ver,
.caps = &sdm630_dpu_caps,
.mdp = &sdm630_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm630_ctl),
.ctl = sdm630_ctl,
.sspp_count = ARRAY_SIZE(sdm630_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 72bd4f7e9e50..ab7b4822ca63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -319,6 +319,7 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = {
.mdss_ver = &sdm845_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm845_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm845_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index daef07924886..c2fde980fb52 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -132,6 +132,7 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.mdss_ver = &sdm670_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm670_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 36cc9dbc00b5..979527d98fbc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -298,8 +298,8 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -388,6 +388,7 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.mdss_ver = &sm8150_mdss_ver,
.caps = &sm8150_dpu_caps,
.mdp = &sm8150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
.sspp_count = ARRAY_SIZE(sm8150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index e8eacdb47967..d76b8992a6c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -414,6 +414,7 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.mdss_ver = &sc8180x_mdss_ver,
.caps = &sc8180x_dpu_caps,
.mdp = &sc8180x_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8180x_ctl),
.ctl = sc8180x_ctl,
.sspp_count = ARRAY_SIZE(sc8180x_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 2fe674d1e059..83db11339b29 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -261,8 +261,8 @@ static const struct dpu_wb_cfg sm7150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -309,6 +309,7 @@ const struct dpu_mdss_cfg dpu_sm7150_cfg = {
.mdss_ver = &sm7150_mdss_ver,
.caps = &sm7150_dpu_caps,
.mdp = &sm7150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm7150_ctl),
.ctl = sm7150_ctl,
.sspp_count = ARRAY_SIZE(sm7150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index d761ed705bac..da11830d4407 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -162,6 +163,21 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = {
},
};
+static const struct dpu_wb_cfg sm6150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 2160,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -232,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.mdss_ver = &sm6150_mdss_ver,
.caps = &sm6150_dpu_caps,
.mdp = &sm6150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6150_ctl),
.ctl = sm6150_ctl,
.sspp_count = ARRAY_SIZE(sm6150_sspp),
@@ -242,6 +259,8 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.dspp = sm6150_dspp,
.pingpong_count = ARRAY_SIZE(sm6150_pp),
.pingpong = sm6150_pp,
+ .wb_count = ARRAY_SIZE(sm6150_wb),
+ .wb = sm6150_wb,
.intf_count = ARRAY_SIZE(sm6150_intf),
.intf = sm6150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index 76f60a2df7a8..d3d3a34d0b45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -145,8 +145,8 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -216,6 +216,7 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.mdss_ver = &sm6125_mdss_ver,
.caps = &sm6125_dpu_caps,
.mdp = &sm6125_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6125_ctl),
.ctl = sm6125_ctl,
.sspp_count = ARRAY_SIZE(sm6125_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index e8916ae826a6..47e01c3c242f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -386,7 +386,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
.mdss_ver = &sm8250_mdss_ver,
.caps = &sm8250_dpu_caps,
.mdp = &sm8250_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8250_ctl),
.ctl = sm8250_ctl,
.sspp_count = ARRAY_SIZE(sm8250_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 7382ebb6e5b2..040c94c0bb66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -157,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -204,6 +204,7 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = {
.mdss_ver = &sc7180_mdss_ver,
.caps = &sc7180_dpu_caps,
.mdp = &sc7180_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7180_ctl),
.ctl = sc7180_ctl,
.sspp_count = ARRAY_SIZE(sc7180_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 0502cee2f116..397278ba999b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -151,8 +151,8 @@ static const struct dpu_wb_cfg sm6350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -222,6 +222,7 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.mdss_ver = &sm6350_mdss_ver,
.caps = &sm6350_dpu_caps,
.mdp = &sm6350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6350_ctl),
.ctl = sm6350_ctl,
.sspp_count = ARRAY_SIZE(sm6350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index f7c08e89c882..0c860e804cab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -396,6 +396,7 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
.mdss_ver = &sm8350_mdss_ver,
.caps = &sm8350_dpu_caps,
.mdp = &sm8350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8350_ctl),
.ctl = sm8350_ctl,
.sspp_count = ARRAY_SIZE(sm8350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 2f153e0b5c6a..e9625c48c567 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -248,7 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
.mdss_ver = &sc7280_mdss_ver,
.caps = &sc7280_dpu_caps,
.mdp = &sc7280_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7280_ctl),
.ctl = sc7280_ctl,
.sspp_count = ARRAY_SIZE(sc7280_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 0d143e390eca..fcee1c3665f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -435,6 +435,7 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
.mdss_ver = &sc8280xp_mdss_ver,
.caps = &sc8280xp_dpu_caps,
.mdp = &sc8280xp_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8280xp_ctl),
.ctl = sc8280xp_ctl,
.sspp_count = ARRAY_SIZE(sc8280xp_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 08742472f9cc..19b2ee8bbd5f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -321,8 +321,8 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -412,6 +412,7 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
.mdss_ver = &sm8450_mdss_ver,
.caps = &sm8450_dpu_caps,
.mdp = &sm8450_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8450_ctl),
.ctl = sm8450_ctl,
.sspp_count = ARRAY_SIZE(sm8450_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 76ec72a32378..4d96ce71746f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -458,7 +458,7 @@ const struct dpu_mdss_cfg dpu_sa8775p_cfg = {
.mdss_ver = &sa8775p_mdss_ver,
.caps = &sa8775p_dpu_caps,
.mdp = &sa8775p_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sa8775p_ctl),
.ctl = sa8775p_ctl,
.sspp_count = ARRAY_SIZE(sa8775p_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 4d3787fceb72..24f988465bf6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -407,6 +407,7 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
.mdss_ver = &sm8550_mdss_ver,
.caps = &sm8550_dpu_caps,
.mdp = &sm8550_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8550_ctl),
.ctl = sm8550_ctl,
.sspp_count = ARRAY_SIZE(sm8550_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6b112e3d17da..6417baa84f82 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -453,6 +453,7 @@ const struct dpu_mdss_cfg dpu_x1e80100_cfg = {
.mdss_ver = &x1e80100_mdss_ver,
.caps = &x1e80100_dpu_caps,
.mdp = &x1e80100_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(x1e80100_ctl),
.ctl = x1e80100_ctl,
.sspp_count = ARRAY_SIZE(x1e80100_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 6f0a37f954fe..0fb5789c60d0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -118,26 +118,38 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
return;
}
- memset(perf, 0, sizeof(struct dpu_core_perf_params));
-
- if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- perf->bw_ctl = 0;
- perf->max_per_pipe_ib = 0;
- perf->core_clk_rate = 0;
- } else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
- perf->bw_ctl = core_perf->fix_core_ab_vote;
- perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
- perf->core_clk_rate = core_perf->fix_core_clk_rate;
- } else {
- perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
- perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
- perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
- }
-
+ perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
+ perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
DRM_DEBUG_ATOMIC(
- "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
+ "crtc=%d clk_rate=%llu core_ib=%u core_ab=%u\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
+ perf->max_per_pipe_ib,
+ (u32)DIV_ROUND_UP_ULL(perf->bw_ctl, 1000));
+}
+
+static void dpu_core_perf_aggregate(struct drm_device *ddev,
+ enum dpu_crtc_client_type curr_client_type,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+
+ drm_for_each_crtc(tmp_crtc, ddev) {
+ if (tmp_crtc->enabled &&
+ curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ dpu_cstate->new_perf.max_per_pipe_ib);
+
+ perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+ DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n",
+ tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl);
+ }
+ }
}
/**
@@ -150,11 +162,9 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
u32 bw, threshold;
- u64 bw_sum_of_intfs = 0;
- enum dpu_crtc_client_type curr_client_type;
struct dpu_crtc_state *dpu_cstate;
- struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
+ struct dpu_core_perf_params perf = { 0 };
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
@@ -172,80 +182,56 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf);
- bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
- curr_client_type = dpu_crtc_get_client_type(crtc);
-
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- dpu_crtc_get_client_type(tmp_crtc) == curr_client_type &&
- tmp_crtc != crtc) {
- struct dpu_crtc_state *tmp_cstate =
- to_dpu_crtc_state(tmp_crtc->state);
-
- DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
- tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
- tmp_cstate->bw_control);
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
- }
-
- /* convert bandwidth to kb */
- bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
- DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(perf.bw_ctl, 1000);
+ DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
- threshold = kms->perf.perf_cfg->max_bw_high;
+ threshold = kms->perf.perf_cfg->max_bw_high;
- DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
+ DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
- if (!threshold) {
- DPU_ERROR("no bandwidth limits specified\n");
- return -E2BIG;
- } else if (bw > threshold) {
- DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
- threshold);
- return -E2BIG;
- }
+ if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
}
return 0;
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc)
{
struct dpu_core_perf_params perf = { 0 };
- enum dpu_crtc_client_type curr_client_type
- = dpu_crtc_get_client_type(crtc);
- struct drm_crtc *tmp_crtc;
- struct dpu_crtc_state *dpu_cstate;
int i, ret = 0;
- u64 avg_bw;
+ u32 avg_bw;
+ u32 peak_bw;
if (!kms->num_paths)
return 0;
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- curr_client_type ==
- dpu_crtc_get_client_type(tmp_crtc)) {
- dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
-
- perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
- dpu_cstate->new_perf.max_per_pipe_ib);
-
- perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ avg_bw = 0;
+ peak_bw = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ avg_bw = kms->perf.fix_core_ab_vote;
+ peak_bw = kms->perf.fix_core_ib_vote;
+ } else {
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
- tmp_crtc->base.id,
- dpu_cstate->new_perf.bw_ctl, kms->num_paths);
- }
+ avg_bw = div_u64(perf.bw_ctl, 1000); /*Bps_to_icc*/
+ peak_bw = perf.max_per_pipe_ib;
}
- avg_bw = perf.bw_ctl;
- do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
+ avg_bw /= kms->num_paths;
for (i = 0; i < kms->num_paths; i++)
- icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+ icc_set_bw(kms->path[i], avg_bw, peak_bw);
return ret;
}
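The bus update now hands icc_set_bw() a u32 average and a u32 peak vote. perf.bw_ctl is accumulated in bytes per second, so it is divided by 1000 (Bps to kBps, the interconnect framework's unit) and split evenly across the voting paths; per the header change below, the ib value is already a kBps vote. Condensed:

	u32 avg_bw = div_u64(perf.bw_ctl, 1000) / kms->num_paths;	/* Bps -> kBps, per path */
	u32 peak_bw = perf.max_per_pipe_ib;

	for (i = 0; i < kms->num_paths; i++)
		icc_set_bw(kms->path[i], avg_bw, peak_bw);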
@@ -476,9 +462,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0400, entry,
+ debugfs_create_u32("low_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0400, entry,
+ debugfs_create_u32("max_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
@@ -490,9 +476,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 451bf8021114..d2f21d34e501 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,7 +19,7 @@
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib;
+ u32 max_per_pipe_ib;
u64 bw_ctl;
u64 core_clk_rate;
};
@@ -40,8 +40,8 @@ struct dpu_core_perf_tune {
* @perf_tune: debug control for performance tuning
* @enable_bw_release: debug control for bandwidth release
* @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
- * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
- * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in KBps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in KBps used in mode 2
*/
struct dpu_core_perf {
const struct dpu_perf_cfg *perf_cfg;
@@ -50,8 +50,8 @@ struct dpu_core_perf {
struct dpu_core_perf_tune perf_tune;
u32 enable_bw_release;
u64 fix_core_clk_rate;
- u64 fix_core_ib_vote;
- u64 fix_core_ab_vote;
+ u32 fix_core_ib_vote;
+ u32 fix_core_ab_vote;
};
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index e5dcd41a361f..0714936d8835 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -953,6 +953,45 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
+static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ /* Find encoder for real time display */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ wb_encoder = encoder;
+ else
+ rt_encoder = encoder;
+ }
+
+ if (!rt_encoder || !wb_encoder) {
+ DRM_DEBUG_ATOMIC("real time or wb encoder not found\n");
+ return -EINVAL;
+ }
+
+ dpu_encoder_prepare_for_kickoff(wb_encoder);
+ dpu_encoder_prepare_for_kickoff(rt_encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ /*
+ * Kickoff real time encoder last as it's the encoder that
+ * will do the flush
+ */
+ dpu_encoder_kickoff(wb_encoder);
+ dpu_encoder_kickoff(rt_encoder);
+
+ /* Don't start frame done timers until the kickoffs have finished */
+ dpu_encoder_start_frame_done_timer(wb_encoder);
+ dpu_encoder_start_frame_done_timer(rt_encoder);
+
+ return 0;
+}
+
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
@@ -981,13 +1020,27 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
goto end;
}
}
- /*
- * Encoder will flush/start now, unless it has a tx pending. If so, it
- * may delay and flush at an irq event (e.g. ppdone)
- */
- drm_for_each_encoder_mask(encoder, crtc->dev,
- crtc->state->encoder_mask)
- dpu_encoder_prepare_for_kickoff(encoder);
+
+ if (drm_crtc_in_clone_mode(crtc->state)) {
+ if (dpu_crtc_kickoff_clone_mode(crtc))
+ goto end;
+ } else {
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ dpu_encoder_kickoff(encoder);
+ dpu_encoder_start_frame_done_timer(encoder);
+ }
+ }
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
@@ -997,11 +1050,6 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
dpu_crtc->play_count++;
- dpu_vbif_clear_errors(dpu_kms);
-
- drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
- dpu_encoder_kickoff(encoder);
-
reinit_completion(&dpu_crtc->frame_done_comp);
end:
@@ -1230,6 +1278,151 @@ done:
return ret;
}
+#define MAX_CHANNELS_PER_CRTC 2
+#define MAX_HDISPLAY_SPLIT 1080
+
+static struct msm_display_topology dpu_crtc_get_topology(
+ struct drm_crtc *crtc,
+ struct dpu_kms *dpu_kms,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct msm_display_topology topology = {0};
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
+ dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
+ &crtc_state->adjusted_mode);
+
+ topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state);
+
+ /*
+ * Datapath topology selection
+ *
+ * Dual display
+ * 2 LM, 2 INTF ( Split display using 2 interfaces)
+ *
+ * Single display
+ * 1 LM, 1 INTF
+ * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+ *
+ * If DSC is enabled, use 2 LMs for 2:2:1 topology
+ *
+ * Add dspps to the reservation requirements if ctm is requested
+ *
+ * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not
+ * enabled. This is because in cases where CWB is enabled, num_intf will
+ * count both the WB and real-time phys encoders.
+ *
+ * For non-DSC CWB usecases, have the num_lm be decided by the
+ * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
+ */
+
+ if (topology.num_intf == 2 && !topology.cwb_enabled)
+ topology.num_lm = 2;
+ else if (topology.num_dsc == 2)
+ topology.num_lm = 2;
+ else if (dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+ else
+ topology.num_lm = 1;
+
+ if (crtc_state->ctm)
+ topology.num_dspp = topology.num_lm;
+
+ return topology;
+}
+
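A worked example of the selection above: a single-interface 3840x2160 mode with no DSC and no CWB, on hardware with 3D merge, takes the has_3d_merge branch and gets two mixers; a CTM request then reserves a DSPP per mixer:

	/* 3840x2160, num_intf == 1, no DSC, no CWB, 3D merge available */
	topology.num_lm = (3840 > MAX_HDISPLAY_SPLIT) ? 2 : 1;	/* -> 2 */
	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;		/* -> 2 */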
+static int dpu_crtc_assign_resources(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC];
+ int i, num_lm, num_ctl, num_dspp;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_global_state *global_state;
+ struct dpu_crtc_state *cstate;
+ struct msm_display_topology topology;
+ int ret;
+
+ /*
+ * Release and Allocate resources on every modeset
+ */
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ dpu_rm_release(global_state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ crtc_state->crtc, &topology);
+ if (ret)
+ return ret;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_CTL, hw_ctl,
+ ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_LM, hw_lm,
+ ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ if (i < num_dspp)
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
+ }
+
+ cstate->num_mixers = num_lm;
+
+ return 0;
+}
+
+/**
+ * dpu_crtc_check_mode_changed: check if full modeset is required
+ * @old_crtc_state: Previous CRTC state
+ * @new_crtc_state: Corresponding CRTC state to be checked
+ *
+ * Check if the changes in the object properties demand full mode set.
+ */
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc *crtc = new_crtc_state->crtc;
+ bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state);
+ bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state);
+
+ DRM_DEBUG_ATOMIC("%d\n", crtc->base.id);
+
+ /* there might be cases where encoder needs a modeset too */
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) {
+ if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state))
+ new_crtc_state->mode_changed = true;
+ }
+
+ if ((clone_mode_requested && !clone_mode_enabled) ||
+ (!clone_mode_requested && clone_mode_enabled))
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
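The two clone-mode conditions at the end of the function form a boolean inequality; written as a single comparison, the intent (any transition into or out of clone mode forces a full modeset) is explicit:

	/* equivalent to the pair of checks above */
	if (clone_mode_requested != clone_mode_enabled)
		new_crtc_state->mode_changed = true;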
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1245,6 +1438,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ /* don't reallocate resources if only ACTIVE has been changed */
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ rc = dpu_crtc_assign_resources(crtc, crtc_state);
+ if (rc < 0)
+ return rc;
+ }
+
if (dpu_use_virtual_planes &&
(crtc_state->planes_changed || crtc_state->zpos_changed)) {
rc = dpu_crtc_reassign_planes(crtc, crtc_state);
@@ -1262,10 +1462,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
- /* force a full mode set if active state changed */
- if (crtc_state->active_changed)
- crtc_state->mode_changed = true;
-
if (cstate->num_mixers) {
rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
if (rc)
@@ -1484,8 +1680,9 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
- seq_printf(s, "max_per_pipe_ib: %llu\n",
+ seq_printf(s, "bw_ctl: %uk\n",
+ (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000));
+ seq_printf(s, "max_per_pipe_ib: %u\n",
dpu_crtc->cur_perf.max_per_pipe_ib);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 0b148f3ce0d7..94392b9b9245 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -239,6 +239,9 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 48e6e8d74c85..284e69bb47c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -24,6 +24,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_cwb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
@@ -58,8 +59,6 @@
#define IDLE_SHORT_TIMEOUT 1
-#define MAX_HDISPLAY_SPLIT 1080
-
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -135,8 +134,12 @@ enum dpu_enc_rc_states {
* @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. No.
* pingpong blocks can be different than num_phys_encs.
+ * @hw_cwb: Handle to the CWB muxes used for concurrent writeback
+ * display. Number of CWB muxes can be different than
+ * num_phys_encs.
* @hw_dsc: Handle to the DSC blocks used for the display.
* @dsc_mask: Bitmask of used DSC blocks.
+ * @cwb_mask: Bitmask of used CWB muxes
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
@@ -179,9 +182,11 @@ struct dpu_encoder_virt {
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC];
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
unsigned int dsc_mask;
+ unsigned int cwb_mask;
bool intfs_swapped;
@@ -622,9 +627,9 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
if (dpu_enc->phys_encs[i])
intf_count++;
- /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
- if (dpu_enc->dsc)
- num_dsc = 2;
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ if (dpu_enc->hw_dsc[i])
+ num_dsc++;
return (num_dsc > 0) && (num_dsc > intf_count);
}
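With the hardcoded num_dsc = 2 gone, the merge decision counts the DSC blocks actually assigned. For example, two DSC blocks feeding one interface triggers merge, while two DSC blocks on two interfaces does not:

	/* num_dsc = 2, intf_count = 1 -> merge; num_dsc = 2, intf_count = 2 -> no merge */
	bool use_merge = (num_dsc > 0) && (num_dsc > intf_count);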
@@ -647,130 +652,51 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
return NULL;
}
-static struct msm_display_topology dpu_encoder_get_topology(
- struct dpu_encoder_virt *dpu_enc,
- struct dpu_kms *dpu_kms,
- struct drm_display_mode *mode,
- struct drm_crtc_state *crtc_state,
- struct drm_dsc_config *dsc)
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode)
{
- struct msm_display_topology topology = {0};
- int i, intf_count = 0;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
+ struct msm_display_info *disp_info = &dpu_enc->disp_info;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct drm_dsc_config *dsc;
+
+ int i;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
if (dpu_enc->phys_encs[i])
- intf_count++;
+ topology->num_intf++;
- /* Datapath topology selection
- *
- * Dual display
- * 2 LM, 2 INTF ( Split display using 2 interfaces)
- *
- * Single display
- * 1 LM, 1 INTF
- * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
- *
- * Add dspps to the reservation requirements if ctm is requested
- */
- if (intf_count == 2)
- topology.num_lm = 2;
- else if (!dpu_kms->catalog->caps->has_3d_merge)
- topology.num_lm = 1;
- else
- topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
-
- if (crtc_state->ctm)
- topology.num_dspp = topology.num_lm;
-
- topology.num_intf = intf_count;
+ dsc = dpu_encoder_get_dsc_config(drm_enc);
+ /* We support at most two DSC encoders (with 2 LM and 1 or 2 INTF) */
if (dsc) {
/*
- * In case of Display Stream Compression (DSC), we would use
- * 2 DSC encoders, 2 layer mixers and 1 interface
- * this is power optimal and can drive up to (including) 4k
- * screens
+ * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
+ * when Display Stream Compression (DSC) is enabled,
+ * and when enough DSC blocks are available.
+ * This is power-optimal and can drive up to (including) 4k
+ * screens.
*/
- topology.num_dsc = 2;
- topology.num_lm = 2;
- topology.num_intf = 1;
- }
-
- return topology;
-}
-
-static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
- struct drm_encoder *drm_enc,
- struct dpu_global_state *global_state,
- struct drm_crtc_state *crtc_state)
-{
- struct dpu_crtc_state *cstate;
- struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_dspp, i;
-
- cstate = to_dpu_crtc_state(crtc_state);
-
- memset(cstate->mixers, 0, sizeof(cstate->mixers));
-
- num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
- num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
- ARRAY_SIZE(hw_dspp));
-
- for (i = 0; i < num_lm; i++) {
- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
- cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
- cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
- cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
- }
-
- cstate->num_mixers = num_lm;
-}
-
-static int dpu_encoder_virt_atomic_check(
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct dpu_encoder_virt *dpu_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- struct drm_display_mode *adj_mode;
- struct msm_display_topology topology;
- struct msm_display_info *disp_info;
- struct dpu_global_state *global_state;
- struct drm_framebuffer *fb;
- struct drm_dsc_config *dsc;
- int ret = 0;
-
- if (!drm_enc || !crtc_state || !conn_state) {
- DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
- drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
- return -EINVAL;
+ WARN(topology->num_intf > 2,
+ "DSC topology cannot support more than 2 interfaces\n");
+ if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
+ topology->num_dsc = 2;
+ else
+ topology->num_dsc = 1;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
-
- priv = drm_enc->dev->dev_private;
- disp_info = &dpu_enc->disp_info;
- dpu_kms = to_dpu_kms(priv->kms);
- adj_mode = &crtc_state->adjusted_mode;
- global_state = dpu_kms_get_global_state(crtc_state->state);
- if (IS_ERR(global_state))
- return PTR_ERR(global_state);
-
- trace_dpu_enc_atomic_check(DRMID(drm_enc));
-
- dsc = dpu_encoder_get_dsc_config(drm_enc);
-
- topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return;
/*
* Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
@@ -781,34 +707,45 @@ static int dpu_encoder_virt_atomic_check(
fb = conn_state->writeback_job->fb;
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
- topology.needs_cdm = true;
+ topology->num_cdm++;
} else if (disp_info->intf_type == INTF_DP) {
if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
- topology.needs_cdm = true;
+ topology->num_cdm++;
}
+}
- if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- /*
- * Release and Allocate resources on every modeset
- * Dont allocate when active is false.
- */
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- dpu_rm_release(global_state, drm_enc);
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- if (!crtc_state->active_changed || crtc_state->enable)
- ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, &topology);
- if (!ret)
- dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
- global_state, crtc_state);
- }
+ if (!drm_enc || !state)
+ return false;
- trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return false;
- return ret;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+
+	/*
+	 * These checks are duplicated from dpu_encoder_update_topology() since
+	 * the CRTC and encoder don't hold topology information.
+ */
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ fb = conn_state->writeback_job->fb;
+ if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) {
+ if (!dpu_enc->cur_master->hw_cdm)
+ return true;
+ } else {
+ if (dpu_enc->cur_master->hw_cdm)
+ return true;
+ }
+ }
+
+ return false;
}
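
/*
 * A hypothetical caller sketch (names assumed; the real call site is
 * the CRTC mode-change check added elsewhere in this series): each
 * encoder on the CRTC is polled and the verdict folded into the new
 * CRTC state before the DRM atomic helpers run, roughly
 *
 *	drm_for_each_encoder_mask(encoder, crtc->dev,
 *				  new_crtc_state->encoder_mask)
 *		if (dpu_encoder_needs_modeset(encoder, state))
 *			new_crtc_state->mode_changed = true;
 */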
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
@@ -1219,8 +1156,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC];
int num_ctl, num_pp, num_dsc;
+ int num_cwb = 0;
+ bool is_cwb_encoder;
unsigned int dsc_mask = 0;
+ unsigned int cwb_mask = 0;
int i;
if (!drm_enc) {
@@ -1233,6 +1174,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
+ is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) &&
+ dpu_enc->disp_info.intf_type == INTF_WB;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
@@ -1243,18 +1186,38 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
/* Query resource that have been reserved in atomic check step. */
- num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
- ARRAY_SIZE(hw_pp));
+ if (is_cwb_encoder) {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_DCWB_PINGPONG,
+ hw_pp, ARRAY_SIZE(hw_pp));
+ num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_CWB,
+ hw_cwb, ARRAY_SIZE(hw_cwb));
+ } else {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ }
+
+ for (i = 0; i < num_cwb; i++) {
+ dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]);
+ cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0);
+ }
+
+ dpu_enc->cwb_mask = cwb_mask;
+
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSC,
+ drm_enc->crtc, DPU_HW_BLK_DSC,
hw_dsc, ARRAY_SIZE(hw_dsc));
for (i = 0; i < num_dsc; i++) {
dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
@@ -1268,7 +1231,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_cdm = NULL;
dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CDM,
+ drm_enc->crtc, DPU_HW_BLK_CDM,
&hw_cdm, 1);
dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
}
@@ -1654,6 +1617,7 @@ static void dpu_encoder_off_work(struct work_struct *work)
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
u32 ret = UINT_MAX;
@@ -1671,6 +1635,15 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ /* Return early if encoder is writeback and in clone mode */
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n",
+ DRMID(drm_enc));
+ return;
+ }
+
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1693,6 +1666,8 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys->parent);
+
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
return;
@@ -1703,6 +1678,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
return;
}
+ if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
+ return;
+ }
+
if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
phys->ops.trigger_start(phys);
}
@@ -2020,7 +2001,6 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
struct drm_dsc_config *dsc)
{
- /* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
@@ -2030,22 +2010,24 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
int dsc_common_mode;
int pic_width;
u32 initial_lines;
+ int num_dsc = 0;
int i;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = dpu_enc->hw_pp[i];
hw_dsc[i] = dpu_enc->hw_dsc[i];
- if (!hw_pp[i] || !hw_dsc[i]) {
- DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
- return;
- }
+ if (!hw_pp[i] || !hw_dsc[i])
+ break;
+
+ num_dsc++;
}
- dsc_common_mode = 0;
pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+ dsc_common_mode = 0;
+ if (num_dsc > 1)
+ dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
if (dpu_encoder_use_dsc_merge(enc_master->parent))
dsc_common_mode |= DSC_MODE_MULTIPLEX;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
@@ -2054,14 +2036,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
this_frame_slices = pic_width / dsc->slice_width;
intf_ip_w = this_frame_slices * dsc->slice_width;
- /*
- * dsc merge case: when using 2 encoders for the same stream,
- * no. of slices need to be same on both the encoders.
- */
- enc_ip_w = intf_ip_w / 2;
+ enc_ip_w = intf_ip_w / num_dsc;
initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ for (i = 0; i < num_dsc; i++)
dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
dsc, dsc_common_mode, initial_lines);
}
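
/*
 * Worked example of the slice math above: with pic_width == 1080 and
 * slice_width == 540, this_frame_slices == 2 and intf_ip_w == 1080;
 * num_dsc == 2 then feeds each DSC block an input width of 540,
 * while a single-DSC topology keeps the full 1080.
 */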
@@ -2135,6 +2113,25 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
}
/**
+ * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer
+ * @drm_enc: Pointer to drm encoder structure
+ */
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned long timeout_ms;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+}
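
/*
 * Standalone sketch of the timeout math above (the value 5 for
 * DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES is an assumption, not taken
 * from this hunk):
 */
static unsigned long frame_done_timeout_ms(unsigned long frames, int vrefresh)
{
	/* 5 frames at 60 Hz -> 5 * 1000 / 60 = 83 ms; at 120 Hz -> 41 ms */
	return frames * 1000 / vrefresh;
}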
+
+/**
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @drm_enc: encoder pointer
@@ -2143,7 +2140,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
- unsigned long timeout_ms;
unsigned int i;
DPU_ATRACE_BEGIN("encoder_kickoff");
@@ -2151,13 +2147,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
trace_dpu_enc_kickoff(DRMID(drm_enc));
- timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
- drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
-
- atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
- mod_timer(&dpu_enc->frame_done_timer,
- jiffies + msecs_to_jiffies(timeout_ms));
-
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
@@ -2183,22 +2172,22 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
memset(&mixer, 0, sizeof(mixer));
/* reset all mixers for this encoder */
- if (phys_enc->hw_ctl->ops.clear_all_blendstages)
- phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+ if (ctl->ops.clear_all_blendstages)
+ ctl->ops.clear_all_blendstages(ctl);
global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
- phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
- if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
- phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
+ if (ctl->ops.update_pending_flush_mixer)
+ ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
- if (phys_enc->hw_ctl->ops.setup_blendstage)
- phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ if (ctl->ops.setup_blendstage)
+ ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
}
}
@@ -2250,7 +2239,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
- phys_enc->hw_ctl->ops.reset(ctl);
+ ctl->ops.reset(ctl);
dpu_encoder_helper_reset_mixers(phys_enc);
@@ -2265,8 +2254,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
/* mark WB flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
- phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ if (ctl->ops.update_pending_flush_wb)
+ ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
} else {
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
@@ -2275,8 +2264,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
PINGPONG_NONE);
/* mark INTF flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
- phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ if (ctl->ops.update_pending_flush_intf)
+ ctl->ops.update_pending_flush_intf(ctl,
dpu_enc->phys_encs[i]->hw_intf->idx);
}
}
@@ -2284,12 +2273,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+ if (dpu_enc->cwb_mask)
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, false);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
- phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ if (ctl->ops.update_pending_flush_merge_3d)
+ ctl->ops.update_pending_flush_merge_3d(ctl,
phys_enc->hw_pp->merge_3d->idx);
}
@@ -2297,9 +2289,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
PINGPONG_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
- phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
- phys_enc->hw_cdm->idx);
+ if (ctl->ops.update_pending_flush_cdm)
+ ctl->ops.update_pending_flush_cdm(ctl,
+ phys_enc->hw_cdm->idx);
}
if (dpu_enc->dsc) {
@@ -2310,6 +2302,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+ intf_cfg.cwb = dpu_enc->cwb_mask;
if (phys_enc->hw_intf)
intf_cfg.intf = phys_enc->hw_intf->idx;
@@ -2327,6 +2320,68 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ struct dpu_hw_cwb *hw_cwb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cwb_setup_cfg cwb_cfg;
+
+ struct dpu_kms *dpu_kms;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC];
+ int num_pp;
+
+ if (!phys_enc->hw_wb)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+
+ if (!phys_enc->hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n",
+ phys_enc->hw_wb->idx - WB_0);
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ phys_enc->parent->crtc,
+ DPU_HW_BLK_PINGPONG, rt_pp_list,
+ ARRAY_SIZE(rt_pp_list));
+
+ if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) {
+ DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp);
+ return;
+ }
+
+ /*
+ * The CWB mux supports using LM or DSPP as tap points. For now,
+	 * always use the LM tap point.
+ */
+ cwb_cfg.input = INPUT_MODE_LM_OUT;
+
+ for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_cwb = dpu_enc->hw_cwb[i];
+ if (!hw_cwb)
+ continue;
+
+ if (enable) {
+ struct dpu_hw_pingpong *hw_pp =
+ to_dpu_hw_pingpong(rt_pp_list[i]);
+ cwb_cfg.pp_idx = hw_pp->idx;
+ } else {
+ cwb_cfg.pp_idx = PINGPONG_NONE;
+ }
+
+ hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg);
+
+ if (hw_ctl->ops.update_pending_flush_cwb)
+ hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx);
+ }
+}
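
/*
 * Usage as wired up elsewhere in this patch: the writeback setup path
 * enables the mux and phys cleanup parks every in-use mux again, i.e.
 *
 *	dpu_encoder_helper_phys_setup_cwb(phys_enc, true);  // wb setup
 *	if (dpu_enc->cwb_mask)
 *		dpu_encoder_helper_phys_setup_cwb(phys_enc, false);  // cleanup
 *
 * with the disable path binding each mux to PINGPONG_NONE.
 */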
+
/**
* dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
* @phys_enc: Pointer to physical encoder
@@ -2513,6 +2568,38 @@ static int dpu_encoder_virt_add_phys_encs(
return 0;
}
+/**
+ * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder
+ * @drm_enc: DRM encoder pointer
+ * Returns: possible_clones mask
+ */
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc)
+{
+ struct drm_encoder *curr;
+ int type = drm_enc->encoder_type;
+ uint32_t clone_mask = drm_encoder_mask(drm_enc);
+
+ /*
+ * Set writeback as possible clones of real-time DSI encoders and vice
+ * versa
+ *
+ * Writeback encoders can't be clones of each other and DSI
+ * encoders can't be clones of each other.
+ *
+ * TODO: Add DP encoders as valid possible clones for writeback encoders
+ * (and vice versa) once concurrent writeback has been validated for DP
+ */
+ drm_for_each_encoder(curr, drm_enc->dev) {
+ if ((type == DRM_MODE_ENCODER_VIRTUAL &&
+ curr->encoder_type == DRM_MODE_ENCODER_DSI) ||
+ (type == DRM_MODE_ENCODER_DSI &&
+ curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL))
+ clone_mask |= drm_encoder_mask(curr);
+ }
+
+ return clone_mask;
+}
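
/*
 * Sketch of the resulting mask, relying on drm_encoder_mask() being
 * BIT(encoder->index) in DRM core: with DSI encoders at indices 0 and
 * 1 and one writeback (VIRTUAL) encoder at index 2, each DSI encoder
 * reports BIT(self) | BIT(2) and the writeback encoder reports
 * BIT(2) | BIT(0) | BIT(1); no DSI-DSI or WB-WB cloning is advertised.
 */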
+
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info)
@@ -2630,7 +2717,6 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
.atomic_disable = dpu_encoder_virt_atomic_disable,
.atomic_enable = dpu_encoder_virt_atomic_enable,
- .atomic_check = dpu_encoder_virt_atomic_check,
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
@@ -2789,6 +2875,18 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
}
/**
+ * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->cwb_mask;
+}
+
+/**
* dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
* This helper function is used by physical encoder to get DSC blocks mask
* used for this encoder.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 92b5ee390788..ca1ca2e51d7e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -60,6 +60,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc);
+
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info);
@@ -80,6 +82,13 @@ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode);
+
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state);
+
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
@@ -88,4 +97,5 @@ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 63f09857025c..61b22d949454 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*/
@@ -309,6 +309,8 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
return BLEND_3D_NONE;
}
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc);
+
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc);
@@ -331,6 +333,9 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable);
+
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index e9bbccc44dad..da9994a79ca2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
@@ -261,7 +262,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
if (enable) {
if (phys_enc->vblank_refcount == 0)
@@ -285,7 +286,7 @@ end:
DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, ret,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
}
return ret;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4c006ec74575..849fea580a4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -68,7 +68,7 @@ static void dpu_encoder_phys_wb_set_ot_limit(
ot_params.num = hw_wb->idx - WB_0;
ot_params.width = phys_enc->cached_mode.hdisplay;
ot_params.height = phys_enc->cached_mode.vdisplay;
- ot_params.is_wfd = true;
+ ot_params.is_wfd = !dpu_encoder_helper_get_cwb_mask(phys_enc);
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
ot_params.rd = false;
@@ -111,7 +111,7 @@ static void dpu_encoder_phys_wb_set_qos_remap(
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
qos_params.num = hw_wb->idx - WB_0;
- qos_params.is_rt = false;
+ qos_params.is_rt = dpu_encoder_helper_get_cwb_mask(phys_enc);
DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
qos_params.num,
@@ -174,6 +174,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_cfg *wb_cfg;
+ u32 cdp_usage;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid encoder\n");
@@ -182,6 +183,10 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
hw_wb = phys_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
+ if (dpu_encoder_helper_get_cwb_mask(phys_enc))
+ cdp_usage = DPU_PERF_CDP_USAGE_RT;
+ else
+ cdp_usage = DPU_PERF_CDP_USAGE_NRT;
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->roi.x1 = 0;
@@ -199,7 +204,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
hw_wb->ops.setup_cdp(hw_wb, format,
- perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
+ perf->cdp_cfg[cdp_usage].wr_enable);
}
if (hw_wb->ops.setup_outaddress)
@@ -236,6 +241,7 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
+ intf_cfg.cwb = dpu_encoder_helper_get_cwb_mask(phys_enc);
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
@@ -340,6 +346,8 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB);
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, true);
+
dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0b342c043875..64265ca4656a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -232,37 +232,6 @@ static const u32 rotation_v2_formats[] = {
/* TODO add formats after validation */
};
-static const u32 wb2_formats_rgb[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGBA4444,
- DRM_FORMAT_RGBX4444,
- DRM_FORMAT_XRGB4444,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_BGRA4444,
- DRM_FORMAT_BGRX4444,
- DRM_FORMAT_XBGR4444,
-};
-
static const u32 wb2_formats_rgb_yuv[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
@@ -507,7 +476,14 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
/*************************************************************
* CDM block config
*************************************************************/
-static const struct dpu_cdm_cfg sc7280_cdm = {
+static const struct dpu_cdm_cfg dpu_cdm_1_x_4_x = {
+ .name = "cdm_0",
+ .id = CDM_0,
+ .len = 0x224,
+ .base = 0x79200,
+};
+
+static const struct dpu_cdm_cfg dpu_cdm_5_x = {
.name = "cdm_0",
.id = CDM_0,
.len = 0x228,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index ae1534c49ae0..3f88c3641d4a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -214,7 +214,9 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_
mux_cfg = DPU_REG_READ(c, CDM_MUX);
mux_cfg &= ~0xf;
- if (pp)
+ if (pp >= PINGPONG_CWB_0)
+ mux_cfg |= 0xd;
+ else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 4893f10d6a58..411a7cf088eb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -31,12 +31,14 @@
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_WB_ACTIVE 0x0EC
+#define CTL_CWB_ACTIVE 0x0F0
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_CDM_ACTIVE 0x0F8
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
+#define CTL_CWB_FLUSH 0x10C
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
#define CTL_PERIPH_FLUSH 0x128
@@ -53,6 +55,7 @@
#define PERIPH_IDX 30
#define INTF_IDX 31
#define WB_IDX 16
+#define CWB_IDX 28
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
@@ -110,6 +113,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
ctx->pending_flush_mask = 0x0;
ctx->pending_intf_flush_mask = 0;
ctx->pending_wb_flush_mask = 0;
+ ctx->pending_cwb_flush_mask = 0;
ctx->pending_merge_3d_flush_mask = 0;
ctx->pending_dsc_flush_mask = 0;
ctx->pending_cdm_flush_mask = 0;
@@ -144,6 +148,9 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
if (ctx->pending_flush_mask & BIT(WB_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
ctx->pending_wb_flush_mask);
+ if (ctx->pending_flush_mask & BIT(CWB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
+ ctx->pending_cwb_flush_mask);
if (ctx->pending_flush_mask & BIT(DSPP_IDX))
for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
@@ -310,6 +317,13 @@ static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(WB_IDX);
}
+static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb cwb)
+{
+ ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0);
+ ctx->pending_flush_mask |= BIT(CWB_IDX);
+}
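
/*
 * Worked example of the bookkeeping above, using the offsets from this
 * patch: queueing a flush for CWB_1 does
 *
 *	ctx->pending_cwb_flush_mask |= BIT(CWB_1 - CWB_0);  // BIT(1)
 *	ctx->pending_flush_mask |= BIT(CWB_IDX);            // BIT(28)
 *
 * and trigger_flush_v1 later copies the per-block mask into
 * CTL_CWB_FLUSH (0x10C) before latching the main CTL flush.
 */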
+
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
@@ -547,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 intf_active = 0;
u32 dsc_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
@@ -561,6 +576,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
if (cfg->intf)
@@ -569,12 +585,16 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
+ if (cfg->cwb)
+ cwb_active |= cfg->cwb;
+
if (cfg->dsc)
dsc_active |= cfg->dsc;
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
if (cfg->merge_3d)
@@ -624,6 +644,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
u32 cdm_active;
@@ -651,6 +672,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
+ if (cfg->cwb) {
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
+ cwb_active &= ~cfg->cwb;
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
+ }
+
if (cfg->wb) {
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
wb_active &= ~BIT(cfg->wb - WB_0);
@@ -703,6 +730,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 85c6c835cc87..080a9550a0cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
@@ -42,6 +42,7 @@ struct dpu_hw_stage_cfg {
* @cdm: CDM block used
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
+ * @cwb: CWB BIT masks used
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
@@ -51,6 +52,7 @@ struct dpu_hw_intf_cfg {
enum dpu_ctl_mode_sel intf_mode_sel;
enum dpu_cdm cdm;
int stream_sel;
+ unsigned int cwb;
unsigned int dsc;
};
@@ -115,6 +117,15 @@ struct dpu_hw_ctl_ops {
enum dpu_wb blk);
/**
+ * OR in the given flushbits to the cached pending_(cwb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : concurrent writeback block index
+ */
+ void (*update_pending_flush_cwb)(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb blk);
+
+ /**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -258,6 +269,7 @@ struct dpu_hw_ctl_ops {
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
+ * @pending_cwb_flush_mask: pending CWB flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
* @ops: operation list
@@ -274,6 +286,7 @@ struct dpu_hw_ctl {
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
u32 pending_wb_flush_mask;
+ u32 pending_cwb_flush_mask;
u32 pending_periph_flush_mask;
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index ba7bb05efe9b..8d820cd1b554 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -77,12 +77,14 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_DCWB_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
DPU_HW_BLK_DSC,
DPU_HW_BLK_CDM,
+ DPU_HW_BLK_CWB,
DPU_HW_BLK_MAX,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 97e9cb8c2b09..3305ad0623ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -446,6 +446,19 @@ static void dpu_kms_disable_commit(struct msm_kms *kms)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
+static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+ dpu_crtc_check_mode_changed(old_crtc_state, new_crtc_state);
+
+ return 0;
+}
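
/*
 * Hypothetical invocation sketch (assumed, not shown in this hunk):
 * the msm atomic core would run the hook at the top of atomic_check,
 * roughly
 *
 *	if (kms->funcs->check_mode_changed)
 *		kms->funcs->check_mode_changed(kms, state);
 *
 * so mode_changed is settled before the generic helpers walk encoders
 * and planes.
 */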
+
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -811,8 +824,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
return ret;
num_encoders = 0;
- drm_for_each_encoder(encoder, dev)
+ drm_for_each_encoder(encoder, dev) {
num_encoders++;
+ if (catalog->cwb_count > 0)
+ encoder->possible_clones = dpu_encoder_get_clones(encoder);
+ }
max_crtc_count = min(catalog->mixer_count, num_encoders);
@@ -1062,6 +1078,7 @@ static const struct msm_kms_funcs kms_funcs = {
.irq = dpu_core_irq,
.enable_commit = dpu_kms_enable_commit,
.disable_commit = dpu_kms_disable_commit,
+ .check_mode_changed = dpu_kms_check_mode_changed,
.flush_commit = dpu_kms_flush_commit,
.wait_flush = dpu_kms_wait_flush,
.complete_commit = dpu_kms_complete_commit,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 547cdb2c0c78..a57ec2ec1060 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -124,14 +124,15 @@ struct dpu_global_state {
struct dpu_rm *rm;
- uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
- uint32_t mixer_to_enc_id[LM_MAX - LM_0];
- uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
- uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
- uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
- uint32_t cdm_to_enc_id;
+ uint32_t pingpong_to_crtc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_crtc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_crtc_id[CTL_MAX - CTL_0];
+ uint32_t dspp_to_crtc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_crtc_id[DSC_MAX - DSC_0];
+ uint32_t cdm_to_crtc_id;
uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
+ uint32_t cwb_to_crtc_id[CWB_MAX - CWB_0];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 5baf9df702b8..3efbba425ca6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -22,9 +22,9 @@
static inline bool reserved_by_other(uint32_t *res_map, int idx,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
- return res_map[idx] && res_map[idx] != enc_id;
+ return res_map[idx] && res_map[idx] != crtc_id;
}
/**
@@ -233,13 +233,66 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
return -EINVAL;
}
+static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t crtc_id,
+ struct msm_display_topology *topology)
+{
+ int num_cwb_mux = topology->num_lm, cwb_mux_count = 0;
+ int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0;
+ int cwb_pp_idx[MAX_BLOCKS];
+ int cwb_mux_idx[MAX_BLOCKS];
+
+ /*
+ * Reserve additional dedicated CWB PINGPONG blocks and muxes for each
+ * mixer
+ *
+	 * TODO: add support for reserving resources for platforms with no
+ * PINGPONG_CWB
+ */
+ for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ cwb_mux_count < num_cwb_mux; i++) {
+ for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) {
+ /*
+ * Odd LMs must be assigned to odd CWB muxes and even
+			 * LMs to even CWB muxes.
+ *
+ * Since the RM HW block array index is based on the HW
+ * block ids, we can also use the array index to enforce
+ * the odd/even rule. See dpu_rm_init() for more
+ * information
+ */
+ if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) ||
+ i % 2 != j % 2)
+ continue;
+
+ cwb_mux_idx[cwb_mux_count] = j;
+ cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx;
+ cwb_mux_count++;
+ break;
+ }
+ }
+
+ if (cwb_mux_count != num_cwb_mux) {
+ DPU_ERROR("Unable to reserve all CWB PINGPONGs\n");
+ return -ENAVAIL;
+ }
+
+ for (int i = 0; i < cwb_mux_count; i++) {
+ global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
+ global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id;
+ }
+
+ return 0;
+}
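
/*
 * Worked example of the pairing rule above: with num_cwb_mux == 2 and
 * mixers at array indices 0 and 1, index 0 can only bind an even CWB
 * mux (0 or 2) and index 1 an odd one (1 or 3), so a dual-mixer
 * topology typically lands on {CWB_0, CWB_1} plus the dedicated
 * pingpongs at cwb_pp_start_idx + {0, 1}.
 */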
+
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc_id: crtc id requesting the allocation
* @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
* if lm, and all other hardwired blocks connected to the lm (pp) is
* available and appropriate
@@ -252,14 +305,14 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx,
struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
/* Already reserved? */
- if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
@@ -271,7 +324,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
lm_cfg->pingpong);
return false;
@@ -287,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
lm_cfg->dspp);
return false;
@@ -299,7 +352,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
@@ -323,7 +376,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
- enc_id, i, &pp_idx[lm_count],
+ crtc_id, i, &pp_idx[lm_count],
&dspp_idx[lm_count], topology)) {
continue;
}
@@ -342,7 +395,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
- global_state, enc_id, j,
+ global_state, crtc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
topology)) {
continue;
@@ -359,12 +412,12 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
for (i = 0; i < lm_count; i++) {
- global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
- global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
- global_state->dspp_to_enc_id[dspp_idx[i]] =
- topology->num_dspp ? enc_id : 0;
+ global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
+ global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
+ global_state->dspp_to_crtc_id[dspp_idx[i]] =
+ topology->num_dspp ? crtc_id : 0;
- trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id,
pp_idx[i] + PINGPONG_0);
}
@@ -374,15 +427,25 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int ctl_idx[MAX_BLOCKS];
int i = 0, j, num_ctls;
bool needs_split_display;
- /* each hw_intf needs its own hw_ctrl to program its control path */
- num_ctls = top->num_intf;
+ /*
+ * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
+ * control path.
+ *
+ * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
+ * writeback and real-time encoders must be driven by the same control
+	 * path.
+ */
+ if (top->cwb_enabled)
+ num_ctls = 1;
+ else
+ num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
@@ -393,7 +456,7 @@ static int _dpu_rm_reserve_ctls(
if (!rm->ctl_blks[j])
continue;
- if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
+ if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id))
continue;
ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
@@ -417,8 +480,8 @@ static int _dpu_rm_reserve_ctls(
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
- global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
- trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
+ global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id;
+ trace_dpu_rm_reserve_ctls(i + CTL_0, crtc_id);
}
return 0;
@@ -426,12 +489,12 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
int start,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
- if (global_state->pingpong_to_enc_id[i] == enc_id)
+ if (global_state->pingpong_to_crtc_id[i] == crtc_id)
return i;
}
@@ -452,7 +515,7 @@ static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -465,10 +528,10 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (!rm->dsc_blks[dsc_idx])
continue;
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -476,7 +539,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (ret)
return -ENAVAIL;
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
num_dsc++;
pp_idx++;
}
@@ -492,7 +555,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -507,11 +570,11 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
		/* consecutive dsc indices to be paired */
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
- reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) ||
+ reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -521,7 +584,7 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -531,8 +594,8 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
- global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
+ global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id;
num_dsc += 2;
pp_idx++; /* start for next pair */
}
@@ -548,11 +611,9 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
- uint32_t enc_id = enc->base.id;
-
if (!top->num_dsc || !top->num_intf)
return 0;
@@ -568,16 +629,17 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
/* num_dsc should be either 1, 2 or 4 */
if (top->num_dsc > top->num_intf) /* merge mode */
- return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top);
else
- return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top);
return 0;
}
static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ uint32_t crtc_id,
+ int num_cdm)
{
/* try allocating only one CDM block */
if (!rm->cdm_blk) {
@@ -585,12 +647,17 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
return -EIO;
}
- if (global_state->cdm_to_enc_id) {
+ if (num_cdm > 1) {
+ DPU_ERROR("More than 1 INTF requesting CDM\n");
+ return -EINVAL;
+ }
+
+ if (global_state->cdm_to_crtc_id) {
DPU_ERROR("CDM_0 is already allocated\n");
return -EIO;
}
- global_state->cdm_to_enc_id = enc->base.id;
+ global_state->cdm_to_crtc_id = crtc_id;
return 0;
}
@@ -598,30 +665,37 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
+ ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ if (topology->cwb_enabled) {
+ ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state,
+ crtc_id, topology);
+ if (ret)
+ return ret;
+ }
+
+ ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id,
topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
+ ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology);
if (ret)
return ret;
- if (topology->needs_cdm) {
- ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
+ if (topology->num_cdm > 0) {
+ ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
return ret;
@@ -632,12 +706,12 @@ static int _dpu_rm_make_reservation(
}
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = 0; i < cnt; i++) {
- if (res_mapping[i] == enc_id)
+ if (res_mapping[i] == crtc_id)
res_mapping[i] = 0;
}
}
@@ -646,23 +720,27 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
 * dpu_rm_release - Given the CRTC for the display chain, release any
* HW blocks previously reserved for that use case.
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
+ * @crtc: DRM CRTC handle
* @return: 0 on Success otherwise -ERROR
*/
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ struct drm_crtc *crtc)
{
- _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
- ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
- ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
- ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
- ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
- ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
+ uint32_t crtc_id = crtc->base.id;
+
+ _dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id,
+ ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_crtc_id,
+ ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_crtc_id,
+ ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_crtc_id,
+ ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dspp_to_crtc_id,
+ ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id);
+ _dpu_rm_clear_mapping(global_state->cwb_to_crtc_id,
+ ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id);
}
/**
@@ -674,42 +752,33 @@ void dpu_rm_release(struct dpu_global_state *global_state,
* HW Reservations should be released via dpu_rm_release_hw.
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @crtc: DRM CRTC handle
* @topology: Pointer to topology info for the display
* @return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology)
{
int ret;
- /* Check if this is just a page-flip */
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- return 0;
-
if (IS_ERR(global_state)) {
DPU_ERROR("failed to global state\n");
return PTR_ERR(global_state);
}
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
- enc->base.id, crtc_state->crtc->base.id);
+ DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id);
DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
topology->num_lm, topology->num_dsc,
topology->num_intf);
- ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
+ ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
-
-
return ret;
}
@@ -800,50 +869,57 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
* assigned to this encoder
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc: DRM CRTC handle
* @type: resource type to return data for
* @blks: pointer to the array to be filled by HW resources
* @blks_size: size of the @blks array
*/
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
+ uint32_t crtc_id = crtc->base.id;
struct dpu_hw_blk **hw_blks;
- uint32_t *hw_to_enc_id;
+ uint32_t *hw_to_crtc_id;
int i, num_blks, max_blks;
switch (type) {
case DPU_HW_BLK_PINGPONG:
+ case DPU_HW_BLK_DCWB_PINGPONG:
hw_blks = rm->pingpong_blks;
- hw_to_enc_id = global_state->pingpong_to_enc_id;
+ hw_to_crtc_id = global_state->pingpong_to_crtc_id;
max_blks = ARRAY_SIZE(rm->pingpong_blks);
break;
case DPU_HW_BLK_LM:
hw_blks = rm->mixer_blks;
- hw_to_enc_id = global_state->mixer_to_enc_id;
+ hw_to_crtc_id = global_state->mixer_to_crtc_id;
max_blks = ARRAY_SIZE(rm->mixer_blks);
break;
case DPU_HW_BLK_CTL:
hw_blks = rm->ctl_blks;
- hw_to_enc_id = global_state->ctl_to_enc_id;
+ hw_to_crtc_id = global_state->ctl_to_crtc_id;
max_blks = ARRAY_SIZE(rm->ctl_blks);
break;
case DPU_HW_BLK_DSPP:
hw_blks = rm->dspp_blks;
- hw_to_enc_id = global_state->dspp_to_enc_id;
+ hw_to_crtc_id = global_state->dspp_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dspp_blks);
break;
case DPU_HW_BLK_DSC:
hw_blks = rm->dsc_blks;
- hw_to_enc_id = global_state->dsc_to_enc_id;
+ hw_to_crtc_id = global_state->dsc_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dsc_blks);
break;
case DPU_HW_BLK_CDM:
hw_blks = &rm->cdm_blk;
- hw_to_enc_id = &global_state->cdm_to_enc_id;
+ hw_to_crtc_id = &global_state->cdm_to_crtc_id;
max_blks = 1;
break;
+ case DPU_HW_BLK_CWB:
+ hw_blks = rm->cwb_blks;
+ hw_to_crtc_id = global_state->cwb_to_crtc_id;
+ max_blks = ARRAY_SIZE(rm->cwb_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
@@ -851,17 +927,31 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
num_blks = 0;
for (i = 0; i < max_blks; i++) {
- if (hw_to_enc_id[i] != enc_id)
+ if (hw_to_crtc_id[i] != crtc_id)
continue;
+ if (type == DPU_HW_BLK_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx >= PINGPONG_CWB_0)
+ continue;
+ }
+
+ if (type == DPU_HW_BLK_DCWB_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx < PINGPONG_CWB_0)
+ continue;
+ }
+
if (num_blks == blks_size) {
- DPU_ERROR("More than %d resources assigned to enc %d\n",
- blks_size, enc_id);
+ DPU_ERROR("More than %d resources assigned to crtc %d\n",
+ blks_size, crtc_id);
break;
}
if (!hw_blks[i]) {
- DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
- type, enc_id);
+ DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
+ type, crtc_id);
break;
}
blks[num_blks++] = hw_blks[i];
@@ -896,38 +986,38 @@ void dpu_rm_print_state(struct drm_printer *p,
drm_puts(p, "resource mapping:\n");
drm_puts(p, "\tpingpong=");
- for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
- global_state->pingpong_to_enc_id[i]);
+ global_state->pingpong_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tmixer=");
- for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->mixer_blks[i],
- global_state->mixer_to_enc_id[i]);
+ global_state->mixer_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tctl=");
- for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->ctl_blks[i],
- global_state->ctl_to_enc_id[i]);
+ global_state->ctl_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdspp=");
- for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dspp_blks[i],
- global_state->dspp_to_enc_id[i]);
+ global_state->dspp_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdsc=");
- for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dsc_blks[i],
- global_state->dsc_to_enc_id[i]);
+ global_state->dsc_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tcdm=");
dpu_rm_print_state_helper(p, rm->cdm_blk,
- global_state->cdm_to_enc_id);
+ global_state->cdm_to_crtc_id);
drm_puts(p, "\n");
drm_puts(p, "\tsspp=");
@@ -936,4 +1026,10 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
global_state->sspp_to_crtc_id[i]);
drm_puts(p, "\n");
+
+ drm_puts(p, "\tcwb=");
+ for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++)
+ dpu_rm_print_state_helper(p, rm->cwb_blks[i],
+ global_state->cwb_to_crtc_id[i]);
+ drm_puts(p, "\n");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 99bd594ee0d1..a19dbdb1b6f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -51,14 +51,17 @@ struct dpu_rm_sspp_requirements {
* @num_intf: number of interfaces the panel is mounted on
* @num_dspp: number of dspp blocks used
* @num_dsc: number of Display Stream Compression (DSC) blocks used
- * @needs_cdm: indicates whether cdm block is needed for this display topology
+ * @num_cdm: number of outputs requesting the CDM block for
+ * this display topology
+ * @cwb_enabled: indicates whether CWB is enabled for this display topology
*/
struct msm_display_topology {
u32 num_lm;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
- bool needs_cdm;
+ int num_cdm;
+ bool cwb_enabled;
};
int dpu_rm_init(struct drm_device *dev,
@@ -69,12 +72,11 @@ int dpu_rm_init(struct drm_device *dev,
int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology);
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc);
+ struct drm_crtc *crtc);
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -85,7 +87,7 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
struct drm_crtc *crtc);
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
void dpu_rm_print_state(struct drm_printer *p,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 666de99a46a5..fc183fe37f56 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include "mdp5_kms.h"
#include "mdp5_ctl.h"
@@ -233,7 +234,7 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
return -EINVAL;
ctl->encoder_enabled = enabled;
- DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
+ DBG("intf_%d: %s", intf->num, str_on_off(enabled));
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
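
str_on_off() comes from <linux/string_choices.h> and simply maps a bool to "on"/"off", letting call sites drop the repeated ternaries. A minimal user-space equivalent of the helper:

```c
/*
 * User-space equivalent of str_on_off() from <linux/string_choices.h>;
 * the kernel helper behaves like this one-liner.
 */
#include <stdbool.h>
#include <stdio.h>

static inline const char *str_on_off(bool v)
{
	return v ? "on" : "off";
}

int main(void)
{
	bool enabled = true;

	printf("intf_0: %s\n", str_on_off(enabled));
	return 0;
}
```
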
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 9c463ae2f8fa..d8633a596f8d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -11,6 +11,7 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_fixed.h>
@@ -1366,9 +1367,9 @@ int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1385,9 +1386,9 @@ void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
@@ -1416,9 +1417,9 @@ static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1435,9 +1436,9 @@ static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disabled link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a542d2781a09..bbc47d86ae9e 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -11,6 +11,7 @@
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/drm_edid.h>
@@ -343,8 +344,7 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
{
if ((hpd && dp->msm_dp_display.link_ready) ||
(!hpd && !dp->msm_dp_display.link_ready)) {
- drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
- (hpd ? "on" : "off"));
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n", str_on_off(hpd));
return 0;
}
@@ -367,6 +367,19 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
return 0;
}
+static void msm_dp_display_lttpr_init(struct msm_dp_display_private *dp)
+{
+ u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ int rc;
+
+ if (drm_dp_read_lttpr_common_caps(dp->aux, dp->panel->dpcd, lttpr_caps))
+ return;
+
+ rc = drm_dp_lttpr_init(dp->aux, drm_dp_lttpr_count(lttpr_caps));
+ if (rc)
+ DRM_ERROR("failed to set LTTPR transparency mode, rc=%d\n", rc);
+}
+
static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
{
struct drm_connector *connector = dp->msm_dp_display.connector;
@@ -377,6 +390,8 @@ static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
if (rc)
goto end;
+ msm_dp_display_lttpr_init(dp);
+
msm_dp_link_process_request(dp->link);
if (!dp->msm_dp_display.is_edp)
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 022b3e815cf3..cca57e56c906 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
@@ -25,7 +26,7 @@ static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
return (dp->link_ready) ? connector_status_connected :
connector_status_disconnected;
@@ -41,7 +42,7 @@ static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge,
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
/*
* There is no protection in the DRM framework to check if the display
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 007311c21fda..4d75529c0e85 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -179,18 +179,18 @@ struct msm_dsi_host {
int irq;
};
-
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
return readl(msm_host->ctrl_base + reg);
}
+
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
writel(data, msm_host->ctrl_base + reg);
}
-static const struct msm_dsi_cfg_handler *dsi_get_config(
- struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *
+dsi_get_config(struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
@@ -200,7 +200,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
- pr_err("%s: cannot get interface clock\n", __func__);
+ dev_err_probe(dev, PTR_ERR(ahb_clk), "%s: cannot get interface clock\n",
+ __func__);
goto exit;
}
@@ -208,13 +209,13 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ret = clk_prepare_enable(ahb_clk);
if (ret) {
- pr_err("%s: unable to enable ahb_clk\n", __func__);
+ dev_err_probe(dev, ret, "%s: unable to enable ahb_clk\n", __func__);
goto runtime_put;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
if (ret) {
- pr_err("%s: Invalid version\n", __func__);
+ dev_err_probe(dev, ret, "%s: Invalid version\n", __func__);
goto disable_clks;
}
@@ -281,42 +282,31 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->num_bus_clks = cfg->num_bus_clks;
ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
- goto exit;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Unable to get clocks\n");
/* get link and source clocks */
msm_host->byte_clk = msm_clk_get(pdev, "byte");
- if (IS_ERR(msm_host->byte_clk)) {
- ret = PTR_ERR(msm_host->byte_clk);
- pr_err("%s: can't find dsi_byte clock. ret=%d\n",
- __func__, ret);
- msm_host->byte_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->byte_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->byte_clk),
+ "%s: can't find dsi_byte clock\n",
+ __func__);
msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(msm_host->pixel_clk)) {
- ret = PTR_ERR(msm_host->pixel_clk);
- pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
- __func__, ret);
- msm_host->pixel_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->pixel_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->pixel_clk),
+ "%s: can't find dsi_pixel clock\n",
+ __func__);
msm_host->esc_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(msm_host->esc_clk)) {
- ret = PTR_ERR(msm_host->esc_clk);
- pr_err("%s: can't find dsi_esc clock. ret=%d\n",
- __func__, ret);
- msm_host->esc_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->esc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->esc_clk),
+ "%s: can't find dsi_esc clock\n",
+ __func__);
if (cfg_hnd->ops->clk_init_ver)
ret = cfg_hnd->ops->clk_init_ver(msm_host);
-exit:
+
return ret;
}
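
The error paths above collapse into single `return dev_err_probe(...)` statements: the helper logs the message (suppressing it for probe deferral) and passes the errno back through. A user-space sketch of the idiom, with EPROBE_DEFER's kernel value hard-coded since it is not a userspace errno:

```c
/*
 * Sketch of the dev_err_probe() idiom (user-space, so EPROBE_DEFER's
 * kernel value is hard-coded and the device argument is dropped): log
 * unless the error is a probe deferral, and hand the errno back so the
 * caller can 'return dev_err_probe(...)' in one line.
 */
#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517 /* kernel-internal errno, defined for the sketch */

static int dev_err_probe(int err, const char *msg)
{
	if (err != -EPROBE_DEFER)
		fprintf(stderr, "error %d: %s\n", err, msg);
	return err;
}

static int get_clock(const char *name)
{
	(void)name;
	return -ENOENT; /* pretend the clock lookup failed */
}

int main(void)
{
	int ret = get_clock("byte");

	if (ret < 0)
		return -dev_err_probe(ret, "can't find dsi_byte clock");
	return 0;
}
```
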
@@ -380,7 +370,6 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
return 0;
}
-
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -598,7 +587,6 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
-
}
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
@@ -687,8 +675,8 @@ static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
return NON_BURST_SYNCH_EVENT;
}
-static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_vid_dst_format
+dsi_get_vid_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
@@ -699,8 +687,8 @@ static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
}
}
-static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_cmd_dst_format
+dsi_get_cmd_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
@@ -846,7 +834,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
-static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode)
{
struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, reg_ctrl, reg_ctrl2;
@@ -858,7 +846,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
/* first calculate dsc parameters and then program
* compress mode registers
*/
- slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
+ slice_per_intf = dsc->slice_count;
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
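
Dropping msm_dsc_get_slices_per_intf() relies on dsc->slice_count already describing the slices across the interface, i.e. DIV_ROUND_UP(hdisplay, slice_width) for a validly sliced mode. A quick check under illustrative values:

```c
/*
 * Why dsc->slice_count can replace the removed helper: for a validly
 * sliced mode, DIV_ROUND_UP(hdisplay, slice_width) equals slice_count.
 * The values below are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int hdisplay = 1080, slice_width = 540, slice_count = 2;

	assert(DIV_ROUND_UP(hdisplay, slice_width) == slice_count);
	printf("slices per interface: %u\n", slice_count);
	return 0;
}
```
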
@@ -991,7 +979,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, false);
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -1012,7 +1000,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, true);
/* image data and 1 byte write_memory_start cmd */
if (!msm_host->dsc)
@@ -1292,14 +1280,15 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 1)) {
*data = buf[1]; /* strip out dcs type */
return 1;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
/*
@@ -1308,15 +1297,16 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 2)) {
data[0] = buf[1]; /* strip out dcs type */
data[1] = buf[2];
return 2;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
@@ -1376,8 +1366,9 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
ret = -ETIMEDOUT;
else
ret = len;
- } else
+ } else {
ret = len;
+ }
return ret;
}
@@ -1445,11 +1436,12 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
- /* for video mode, do not send cmds more than
- * one pixel line, since it only transmit it
- * during BLLP.
- */
- /* TODO: if the command is sent in LP mode, the bit rate is only
+ /*
+ * for video mode, do not send cmds spanning more than
+ * one pixel line, since they are only transmitted
+ * during BLLP.
+ *
+ * TODO: if the command is sent in LP mode, the bit rate is only
* half of esc clk rate. In this case, if the video is already
* actively streaming, we need to check more carefully if the
* command can be fit into one BLLP.
@@ -1767,8 +1759,20 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
return -EINVAL;
}
- if (dsc->bits_per_component != 8) {
- DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
+ switch (dsc->bits_per_component) {
+ case 8:
+ case 10:
+ case 12:
+ /*
+ * Only 8, 10, and 12 bpc are supported for DSC 1.1 block.
+ * If additional bpc values need to be supported, update
+ * this guard with the appropriate DSC version verification.
+ */
+ break;
+ default:
+ DRM_DEV_ERROR(&msm_host->pdev->dev,
+ "Unsupported bits_per_component value: %d\n",
+ dsc->bits_per_component);
return -EOPNOTSUPP;
}
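
The new guard accepts only the bpc values the DSC 1.1 block can handle and rejects everything else with -EOPNOTSUPP. The same check as a standalone function:

```c
/*
 * The bits_per_component guard as a standalone function; EOPNOTSUPP
 * comes from <errno.h> here rather than kernel headers.
 */
#include <errno.h>
#include <stdio.h>

static int check_dsc_bpc(int bpc)
{
	switch (bpc) {
	case 8:
	case 10:
	case 12:
		/* accepted by the DSC 1.1 block */
		return 0;
	default:
		fprintf(stderr, "Unsupported bits_per_component: %d\n", bpc);
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	printf("bpc=10 -> %d, bpc=9 -> %d\n",
	       check_dsc_bpc(10), check_dsc_bpc(9));
	return 0;
}
```
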
@@ -1779,7 +1783,7 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
drm_dsc_set_const_params(dsc);
drm_dsc_set_rc_buf_thresh(dsc);
- /* handle only bpp = bpc = 8, pre-SCR panels */
+ /* DPU supports only pre-SCR panels */
ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
if (ret) {
DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
@@ -1827,8 +1831,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
__func__, ret);
goto err;
}
- if (!ret)
+ if (!ret) {
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
+ if (!msm_dsi->te_source) {
+ DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
ret = 0;
if (of_property_present(np, "syscon-sfpb")) {
@@ -1874,39 +1885,35 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
- if (!msm_host) {
+ if (!msm_host)
return -ENOMEM;
- }
msm_host->pdev = pdev;
msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
- if (ret) {
- pr_err("%s: failed to parse dt\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: failed to parse dt\n",
+ __func__);
msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
- if (IS_ERR(msm_host->ctrl_base)) {
- pr_err("%s: unable to map Dsi ctrl base\n", __func__);
- return PTR_ERR(msm_host->ctrl_base);
- }
+ if (IS_ERR(msm_host->ctrl_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->ctrl_base),
+ "%s: unable to map Dsi ctrl base\n", __func__);
pm_runtime_enable(&pdev->dev);
msm_host->cfg_hnd = dsi_get_config(msm_host);
- if (!msm_host->cfg_hnd) {
- pr_err("%s: get config failed\n", __func__);
- return -EINVAL;
- }
+ if (!msm_host->cfg_hnd)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "%s: get config failed\n", __func__);
cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
- if (msm_host->id < 0) {
- pr_err("%s: unable to identify DSI host index\n", __func__);
- return msm_host->id;
- }
+ if (msm_host->id < 0)
+ return dev_err_probe(&pdev->dev, msm_host->id,
+ "%s: unable to identify DSI host index\n",
+ __func__);
/* fixup base address by io offset */
msm_host->ctrl_base += cfg->io_offset;
@@ -1918,42 +1925,32 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
return ret;
ret = dsi_clk_init(msm_host);
- if (ret) {
- pr_err("%s: unable to initialize dsi clks\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: unable to initialize dsi clks\n", __func__);
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
- if (!msm_host->rx_buf) {
- pr_err("%s: alloc rx temp buf failed\n", __func__);
+ if (!msm_host->rx_buf)
return -ENOMEM;
- }
ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
- if (ret && ret != -ENODEV) {
- dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- return ret;
- }
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&pdev->dev, ret, "invalid OPP table in device tree\n");
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (!msm_host->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
- }
+ if (!msm_host->irq)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get irq\n");
/* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
- msm_host->irq, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to request IRQ%u\n",
+ msm_host->irq);
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index a210b7c9e5ca..4fabb01345aa 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id)
int ret;
if (!IS_BONDED_DSI()) {
+ /*
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * otherwise program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+
ret = msm_dsi_host_register(msm_dsi->host);
if (ret)
return ret;
-
- msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
} else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi;
+
+ /*
+ * PLL0 drives both DSI link clocks in bonded DSI mode.
+ *
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * otherwise program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+ MSM_DSI_PHY_MASTER);
+ msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+ MSM_DSI_PHY_SLAVE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
+
/* Register slave host first, so that slave DSI device
* has a chance to probe, and do not block the master
* DSI device's probe.
@@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id)
ret = msm_dsi_host_register(master_link_dsi->host);
if (ret)
return ret;
-
- /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */
- msm_dsi_phy_set_usecase(clk_master_dsi->phy,
- MSM_DSI_PHY_MASTER);
- msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
- MSM_DSI_PHY_SLAVE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
- msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
return 0;
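
The reorder matters because msm_dsi_host_register() programs the PLL source mux from whatever usecase is current, so the usecase must be latched first. A toy sequence showing the dependency (function names match the driver, bodies are stubs):

```c
/*
 * Toy sequence showing the ordering dependency: host registration
 * programs the PLL source mux from whatever usecase is current, so the
 * usecase must be set first. Function names match the driver, bodies
 * are stubs.
 */
#include <stdio.h>

enum usecase { STANDALONE, MASTER, SLAVE };

static enum usecase phy_usecase = STANDALONE;

static void msm_dsi_phy_set_usecase(enum usecase uc)
{
	phy_usecase = uc;
}

static void msm_dsi_host_register(void)
{
	/* side effect: mux source derived from the usecase set above */
	printf("PLL source mux programmed for usecase %d\n", phy_usecase);
}

int main(void)
{
	msm_dsi_phy_set_usecase(MASTER); /* must happen before register */
	msm_dsi_host_register();
	return 0;
}
```
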
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 8985818bb2e0..1925418d9999 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -6,6 +6,7 @@
#ifndef __DSI_PHY_H__
#define __DSI_PHY_H__
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
@@ -84,9 +85,7 @@ struct msm_dsi_dphy_timing {
u8 hs_halfbyte_en_ckln;
};
-#define DSI_BYTE_PLL_CLK 0
-#define DSI_PIXEL_PLL_CLK 1
-#define NUM_PROVIDED_CLKS 2
+#define NUM_PROVIDED_CLKS (DSI_PIXEL_PLL_CLK + 1)
#define DSI_LANE_MAX 5
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 677c62571811..9812b4d69197 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 2c3cbe0f2870..3a1c8ece6657 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 1383e3a4e050..90348a2af3e9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 5311ab7f3c70..f3643320ff2f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 798168180c1a..a92decbee5b5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -3,6 +3,8 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
@@ -305,7 +307,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
writel(pll->phy->cphy_mode ? 0x00 : 0x10,
base + REG_DSI_7nm_PHY_PLL_CMODE_1);
writel(config->pll_clock_inverters,
- base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS);
+ base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -572,11 +574,11 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
- cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
- cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+ cached->bit_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK, cmn_clk_cfg0);
+ cached->pix_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK, cmn_clk_cfg0);
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- cached->pll_mux = cmn_clk_cfg1 & 0x3;
+ cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1);
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
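
FIELD_GET() from <linux/bitfield.h> replaces the open-coded mask-and-shift: it extracts the field selected by a mask and shifts it down by the mask's lowest set bit. A user-space rendering of the same extraction (the masks mirror the two 4-bit divider fields in CMN_CLK_CFG0; the register value is made up):

```c
/*
 * User-space rendering of FIELD_GET() from <linux/bitfield.h>: mask
 * the register and divide by the mask's lowest set bit to shift the
 * field down. Mask names and the register value are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

#define DIV_CTRL_3_0_MASK 0x0000000fu
#define DIV_CTRL_7_4_MASK 0x000000f0u

int main(void)
{
	uint32_t cmn_clk_cfg0 = 0x53;

	printf("bit_clk_div=%u pix_clk_div=%u\n",
	       (unsigned int)FIELD_GET(DIV_CTRL_3_0_MASK, cmn_clk_cfg0),
	       (unsigned int)FIELD_GET(DIV_CTRL_7_4_MASK, cmn_clk_cfg0));
	return 0;
}
```
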
@@ -598,7 +600,8 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_pll_cmn_clk_cfg0_write(pll_7nm,
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
- dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@@ -736,11 +739,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
* don't register a pclk_mux clock and just use post_out_div instead
*/
if (pll_7nm->phy->cphy_mode) {
- u32 data;
-
- data = readl(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data | 3, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL(3));
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 37b3809c6bdd..248541ff4492 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -12,8 +12,8 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
+#include <drm/display/drm_hdmi_state_helper.h>
-#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -24,7 +24,7 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
spin_lock_irqsave(&hdmi->reg_lock, flags);
if (power_on) {
ctrl |= HDMI_CTRL_ENABLE;
- if (!hdmi->hdmi_mode) {
+ if (!hdmi->connector->display_info.is_hdmi) {
ctrl |= HDMI_CTRL_HDMI;
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
ctrl &= ~HDMI_CTRL_HDMI;
@@ -165,8 +165,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->dev = dev;
hdmi->encoder = encoder;
- hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
-
ret = msm_hdmi_bridge_init(hdmi);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
@@ -246,111 +244,6 @@ static const struct hdmi_platform_config hdmi_tx_8974_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
-/*
- * HDMI audio codec callbacks
- */
-static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
- unsigned int chan;
- unsigned int channel_allocation = 0;
- unsigned int rate;
- unsigned int level_shift = 0; /* 0dB */
- bool down_mix = false;
-
- DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
- params->sample_width, params->cea.channels);
-
- switch (params->cea.channels) {
- case 2:
- /* FR and FL speakers */
- channel_allocation = 0;
- chan = MSM_HDMI_AUDIO_CHANNEL_2;
- break;
- case 4:
- /* FC, LFE, FR and FL speakers */
- channel_allocation = 0x3;
- chan = MSM_HDMI_AUDIO_CHANNEL_4;
- break;
- case 6:
- /* RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x0B;
- chan = MSM_HDMI_AUDIO_CHANNEL_6;
- break;
- case 8:
- /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x1F;
- chan = MSM_HDMI_AUDIO_CHANNEL_8;
- break;
- default:
- return -EINVAL;
- }
-
- switch (params->sample_rate) {
- case 32000:
- rate = HDMI_SAMPLE_RATE_32KHZ;
- break;
- case 44100:
- rate = HDMI_SAMPLE_RATE_44_1KHZ;
- break;
- case 48000:
- rate = HDMI_SAMPLE_RATE_48KHZ;
- break;
- case 88200:
- rate = HDMI_SAMPLE_RATE_88_2KHZ;
- break;
- case 96000:
- rate = HDMI_SAMPLE_RATE_96KHZ;
- break;
- case 176400:
- rate = HDMI_SAMPLE_RATE_176_4KHZ;
- break;
- case 192000:
- rate = HDMI_SAMPLE_RATE_192KHZ;
- break;
- default:
- DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
- params->sample_rate);
- return -EINVAL;
- }
-
- msm_hdmi_audio_set_sample_rate(hdmi, rate);
- msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
- level_shift, down_mix);
-
- return 0;
-}
-
-static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
-
- msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
-}
-
-static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
- .hw_params = msm_hdmi_audio_hw_params,
- .audio_shutdown = msm_hdmi_audio_shutdown,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_hdmi_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
-{
- hdmi->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
-}
-
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
@@ -362,12 +255,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
return err;
priv->hdmi = hdmi;
- err = msm_hdmi_register_audio_driver(hdmi, dev);
- if (err) {
- DRM_ERROR("Failed to attach an audio codec %d\n", err);
- hdmi->audio_pdev = NULL;
- }
-
return 0;
}
@@ -377,9 +264,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
- if (priv->hdmi->audio_pdev)
- platform_device_unregister(priv->hdmi->audio_pdev);
-
if (priv->hdmi->bridge)
msm_hdmi_hpd_disable(priv->hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a62d2aedfbb7..a5f481c39277 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -24,8 +24,8 @@ struct hdmi_platform_config;
struct hdmi_audio {
bool enabled;
- struct hdmi_audio_infoframe infoframe;
int rate;
+ int channels;
};
struct hdmi_hdcp_ctrl;
@@ -33,7 +33,6 @@ struct hdmi_hdcp_ctrl;
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
- struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
@@ -67,8 +66,6 @@ struct hdmi {
/* the encoder we are hooked to (outside of hdmi block) */
struct drm_encoder *encoder;
- bool hdmi_mode; /* are we in hdmi mode? */
-
int irq;
struct workqueue_struct *workq;
@@ -207,26 +204,16 @@ static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
/*
* audio:
*/
-/* Supported HDMI Audio channels and rates */
-#define MSM_HDMI_AUDIO_CHANNEL_2 0
-#define MSM_HDMI_AUDIO_CHANNEL_4 1
-#define MSM_HDMI_AUDIO_CHANNEL_6 2
-#define MSM_HDMI_AUDIO_CHANNEL_8 3
-
-#define HDMI_SAMPLE_RATE_32KHZ 0
-#define HDMI_SAMPLE_RATE_44_1KHZ 1
-#define HDMI_SAMPLE_RATE_48KHZ 2
-#define HDMI_SAMPLE_RATE_88_2KHZ 3
-#define HDMI_SAMPLE_RATE_96KHZ 4
-#define HDMI_SAMPLE_RATE_176_4KHZ 5
-#define HDMI_SAMPLE_RATE_192KHZ 6
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
int msm_hdmi_audio_update(struct hdmi *hdmi);
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix);
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
-
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
/*
* hdmi bridge:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 4c2058c4adc1..8bb975e82c17 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,11 +4,13 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include <linux/hdmi.h>
-#include "hdmi.h"
-/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
-static int nchannels[] = { 2, 4, 6, 8 };
+#include <sound/hdmi-codec.h>
+
+#include "hdmi.h"
/* Supported HDMI Audio sample rates */
#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
@@ -74,16 +76,17 @@ static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- struct hdmi_audio_infoframe *info = &audio->infoframe;
const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
- uint32_t infofrm_ctrl, audio_config;
+ uint32_t audio_config;
+
+ if (!hdmi->connector->display_info.is_hdmi)
+ return -EINVAL;
+
+ DBG("audio: enabled=%d, channels=%d, rate=%d",
+ audio->enabled, audio->channels, audio->rate);
- DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
- "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
- audio->enabled, info->channels, info->channel_allocation,
- info->level_shift_value, info->downmix_inhibit, audio->rate);
DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
@@ -104,7 +107,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
- infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
/* Clear N/CTS selection bits */
@@ -113,7 +115,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
if (enabled) {
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- uint8_t buf[14];
n = arcs->lut[audio->rate].n;
cts = arcs->lut[audio->rate].cts;
@@ -155,20 +156,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
HDMI_ACR_1_N(n));
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
- COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
+ COND(audio->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
- /* configure infoframe: */
- hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
- (buf[3] << 0) | (buf[4] << 8) |
- (buf[5] << 16) | (buf[6] << 24));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
- (buf[7] << 0) | (buf[8] << 8));
-
hdmi_write(hdmi, REG_HDMI_GC, 0);
vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
@@ -176,11 +169,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
-
audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
@@ -190,17 +178,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
}
hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
hdmi_write(hdmi, REG_HDMI_AUD_INT,
COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
@@ -214,41 +197,72 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix)
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return -ENXIO;
-
- audio = &hdmi->audio;
-
- if (num_of_channels >= ARRAY_SIZE(nchannels))
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ unsigned int rate;
+ int ret;
+
+ drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
+ params->sample_rate,
+ params->sample_width,
+ params->cea.channels);
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ drm_err(bridge->dev, "rate[%d] not supported!\n",
+ params->sample_rate);
return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &params->cea);
+ if (ret)
+ return ret;
- audio->enabled = enabled;
- audio->infoframe.channels = nchannels[num_of_channels];
- audio->infoframe.channel_allocation = channel_allocation;
- audio->infoframe.level_shift_value = level_shift;
- audio->infoframe.downmix_inhibit = down_mix;
+ hdmi->audio.rate = rate;
+ hdmi->audio.channels = params->cea.channels;
+ hdmi->audio.enabled = true;
return msm_hdmi_audio_update(hdmi);
}
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
- audio = &hdmi->audio;
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
- if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
- return;
+ hdmi->audio.rate = 0;
+ hdmi->audio.channels = 2;
+ hdmi->audio.enabled = false;
- audio->rate = rate;
msm_hdmi_audio_update(hdmi);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 4a5b5112227f..1456354c8af4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -7,6 +7,8 @@
#include <linux/delay.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "msm_kms.h"
#include "hdmi.h"
@@ -67,24 +69,20 @@ static void power_off(struct drm_bridge *bridge)
}
#define AVI_IFRAME_LINE_NUMBER 1
+#define SPD_IFRAME_LINE_NUMBER 1
+#define VENSPEC_IFRAME_LINE_NUMBER 3
-static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
+static int msm_hdmi_config_avi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
{
- struct drm_crtc *crtc = hdmi->encoder->crtc;
- const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- union hdmi_infoframe frame;
- u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u32 buf[4] = {};
u32 val;
- int len;
+ int i;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- hdmi->connector, mode);
-
- len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (len < 0) {
+ if (len != HDMI_INFOFRAME_SIZE(AVI) || len - 3 > sizeof(buf)) {
DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
- return;
+ return -EINVAL;
}
/*
@@ -93,57 +91,245 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
* written to the LSB byte of AVI_INFO0 and the version is written to
* the third byte from the LSB of AVI_INFO3
*/
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(0),
+ memcpy(buf, &buffer[3], len - 3);
+
+ buf[3] |= buffer[1] << 24;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val |= HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ return 0;
+}
+
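
The packing above places the 14 post-header AVI bytes (checksum first) little-endian into four 32-bit AVI_INFO words and ORs the infoframe version into the top byte of the last word. A standalone sketch of the layout, assuming a little-endian host as the driver's memcpy() does, with dummy payload bytes:

```c
/*
 * Sketch of the AVI register packing: 14 post-header bytes (checksum
 * first) land little-endian in four 32-bit words, and the infoframe
 * version byte is OR'ed into the top byte of the last word. Assumes a
 * little-endian host, like the driver's memcpy(); payload bytes are
 * dummies.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AVI_INFOFRAME_SIZE 17 /* 3-byte header + checksum + 13 bytes */

int main(void)
{
	uint8_t buffer[AVI_INFOFRAME_SIZE] = {
		0x82, 0x02, 0x0d, /* type, version, length */
		0xa3,             /* checksum; body left zeroed */
	};
	uint32_t buf[4] = { 0 };

	memcpy(buf, &buffer[3], sizeof(buffer) - 3); /* checksum + body */
	buf[3] |= (uint32_t)buffer[1] << 24;         /* version byte */

	for (int i = 0; i < 4; i++)
		printf("AVI_INFO%d = 0x%08" PRIx32 "\n", i, buf[i]);
	return 0;
}
```
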
+static int msm_hdmi_config_audio_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 val;
+
+ if (len != HDMI_INFOFRAME_SIZE(AUDIO)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure audio infoframe\n");
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
buffer[3] |
buffer[4] << 8 |
buffer[5] << 16 |
buffer[6] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(1),
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
buffer[7] |
buffer[8] << 8 |
buffer[9] << 16 |
buffer[10] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(2),
- buffer[11] |
- buffer[12] << 8 |
- buffer[13] << 16 |
- buffer[14] << 24);
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(3),
- buffer[15] |
- buffer[16] << 8 |
- buffer[1] << 24);
+ return 0;
+}
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
- HDMI_INFOFRAME_CTRL0_AVI_SEND |
- HDMI_INFOFRAME_CTRL0_AVI_CONT);
+static int msm_hdmi_config_spd_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
- val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
- val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
- val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+ if (len != HDMI_INFOFRAME_SIZE(SPD) || len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE(SPD_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
}
-static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+static int msm_hdmi_config_hdmi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
+
+ if (len < HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_SIZE ||
+ len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure HDMI infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC0_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC0(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE(VENSPEC_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ u32 val;
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_SPD:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ }
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+
+ msm_hdmi_bridge_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return msm_hdmi_config_avi_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return msm_hdmi_config_audio_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return msm_hdmi_config_spd_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return msm_hdmi_config_hdmi_infoframe(hdmi, buffer, len);
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ return 0;
+ }
+}
+
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode);
+
+static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
DBG("power up");
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ hdmi->pixclock = conn_state->hdmi.tmds_char_rate;
+
+ msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (hdmi->hdmi_mode) {
- msm_hdmi_config_avi_infoframe(hdmi);
+ if (connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
- }
}
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
+
msm_hdmi_phy_powerup(phy, hdmi->pixclock);
msm_hdmi_set_mode(hdmi, true);
@@ -152,7 +338,8 @@ static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
-static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
@@ -169,25 +356,18 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
if (hdmi->power_on) {
power_off(bridge);
hdmi->power_on = false;
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
}
-static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
{
- struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = hdmi_bridge->hdmi;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
- mode = adjusted_mode;
-
- hdmi->pixclock = mode->clock * 1000;
-
hstart = mode->htotal - mode->hsync_start;
hend = mode->htotal - mode->hsync_start + mode->hdisplay;
@@ -232,7 +412,7 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
}
@@ -251,32 +431,19 @@ static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridg
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
- if (drm_edid) {
- /*
- * FIXME: This should use connector->display_info.is_hdmi from a
- * path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- }
-
return drm_edid;
}
-static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_info *info,
- const struct drm_display_mode *mode)
+static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
+ long actual;
/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
* mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
@@ -284,27 +451,34 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
*/
if (kms->funcs->round_pixclk)
actual = kms->funcs->round_pixclk(kms,
- requested, hdmi_bridge->hdmi->encoder);
+ tmds_rate,
+ hdmi_bridge->hdmi->encoder);
else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], requested);
+ actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
else
- actual = requested;
+ actual = tmds_rate;
- DBG("requested=%ld, actual=%ld", requested, actual);
+ DBG("requested=%lld, actual=%ld", tmds_rate, actual);
- if (actual != requested)
+ if (actual != tmds_rate)
return MODE_CLOCK_RANGE;
return 0;
}
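
With the TMDS helper, mode filtering reduces to: can the clock tree produce the requested character rate exactly? A sketch with a stand-in rounding function (the real driver asks kms->funcs->round_pixclk() or clk_round_rate()):

```c
/*
 * The rate check in miniature: accept a TMDS character rate only if
 * the clock tree rounds it to itself. round_pixclk() is a stand-in
 * for kms->funcs->round_pixclk() / clk_round_rate(); the 25 kHz step
 * is made up.
 */
#include <stdio.h>

#define MODE_OK          0
#define MODE_CLOCK_RANGE 1 /* illustrative, not the enum's real value */

static long long round_pixclk(unsigned long long requested)
{
	return (long long)(requested / 25000 * 25000);
}

static int tmds_char_rate_valid(unsigned long long tmds_rate)
{
	long long actual = round_pixclk(tmds_rate);

	return actual == (long long)tmds_rate ? MODE_OK : MODE_CLOCK_RANGE;
}

int main(void)
{
	printf("148500000 -> %d, 148500001 -> %d\n",
	       tmds_char_rate_valid(148500000ULL),
	       tmds_char_rate_valid(148500001ULL));
	return 0;
}
```
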
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
- .pre_enable = msm_hdmi_bridge_pre_enable,
- .post_disable = msm_hdmi_bridge_post_disable,
- .mode_set = msm_hdmi_bridge_mode_set,
- .mode_valid = msm_hdmi_bridge_mode_valid,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = msm_hdmi_bridge_atomic_pre_enable,
+ .atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
+ .hdmi_audio_prepare = msm_hdmi_bridge_audio_prepare,
+ .hdmi_audio_shutdown = msm_hdmi_bridge_audio_shutdown,
};
static void
@@ -336,9 +510,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->vendor = "Qualcomm";
+ bridge->product = "Snapdragon";
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HDMI |
DRM_BRIDGE_OP_EDID;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dev = &hdmi->pdev->dev;
+ bridge->hdmi_audio_dai_port = -1;
ret = devm_drm_bridge_add(hdmi->dev->dev, bridge);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index a7a2384044ff..87a91148a731 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -183,10 +183,16 @@ static unsigned get_crtc_mask(struct drm_atomic_state *state)
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
- int i;
+ int i, ret = 0;
+ /*
+ * FIXME: stop setting allow_modeset and move this check to the DPU
+ * driver.
+ */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
@@ -196,6 +202,11 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+ if (kms && kms->funcs && kms->funcs->check_mode_changed)
+ ret = kms->funcs->check_mode_changed(kms, state);
+ if (ret)
+ return ret;
+
return drm_atomic_helper_check(dev, state);
}
@@ -221,6 +232,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
+ atomic_set(&kms->fault_snapshot_capture, 0);
+
/*
* Now that there is no in-progress flush, prepare the
* current update:
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ff7a7a9f7b0d..c3588dc9e537 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -894,6 +894,7 @@ static const struct drm_driver msm_driver = {
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET |
+ DRIVER_SYNCOBJ_TIMELINE |
DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h
index b9049fe1e279..63f95523b2cb 100644
--- a/drivers/gpu/drm/msm/msm_dsc_helper.h
+++ b/drivers/gpu/drm/msm/msm_dsc_helper.h
@@ -13,17 +13,6 @@
#include <drm/display/drm_dsc_helper.h>
/**
- * msm_dsc_get_slices_per_intf() - calculate number of slices per interface
- * @dsc: Pointer to drm dsc config struct
- * @intf_width: interface width in pixels
- * Returns: Integer representing the number of slices for the given interface
- */
-static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width)
-{
- return DIV_ROUND_UP(intf_width, dsc->slice_width);
-}
-
-/**
* msm_dsc_get_bytes_per_line() - calculate bytes per line
* @dsc: Pointer to drm dsc config struct
* Returns: Integer value representing bytes per line. DSI and DP need
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index dee470403036..3e9aa2cc38ef 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -509,7 +509,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8557998e0c92..c380d9d9f5af 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -281,6 +281,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (submit) {
int i;
+ if (state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = submit->aspace->mmu;
+
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
+
state->bos = kcalloc(submit->nr_bos,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 7cabc8480d7c..e25009150579 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -101,6 +101,14 @@ struct msm_gpu_fault_info {
int flags;
const char *type;
const char *block;
+
+ /* Snapshot of the pagetable state at fault time: pgtbl_ttbr0 should
+ * match the TTBR0 value read back from the SMMU registers, and ptes[]
+ * holds the pagetable walk results for the faulting iova.
+ */
+ phys_addr_t pgtbl_ttbr0;
+ u64 ptes[4];
+ int asid;
};
/**
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 2a94e82316f9..fd73dcd3f30e 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -195,6 +195,28 @@ struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
return &iommu->domain->geometry;
}
+int
+msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
+{
+ struct msm_iommu_pagetable *pagetable;
+ struct arm_lpae_io_pgtable_walk_data wd = {};
+
+ if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+ return -EINVAL;
+
+ pagetable = to_pagetable(mmu);
+
+ if (!pagetable->pgtbl_ops->pgtable_walk)
+ return -EINVAL;
+
+ pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd);
+
+ for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
+ ptes[i] = wd.ptes[i];
+
+ return 0;
+}
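
One way the captured walk results might be consumed when formatting the crash state (a sketch, assuming all four levels of the long-descriptor walk are of interest; drm_printf() and the field layout are as above, but this dump helper itself is hypothetical):

static void example_dump_fault_ptes(struct drm_printer *p,
				    const struct msm_gpu_fault_info *info)
{
	for (int i = 0; i < ARRAY_SIZE(info->ptes); i++)
		drm_printf(p, "  - pgtbl level %d PTE: %#018llx\n",
			   i, (unsigned long long)info->ptes[i]);
}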
+
static const struct msm_mmu_funcs pagetable_funcs = {
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
@@ -243,7 +265,7 @@ static const struct iommu_flush_ops tlb_ops = {
.tlb_add_page = msm_iommu_tlb_add_page,
};
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
@@ -319,7 +341,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
return &pagetable->base;
}
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
@@ -343,6 +365,17 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
+static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ struct msm_iommu *iommu = arg;
+
+ if (iommu->base.handler)
+ return iommu->base.handler(iommu->base.arg, iova, flags, NULL);
+
+ return -ENOSYS;
+}
+
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
@@ -437,6 +470,21 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
return &iommu->base;
}
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
+{
+ struct msm_iommu *iommu;
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_new(dev, quirks);
+ if (IS_ERR_OR_NULL(mmu))
+ return mmu;
+
+ iommu = to_msm_iommu(mmu);
+ iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu);
+
+ return mmu;
+}
+
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
@@ -448,7 +496,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
- iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+ iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */
if (adreno_smmu->set_stall)
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 38965e12a6bf..35d5397e73b4 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -164,12 +164,26 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
vblank_ctrl_queue_work(priv, crtc, false);
}
+static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_kms *kms = arg;
+
+ if (atomic_read(&kms->fault_snapshot_capture) == 0) {
+ msm_disp_snapshot_state(kms->dev);
+ atomic_inc(&kms->fault_snapshot_capture);
+ }
+
+ return -ENOSYS;
+}
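
Note that the atomic_read()/atomic_inc() pair is not atomic as a whole, so two faults racing through the handler could each capture a snapshot. Since this is only a rate limit the race is harmless, but a race-free variant would claim the flag in a single step (sketch, not part of the patch):

	if (atomic_cmpxchg(&kms->fault_snapshot_capture, 0, 1) == 0)
		msm_disp_snapshot_state(kms->dev);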
+
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct device *iommu_dev;
/*
@@ -181,7 +195,7 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
else
iommu_dev = mdss_dev;
- mmu = msm_iommu_new(iommu_dev, 0);
+ mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
@@ -195,8 +209,11 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
if (IS_ERR(aspace)) {
dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
+ return aspace;
}
+ msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);
+
return aspace;
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e60162744c66..43b58d052ee6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -60,6 +60,13 @@ struct msm_kms_funcs {
void (*disable_commit)(struct msm_kms *kms);
/**
+ * @check_mode_changed:
+ *
+ * Verify if the commit requires a full modeset on one of CRTCs.
+ */
+ int (*check_mode_changed)(struct msm_kms *kms, struct drm_atomic_state *state);
+
+ /**
* Prepare for atomic commit. This is called after any previous
* (async or otherwise) commit has completed.
*/
@@ -128,6 +135,9 @@ struct msm_kms {
int irq;
bool irq_requested;
+ /* rate limit the snapshot capture to once per atomic commit; reset in msm_atomic_commit_tail() */
+ atomic_t fault_snapshot_capture;
+
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 88af4f490881..daf91529e02b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -42,6 +42,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
int (*handler)(void *arg, unsigned long iova, int flags, void *data))
@@ -53,7 +54,8 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
- int *asid);
+ int *asid);
+int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);
struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index 35f7f40e405b..d2c8c46bb041 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -17,6 +17,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="CLK_EN" pos="5" type="boolean"/>
<bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
<bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ <bitfield name="DSICLK_SEL" low="0" high="1" type="uint"/>
</reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
index 1cf1b14fbd91..0ebb96297dae 100644
--- a/drivers/gpu/drm/msm/registers/display/hdmi.xml
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -131,7 +131,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
-->
<bitfield name="GENERIC0_SEND" pos="0" type="boolean"/>
<bitfield name="GENERIC0_CONT" pos="1" type="boolean"/>
- <bitfield name="GENERIC0_UPDATE" low="2" high="3" type="uint"/> <!-- ??? -->
+ <bitfield name="GENERIC0_UPDATE" pos="2" type="boolean"/>
<bitfield name="GENERIC1_SEND" pos="4" type="boolean"/>
<bitfield name="GENERIC1_CONT" pos="5" type="boolean"/>
<bitfield name="GENERIC0_LINE" low="16" high="21" type="uint"/>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index e825c8a1d9ca..00015412cb3e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -3,25 +3,30 @@
#define __NVIF_IOCTL_H__
struct nvif_ioctl_v0 {
- __u8 version;
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(nvif_ioctl_v0_hdr, __hdr,
+ __u8 version;
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
#define NVIF_IOCTL_V0_DEL 0x03
#define NVIF_IOCTL_V0_MTHD 0x04
#define NVIF_IOCTL_V0_MAP 0x07
#define NVIF_IOCTL_V0_UNMAP 0x08
- __u8 type;
- __u8 pad02[4];
+ __u8 type;
+ __u8 pad02[4];
#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
#define NVIF_IOCTL_V0_OWNER_ANY 0xff
- __u8 owner;
+ __u8 owner;
#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
- __u8 route;
- __u64 token;
- __u64 object;
+ __u8 route;
+ __u64 token;
+ __u64 object;
+ );
__u8 data[]; /* ioctl data (below) */
};
+static_assert(offsetof(struct nvif_ioctl_v0, data) == sizeof(struct nvif_ioctl_v0_hdr),
+ "struct member likely outside of struct_group()");
struct nvif_ioctl_sclass_v0 {
/* nvif_ioctl ... */
@@ -51,12 +56,17 @@ struct nvif_ioctl_del {
};
struct nvif_ioctl_mthd_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 method;
- __u8 pad02[6];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(nvif_ioctl_mthd_v0_hdr, __hdr,
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 method;
+ __u8 pad02[6];
+ );
__u8 data[]; /* method data (class.h) */
};
+static_assert(offsetof(struct nvif_ioctl_mthd_v0, data) == sizeof(struct nvif_ioctl_mthd_v0_hdr),
+ "struct member likely outside of struct_group()");
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index bcda0105160f..55691ec44aba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -79,21 +79,8 @@ nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
!drm_dp_read_lttpr_common_caps(aux, dpcd, outp->dp.lttpr.caps)) {
int nr = drm_dp_lttpr_count(outp->dp.lttpr.caps);
- if (nr) {
- drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
- DP_PHY_REPEATER_MODE_TRANSPARENT);
-
- if (nr > 0) {
- ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
- DP_PHY_REPEATER_MODE_NON_TRANSPARENT);
- if (ret != 1) {
- drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
- DP_PHY_REPEATER_MODE_TRANSPARENT);
- } else {
- outp->dp.lttpr.nr = nr;
- }
- }
- }
+ if (!drm_dp_lttpr_init(aux, nr))
+ outp->dp.lttpr.nr = nr;
}
ret = drm_dp_read_dpcd_caps(aux, dpcd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8ea98f06d39a..825c867eba7c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -79,8 +79,8 @@ struct nouveau_svm {
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
struct nouveau_pfnmap_args {
- struct nvif_ioctl_v0 i;
- struct nvif_ioctl_mthd_v0 m;
+ struct nvif_ioctl_v0_hdr i;
+ struct nvif_ioctl_mthd_v0_hdr m;
struct nvif_vmm_pfnmap_v0 p;
};
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 0b87278ac0f8..70af63d70976 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -57,7 +57,7 @@ int
nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_sclass_v0 sclass;
} *args = NULL;
int ret, cnt = 0, i;
@@ -101,7 +101,7 @@ int
nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_mthd_v0 mthd;
} *args;
u32 args_size;
@@ -135,7 +135,7 @@ void
nvif_object_unmap_handle(struct nvif_object *object)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_unmap unmap;
} args = {
.ioctl.type = NVIF_IOCTL_V0_UNMAP,
@@ -149,7 +149,7 @@ nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
u64 *handle, u64 *length)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_map_v0 map;
} *args;
u32 argn = sizeof(*args) + argc;
@@ -211,7 +211,7 @@ void
nvif_object_dtor(struct nvif_object *object)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_del del;
} args = {
.ioctl.type = NVIF_IOCTL_V0_DEL,
@@ -230,7 +230,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
s32 oclass, void *data, u32 size, struct nvif_object *object)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_new_v0 new;
} *args;
int ret = 0;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d7469c565d1d..e059b06e0239 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -573,6 +573,16 @@ config DRM_PANEL_RAYDIUM_RM67191
Say Y here if you want to enable support for Raydium RM67191 FHD
(1080x1920) DSI panel.
+config DRM_PANEL_RAYDIUM_RM67200
+ tristate "Raydium RM67200-based DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Raydium RM67200-based
+ DSI video mode panels. This panel controller is used in the
+ Wanchanglong W552793BAA panel found on the Rockchip RK3588 EVB1
+ evaluation board.
+
config DRM_PANEL_RAYDIUM_RM68200
tristate "Raydium RM68200 720x1280 DSI video mode panel"
depends on OF
@@ -925,6 +935,15 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_SUMMIT
+ tristate "Apple Summit display panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for the "Summit" display panel
+ used as the Touch Bar on certain Apple laptops.
+
config DRM_PANEL_SYNAPTICS_R63353
tristate "Synaptics R63353-based panels"
depends on OF
@@ -996,6 +1015,18 @@ config DRM_PANEL_VISIONOX_RM69299
Say Y here if you want to enable support for Visionox
RM69299 DSI Video Mode panel.
+config DRM_PANEL_VISIONOX_RM692E5
+ tristate "Visionox RM692E5"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DSC_HELPER
+ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for Visionox RM692E5 AMOLED
+ display panels, such as the one found in the Nothing Phone (1)
+ smartphone.
+
config DRM_PANEL_VISIONOX_VTDR6130
tristate "Visionox VTDR6130"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7dcf72646cac..1bb8ae46b59b 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67200) += panel-raydium-rm67200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
@@ -89,6 +90,7 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SUMMIT) += panel-summit.o
obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
@@ -100,6 +102,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_R66451) += panel-visionox-r66451.o
obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 266a087fe14c..3c24a63b6be8 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -607,7 +607,7 @@ static int ili9882t_add(struct ili9882t *ili)
ili->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(ili->enable_gpio)) {
- dev_err(dev, "cannot get reset-gpios %ld\n",
+ dev_err(dev, "cannot get enable-gpios %ld\n",
PTR_ERR(ili->enable_gpio));
return PTR_ERR(ili->enable_gpio);
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67200.c b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
new file mode 100644
index 000000000000..64b685dc11f6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Collabora
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct raydium_rm67200_panel_info {
+ struct drm_display_mode mode;
+ const struct regulator_bulk_data *regulators;
+ int num_regulators;
+ void (*panel_setup)(struct mipi_dsi_multi_context *ctx);
+};
+
+struct raydium_rm67200 {
+ struct drm_panel panel;
+ const struct raydium_rm67200_panel_info *panel_info;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+ int num_supplies;
+};
+
+static inline struct raydium_rm67200 *to_raydium_rm67200(struct drm_panel *panel)
+{
+ return container_of(panel, struct raydium_rm67200, panel);
+}
+
+static void raydium_rm67200_reset(struct raydium_rm67200 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+}
+
+static void raydium_rm67200_write(struct mipi_dsi_multi_context *ctx,
+ u8 arg1, u8 arg2)
+{
+ u8 d[] = { arg1, arg2 };
+
+ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d));
+}
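
The helper wraps each (register, value) pair in a two-byte generic packet, which keeps the vendor init dump below compact. Judging by the sequence, 0xfe appears to select a register page before each block is programmed. The same write could be spelled with the seq macro, purely as an illustration:

	/* equivalent to raydium_rm67200_write(ctx, 0xfe, 0x21) */
	mipi_dsi_generic_write_seq_multi(ctx, 0xfe, 0x21);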
+
+static void w552793baa_setup(struct mipi_dsi_multi_context *ctx)
+{
+ raydium_rm67200_write(ctx, 0xfe, 0x21);
+ raydium_rm67200_write(ctx, 0x04, 0x00);
+ raydium_rm67200_write(ctx, 0x00, 0x64);
+ raydium_rm67200_write(ctx, 0x2a, 0x00);
+ raydium_rm67200_write(ctx, 0x26, 0x64);
+ raydium_rm67200_write(ctx, 0x54, 0x00);
+ raydium_rm67200_write(ctx, 0x50, 0x64);
+ raydium_rm67200_write(ctx, 0x7b, 0x00);
+ raydium_rm67200_write(ctx, 0x77, 0x64);
+ raydium_rm67200_write(ctx, 0xa2, 0x00);
+ raydium_rm67200_write(ctx, 0x9d, 0x64);
+ raydium_rm67200_write(ctx, 0xc9, 0x00);
+ raydium_rm67200_write(ctx, 0xc5, 0x64);
+ raydium_rm67200_write(ctx, 0x01, 0x71);
+ raydium_rm67200_write(ctx, 0x27, 0x71);
+ raydium_rm67200_write(ctx, 0x51, 0x71);
+ raydium_rm67200_write(ctx, 0x78, 0x71);
+ raydium_rm67200_write(ctx, 0x9e, 0x71);
+ raydium_rm67200_write(ctx, 0xc6, 0x71);
+ raydium_rm67200_write(ctx, 0x02, 0x89);
+ raydium_rm67200_write(ctx, 0x28, 0x89);
+ raydium_rm67200_write(ctx, 0x52, 0x89);
+ raydium_rm67200_write(ctx, 0x79, 0x89);
+ raydium_rm67200_write(ctx, 0x9f, 0x89);
+ raydium_rm67200_write(ctx, 0xc7, 0x89);
+ raydium_rm67200_write(ctx, 0x03, 0x9e);
+ raydium_rm67200_write(ctx, 0x29, 0x9e);
+ raydium_rm67200_write(ctx, 0x53, 0x9e);
+ raydium_rm67200_write(ctx, 0x7a, 0x9e);
+ raydium_rm67200_write(ctx, 0xa0, 0x9e);
+ raydium_rm67200_write(ctx, 0xc8, 0x9e);
+ raydium_rm67200_write(ctx, 0x09, 0x00);
+ raydium_rm67200_write(ctx, 0x05, 0xb0);
+ raydium_rm67200_write(ctx, 0x31, 0x00);
+ raydium_rm67200_write(ctx, 0x2b, 0xb0);
+ raydium_rm67200_write(ctx, 0x5a, 0x00);
+ raydium_rm67200_write(ctx, 0x55, 0xb0);
+ raydium_rm67200_write(ctx, 0x80, 0x00);
+ raydium_rm67200_write(ctx, 0x7c, 0xb0);
+ raydium_rm67200_write(ctx, 0xa7, 0x00);
+ raydium_rm67200_write(ctx, 0xa3, 0xb0);
+ raydium_rm67200_write(ctx, 0xce, 0x00);
+ raydium_rm67200_write(ctx, 0xca, 0xb0);
+ raydium_rm67200_write(ctx, 0x06, 0xc0);
+ raydium_rm67200_write(ctx, 0x2d, 0xc0);
+ raydium_rm67200_write(ctx, 0x56, 0xc0);
+ raydium_rm67200_write(ctx, 0x7d, 0xc0);
+ raydium_rm67200_write(ctx, 0xa4, 0xc0);
+ raydium_rm67200_write(ctx, 0xcb, 0xc0);
+ raydium_rm67200_write(ctx, 0x07, 0xcf);
+ raydium_rm67200_write(ctx, 0x2f, 0xcf);
+ raydium_rm67200_write(ctx, 0x58, 0xcf);
+ raydium_rm67200_write(ctx, 0x7e, 0xcf);
+ raydium_rm67200_write(ctx, 0xa5, 0xcf);
+ raydium_rm67200_write(ctx, 0xcc, 0xcf);
+ raydium_rm67200_write(ctx, 0x08, 0xdd);
+ raydium_rm67200_write(ctx, 0x30, 0xdd);
+ raydium_rm67200_write(ctx, 0x59, 0xdd);
+ raydium_rm67200_write(ctx, 0x7f, 0xdd);
+ raydium_rm67200_write(ctx, 0xa6, 0xdd);
+ raydium_rm67200_write(ctx, 0xcd, 0xdd);
+ raydium_rm67200_write(ctx, 0x0e, 0x15);
+ raydium_rm67200_write(ctx, 0x0a, 0xe9);
+ raydium_rm67200_write(ctx, 0x36, 0x15);
+ raydium_rm67200_write(ctx, 0x32, 0xe9);
+ raydium_rm67200_write(ctx, 0x5f, 0x15);
+ raydium_rm67200_write(ctx, 0x5b, 0xe9);
+ raydium_rm67200_write(ctx, 0x85, 0x15);
+ raydium_rm67200_write(ctx, 0x81, 0xe9);
+ raydium_rm67200_write(ctx, 0xad, 0x15);
+ raydium_rm67200_write(ctx, 0xa9, 0xe9);
+ raydium_rm67200_write(ctx, 0xd3, 0x15);
+ raydium_rm67200_write(ctx, 0xcf, 0xe9);
+ raydium_rm67200_write(ctx, 0x0b, 0x14);
+ raydium_rm67200_write(ctx, 0x33, 0x14);
+ raydium_rm67200_write(ctx, 0x5c, 0x14);
+ raydium_rm67200_write(ctx, 0x82, 0x14);
+ raydium_rm67200_write(ctx, 0xaa, 0x14);
+ raydium_rm67200_write(ctx, 0xd0, 0x14);
+ raydium_rm67200_write(ctx, 0x0c, 0x36);
+ raydium_rm67200_write(ctx, 0x34, 0x36);
+ raydium_rm67200_write(ctx, 0x5d, 0x36);
+ raydium_rm67200_write(ctx, 0x83, 0x36);
+ raydium_rm67200_write(ctx, 0xab, 0x36);
+ raydium_rm67200_write(ctx, 0xd1, 0x36);
+ raydium_rm67200_write(ctx, 0x0d, 0x6b);
+ raydium_rm67200_write(ctx, 0x35, 0x6b);
+ raydium_rm67200_write(ctx, 0x5e, 0x6b);
+ raydium_rm67200_write(ctx, 0x84, 0x6b);
+ raydium_rm67200_write(ctx, 0xac, 0x6b);
+ raydium_rm67200_write(ctx, 0xd2, 0x6b);
+ raydium_rm67200_write(ctx, 0x13, 0x5a);
+ raydium_rm67200_write(ctx, 0x0f, 0x94);
+ raydium_rm67200_write(ctx, 0x3b, 0x5a);
+ raydium_rm67200_write(ctx, 0x37, 0x94);
+ raydium_rm67200_write(ctx, 0x64, 0x5a);
+ raydium_rm67200_write(ctx, 0x60, 0x94);
+ raydium_rm67200_write(ctx, 0x8a, 0x5a);
+ raydium_rm67200_write(ctx, 0x86, 0x94);
+ raydium_rm67200_write(ctx, 0xb2, 0x5a);
+ raydium_rm67200_write(ctx, 0xae, 0x94);
+ raydium_rm67200_write(ctx, 0xd8, 0x5a);
+ raydium_rm67200_write(ctx, 0xd4, 0x94);
+ raydium_rm67200_write(ctx, 0x10, 0xd1);
+ raydium_rm67200_write(ctx, 0x38, 0xd1);
+ raydium_rm67200_write(ctx, 0x61, 0xd1);
+ raydium_rm67200_write(ctx, 0x87, 0xd1);
+ raydium_rm67200_write(ctx, 0xaf, 0xd1);
+ raydium_rm67200_write(ctx, 0xd5, 0xd1);
+ raydium_rm67200_write(ctx, 0x11, 0x04);
+ raydium_rm67200_write(ctx, 0x39, 0x04);
+ raydium_rm67200_write(ctx, 0x62, 0x04);
+ raydium_rm67200_write(ctx, 0x88, 0x04);
+ raydium_rm67200_write(ctx, 0xb0, 0x04);
+ raydium_rm67200_write(ctx, 0xd6, 0x04);
+ raydium_rm67200_write(ctx, 0x12, 0x05);
+ raydium_rm67200_write(ctx, 0x3a, 0x05);
+ raydium_rm67200_write(ctx, 0x63, 0x05);
+ raydium_rm67200_write(ctx, 0x89, 0x05);
+ raydium_rm67200_write(ctx, 0xb1, 0x05);
+ raydium_rm67200_write(ctx, 0xd7, 0x05);
+ raydium_rm67200_write(ctx, 0x18, 0xaa);
+ raydium_rm67200_write(ctx, 0x14, 0x36);
+ raydium_rm67200_write(ctx, 0x42, 0xaa);
+ raydium_rm67200_write(ctx, 0x3d, 0x36);
+ raydium_rm67200_write(ctx, 0x69, 0xaa);
+ raydium_rm67200_write(ctx, 0x65, 0x36);
+ raydium_rm67200_write(ctx, 0x8f, 0xaa);
+ raydium_rm67200_write(ctx, 0x8b, 0x36);
+ raydium_rm67200_write(ctx, 0xb7, 0xaa);
+ raydium_rm67200_write(ctx, 0xb3, 0x36);
+ raydium_rm67200_write(ctx, 0xdd, 0xaa);
+ raydium_rm67200_write(ctx, 0xd9, 0x36);
+ raydium_rm67200_write(ctx, 0x15, 0x74);
+ raydium_rm67200_write(ctx, 0x3f, 0x74);
+ raydium_rm67200_write(ctx, 0x66, 0x74);
+ raydium_rm67200_write(ctx, 0x8c, 0x74);
+ raydium_rm67200_write(ctx, 0xb4, 0x74);
+ raydium_rm67200_write(ctx, 0xda, 0x74);
+ raydium_rm67200_write(ctx, 0x16, 0x9f);
+ raydium_rm67200_write(ctx, 0x40, 0x9f);
+ raydium_rm67200_write(ctx, 0x67, 0x9f);
+ raydium_rm67200_write(ctx, 0x8d, 0x9f);
+ raydium_rm67200_write(ctx, 0xb5, 0x9f);
+ raydium_rm67200_write(ctx, 0xdb, 0x9f);
+ raydium_rm67200_write(ctx, 0x17, 0xdc);
+ raydium_rm67200_write(ctx, 0x41, 0xdc);
+ raydium_rm67200_write(ctx, 0x68, 0xdc);
+ raydium_rm67200_write(ctx, 0x8e, 0xdc);
+ raydium_rm67200_write(ctx, 0xb6, 0xdc);
+ raydium_rm67200_write(ctx, 0xdc, 0xdc);
+ raydium_rm67200_write(ctx, 0x1d, 0xff);
+ raydium_rm67200_write(ctx, 0x19, 0x03);
+ raydium_rm67200_write(ctx, 0x47, 0xff);
+ raydium_rm67200_write(ctx, 0x43, 0x03);
+ raydium_rm67200_write(ctx, 0x6e, 0xff);
+ raydium_rm67200_write(ctx, 0x6a, 0x03);
+ raydium_rm67200_write(ctx, 0x94, 0xff);
+ raydium_rm67200_write(ctx, 0x90, 0x03);
+ raydium_rm67200_write(ctx, 0xbc, 0xff);
+ raydium_rm67200_write(ctx, 0xb8, 0x03);
+ raydium_rm67200_write(ctx, 0xe2, 0xff);
+ raydium_rm67200_write(ctx, 0xde, 0x03);
+ raydium_rm67200_write(ctx, 0x1a, 0x35);
+ raydium_rm67200_write(ctx, 0x44, 0x35);
+ raydium_rm67200_write(ctx, 0x6b, 0x35);
+ raydium_rm67200_write(ctx, 0x91, 0x35);
+ raydium_rm67200_write(ctx, 0xb9, 0x35);
+ raydium_rm67200_write(ctx, 0xdf, 0x35);
+ raydium_rm67200_write(ctx, 0x1b, 0x45);
+ raydium_rm67200_write(ctx, 0x45, 0x45);
+ raydium_rm67200_write(ctx, 0x6c, 0x45);
+ raydium_rm67200_write(ctx, 0x92, 0x45);
+ raydium_rm67200_write(ctx, 0xba, 0x45);
+ raydium_rm67200_write(ctx, 0xe0, 0x45);
+ raydium_rm67200_write(ctx, 0x1c, 0x55);
+ raydium_rm67200_write(ctx, 0x46, 0x55);
+ raydium_rm67200_write(ctx, 0x6d, 0x55);
+ raydium_rm67200_write(ctx, 0x93, 0x55);
+ raydium_rm67200_write(ctx, 0xbb, 0x55);
+ raydium_rm67200_write(ctx, 0xe1, 0x55);
+ raydium_rm67200_write(ctx, 0x22, 0xff);
+ raydium_rm67200_write(ctx, 0x1e, 0x68);
+ raydium_rm67200_write(ctx, 0x4c, 0xff);
+ raydium_rm67200_write(ctx, 0x48, 0x68);
+ raydium_rm67200_write(ctx, 0x73, 0xff);
+ raydium_rm67200_write(ctx, 0x6f, 0x68);
+ raydium_rm67200_write(ctx, 0x99, 0xff);
+ raydium_rm67200_write(ctx, 0x95, 0x68);
+ raydium_rm67200_write(ctx, 0xc1, 0xff);
+ raydium_rm67200_write(ctx, 0xbd, 0x68);
+ raydium_rm67200_write(ctx, 0xe7, 0xff);
+ raydium_rm67200_write(ctx, 0xe3, 0x68);
+ raydium_rm67200_write(ctx, 0x1f, 0x7e);
+ raydium_rm67200_write(ctx, 0x49, 0x7e);
+ raydium_rm67200_write(ctx, 0x70, 0x7e);
+ raydium_rm67200_write(ctx, 0x96, 0x7e);
+ raydium_rm67200_write(ctx, 0xbe, 0x7e);
+ raydium_rm67200_write(ctx, 0xe4, 0x7e);
+ raydium_rm67200_write(ctx, 0x20, 0x97);
+ raydium_rm67200_write(ctx, 0x4a, 0x97);
+ raydium_rm67200_write(ctx, 0x71, 0x97);
+ raydium_rm67200_write(ctx, 0x97, 0x97);
+ raydium_rm67200_write(ctx, 0xbf, 0x97);
+ raydium_rm67200_write(ctx, 0xe5, 0x97);
+ raydium_rm67200_write(ctx, 0x21, 0xb5);
+ raydium_rm67200_write(ctx, 0x4b, 0xb5);
+ raydium_rm67200_write(ctx, 0x72, 0xb5);
+ raydium_rm67200_write(ctx, 0x98, 0xb5);
+ raydium_rm67200_write(ctx, 0xc0, 0xb5);
+ raydium_rm67200_write(ctx, 0xe6, 0xb5);
+ raydium_rm67200_write(ctx, 0x25, 0xf0);
+ raydium_rm67200_write(ctx, 0x23, 0xe8);
+ raydium_rm67200_write(ctx, 0x4f, 0xf0);
+ raydium_rm67200_write(ctx, 0x4d, 0xe8);
+ raydium_rm67200_write(ctx, 0x76, 0xf0);
+ raydium_rm67200_write(ctx, 0x74, 0xe8);
+ raydium_rm67200_write(ctx, 0x9c, 0xf0);
+ raydium_rm67200_write(ctx, 0x9a, 0xe8);
+ raydium_rm67200_write(ctx, 0xc4, 0xf0);
+ raydium_rm67200_write(ctx, 0xc2, 0xe8);
+ raydium_rm67200_write(ctx, 0xea, 0xf0);
+ raydium_rm67200_write(ctx, 0xe8, 0xe8);
+ raydium_rm67200_write(ctx, 0x24, 0xff);
+ raydium_rm67200_write(ctx, 0x4e, 0xff);
+ raydium_rm67200_write(ctx, 0x75, 0xff);
+ raydium_rm67200_write(ctx, 0x9b, 0xff);
+ raydium_rm67200_write(ctx, 0xc3, 0xff);
+ raydium_rm67200_write(ctx, 0xe9, 0xff);
+ raydium_rm67200_write(ctx, 0xfe, 0x3d);
+ raydium_rm67200_write(ctx, 0x00, 0x04);
+ raydium_rm67200_write(ctx, 0xfe, 0x23);
+ raydium_rm67200_write(ctx, 0x08, 0x82);
+ raydium_rm67200_write(ctx, 0x0a, 0x00);
+ raydium_rm67200_write(ctx, 0x0b, 0x00);
+ raydium_rm67200_write(ctx, 0x0c, 0x01);
+ raydium_rm67200_write(ctx, 0x16, 0x00);
+ raydium_rm67200_write(ctx, 0x18, 0x02);
+ raydium_rm67200_write(ctx, 0x1b, 0x04);
+ raydium_rm67200_write(ctx, 0x19, 0x04);
+ raydium_rm67200_write(ctx, 0x1c, 0x81);
+ raydium_rm67200_write(ctx, 0x1f, 0x00);
+ raydium_rm67200_write(ctx, 0x20, 0x03);
+ raydium_rm67200_write(ctx, 0x23, 0x04);
+ raydium_rm67200_write(ctx, 0x21, 0x01);
+ raydium_rm67200_write(ctx, 0x54, 0x63);
+ raydium_rm67200_write(ctx, 0x55, 0x54);
+ raydium_rm67200_write(ctx, 0x6e, 0x45);
+ raydium_rm67200_write(ctx, 0x6d, 0x36);
+ raydium_rm67200_write(ctx, 0xfe, 0x3d);
+ raydium_rm67200_write(ctx, 0x55, 0x78);
+ raydium_rm67200_write(ctx, 0xfe, 0x20);
+ raydium_rm67200_write(ctx, 0x26, 0x30);
+ raydium_rm67200_write(ctx, 0xfe, 0x3d);
+ raydium_rm67200_write(ctx, 0x20, 0x71);
+ raydium_rm67200_write(ctx, 0x50, 0x8f);
+ raydium_rm67200_write(ctx, 0x51, 0x8f);
+ raydium_rm67200_write(ctx, 0xfe, 0x00);
+ raydium_rm67200_write(ctx, 0x35, 0x00);
+}
+
+static int raydium_rm67200_prepare(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ctx->num_supplies, ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ raydium_rm67200_reset(ctx);
+
+ msleep(60);
+
+ return 0;
+}
+
+static int raydium_rm67200_unprepare(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ctx->num_supplies, ctx->supplies);
+
+ msleep(60);
+
+ return 0;
+}
+
+static int raydium_rm67200_enable(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi };
+
+ rm67200->panel_info->panel_setup(&ctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 30);
+
+ return ctx.accum_err;
+}
+
+static int raydium_rm67200_disable(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 60);
+
+ return ctx.accum_err;
+}
+
+static int raydium_rm67200_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+
+ return drm_connector_helper_get_modes_fixed(connector, &ctx->panel_info->mode);
+}
+
+static const struct drm_panel_funcs raydium_rm67200_funcs = {
+ .prepare = raydium_rm67200_prepare,
+ .unprepare = raydium_rm67200_unprepare,
+ .get_modes = raydium_rm67200_get_modes,
+ .enable = raydium_rm67200_enable,
+ .disable = raydium_rm67200_disable,
+};
+
+static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct raydium_rm67200 *ctx;
+ int ret = 0;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->panel_info = device_get_match_data(dev);
+ if (!ctx->panel_info)
+ return -EINVAL;
+
+ ctx->num_supplies = ctx->panel_info->num_regulators;
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ctx->panel_info->num_regulators,
+ ctx->panel_info->regulators,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+ ctx->panel.prepare_prev_first = true;
+
+ drm_panel_init(&ctx->panel, dev, &raydium_rm67200_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ }
+
+ return ret;
+}
+
+static void raydium_rm67200_remove(struct mipi_dsi_device *dsi)
+{
+ struct raydium_rm67200 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct regulator_bulk_data w552793baa_regulators[] = {
+ { .supply = "vdd", }, /* 2.8V */
+ { .supply = "iovcc", }, /* 1.8V */
+ { .supply = "vsp", }, /* +5.5V */
+ { .supply = "vsn", }, /* -5.5V */
+};
+
+static const struct raydium_rm67200_panel_info w552793baa_info = {
+ .mode = {
+ .clock = 132000,
+ .hdisplay = 1080,
+ .hsync_start = 1095,
+ .hsync_end = 1125,
+ .htotal = 1129,
+ .vdisplay = 1920,
+ .vsync_start = 1935,
+ .vsync_end = 1950,
+ .vtotal = 1952,
+ .width_mm = 68, /* 68.04mm */
+ .height_mm = 121, /* 120.96mm */
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+ .regulators = w552793baa_regulators,
+ .num_regulators = ARRAY_SIZE(w552793baa_regulators),
+ .panel_setup = w552793baa_setup,
+};
+
+static const struct of_device_id raydium_rm67200_of_match[] = {
+ { .compatible = "wanchanglong,w552793baa", .data = &w552793baa_info },
+ { /*sentinel*/ }
+};
+MODULE_DEVICE_TABLE(of, raydium_rm67200_of_match);
+
+static struct mipi_dsi_driver raydium_rm67200_driver = {
+ .probe = raydium_rm67200_probe,
+ .remove = raydium_rm67200_remove,
+ .driver = {
+ .name = "panel-raydium-rm67200",
+ .of_match_table = raydium_rm67200_of_match,
+ },
+};
+module_mipi_dsi_driver(raydium_rm67200_driver);
+
+MODULE_AUTHOR("Sebastian Reichel <sebastian.reichel@collabora.com>");
+MODULE_DESCRIPTION("DRM driver for RM67200-equipped DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9b2f128fd309..232b03c1a259 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1374,6 +1374,64 @@ static const struct panel_desc bananapi_s070wv20_ct16 = {
},
};
+static const struct display_timing boe_av101hdt_a10_timing = {
+ .pixelclock = { 74210000, 75330000, 76780000, },
+ .hactive = { 1280, 1280, 1280, },
+ .hfront_porch = { 10, 42, 33, },
+ .hback_porch = { 10, 18, 33, },
+ .hsync_len = { 30, 10, 30, },
+ .vactive = { 720, 720, 720, },
+ .vfront_porch = { 200, 183, 200, },
+ .vback_porch = { 8, 8, 8, },
+ .vsync_len = { 2, 19, 2, },
+ .flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc boe_av101hdt_a10 = {
+ .timings = &boe_av101hdt_a10_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 224,
+ .height = 126,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct display_timing boe_av123z7m_n17_timing = {
+ .pixelclock = { 86600000, 88000000, 90800000, },
+ .hactive = { 1920, 1920, 1920, },
+ .hfront_porch = { 10, 10, 10, },
+ .hback_porch = { 10, 10, 10, },
+ .hsync_len = { 9, 12, 25, },
+ .vactive = { 720, 720, 720, },
+ .vfront_porch = { 7, 10, 13, },
+ .vback_porch = { 7, 10, 13, },
+ .vsync_len = { 7, 11, 14, },
+ .flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc boe_av123z7m_n17 = {
+ .timings = &boe_av123z7m_n17_timing,
+ .bpc = 8,
+ .num_timings = 1,
+ .size = {
+ .width = 292,
+ .height = 110,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode boe_bp101wx1_100_mode = {
.clock = 78945,
.hdisplay = 1280,
@@ -4814,6 +4872,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "bananapi,s070wv20-ct16",
.data = &bananapi_s070wv20_ct16,
}, {
+ .compatible = "boe,av101hdt-a10",
+ .data = &boe_av101hdt_a10,
+ }, {
+ .compatible = "boe,av123z7m-n17",
+ .data = &boe_av123z7m_n17,
+ }, {
.compatible = "boe,bp082wx1-100",
.data = &boe_bp082wx1_100,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 472195d4bbbe..97f4bb4e1029 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -47,93 +47,40 @@ static inline struct sony_td4353_jdi *to_sony_td4353_jdi(struct drm_panel *panel
static int sony_td4353_jdi_on(struct sony_td4353_jdi *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 1080 - 1);
- if (ret < 0) {
- dev_err(dev, "Failed to set column address: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 2160 - 1);
- if (ret < 0) {
- dev_err(dev, "Failed to set page address: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear scanline: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 1080 - 1);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 2160 - 1);
+ mipi_dsi_dcs_set_tear_scanline_multi(&dsi_ctx, 0);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS,
- 0x00, 0x00, 0x08, 0x6f);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(70);
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS,
+ 0x00, 0x00, 0x08, 0x6f);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to turn display on: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return dsi_ctx.accum_err;
}
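
The conversion leans on the mipi_dsi_multi_context pattern: every *_multi helper becomes a no-op once a previous step has failed and records the first error in accum_err, so a long command sequence needs just one check at the end. Roughly (a simplified sketch of the helper pattern in drm_mipi_dsi.c, not a verbatim copy):

static void example_dcs_write_multi(struct mipi_dsi_multi_context *ctx,
				    const void *data, size_t len)
{
	ssize_t ret;

	if (ctx->accum_err)
		return;			/* an earlier step already failed */

	ret = mipi_dsi_dcs_write_buffer(ctx->dsi, data, len);
	if (ret < 0) {
		ctx->accum_err = ret;
		dev_err(&ctx->dsi->dev, "dcs write failed: %d\n",
			ctx->accum_err);
	}
}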
-static int sony_td4353_jdi_off(struct sony_td4353_jdi *ctx)
+static void sony_td4353_jdi_off(struct sony_td4353_jdi *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(22);
-
- ret = mipi_dsi_dcs_set_tear_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear off: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(80);
-
- return 0;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 22);
+ mipi_dsi_dcs_set_tear_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 80);
}
static void sony_td4353_assert_reset_gpios(struct sony_td4353_jdi *ctx, int mode)
@@ -146,14 +93,11 @@ static void sony_td4353_assert_reset_gpios(struct sony_td4353_jdi *ctx, int mode
static int sony_td4353_jdi_prepare(struct drm_panel *panel)
{
struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
msleep(100);
@@ -161,7 +105,6 @@ static int sony_td4353_jdi_prepare(struct drm_panel *panel)
ret = sony_td4353_jdi_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to power on panel: %d\n", ret);
sony_td4353_assert_reset_gpios(ctx, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
return ret;
@@ -173,12 +116,8 @@ static int sony_td4353_jdi_prepare(struct drm_panel *panel)
static int sony_td4353_jdi_unprepare(struct drm_panel *panel)
{
struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = sony_td4353_jdi_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to power off panel: %d\n", ret);
+ sony_td4353_jdi_off(ctx);
sony_td4353_assert_reset_gpios(ctx, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
new file mode 100644
index 000000000000..e780faee1857
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <video/mipi_display.h>
+
+struct summit_data {
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *bl;
+ struct drm_panel panel;
+};
+
+static int summit_set_brightness(struct device *dev)
+{
+ struct summit_data *s_data = dev_get_drvdata(dev);
+ int level = backlight_get_brightness(s_data->bl);
+
+ return mipi_dsi_dcs_set_display_brightness(s_data->dsi, level);
+}
+
+static int summit_bl_update_status(struct backlight_device *dev)
+{
+ return summit_set_brightness(&dev->dev);
+}
+
+static const struct backlight_ops summit_bl_ops = {
+ .update_status = summit_bl_update_status,
+};
+
+static const struct drm_display_mode summit_mode = {
+ .vdisplay = 2008,
+ .hdisplay = 60,
+ .hsync_start = 60 + 8,
+ .hsync_end = 60 + 8 + 80,
+ .htotal = 60 + 8 + 80 + 40,
+ .vsync_start = 2008 + 1,
+ .vsync_end = 2008 + 1 + 15,
+ .vtotal = 2008 + 1 + 15 + 6,
+ .clock = ((60 + 8 + 80 + 40) * (2008 + 1 + 15 + 6) * 60) / 1000,
+ .type = DRM_MODE_TYPE_DRIVER,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int summit_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ connector->display_info.non_desktop = true;
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+
+ return drm_connector_helper_get_modes_fixed(connector, &summit_mode);
+}
+
+static const struct drm_panel_funcs summit_panel_funcs = {
+ .get_modes = summit_get_modes,
+};
+
+static int summit_probe(struct mipi_dsi_device *dsi)
+{
+ struct backlight_properties props = { 0 };
+ struct device *dev = &dsi->dev;
+ struct summit_data *s_data;
+ int ret;
+
+ s_data = devm_kzalloc(dev, sizeof(*s_data), GFP_KERNEL);
+ if (!s_data)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, s_data);
+ s_data->dsi = dsi;
+
+ ret = device_property_read_u32(dev, "max-brightness", &props.max_brightness);
+ if (ret)
+ return ret;
+ props.type = BACKLIGHT_RAW;
+
+ s_data->bl = devm_backlight_device_register(dev, dev_name(dev),
+ dev, s_data, &summit_bl_ops, &props);
+ if (IS_ERR(s_data->bl))
+ return PTR_ERR(s_data->bl);
+
+ drm_panel_init(&s_data->panel, dev, &summit_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_panel_add(&s_data->panel);
+
+ return mipi_dsi_attach(dsi);
+}
+
+static void summit_remove(struct mipi_dsi_device *dsi)
+{
+ struct summit_data *s_data = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&s_data->panel);
+}
+
+static int summit_suspend(struct device *dev)
+{
+ struct summit_data *s_data = dev_get_drvdata(dev);
+
+ return mipi_dsi_dcs_set_display_brightness(s_data->dsi, 0);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(summit_pm_ops, summit_suspend,
+ summit_set_brightness);
+
+static const struct of_device_id summit_of_match[] = {
+ { .compatible = "apple,summit" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, summit_of_match);
+
+static struct mipi_dsi_driver summit_driver = {
+ .probe = summit_probe,
+ .remove = summit_remove,
+ .driver = {
+ .name = "panel-summit",
+ .of_match_table = summit_of_match,
+ .pm = pm_sleep_ptr(&summit_pm_ops),
+ },
+};
+module_mipi_dsi_driver(summit_driver);
+
+MODULE_DESCRIPTION("Summit Display Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
new file mode 100644
index 000000000000..4db7fa8d74c4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Eugene Lepshy <fekz115@gmail.com>
+ * Copyright (c) 2025, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct visionox_rm692e5 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_config dsc;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_rm692e5_supplies[] = {
+ { .supply = "vddio" }, /* 1p8 */
+ { .supply = "vdd" }, /* 3p3 */
+};
+
+static inline
+struct visionox_rm692e5 *to_visionox_rm692e5(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_rm692e5, panel);
+}
+
+static void visionox_rm692e5_reset(struct visionox_rm692e5 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(32);
+}
+
+static int visionox_rm692e5_on(struct visionox_rm692e5 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbd, 0x07);
+ mipi_dsi_usleep_range(&dsi_ctx, 17000, 18000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x11);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00ab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0xe8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x71, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x72, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x73, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x74, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x79, 0x54);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7a, 0x62);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7b, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7c, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x79);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x7d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x7e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x83, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x85, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x86, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x87, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x88, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x89, 0xbe);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8a, 0x3a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8b, 0xfc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8c, 0x3a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0xfa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x3a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8f, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x3b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x3b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x3b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xb6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0x4b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xf6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x98, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x99, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9a, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9b, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9c, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9d, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9e, 0x8c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9f, 0xf4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa6, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa7, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa0, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x6b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xce, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfa, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x000d);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_rm692e5_disable(struct drm_panel *panel)
+{
+ struct visionox_rm692e5 *ctx = to_visionox_rm692e5(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_rm692e5_prepare(struct drm_panel *panel)
+{
+ struct visionox_rm692e5 *ctx = to_visionox_rm692e5(panel);
+ struct drm_dsc_picture_parameter_set pps;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_rm692e5_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ visionox_rm692e5_reset(ctx);
+
+ ret = visionox_rm692e5_on(ctx);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ goto err;
+ }
+
+ drm_dsc_pps_payload_pack(&pps, &ctx->dsc);
+ mipi_dsi_picture_parameter_set_multi(&dsi_ctx, &pps);
+ mipi_dsi_compression_mode_ext_multi(&dsi_ctx, true, MIPI_DSI_COMPRESSION_DSC, 0);
+
+ mipi_dsi_msleep(&dsi_ctx, 28);
+
+ if (dsi_ctx.accum_err < 0) {
+ ret = dsi_ctx.accum_err;
+ goto err;
+ }
+
+ return dsi_ctx.accum_err;
+err:
+ regulator_bulk_disable(ARRAY_SIZE(visionox_rm692e5_supplies),
+ ctx->supplies);
+ return ret;
+}
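
prepare() packs the PPS from ctx->dsc, which is filled in at probe time (past the end of this excerpt). For orientation, a typical drm_dsc_config setup before drm_dsc_pps_payload_pack() looks roughly like the sketch below — the values are illustrative, not this panel's actual parameters:

static void example_fill_dsc(struct drm_dsc_config *dsc)
{
	dsc->dsc_version_major = 0x1;
	dsc->dsc_version_minor = 0x1;
	dsc->slice_height = 40;		/* example values, panel-specific */
	dsc->slice_width = 540;
	dsc->slice_count = 2;
	dsc->bits_per_component = 8;
	dsc->bits_per_pixel = 8 << 4;	/* 8 bpp in 6.4 fixed point */
	dsc->block_pred_enable = true;
}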
+
+static int visionox_rm692e5_unprepare(struct drm_panel *panel)
+{
+ struct visionox_rm692e5 *ctx = to_visionox_rm692e5(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_rm692e5_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode visionox_rm692e5_modes[] = {
+ /* List the highest refresh rate first so it becomes the preferred mode */
+ { /* 120Hz mode */
+ .clock = (1080 + 26 + 39 + 36) * (2400 + 16 + 21 + 16) * 120 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 39,
+ .htotal = 1080 + 26 + 39 + 36,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 16,
+ .vsync_end = 2400 + 16 + 21,
+ .vtotal = 2400 + 16 + 21 + 16,
+ .width_mm = 68,
+ .height_mm = 152,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+ { /* 90Hz mode */
+ .clock = (1080 + 26 + 39 + 36) * (2400 + 16 + 21 + 16) * 90 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 39,
+ .htotal = 1080 + 26 + 39 + 36,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 16,
+ .vsync_end = 2400 + 16 + 21,
+ .vtotal = 2400 + 16 + 21 + 16,
+ .width_mm = 68,
+ .height_mm = 152,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+ { /* 60Hz mode */
+ .clock = (1080 + 26 + 39 + 36) * (2400 + 16 + 21 + 16) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 39,
+ .htotal = 1080 + 26 + 39 + 36,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 16,
+ .vsync_end = 2400 + 16 + 21,
+ .vtotal = 2400 + 16 + 21 + 16,
+ .width_mm = 68,
+ .height_mm = 152,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+static int visionox_rm692e5_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ int count = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(visionox_rm692e5_modes); i++)
+ count += drm_connector_helper_get_modes_fixed(connector,
+ &visionox_rm692e5_modes[i]);
+
+ return count;
+}
+
+static const struct drm_panel_funcs visionox_rm692e5_panel_funcs = {
+ .prepare = visionox_rm692e5_prepare,
+ .unprepare = visionox_rm692e5_unprepare,
+ .disable = visionox_rm692e5_disable,
+ .get_modes = visionox_rm692e5_get_modes,
+};
+
+static int visionox_rm692e5_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int visionox_rm692e5_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static const struct backlight_ops visionox_rm692e5_bl_ops = {
+ .update_status = visionox_rm692e5_bl_update_status,
+ .get_brightness = visionox_rm692e5_bl_get_brightness,
+};
+
+static struct backlight_device *
+visionox_rm692e5_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 2047,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_rm692e5_bl_ops, &props);
+}
+
+static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_rm692e5 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(visionox_rm692e5_supplies),
+ visionox_rm692e5_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = visionox_rm692e5_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ dsi->dsc = &ctx->dsc;
+ ctx->dsc.dsc_version_major = 1;
+ ctx->dsc.dsc_version_minor = 1;
+ ctx->dsc.slice_height = 20;
+ ctx->dsc.slice_width = 540;
+ ctx->dsc.slice_count = 1080 / ctx->dsc.slice_width;
+ ctx->dsc.bits_per_component = 10;
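+ /* bits_per_pixel is in 1/16 bpp units, so 8 << 4 means 8 bpp */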
+ ctx->dsc.bits_per_pixel = 8 << 4;
+ ctx->dsc.block_pred_enable = true;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void visionox_rm692e5_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_rm692e5 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_rm692e5_of_match[] = {
+ { .compatible = "visionox,rm692e5" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_rm692e5_of_match);
+
+static struct mipi_dsi_driver visionox_rm692e5_driver = {
+ .probe = visionox_rm692e5_probe,
+ .remove = visionox_rm692e5_remove,
+ .driver = {
+ .name = "panel-visionox-rm692e5",
+ .of_match_table = visionox_rm692e5_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_rm692e5_driver);
+
+MODULE_AUTHOR("Eugene Lepshy <fekz115@gmail.com>");
+MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>");
+MODULE_DESCRIPTION("DRM driver for the Visionox RM692E5 command mode DSI panel");
+MODULE_LICENSE("GPL");
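
A note on the error-handling pattern used throughout the panel driver
above: the mipi_dsi_*_multi() helpers take a struct mipi_dsi_multi_context
and accumulate the first failure in ->accum_err; once it is set, later
calls become no-ops, so long init sequences need no per-call error checks.
A minimal illustrative sketch (the example_panel_on() name is
hypothetical):

	#include <drm/drm_mipi_dsi.h>

	static int example_panel_on(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

		/* Each call is skipped if an earlier one already failed. */
		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
		mipi_dsi_msleep(&dsi_ctx, 120);
		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

		/* 0 on success, the first negative error code otherwise. */
		return dsi_ctx.accum_err;
	}
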
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 4a9c4afa9ad7..0f52766a3120 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -636,8 +636,8 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev,
u32 ehdr)
{
struct panthor_fw_build_info_hdr hdr;
- char header[9];
- const char git_sha_header[sizeof(header)] = "git_sha: ";
+ static const char git_sha_header[] = "git_sha: ";
+ const int header_len = sizeof(git_sha_header) - 1;
int ret;
ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
@@ -651,8 +651,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev,
return 0;
}
- if (memcmp(git_sha_header, fw->data + hdr.meta_start,
- sizeof(git_sha_header))) {
+ if (memcmp(git_sha_header, fw->data + hdr.meta_start, header_len)) {
/* Not the expected header, this isn't metadata we understand */
return 0;
}
@@ -665,7 +664,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev,
}
drm_info(&ptdev->base, "Firmware git sha: %s\n",
- fw->data + hdr.meta_start + sizeof(git_sha_header));
+ fw->data + hdr.meta_start + header_len);
return 0;
}
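
The panthor_fw change above trades a fixed-size scratch array for the
string literal itself; the subtlety is that sizeof applied to such an
array counts the terminating NUL (10 bytes for "git_sha: "), so the
prefix comparison must use sizeof() - 1. An illustrative sketch
(has_git_sha_prefix() is a hypothetical helper):

	#include <string.h>

	static int has_git_sha_prefix(const char *meta)
	{
		static const char git_sha_header[] = "git_sha: ";

		/* Compare the nine visible characters, not the NUL. */
		return memcmp(meta, git_sha_header,
			      sizeof(git_sha_header) - 1) == 0;
	}
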
diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h
index 22448abde992..6598d96c6d2a 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.h
+++ b/drivers/gpu/drm/panthor/panthor_fw.h
@@ -102,9 +102,9 @@ struct panthor_fw_cs_output_iface {
#define CS_STATUS_BLOCKED_REASON_SB_WAIT 1
#define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2
#define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3
-#define CS_STATUS_BLOCKED_REASON_DEFERRED 5
-#define CS_STATUS_BLOCKED_REASON_RES 6
-#define CS_STATUS_BLOCKED_REASON_FLUSH 7
+#define CS_STATUS_BLOCKED_REASON_DEFERRED 4
+#define CS_STATUS_BLOCKED_REASON_RESOURCE 5
+#define CS_STATUS_BLOCKED_REASON_FLUSH 6
#define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0)
u32 status_blocked_reason;
u32 status_wait_sync_value_hi;
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index db0285ce5812..3bdf61c14264 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -97,6 +97,9 @@ struct panthor_heap_pool {
/** @gpu_contexts: Buffer object containing the GPU heap contexts. */
struct panthor_kernel_bo *gpu_contexts;
+
+ /** @size: Size of all chunks across all heaps in the pool, plus the GPU contexts BO. */
+ atomic_t size;
};
static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
@@ -118,7 +121,7 @@ static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
panthor_get_heap_ctx_offset(pool, id);
}
-static void panthor_free_heap_chunk(struct panthor_vm *vm,
+static void panthor_free_heap_chunk(struct panthor_heap_pool *pool,
struct panthor_heap *heap,
struct panthor_heap_chunk *chunk)
{
@@ -127,12 +130,13 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
heap->chunk_count--;
mutex_unlock(&heap->lock);
+ atomic_sub(heap->chunk_size, &pool->size);
+
panthor_kernel_bo_destroy(chunk->bo);
kfree(chunk);
}
-static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
- struct panthor_vm *vm,
+static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
struct panthor_heap *heap,
bool initial_chunk)
{
@@ -144,7 +148,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
if (!chunk)
return -ENOMEM;
- chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
+ chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
PANTHOR_VM_KERNEL_AUTO_VA);
@@ -180,6 +184,8 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
heap->chunk_count++;
mutex_unlock(&heap->lock);
+ atomic_add(heap->chunk_size, &pool->size);
+
return 0;
err_destroy_bo:
@@ -191,17 +197,16 @@ err_free_chunk:
return ret;
}
-static void panthor_free_heap_chunks(struct panthor_vm *vm,
+static void panthor_free_heap_chunks(struct panthor_heap_pool *pool,
struct panthor_heap *heap)
{
struct panthor_heap_chunk *chunk, *tmp;
list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
- panthor_free_heap_chunk(vm, heap, chunk);
+ panthor_free_heap_chunk(pool, heap, chunk);
}
-static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
- struct panthor_vm *vm,
+static int panthor_alloc_heap_chunks(struct panthor_heap_pool *pool,
struct panthor_heap *heap,
u32 chunk_count)
{
@@ -209,7 +214,7 @@ static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
u32 i;
for (i = 0; i < chunk_count; i++) {
- ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
+ ret = panthor_alloc_heap_chunk(pool, heap, true);
if (ret)
return ret;
}
@@ -226,7 +231,7 @@ panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
if (!heap)
return -EINVAL;
- panthor_free_heap_chunks(pool->vm, heap);
+ panthor_free_heap_chunks(pool, heap);
mutex_destroy(&heap->lock);
kfree(heap);
return 0;
@@ -308,8 +313,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
heap->max_chunks = max_chunks;
heap->target_in_flight = target_in_flight;
- ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
- initial_chunk_count);
+ ret = panthor_alloc_heap_chunks(pool, heap, initial_chunk_count);
if (ret)
goto err_free_heap;
@@ -342,7 +346,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
return id;
err_free_heap:
- panthor_free_heap_chunks(pool->vm, heap);
+ panthor_free_heap_chunks(pool, heap);
mutex_destroy(&heap->lock);
kfree(heap);
@@ -389,6 +393,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
removed = chunk;
list_del(&chunk->node);
heap->chunk_count--;
+ atomic_sub(heap->chunk_size, &pool->size);
break;
}
}
@@ -466,7 +471,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
* further jobs in this queue fail immediately instead of having to
* wait for the job timeout.
*/
- ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
+ ret = panthor_alloc_heap_chunk(pool, heap, false);
if (ret)
goto out_unlock;
@@ -560,6 +565,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
if (ret)
goto err_destroy_pool;
+ atomic_add(pool->gpu_contexts->obj->size, &pool->size);
+
return pool;
err_destroy_pool:
@@ -594,8 +601,10 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
xa_for_each(&pool->xa, i, heap)
drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
- if (!IS_ERR_OR_NULL(pool->gpu_contexts))
+ if (!IS_ERR_OR_NULL(pool->gpu_contexts)) {
+ atomic_sub(pool->gpu_contexts->obj->size, &pool->size);
panthor_kernel_bo_destroy(pool->gpu_contexts);
+ }
/* Reflects the fact the pool has been destroyed. */
pool->vm = NULL;
@@ -605,27 +614,16 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
}
/**
- * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
- * @pool: Pool whose total chunk size to calculate.
+ * panthor_heap_pool_size() - Get a heap pool's total size
+ * @pool: Pool whose total chunk size to return
*
- * This function adds the size of all heap chunks across all heaps in the
- * argument pool. It also adds the size of the gpu contexts kernel bo.
- * It is meant to be used by fdinfo for displaying the size of internal
- * driver BO's that aren't exposed to userspace through a GEM handle.
+ * Returns the aggregated size of all chunks across all heaps in the pool.
*
*/
size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
{
- struct panthor_heap *heap;
- unsigned long i;
- size_t size = 0;
-
- down_read(&pool->lock);
- xa_for_each(&pool->xa, i, heap)
- size += heap->chunk_size * heap->chunk_count;
- up_read(&pool->lock);
-
- size += pool->gpu_contexts->obj->size;
+ if (!pool)
+ return 0;
- return size;
+ return atomic_read(&pool->size);
}
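
The heap changes above replace a lock-and-iterate size calculation with a
counter that is updated wherever chunks are created or destroyed, so the
fdinfo reader needs no locking. A minimal sketch of the pattern, with
illustrative names:

	#include <linux/atomic.h>

	struct example_pool {
		atomic_t size;	/* total bytes across all chunks */
	};

	static void example_chunk_added(struct example_pool *pool, u32 chunk_size)
	{
		atomic_add(chunk_size, &pool->size);
	}

	static void example_chunk_removed(struct example_pool *pool, u32 chunk_size)
	{
		atomic_sub(chunk_size, &pool->size);
	}

	static size_t example_pool_size(struct example_pool *pool)
	{
		/* NULL-safe, mirroring panthor_heap_pool_size() above */
		return pool ? atomic_read(&pool->size) : 0;
	}
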
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 8c6fc587ddc3..12a02e28f50f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1963,13 +1963,7 @@ void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats
xa_lock(&pfile->vms->xa);
xa_for_each(&pfile->vms->xa, i, vm) {
- size_t size = 0;
-
- mutex_lock(&vm->heaps.lock);
- if (vm->heaps.pool)
- size = panthor_heap_pool_size(vm->heaps.pool);
- mutex_unlock(&vm->heaps.lock);
-
+ size_t size = panthor_heap_pool_size(vm->heaps.pool);
stats->resident += size;
if (vm->as.id >= 0)
stats->active += size;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 1a276db095ff..4d31d1967716 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -9,6 +9,7 @@
#include <drm/panthor_drm.h>
#include <linux/build_bug.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -631,10 +632,10 @@ struct panthor_group {
struct panthor_gpu_usage data;
/**
- * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
- * and job post-completion processing function
+ * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
+ * callback and job post-completion processing function
*/
- struct mutex lock;
+ spinlock_t lock;
/** @fdinfo.kbo_sizes: Aggregate size of private kernel BOs held by the group. */
size_t kbo_sizes;
@@ -910,8 +911,6 @@ static void group_release_work(struct work_struct *work)
release_work);
u32 i;
- mutex_destroy(&group->fdinfo.lock);
-
for (i = 0; i < group->queue_count; i++)
group_free_queue(group, group->queues[i]);
@@ -2861,12 +2860,12 @@ static void update_fdinfo_stats(struct panthor_job *job)
struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
- mutex_lock(&group->fdinfo.lock);
- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
- fdinfo->cycles += data->cycles.after - data->cycles.before;
- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
- fdinfo->time += data->time.after - data->time.before;
- mutex_unlock(&group->fdinfo.lock);
+ scoped_guard(spinlock, &group->fdinfo.lock) {
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+ fdinfo->cycles += data->cycles.after - data->cycles.before;
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
+ fdinfo->time += data->time.after - data->time.before;
+ }
}
void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
@@ -2880,12 +2879,11 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
xa_lock(&gpool->xa);
xa_for_each(&gpool->xa, i, group) {
- mutex_lock(&group->fdinfo.lock);
+ guard(spinlock)(&group->fdinfo.lock);
pfile->stats.cycles += group->fdinfo.data.cycles;
pfile->stats.time += group->fdinfo.data.time;
group->fdinfo.data.cycles = 0;
group->fdinfo.data.time = 0;
- mutex_unlock(&group->fdinfo.lock);
}
xa_unlock(&gpool->xa);
}
@@ -3537,7 +3535,7 @@ int panthor_group_create(struct panthor_file *pfile,
mutex_unlock(&sched->reset.lock);
add_group_kbo_sizes(group->ptdev, group);
- mutex_init(&group->fdinfo.lock);
+ spin_lock_init(&group->fdinfo.lock);
return gid;
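
The scheduler changes above also switch the fdinfo lock over to the
<linux/cleanup.h> guards, which release the lock automatically when the
scope ends and so remove the explicit unlock calls (and any
unlock-on-early-return hazards). An illustrative sketch with hypothetical
names:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static void example_account(spinlock_t *lock, u64 *total, u64 sample)
	{
		/* Lock is released automatically at the end of the block. */
		scoped_guard(spinlock, lock) {
			*total += sample;
		}
	}
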
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 05c13102a8cb..d22889fbfa9c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-static void r300_gpu_init(struct radeon_device *rdev)
+/* rs400_gpu_init also calls this! */
+void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 1e00f6b99f94..8f5e07834fcc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev);
*/
extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
+extern void r300_gpu_init(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6f071e61f764..bbd39348a7ab 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -530,7 +530,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* @mc: memory controller structure holding memory informations
* @base: base address at which to put VRAM
*
- * Function will place try to place VRAM at base address provided
+ * Function will try to place VRAM at base address provided
* as parameter (which is so far either PCI aperture address or
* for IGP TOM base address).
*
@@ -557,7 +557,7 @@ int radeon_wb_init(struct radeon_device *rdev)
*
* Note 3: when limiting vram it's safe to overwrite real_vram_size because
* we are not in the case where real_vram_size is inferior to mc_vram_size (i.e.
- * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
+ * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
* ones)
*
* Note 4: IGP TOM addr should be the same as the aperture addr, we don't
@@ -594,7 +594,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
* @rdev: radeon device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
*
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GTT before or after VRAM.
*
* If GTT size is bigger than space left then we adjust GTT size.
* Thus the function will never fail.
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daff61586be5..8ff4f18b51a9 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -840,7 +840,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
- dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
+ dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n",
ring, rdev->fence_drv[ring].gpu_addr);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d6c18fd740ec..13cd0a688a65 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
static void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: is this correct ? */
- r420_pipes_init(rdev);
+ /* Earlier code called r420_pipes_init and then
+ * rs400_mc_wait_for_idle(rdev). The problem is that at least
+ * on my Mobility Radeon Xpress 200M (RC410), which ends up in
+ * this code path, the r420 pipe initialization method reports
+ * num_gb_pipes == 3 while the card seems to have only one pipe.
+ *
+ * The problem showed up as HyperZ glitches, see:
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110897
+ *
+ * Delegating initialization to the r300 code seems to work and
+ * results in proper pipe numbers. The rs400 cards are said to
+ * be r300-like rather than r400-like anyway.
+ */
+ r300_gpu_init(rdev);
+
if (rs400_mc_wait_for_idle(rdev)) {
pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
RREG32(RADEON_MC_STATUS));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6c95575ce109..26197aceb001 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6198,7 +6198,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
if (wptr & RB_OVERFLOW) {
wptr &= ~RB_OVERFLOW;
- /* When a ring buffer overflow happen start parsing interrupt
+ /* When a ring buffer overflow happens, start parsing interrupts
* from the last non-overwritten vector (wptr + 16). Hopefully
* this should allow us to catch up.
*/
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 0844175c37c5..a8265a1bf9ff 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -32,27 +32,32 @@
#include "rockchip_drm_drv.h"
-#define RK3288_GRF_SOC_CON6 0x25c
-#define RK3288_EDP_LCDC_SEL BIT(5)
-#define RK3399_GRF_SOC_CON20 0x6250
-#define RK3399_EDP_LCDC_SEL BIT(5)
-
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100
+#define GRF_REG_FIELD(_reg, _lsb, _msb) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ .valid = true, \
+ }
+
+struct rockchip_grf_reg_field {
+ u32 reg;
+ u32 lsb;
+ u32 msb;
+ bool valid;
+};
+
/**
* struct rockchip_dp_chip_data - splits out the GRF settings of different chips
- * @lcdsel_grf_reg: grf register offset of lcdc select
- * @lcdsel_big: reg value of selecting vop big for eDP
- * @lcdsel_lit: reg value of selecting vop little for eDP
+ * @lcdc_sel: grf register field of lcdc_sel
* @chip_type: specific chip type
+ * @reg: register base address
*/
struct rockchip_dp_chip_data {
- u32 lcdsel_grf_reg;
- u32 lcdsel_big;
- u32 lcdsel_lit;
+ const struct rockchip_grf_reg_field lcdc_sel;
u32 chip_type;
+ u32 reg;
};
struct rockchip_dp_device {
@@ -84,6 +89,26 @@ static struct rockchip_dp_device *pdata_encoder_to_dp(struct analogix_dp_plat_da
return container_of(plat_data, struct rockchip_dp_device, plat_data);
}
+static int rockchip_grf_write(struct regmap *grf, u32 reg, u32 mask, u32 val)
+{
+ return regmap_write(grf, reg, (mask << 16) | (val & mask));
+}
+
+static int rockchip_grf_field_write(struct regmap *grf,
+ const struct rockchip_grf_reg_field *field,
+ u32 val)
+{
+ u32 mask;
+
+ if (!field->valid)
+ return 0;
+
+ mask = GENMASK(field->msb, field->lsb);
+ val <<= field->lsb;
+
+ return rockchip_grf_write(grf, field->reg, mask, val);
+}
+
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
{
reset_control_assert(dp->rst);
@@ -181,7 +206,6 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int ret;
- u32 val;
crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
if (!crtc)
@@ -192,24 +216,19 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
- if (ret < 0)
- return;
-
- if (ret)
- val = dp->data->lcdsel_lit;
- else
- val = dp->data->lcdsel_big;
-
- DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
-
ret = clk_prepare_enable(dp->grfclk);
if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret);
return;
}
- ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ if (ret < 0) {
+ clk_disable_unprepare(dp->grfclk);
+ return;
+ }
+
+ DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret);
if (ret != 0)
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
@@ -379,6 +398,8 @@ static int rockchip_dp_probe(struct platform_device *pdev)
const struct rockchip_dp_chip_data *dp_data;
struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
+ struct resource *res;
+ int i;
int ret;
dp_data = of_device_get_match_data(dev);
@@ -393,9 +414,24 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ i = 0;
+ while (dp_data[i].reg) {
+ if (dp_data[i].reg == res->start) {
+ dp->data = &dp_data[i];
+ break;
+ }
+
+ i++;
+ }
+
+ if (!dp->data)
+ return dev_err_probe(dev, -EINVAL, "no chip-data for %s node\n",
+ dev->of_node->name);
+
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
- dp->data = dp_data;
dp->plat_data.panel = panel;
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
@@ -447,18 +483,22 @@ static int rockchip_dp_resume(struct device *dev)
static DEFINE_RUNTIME_DEV_PM_OPS(rockchip_dp_pm_ops, rockchip_dp_suspend,
rockchip_dp_resume, NULL);
-static const struct rockchip_dp_chip_data rk3399_edp = {
- .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
- .chip_type = RK3399_EDP,
+static const struct rockchip_dp_chip_data rk3399_edp[] = {
+ {
+ .lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5),
+ .chip_type = RK3399_EDP,
+ .reg = 0xff970000,
+ },
+ { /* sentinel */ }
};
-static const struct rockchip_dp_chip_data rk3288_dp = {
- .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
- .chip_type = RK3288_DP,
+static const struct rockchip_dp_chip_data rk3288_dp[] = {
+ {
+ .lcdc_sel = GRF_REG_FIELD(0x025c, 5, 5),
+ .chip_type = RK3288_DP,
+ .reg = 0xff970000,
+ },
+ { /* sentinel */ }
};
static const struct of_device_id rockchip_dp_dt_ids[] = {
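
The new rockchip_grf_write() helper above encodes the Rockchip GRF
"hiword mask" convention: the upper 16 bits of the written word select
which of the lower 16 bits actually change, so a field can be updated
without a read-modify-write cycle. An illustrative sketch:

	#include <linux/bits.h>

	static u32 grf_field_value(u32 msb, u32 lsb, u32 val)
	{
		u32 mask = GENMASK(msb, lsb);

		/* High half: write-enable mask; low half: field value. */
		return (mask << 16) | ((val << lsb) & mask);
	}

	/* grf_field_value(5, 5, 1) == 0x00200020: set bit 5, leave the
	 * other bits of the register untouched. */
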
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index e7a6669c46b0..f737e7d46e66 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -203,7 +203,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- drm_err(hdmi, "Unable to get rockchip,grf\n");
+ dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
@@ -214,7 +214,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
if (IS_ERR(hdmi->ref_clk)) {
ret = PTR_ERR(hdmi->ref_clk);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get reference clock\n");
+ dev_err(hdmi->dev, "failed to get reference clock\n");
return ret;
}
@@ -222,7 +222,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
if (IS_ERR(hdmi->grf_clk)) {
ret = PTR_ERR(hdmi->grf_clk);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get grf clock\n");
+ dev_err(hdmi->dev, "failed to get grf clock\n");
return ret;
}
@@ -302,16 +302,16 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(hdmi->grf_clk);
if (ret < 0) {
- drm_err(hdmi, "failed to enable grfclk %d\n", ret);
+ dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
if (ret != 0)
- drm_err(hdmi, "Could not write to GRF: %d\n", ret);
+ dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(hdmi->grf_clk);
- drm_dbg(hdmi, "vop %s output to hdmi\n", ret ? "LIT" : "BIG");
+ dev_dbg(hdmi->dev, "vop %s output to hdmi\n", ret ? "LIT" : "BIG");
}
static int
@@ -574,7 +574,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "Unable to parse OF data\n");
+ dev_err(hdmi->dev, "Unable to parse OF data\n");
return ret;
}
@@ -582,7 +582,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get phy\n");
+ dev_err(hdmi->dev, "failed to get phy\n");
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index f41151d49fca..3d1dddb34603 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -242,7 +242,7 @@ static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work)
if (drm) {
changed = drm_helper_hpd_irq_event(drm);
if (changed)
- drm_dbg(hdmi, "connector status changed\n");
+ dev_dbg(hdmi->dev, "connector status changed\n");
}
}
@@ -472,7 +472,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
}
}
if (hdmi->port_id < 0) {
- drm_err(hdmi, "Failed to match HDMI port ID\n");
+ dev_err(hdmi->dev, "Failed to match HDMI port ID\n");
return hdmi->port_id;
}
@@ -496,20 +496,20 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- drm_err(hdmi, "Unable to get rockchip,grf\n");
+ dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,vo-grf");
if (IS_ERR(hdmi->vo_regmap)) {
- drm_err(hdmi, "Unable to get rockchip,vo-grf\n");
+ dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n");
return PTR_ERR(hdmi->vo_regmap);
}
ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks);
if (ret < 0) {
- drm_err(hdmi, "Failed to get clocks: %d\n", ret);
+ dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret);
return ret;
}
@@ -517,7 +517,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
GPIOD_OUT_HIGH);
if (IS_ERR(hdmi->enable_gpio)) {
ret = PTR_ERR(hdmi->enable_gpio);
- drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret);
+ dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret);
return ret;
}
@@ -525,7 +525,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get phy: %d\n", ret);
+ dev_err(hdmi->dev, "failed to get phy: %d\n", ret);
return ret;
}
@@ -564,7 +564,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
- drm_err(hdmi, "failed to init bridge connector: %d\n", ret);
+ dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 439edc165ff6..180fad5d49ad 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -484,9 +484,11 @@ static void rockchip_drm_platform_remove(struct platform_device *pdev)
static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
- struct drm_device *drm = platform_get_drvdata(pdev);
+ if (component_master_is_bound(&pdev->dev, &rockchip_drm_ops)) {
+ struct drm_device *drm = platform_get_drvdata(pdev);
- drm_atomic_helper_shutdown(drm);
+ drm_atomic_helper_shutdown(drm);
+ }
}
static const struct of_device_id rockchip_drm_dt_ids[] = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index afc946ead870..d0f5fea15e21 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -33,7 +33,6 @@
#include <drm/drm_vblank.h>
#include <uapi/linux/videodev2.h>
-#include <dt-bindings/soc/rockchip,vop2.h>
#include "rockchip_drm_gem.h"
#include "rockchip_drm_vop2.h"
@@ -102,147 +101,8 @@ enum vop2_afbc_format {
VOP2_AFBC_FMT_INVALID = -1,
};
-union vop2_alpha_ctrl {
- u32 val;
- struct {
- /* [0:1] */
- u32 color_mode:1;
- u32 alpha_mode:1;
- /* [2:3] */
- u32 blend_mode:2;
- u32 alpha_cal_mode:1;
- /* [5:7] */
- u32 factor_mode:3;
- /* [8:9] */
- u32 alpha_en:1;
- u32 src_dst_swap:1;
- u32 reserved:6;
- /* [16:23] */
- u32 glb_alpha:8;
- } bits;
-};
-
-struct vop2_alpha {
- union vop2_alpha_ctrl src_color_ctrl;
- union vop2_alpha_ctrl dst_color_ctrl;
- union vop2_alpha_ctrl src_alpha_ctrl;
- union vop2_alpha_ctrl dst_alpha_ctrl;
-};
-
-struct vop2_alpha_config {
- bool src_premulti_en;
- bool dst_premulti_en;
- bool src_pixel_alpha_en;
- bool dst_pixel_alpha_en;
- u16 src_glb_alpha_value;
- u16 dst_glb_alpha_value;
-};
-
-struct vop2_win {
- struct vop2 *vop2;
- struct drm_plane base;
- const struct vop2_win_data *data;
- struct regmap_field *reg[VOP2_WIN_MAX_REG];
-
- /**
- * @win_id: graphic window id, a cluster may be split into two
- * graphics windows.
- */
- u8 win_id;
- u8 delay;
- u32 offset;
-
- enum drm_plane_type type;
-};
-
-struct vop2_video_port {
- struct drm_crtc crtc;
- struct vop2 *vop2;
- struct clk *dclk;
- struct clk *dclk_src;
- unsigned int id;
- const struct vop2_video_port_data *data;
-
- struct completion dsp_hold_completion;
-
- /**
- * @win_mask: Bitmask of windows attached to the video port;
- */
- u32 win_mask;
-
- struct vop2_win *primary_plane;
- struct drm_pending_vblank_event *event;
-
- unsigned int nlayers;
-};
-
-struct vop2 {
- struct device *dev;
- struct drm_device *drm;
- struct vop2_video_port vps[ROCKCHIP_MAX_CRTC];
-
- const struct vop2_data *data;
- /*
- * Number of windows that are registered as plane, may be less than the
- * total number of hardware windows.
- */
- u32 registered_num_wins;
-
- struct resource *res;
- void __iomem *regs;
- struct regmap *map;
-
- struct regmap *sys_grf;
- struct regmap *vop_grf;
- struct regmap *vo1_grf;
- struct regmap *sys_pmu;
-
- /* physical map length of vop2 register */
- u32 len;
-
- void __iomem *lut_regs;
-
- /* protects crtc enable/disable */
- struct mutex vop2_lock;
-
- int irq;
-
- /*
- * Some global resources are shared between all video ports(crtcs), so
- * we need a ref counter here.
- */
- unsigned int enable_count;
- struct clk *hclk;
- struct clk *aclk;
- struct clk *pclk;
- struct clk *pll_hdmiphy0;
-
- /* optional internal rgb encoder */
- struct rockchip_rgb *rgb;
-
- /* must be put at the end of the struct */
- struct vop2_win win[];
-};
-
#define VOP2_MAX_DCLK_RATE 600000000
-#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
- (x) == ROCKCHIP_VOP2_EP_HDMI1)
-
-#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \
- (x) == ROCKCHIP_VOP2_EP_DP1)
-
-#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \
- (x) == ROCKCHIP_VOP2_EP_EDP1)
-
-#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \
- (x) == ROCKCHIP_VOP2_EP_MIPI1)
-
-#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \
- (x) == ROCKCHIP_VOP2_EP_LVDS1)
-
-#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
-
/*
* bus-format types.
*/
@@ -276,16 +136,6 @@ static DRM_ENUM_NAME_FN(drm_get_bus_format_name, drm_bus_format_enum_list)
static const struct regmap_config vop2_regmap_config;
-static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
-{
- return container_of(crtc, struct vop2_video_port, crtc);
-}
-
-static struct vop2_win *to_vop2_win(struct drm_plane *p)
-{
- return container_of(p, struct vop2_win, base);
-}
-
static void vop2_lock(struct vop2 *vop2)
{
mutex_lock(&vop2->vop2_lock);
@@ -296,44 +146,6 @@ static void vop2_unlock(struct vop2 *vop2)
mutex_unlock(&vop2->vop2_lock);
}
-static void vop2_writel(struct vop2 *vop2, u32 offset, u32 v)
-{
- regmap_write(vop2->map, offset, v);
-}
-
-static void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v)
-{
- regmap_write(vp->vop2->map, vp->data->offset + offset, v);
-}
-
-static u32 vop2_readl(struct vop2 *vop2, u32 offset)
-{
- u32 val;
-
- regmap_read(vop2->map, offset, &val);
-
- return val;
-}
-
-static u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset)
-{
- u32 val;
-
- regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
-
- return val;
-}
-
-static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
-{
- regmap_field_write(win->reg[reg], v);
-}
-
-static bool vop2_cluster_window(const struct vop2_win *win)
-{
- return win->data->feature & WIN_FEATURE_CLUSTER;
-}
-
/*
* Note:
* The write mask function is documented but missing on rk3566/8, writes
@@ -543,7 +355,7 @@ static bool vop2_output_uv_swap(u32 bus_format, u32 output_mode)
static bool vop2_output_rg_swap(struct vop2 *vop2, u32 bus_format)
{
- if (vop2->data->soc_id == 3588) {
+ if (vop2->version == VOP_VERSION_RK3588) {
if (bus_format == MEDIA_BUS_FMT_YUV8_1X24 ||
bus_format == MEDIA_BUS_FMT_YUV10_1X30)
return true;
@@ -596,7 +408,7 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
if (modifier == DRM_FORMAT_MOD_INVALID)
return false;
- if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) {
+ if (vop2->version == VOP_VERSION_RK3568) {
if (vop2_cluster_window(win)) {
if (modifier == DRM_FORMAT_MOD_LINEAR) {
drm_dbg_kms(vop2->drm,
@@ -607,7 +419,7 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
}
if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) {
- if (vop2->data->soc_id == 3588) {
+ if (vop2->version == VOP_VERSION_RK3588) {
if (!rockchip_afbc(plane, modifier)) {
drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n");
return false;
@@ -1006,6 +818,7 @@ static void rk3588_vop2_power_domain_enable_all(struct vop2 *vop2)
static void vop2_enable(struct vop2 *vop2)
{
int ret;
+ u32 version;
ret = pm_runtime_resume_and_get(vop2->dev);
if (ret < 0) {
@@ -1025,10 +838,20 @@ static void vop2_enable(struct vop2 *vop2)
return;
}
+ version = vop2_readl(vop2, RK3568_VERSION_INFO);
+ if (version != vop2->version) {
+ drm_err(vop2->drm, "Hardware version (0x%08x) mismatch\n", version);
+ return;
+ }
+
+ /*
+ * rk3566 shares the same VOP version as rk3568, so
+ * we need to use the soc_id for identification here.
+ */
if (vop2->data->soc_id == 3566)
vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
- if (vop2->data->soc_id == 3588)
+ if (vop2->version == VOP_VERSION_RK3588)
rk3588_vop2_power_domain_enable_all(vop2);
vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
@@ -1109,7 +932,7 @@ static void vop2_vp_dsp_lut_update_enable(struct vop2_video_port *vp)
static inline bool vop2_supports_seamless_gamma_lut_update(struct vop2 *vop2)
{
- return (vop2->data->soc_id != 3566 && vop2->data->soc_id != 3568);
+ return vop2->version != VOP_VERSION_RK3568;
}
static bool vop2_gamma_lut_in_use(struct vop2 *vop2, struct vop2_video_port *vp)
@@ -1451,12 +1274,15 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
- if (vop2->data->soc_id > 3568) {
+ if (vop2->version > VOP_VERSION_RK3568) {
vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id);
vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id);
vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id);
}
+ if (vop2->version >= VOP_VERSION_RK3576)
+ vop2_win_write(win, VOP2_WIN_VP_SEL, vp->id);
+
if (vop2_cluster_window(win))
vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
@@ -1511,7 +1337,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
* this bit is gating disable, we should write 1 to
* disable gating when enable afbc.
*/
- if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
+ if (vop2->version == VOP_VERSION_RK3568)
vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0);
else
vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1);
@@ -1521,10 +1347,15 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
else
vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+ if (vop2->version >= VOP_VERSION_RK3576) {
+ vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET_EN, 1);
+ vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET, yrgb_mst);
+ }
+
transform_offset = vop2_afbc_transform_offset(pstate, half_block_en);
vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
- vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, transform_offset);
+ vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, transform_offset);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_OFFSET, ((src->x1 >> 16) | src->y1));
vop2_win_write(win, VOP2_WIN_AFBC_DSP_OFFSET, (dest->x1 | (dest->y1 << 16)));
vop2_win_write(win, VOP2_WIN_AFBC_PIC_VIR_WIDTH, stride);
@@ -1535,7 +1366,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
} else {
if (vop2_cluster_window(win)) {
vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0);
- vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0);
+ vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, 0);
}
vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
@@ -1554,10 +1385,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
rb_swap = vop2_win_rb_swap(fb->format->format);
vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap);
- if (!vop2_cluster_window(win)) {
- uv_swap = vop2_win_uv_swap(fb->format->format);
- vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
- }
+ uv_swap = vop2_win_uv_swap(fb->format->format);
+ vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
if (fb->format->is_yuv) {
vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4));
@@ -1731,6 +1560,7 @@ static void vop2_dither_setup(struct drm_crtc *crtc, u32 *dsp_ctrl)
static void vop2_post_config(struct drm_crtc *crtc)
{
struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u16 vtotal = mode->crtc_vtotal;
u16 hdisplay = mode->crtc_hdisplay;
@@ -1741,18 +1571,10 @@ static void vop2_post_config(struct drm_crtc *crtc)
u32 top_margin = 100, bottom_margin = 100;
u16 hsize = hdisplay * (left_margin + right_margin) / 200;
u16 vsize = vdisplay * (top_margin + bottom_margin) / 200;
- u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
u16 hact_end, vact_end;
u32 val;
- u32 bg_dly;
- u32 pre_scan_dly;
- bg_dly = vp->data->pre_scan_max_dly[3];
- vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
- FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
-
- pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
- vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+ vop2->ops->setup_bg_dly(vp);
vsize = rounddown(vsize, 2);
hsize = rounddown(hsize, 2);
@@ -1788,347 +1610,6 @@ static void vop2_post_config(struct drm_crtc *crtc)
vop2_vp_write(vp, RK3568_VP_DSP_BG, 0);
}
-static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_crtc *crtc = &vp->crtc;
- u32 die, dip;
-
- die = vop2_readl(vop2, RK3568_DSP_IF_EN);
- dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
-
- switch (id) {
- case ROCKCHIP_VOP2_EP_RGB0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_RGB |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
- if (polflags & POLFLAG_DCLK_INV)
- regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
- else
- regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
- break;
- case ROCKCHIP_VOP2_EP_HDMI0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_EDP0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_EDP |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_MIPI0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_MIPI1:
- die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_LVDS0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_LVDS1:
- die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
- break;
- default:
- drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
- return 0;
- }
-
- dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
-
- vop2_writel(vop2, RK3568_DSP_IF_EN, die);
- vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
-
- return crtc->state->adjusted_mode.crtc_clock * 1000LL;
-}
-
-/*
- * calc the dclk on rk3588
- * the available div of dclk is 1, 2, 4
- */
-static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk)
-{
- if (child_clk * 4 <= max_dclk)
- return child_clk * 4;
- else if (child_clk * 2 <= max_dclk)
- return child_clk * 2;
- else if (child_clk <= max_dclk)
- return child_clk;
- else
- return 0;
-}
-
-/*
- * 4 pixclk/cycle on rk3588
- * RGB/eDP/HDMI: if_pixclk >= dclk_core
- * DP: dp_pixclk = dclk_out <= dclk_core
- * DSI: mipi_pixclk <= dclk_out <= dclk_core
- */
-static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
- int *dclk_core_div, int *dclk_out_div,
- int *if_pixclk_div, int *if_dclk_div)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_crtc *crtc = &vp->crtc;
- struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
- int output_mode = vcstate->output_mode;
- unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */
- unsigned long dclk_core_rate = v_pixclk >> 2;
- unsigned long dclk_rate = v_pixclk;
- unsigned long dclk_out_rate;
- unsigned long if_pixclk_rate;
- int K = 1;
-
- if (vop2_output_if_is_hdmi(id)) {
- /*
- * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate
- * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate
- */
- if (output_mode == ROCKCHIP_OUT_MODE_YUV420) {
- dclk_rate = dclk_rate >> 1;
- K = 2;
- }
-
- /*
- * if_pixclk_rate = (dclk_core_rate << 1) / K;
- * if_dclk_rate = dclk_core_rate / K;
- * *if_pixclk_div = dclk_rate / if_pixclk_rate;
- * *if_dclk_div = dclk_rate / if_dclk_rate;
- */
- *if_pixclk_div = 2;
- *if_dclk_div = 4;
- } else if (vop2_output_if_is_edp(id)) {
- /*
- * edp_pixclk = edp_dclk > dclk_core
- */
- if_pixclk_rate = v_pixclk / K;
- dclk_rate = if_pixclk_rate * K;
- /*
- * *if_pixclk_div = dclk_rate / if_pixclk_rate;
- * *if_dclk_div = *if_pixclk_div;
- */
- *if_pixclk_div = K;
- *if_dclk_div = K;
- } else if (vop2_output_if_is_dp(id)) {
- if (output_mode == ROCKCHIP_OUT_MODE_YUV420)
- dclk_out_rate = v_pixclk >> 3;
- else
- dclk_out_rate = v_pixclk >> 2;
-
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
- if (!dclk_rate) {
- drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
- dclk_out_rate);
- return 0;
- }
- *dclk_out_div = dclk_rate / dclk_out_rate;
- } else if (vop2_output_if_is_mipi(id)) {
- if_pixclk_rate = dclk_core_rate / K;
- /*
- * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4
- */
- dclk_out_rate = if_pixclk_rate;
- /*
- * dclk_rate = N * dclk_core_rate N = (1,2,4 ),
- * we get a little factor here
- */
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
- if (!dclk_rate) {
- drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
- dclk_out_rate);
- return 0;
- }
- *dclk_out_div = dclk_rate / dclk_out_rate;
- /*
- * mipi pixclk == dclk_out
- */
- *if_pixclk_div = 1;
- } else if (vop2_output_if_is_dpi(id)) {
- dclk_rate = v_pixclk;
- }
-
- *dclk_core_div = dclk_rate / dclk_core_rate;
- *if_pixclk_div = ilog2(*if_pixclk_div);
- *if_dclk_div = ilog2(*if_dclk_div);
- *dclk_core_div = ilog2(*dclk_core_div);
- *dclk_out_div = ilog2(*dclk_out_div);
-
- drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n",
- dclk_rate, *if_pixclk_div, *if_dclk_div);
-
- return dclk_rate;
-}
-
-/*
- * MIPI port mux on rk3588:
- * 0: Video Port2
- * 1: Video Port3
- * 3: Video Port 1(MIPI1 only)
- */
-static u32 rk3588_get_mipi_port_mux(int vp_id)
-{
- if (vp_id == 1)
- return 3;
- else if (vp_id == 3)
- return 1;
- else
- return 0;
-}
-
-static u32 rk3588_get_hdmi_pol(u32 flags)
-{
- u32 val;
-
- val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0;
- val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0;
-
- return val;
-}
-
-static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
-{
- struct vop2 *vop2 = vp->vop2;
- int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div;
- unsigned long clock;
- u32 die, dip, div, vp_clk_div, val;
-
- clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div,
- &if_pixclk_div, &if_dclk_div);
- if (!clock)
- return 0;
-
- vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
- vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
-
- die = vop2_readl(vop2, RK3568_DSP_IF_EN);
- dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
- div = vop2_readl(vop2, RK3568_DSP_IF_CTRL);
-
- switch (id) {
- case ROCKCHIP_VOP2_EP_HDMI0:
- div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
- val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
- break;
- case ROCKCHIP_VOP2_EP_HDMI1:
- div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
- val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
- break;
- case ROCKCHIP_VOP2_EP_EDP0:
- div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
- break;
- case ROCKCHIP_VOP2_EP_EDP1:
- div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
- break;
- case ROCKCHIP_VOP2_EP_MIPI0:
- div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX;
- val = rk3588_get_mipi_port_mux(vp->id);
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val);
- break;
- case ROCKCHIP_VOP2_EP_MIPI1:
- div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
- val = rk3588_get_mipi_port_mux(vp->id);
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val);
- break;
- case ROCKCHIP_VOP2_EP_DP0:
- die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_DP0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id);
- dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL;
- dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_DP1:
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
- dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
- dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
- break;
- default:
- drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
- return 0;
- }
-
- dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
-
- vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
- vop2_writel(vop2, RK3568_DSP_IF_EN, die);
- vop2_writel(vop2, RK3568_DSP_IF_CTRL, div);
- vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
-
- return clock;
-}
-
-static unsigned long vop2_set_intf_mux(struct vop2_video_port *vp, int ep_id, u32 polflags)
-{
- struct vop2 *vop2 = vp->vop2;
-
- if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
- return rk3568_set_intf_mux(vp, ep_id, polflags);
- else if (vop2->data->soc_id == 3588)
- return rk3588_set_intf_mux(vp, ep_id, polflags);
- else
- return 0;
-}
-
static int us_to_vertical_line(struct drm_display_mode *mode, int us)
{
return us * mode->clock / mode->htotal / 1000;
@@ -2201,7 +1682,7 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
* process multi(1/2/4/8) pixels per cycle, so the dclk feed by the
* system cru may be the 1/2 or 1/4 of mode->clock.
*/
- clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
+ clock = vop2->ops->setup_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
}
if (!clock) {
@@ -2270,11 +1751,14 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
* Switch to HDMI PHY PLL as DCLK source for display modes up
* to 4K@60Hz, if available, otherwise keep using the system CRU.
*/
- if (vop2->pll_hdmiphy0 && clock <= VOP2_MAX_DCLK_RATE) {
+ if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) {
drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
+ if (!vop2->pll_hdmiphy0)
+ break;
+
if (!vp->dclk_src)
vp->dclk_src = clk_get_parent(vp->dclk);
@@ -2284,6 +1768,20 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
"Could not switch to HDMI0 PHY PLL: %d\n", ret);
break;
}
+
+ if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) {
+ if (!vop2->pll_hdmiphy1)
+ break;
+
+ if (!vp->dclk_src)
+ vp->dclk_src = clk_get_parent(vp->dclk);
+
+ ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1);
+ if (ret < 0)
+ drm_warn(vop2->drm,
+ "Could not switch to HDMI1 PHY PLL: %d\n", ret);
+ break;
+ }
}
}
@@ -2351,454 +1849,13 @@ static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static bool is_opaque(u16 alpha)
-{
- return (alpha >> 8) == 0xff;
-}
-
-static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config,
- struct vop2_alpha *alpha)
-{
- int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1;
- int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1;
- int src_color_mode = alpha_config->src_premulti_en ?
- ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
- int dst_color_mode = alpha_config->dst_premulti_en ?
- ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
-
- alpha->src_color_ctrl.val = 0;
- alpha->dst_color_ctrl.val = 0;
- alpha->src_alpha_ctrl.val = 0;
- alpha->dst_alpha_ctrl.val = 0;
-
- if (!alpha_config->src_pixel_alpha_en)
- alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
- else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en)
- alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX;
- else
- alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
-
- alpha->src_color_ctrl.bits.alpha_en = 1;
-
- if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) {
- alpha->src_color_ctrl.bits.color_mode = src_color_mode;
- alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
- } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) {
- alpha->src_color_ctrl.bits.color_mode = src_color_mode;
- alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE;
- } else {
- alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL;
- alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
- }
- alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8;
- alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
-
- alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
- alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
- alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8;
- alpha->dst_color_ctrl.bits.color_mode = dst_color_mode;
- alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
-
- alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode;
- alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
- alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE;
-
- alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en)
- alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX;
- else
- alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
- alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION;
- alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
-}
-
-static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
-{
- struct vop2_video_port *vp;
- int used_layer = 0;
- int i;
-
- for (i = 0; i < port_id; i++) {
- vp = &vop2->vps[i];
- used_layer += hweight32(vp->win_mask);
- }
-
- return used_layer;
-}
-
-static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
-{
- struct vop2_alpha_config alpha_config;
- struct vop2_alpha alpha;
- struct drm_plane_state *bottom_win_pstate;
- bool src_pixel_alpha_en = false;
- u16 src_glb_alpha_val, dst_glb_alpha_val;
- bool premulti_en = false;
- bool swap = false;
- u32 offset = 0;
-
- /* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
- bottom_win_pstate = main_win->base.state;
- src_glb_alpha_val = 0;
- dst_glb_alpha_val = main_win->base.state->alpha;
-
- if (!bottom_win_pstate->fb)
- return;
-
- alpha_config.src_premulti_en = premulti_en;
- alpha_config.dst_premulti_en = false;
- alpha_config.src_pixel_alpha_en = src_pixel_alpha_en;
- alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */
- alpha_config.src_glb_alpha_value = src_glb_alpha_val;
- alpha_config.dst_glb_alpha_value = dst_glb_alpha_val;
- vop2_parse_alpha(&alpha_config, &alpha);
-
- alpha.src_color_ctrl.bits.src_dst_swap = swap;
-
- switch (main_win->data->phys_id) {
- case ROCKCHIP_VOP2_CLUSTER0:
- offset = 0x0;
- break;
- case ROCKCHIP_VOP2_CLUSTER1:
- offset = 0x10;
- break;
- case ROCKCHIP_VOP2_CLUSTER2:
- offset = 0x20;
- break;
- case ROCKCHIP_VOP2_CLUSTER3:
- offset = 0x30;
- break;
- }
-
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
- alpha.src_color_ctrl.val);
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
- alpha.dst_color_ctrl.val);
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL + offset,
- alpha.src_alpha_ctrl.val);
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL + offset,
- alpha.dst_alpha_ctrl.val);
-}
-
-static void vop2_setup_alpha(struct vop2_video_port *vp)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_framebuffer *fb;
- struct vop2_alpha_config alpha_config;
- struct vop2_alpha alpha;
- struct drm_plane *plane;
- int pixel_alpha_en;
- int premulti_en, gpremulti_en = 0;
- int mixer_id;
- u32 offset;
- bool bottom_layer_alpha_en = false;
- u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;
-
- mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
- alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */
-
- drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
- struct vop2_win *win = to_vop2_win(plane);
-
- if (plane->state->normalized_zpos == 0 &&
- !is_opaque(plane->state->alpha) &&
- !vop2_cluster_window(win)) {
- /*
- * If bottom layer have global alpha effect [except cluster layer,
- * because cluster have deal with bottom layer global alpha value
- * at cluster mix], bottom layer mix need deal with global alpha.
- */
- bottom_layer_alpha_en = true;
- dst_global_alpha = plane->state->alpha;
- }
- }
-
- drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
- struct vop2_win *win = to_vop2_win(plane);
- int zpos = plane->state->normalized_zpos;
-
- /*
- * Need to configure alpha from second layer.
- */
- if (zpos == 0)
- continue;
-
- if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
- premulti_en = 1;
- else
- premulti_en = 0;
-
- plane = &win->base;
- fb = plane->state->fb;
-
- pixel_alpha_en = fb->format->has_alpha;
-
- alpha_config.src_premulti_en = premulti_en;
-
- if (bottom_layer_alpha_en && zpos == 1) {
- gpremulti_en = premulti_en;
- /* Cd = Cs + (1 - As) * Cd * Agd */
- alpha_config.dst_premulti_en = false;
- alpha_config.src_pixel_alpha_en = pixel_alpha_en;
- alpha_config.src_glb_alpha_value = plane->state->alpha;
- alpha_config.dst_glb_alpha_value = dst_global_alpha;
- } else if (vop2_cluster_window(win)) {
- /* Mix output data only have pixel alpha */
- alpha_config.dst_premulti_en = true;
- alpha_config.src_pixel_alpha_en = true;
- alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- } else {
- /* Cd = Cs + (1 - As) * Cd */
- alpha_config.dst_premulti_en = true;
- alpha_config.src_pixel_alpha_en = pixel_alpha_en;
- alpha_config.src_glb_alpha_value = plane->state->alpha;
- alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- }
-
- vop2_parse_alpha(&alpha_config, &alpha);
-
- offset = (mixer_id + zpos - 1) * 0x10;
- vop2_writel(vop2, RK3568_MIX0_SRC_COLOR_CTRL + offset,
- alpha.src_color_ctrl.val);
- vop2_writel(vop2, RK3568_MIX0_DST_COLOR_CTRL + offset,
- alpha.dst_color_ctrl.val);
- vop2_writel(vop2, RK3568_MIX0_SRC_ALPHA_CTRL + offset,
- alpha.src_alpha_ctrl.val);
- vop2_writel(vop2, RK3568_MIX0_DST_ALPHA_CTRL + offset,
- alpha.dst_alpha_ctrl.val);
- }
-
- if (vp->id == 0) {
- if (bottom_layer_alpha_en) {
- /* Transfer pixel alpha to hdr mix */
- alpha_config.src_premulti_en = gpremulti_en;
- alpha_config.dst_premulti_en = true;
- alpha_config.src_pixel_alpha_en = true;
- alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- vop2_parse_alpha(&alpha_config, &alpha);
-
- vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL,
- alpha.src_color_ctrl.val);
- vop2_writel(vop2, RK3568_HDR0_DST_COLOR_CTRL,
- alpha.dst_color_ctrl.val);
- vop2_writel(vop2, RK3568_HDR0_SRC_ALPHA_CTRL,
- alpha.src_alpha_ctrl.val);
- vop2_writel(vop2, RK3568_HDR0_DST_ALPHA_CTRL,
- alpha.dst_alpha_ctrl.val);
- } else {
- vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 0);
- }
- }
-}
-
-static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_plane *plane;
- u32 layer_sel = 0;
- u32 port_sel;
- u8 layer_id;
- u8 old_layer_id;
- u8 layer_sel_id;
- unsigned int ofs;
- u32 ovl_ctrl;
- int i;
- struct vop2_video_port *vp0 = &vop2->vps[0];
- struct vop2_video_port *vp1 = &vop2->vps[1];
- struct vop2_video_port *vp2 = &vop2->vps[2];
- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
-
- ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
- ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
- if (vcstate->yuv_overlay)
- ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
- else
- ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
-
- vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
-
- port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
- port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
-
- if (vp0->nlayers)
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX,
- vp0->nlayers - 1);
- else
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8);
-
- if (vp1->nlayers)
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX,
- (vp0->nlayers + vp1->nlayers - 1));
- else
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8);
-
- if (vp2->nlayers)
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX,
- (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1));
- else
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
-
- layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
-
- ofs = 0;
- for (i = 0; i < vp->id; i++)
- ofs += vop2->vps[i].nlayers;
-
- drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
- struct vop2_win *win = to_vop2_win(plane);
- struct vop2_win *old_win;
-
- layer_id = (u8)(plane->state->normalized_zpos + ofs);
-
- /*
- * Find the layer this win bind in old state.
- */
- for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
- layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
- if (layer_sel_id == win->data->layer_sel_id)
- break;
- }
-
- /*
- * Find the win bind to this layer in old state
- */
- for (i = 0; i < vop2->data->win_size; i++) {
- old_win = &vop2->win[i];
- layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
- if (layer_sel_id == old_win->data->layer_sel_id)
- break;
- }
-
- switch (win->data->phys_id) {
- case ROCKCHIP_VOP2_CLUSTER0:
- port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id);
- break;
- case ROCKCHIP_VOP2_CLUSTER1:
- port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id);
- break;
- case ROCKCHIP_VOP2_CLUSTER2:
- port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id);
- break;
- case ROCKCHIP_VOP2_CLUSTER3:
- port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART0:
- port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART1:
- port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART2:
- port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART3:
- port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id);
- break;
- case ROCKCHIP_VOP2_SMART0:
- port_sel &= ~RK3568_OVL_PORT_SEL__SMART0;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id);
- break;
- case ROCKCHIP_VOP2_SMART1:
- port_sel &= ~RK3568_OVL_PORT_SEL__SMART1;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id);
- break;
- }
-
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id);
- /*
- * When we bind a window from layerM to layerN, we also need to move the old
- * window on layerN to layerM to avoid one window selected by two or more layers.
- */
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id);
- }
-
- vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
- vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
-}
-
-static void vop2_setup_dly_for_windows(struct vop2 *vop2)
-{
- struct vop2_win *win;
- int i = 0;
- u32 cdly = 0, sdly = 0;
-
- for (i = 0; i < vop2->data->win_size; i++) {
- u32 dly;
-
- win = &vop2->win[i];
- dly = win->delay;
-
- switch (win->data->phys_id) {
- case ROCKCHIP_VOP2_CLUSTER0:
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly);
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly);
- break;
- case ROCKCHIP_VOP2_CLUSTER1:
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly);
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly);
- break;
- case ROCKCHIP_VOP2_ESMART0:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly);
- break;
- case ROCKCHIP_VOP2_ESMART1:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
- break;
- case ROCKCHIP_VOP2_SMART0:
- case ROCKCHIP_VOP2_ESMART2:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
- break;
- case ROCKCHIP_VOP2_SMART1:
- case ROCKCHIP_VOP2_ESMART3:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
- break;
- }
- }
-
- vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly);
- vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly);
-}
-
static void vop2_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct vop2_video_port *vp = to_vop2_video_port(crtc);
struct vop2 *vop2 = vp->vop2;
- struct drm_plane *plane;
-
- vp->win_mask = 0;
-
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- struct vop2_win *win = to_vop2_win(plane);
-
- win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT];
- vp->win_mask |= BIT(win->data->phys_id);
-
- if (vop2_cluster_window(win))
- vop2_setup_cluster_alpha(vop2, win);
- }
-
- if (!vp->win_mask)
- return;
-
- vop2_setup_layer_mixer(vp);
- vop2_setup_alpha(vp);
- vop2_setup_dly_for_windows(vop2);
+ vop2->ops->setup_overlay(vp);
}
static void vop2_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -3110,6 +2167,52 @@ static const struct drm_crtc_funcs vop2_crtc_funcs = {
.late_register = vop2_crtc_late_register,
};
+static irqreturn_t rk3576_vp_isr(int irq, void *data)
+{
+ struct vop2_video_port *vp = data;
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ uint32_t irqs;
+ int ret = IRQ_NONE;
+
+ if (!pm_runtime_get_if_in_use(vop2->dev))
+ return IRQ_NONE;
+
+ irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id));
+ vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs);
+
+ if (irqs & VP_INT_DSP_HOLD_VALID) {
+ complete(&vp->dsp_hold_completion);
+ ret = IRQ_HANDLED;
+ }
+
+ if (irqs & VP_INT_FS_FIELD) {
+ drm_crtc_handle_vblank(crtc);
+ spin_lock(&crtc->dev->event_lock);
+ if (vp->event) {
+ u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+
+ if (!(val & BIT(vp->id))) {
+ drm_crtc_send_vblank_event(crtc, vp->event);
+ vp->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ spin_unlock(&crtc->dev->event_lock);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (irqs & VP_INT_POST_BUF_EMPTY) {
+ drm_err_ratelimited(vop2->drm, "POST_BUF_EMPTY irq err at vp%d\n", vp->id);
+ ret = IRQ_HANDLED;
+ }
+
+ pm_runtime_put(vop2->dev);
+
+ return ret;
+}
+
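
The `irqs << 16 | irqs` clear write follows the usual Rockchip write-mask layout (an assumption inferred from how both ISRs use the register): the upper 16 bits of INT_CLR act as a write-enable mask for the lower 16 status bits. A minimal sketch of clearing a single source:

	/* High half: write-enable mask; low half: the bits to clear. */
	vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id),
		    VP_INT_FS_FIELD << 16 | VP_INT_FS_FIELD);
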
static irqreturn_t vop2_isr(int irq, void *data)
{
struct vop2 *vop2 = data;
@@ -3125,41 +2228,43 @@ static irqreturn_t vop2_isr(int irq, void *data)
if (!pm_runtime_get_if_in_use(vop2->dev))
return IRQ_NONE;
- for (i = 0; i < vop2_data->nr_vps; i++) {
- struct vop2_video_port *vp = &vop2->vps[i];
- struct drm_crtc *crtc = &vp->crtc;
- u32 irqs;
+ if (vop2->version < VOP_VERSION_RK3576) {
+ for (i = 0; i < vop2_data->nr_vps; i++) {
+ struct vop2_video_port *vp = &vop2->vps[i];
+ struct drm_crtc *crtc = &vp->crtc;
+ u32 irqs;
- irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id));
- vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs);
+ irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id));
+ vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs);
- if (irqs & VP_INT_DSP_HOLD_VALID) {
- complete(&vp->dsp_hold_completion);
- ret = IRQ_HANDLED;
- }
-
- if (irqs & VP_INT_FS_FIELD) {
- drm_crtc_handle_vblank(crtc);
- spin_lock(&crtc->dev->event_lock);
- if (vp->event) {
- u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+ if (irqs & VP_INT_DSP_HOLD_VALID) {
+ complete(&vp->dsp_hold_completion);
+ ret = IRQ_HANDLED;
+ }
- if (!(val & BIT(vp->id))) {
- drm_crtc_send_vblank_event(crtc, vp->event);
- vp->event = NULL;
- drm_crtc_vblank_put(crtc);
+ if (irqs & VP_INT_FS_FIELD) {
+ drm_crtc_handle_vblank(crtc);
+ spin_lock(&crtc->dev->event_lock);
+ if (vp->event) {
+ u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+
+ if (!(val & BIT(vp->id))) {
+ drm_crtc_send_vblank_event(crtc, vp->event);
+ vp->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
}
- }
- spin_unlock(&crtc->dev->event_lock);
+ spin_unlock(&crtc->dev->event_lock);
- ret = IRQ_HANDLED;
- }
+ ret = IRQ_HANDLED;
+ }
- if (irqs & VP_INT_POST_BUF_EMPTY) {
- drm_err_ratelimited(vop2->drm,
- "POST_BUF_EMPTY irq err at vp%d\n",
- vp->id);
- ret = IRQ_HANDLED;
+ if (irqs & VP_INT_POST_BUF_EMPTY) {
+ drm_err_ratelimited(vop2->drm,
+ "POST_BUF_EMPTY irq err at vp%d\n",
+ vp->id);
+ ret = IRQ_HANDLED;
+ }
}
}
@@ -3213,22 +2318,29 @@ static int vop2_plane_init(struct vop2 *vop2, struct vop2_win *win,
return 0;
}
-static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2)
+/*
+ * On RK3566 these windows don't have an independent
+ * framebuffer. They can only share/mirror the framebuffer
+ * with smart0, esmart0 and cluster0 respectively.
+ * RK3566 shares the same VOP version as RK3568, so we
+ * need to use soc_id to tell them apart here.
+ */
+static bool vop2_is_mirror_win(struct vop2_win *win)
{
- int i;
-
- for (i = 0; i < vop2->data->nr_vps; i++) {
- struct vop2_video_port *vp = &vop2->vps[i];
-
- if (!vp->crtc.port)
- continue;
- if (vp->primary_plane)
- continue;
+ struct vop2 *vop2 = win->vop2;
- return vp;
+ if (vop2->data->soc_id == 3566) {
+ switch (win->data->phys_id) {
+ case ROCKCHIP_VOP2_SMART1:
+ case ROCKCHIP_VOP2_ESMART1:
+ case ROCKCHIP_VOP2_CLUSTER1:
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
}
-
- return NULL;
}
static int vop2_create_crtcs(struct vop2 *vop2)
@@ -3239,7 +2351,9 @@ static int vop2_create_crtcs(struct vop2 *vop2)
struct drm_plane *plane;
struct device_node *port;
struct vop2_video_port *vp;
- int i, nvp, nvps = 0;
+ struct vop2_win *win;
+ u32 possible_crtcs;
+ int i, j, nvp, nvps = 0;
int ret;
for (i = 0; i < vop2_data->nr_vps; i++) {
@@ -3255,10 +2369,9 @@ static int vop2_create_crtcs(struct vop2 *vop2)
snprintf(dclk_name, sizeof(dclk_name), "dclk_vp%d", vp->id);
vp->dclk = devm_clk_get(vop2->dev, dclk_name);
- if (IS_ERR(vp->dclk)) {
- drm_err(vop2->drm, "failed to get %s\n", dclk_name);
- return PTR_ERR(vp->dclk);
- }
+ if (IS_ERR(vp->dclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vp->dclk),
+ "failed to get %s\n", dclk_name);
np = of_graph_get_remote_node(dev->of_node, i, -1);
if (!np) {
@@ -3268,55 +2381,79 @@ static int vop2_create_crtcs(struct vop2 *vop2)
of_node_put(np);
port = of_graph_get_port_by_id(dev->of_node, i);
- if (!port) {
- drm_err(vop2->drm, "no port node found for video_port%d\n", i);
- return -ENOENT;
- }
-
+ if (!port)
+ return dev_err_probe(drm->dev, -ENOENT,
+ "no port node found for video_port%d\n", i);
vp->crtc.port = port;
nvps++;
}
nvp = 0;
- for (i = 0; i < vop2->registered_num_wins; i++) {
- struct vop2_win *win = &vop2->win[i];
- u32 possible_crtcs = 0;
-
- if (vop2->data->soc_id == 3566) {
- /*
- * On RK3566 these windows don't have an independent
- * framebuffer. They share the framebuffer with smart0,
- * esmart0 and cluster0 respectively.
- */
- switch (win->data->phys_id) {
- case ROCKCHIP_VOP2_SMART1:
- case ROCKCHIP_VOP2_ESMART1:
- case ROCKCHIP_VOP2_CLUSTER1:
+ /* Register a primary plane for every crtc */
+ for (i = 0; i < vop2_data->nr_vps; i++) {
+ vp = &vop2->vps[i];
+
+ if (!vp->crtc.port)
+ continue;
+
+ for (j = 0; j < vop2->registered_num_wins; j++) {
+ win = &vop2->win[j];
+
+ /* Already registered as a primary plane */
+ if (win->base.type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ /* Skip this win if it cannot be attached to this VP */
+ if (!(win->data->possible_vp_mask & BIT(vp->id)))
+ continue;
+
+ if (vop2_is_mirror_win(win))
continue;
- }
- }
- if (win->type == DRM_PLANE_TYPE_PRIMARY) {
- vp = find_vp_without_primary(vop2);
- if (vp) {
+ if (win->type == DRM_PLANE_TYPE_PRIMARY) {
possible_crtcs = BIT(nvp);
vp->primary_plane = win;
+ ret = vop2_plane_init(vop2, win, possible_crtcs);
+ if (ret)
+ return dev_err_probe(drm->dev, ret,
+ "failed to init primary plane %s\n",
+ win->data->name);
nvp++;
- } else {
- /* change the unused primary window to overlay window */
- win->type = DRM_PLANE_TYPE_OVERLAY;
+ break;
}
}
+ }
- if (win->type == DRM_PLANE_TYPE_OVERLAY)
- possible_crtcs = (1 << nvps) - 1;
+ /* Register all unused windows as overlay planes */
+ for (i = 0; i < vop2->registered_num_wins; i++) {
+ win = &vop2->win[i];
- ret = vop2_plane_init(vop2, win, possible_crtcs);
- if (ret) {
- drm_err(vop2->drm, "failed to init plane %s: %d\n",
- win->data->name, ret);
- return ret;
+ /* Already registered as a primary plane */
+ if (win->base.type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ if (vop2_is_mirror_win(win))
+ continue;
+
+ win->type = DRM_PLANE_TYPE_OVERLAY;
+
+ possible_crtcs = 0;
+ nvp = 0;
+ for (j = 0; j < vop2_data->nr_vps; j++) {
+ vp = &vop2->vps[j];
+
+ if (!vp->crtc.port)
+ continue;
+
+ if (win->data->possible_vp_mask & BIT(vp->id))
+ possible_crtcs |= BIT(nvp);
+ nvp++;
}
+
+ ret = vop2_plane_init(vop2, win, possible_crtcs);
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "failed to init overlay plane %s\n",
+ win->data->name);
}
for (i = 0; i < vop2_data->nr_vps; i++) {
@@ -3330,10 +2467,9 @@ static int vop2_create_crtcs(struct vop2 *vop2)
ret = drm_crtc_init_with_planes(drm, &vp->crtc, plane, NULL,
&vop2_crtc_funcs,
"video_port%d", vp->id);
- if (ret) {
- drm_err(vop2->drm, "crtc init for video_port%d failed\n", i);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(drm->dev, ret,
+ "crtc init for video_port%d failed\n", i);
drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs);
if (vop2->lut_regs) {
@@ -3400,184 +2536,26 @@ static int vop2_find_rgb_encoder(struct vop2 *vop2)
return -ENOENT;
}
-static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
- [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0),
- [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5),
- [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14),
- [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18),
- [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31),
- [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31),
- [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31),
- [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31),
- [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31),
- [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19),
- [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15),
- [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31),
- [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
- [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
- [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
- [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
- [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
- /* RK3588 only, reserved bit on rk3568*/
- [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
-
- /* Scale */
- [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
- [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31),
- [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15),
- [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13),
- [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3),
- [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28),
- [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29),
-
- /* cluster regs */
- [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1),
- [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0),
- [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7),
-
- /* afbc regs */
- [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6),
- [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9),
- [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10),
- [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4),
- [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7),
- [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8),
- [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31),
- [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31),
- [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15),
- [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31),
- [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31),
- [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31),
- [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET, 0, 31),
- [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0),
- [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1),
- [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2),
- [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3),
- [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff },
- [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff },
- [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
- [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
- [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff },
- [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff },
-};
-
-static int vop2_cluster_init(struct vop2_win *win)
-{
- struct vop2 *vop2 = win->vop2;
- struct reg_field *cluster_regs;
- int ret, i;
-
- cluster_regs = kmemdup(vop2_cluster_regs, sizeof(vop2_cluster_regs),
- GFP_KERNEL);
- if (!cluster_regs)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(vop2_cluster_regs); i++)
- if (cluster_regs[i].reg != 0xffffffff)
- cluster_regs[i].reg += win->offset;
-
- ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg,
- cluster_regs,
- ARRAY_SIZE(vop2_cluster_regs));
-
- kfree(cluster_regs);
-
- return ret;
-};
-
-static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
- [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
- [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5),
- [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12),
- [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14),
- [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16),
- [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31),
- [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31),
- [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28),
- [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31),
- [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31),
- [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17),
- [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15),
- [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31),
- [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0),
- [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1),
- [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3),
- [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
- [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
- [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
- [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
- [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
- /* RK3588 only, reserved register on rk3568 */
- [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
-
- /* Scale */
- [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
- [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31),
- [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15),
- [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31),
- [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1),
- [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3),
- [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5),
- [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7),
- [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9),
- [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11),
- [VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13),
- [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15),
- [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17),
- [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8),
- [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9),
- [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10),
- [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11),
- [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff },
- [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff },
- [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff },
-};
-
-static int vop2_esmart_init(struct vop2_win *win)
+static int vop2_regmap_init(struct vop2_win *win, const struct reg_field *regs,
+ int nr_regs)
{
struct vop2 *vop2 = win->vop2;
- struct reg_field *esmart_regs;
- int ret, i;
-
- esmart_regs = kmemdup(vop2_esmart_regs, sizeof(vop2_esmart_regs),
- GFP_KERNEL);
- if (!esmart_regs)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(vop2_esmart_regs); i++)
- if (esmart_regs[i].reg != 0xffffffff)
- esmart_regs[i].reg += win->offset;
+ int i;
- ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg,
- esmart_regs,
- ARRAY_SIZE(vop2_esmart_regs));
+ for (i = 0; i < nr_regs; i++) {
+ const struct reg_field field = {
+ .reg = (regs[i].reg != 0xffffffff) ?
+ regs[i].reg + win->offset : regs[i].reg,
+ .lsb = regs[i].lsb,
+ .msb = regs[i].msb
+ };
- kfree(esmart_regs);
+ win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field);
+ if (IS_ERR(win->reg[i]))
+ return PTR_ERR(win->reg[i]);
+ }
- return ret;
+ return 0;
};
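
vop2_regmap_init() consumes per-variant reg_field tables in which .reg == 0xffffffff marks a field the window type does not implement (the same sentinel the removed static tables used); every other entry is relocated by win->offset. A minimal sketch of such a table, with a hypothetical name:

	/* Hypothetical table; only the sentinel convention is the point. */
	static const struct reg_field example_win_regs[VOP2_WIN_MAX_REG] = {
		[VOP2_WIN_ENABLE]  = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
		[VOP2_WIN_XMIRROR] = { .reg = 0xffffffff }, /* not implemented */
	};

	ret = vop2_regmap_init(win, example_win_regs, ARRAY_SIZE(example_win_regs));
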
static int vop2_win_init(struct vop2 *vop2)
@@ -3596,9 +2574,11 @@ static int vop2_win_init(struct vop2 *vop2)
win->win_id = i;
win->vop2 = vop2;
if (vop2_cluster_window(win))
- ret = vop2_cluster_init(win);
+ ret = vop2_regmap_init(win, vop2->data->cluster_reg,
+ vop2->data->nr_cluster_regs);
else
- ret = vop2_esmart_init(win);
+ ret = vop2_regmap_init(win, vop2->data->smart_reg,
+ vop2->data->nr_smart_regs);
if (ret)
return ret;
}
@@ -3655,15 +2635,16 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
vop2->dev = dev;
vop2->data = vop2_data;
+ vop2->ops = vop2_data->ops;
+ vop2->version = vop2_data->version;
vop2->drm = drm;
dev_set_drvdata(dev, vop2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vop");
- if (!res) {
- drm_err(vop2->drm, "failed to get vop2 register byname\n");
- return -EINVAL;
- }
+ if (!res)
+ return dev_err_probe(drm->dev, -EINVAL,
+ "failed to get vop2 register byname\n");
vop2->res = res;
vop2->regs = devm_ioremap_resource(dev, res);
@@ -3688,56 +2669,59 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_GRF) {
vop2->sys_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
if (IS_ERR(vop2->sys_grf))
- return dev_err_probe(dev, PTR_ERR(vop2->sys_grf), "cannot get sys_grf");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_grf),
+ "cannot get sys_grf\n");
}
if (vop2_data->feature & VOP2_FEATURE_HAS_VOP_GRF) {
vop2->vop_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vop-grf");
if (IS_ERR(vop2->vop_grf))
- return dev_err_probe(dev, PTR_ERR(vop2->vop_grf), "cannot get vop_grf");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->vop_grf),
+ "cannot get vop_grf\n");
}
if (vop2_data->feature & VOP2_FEATURE_HAS_VO1_GRF) {
vop2->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo1-grf");
if (IS_ERR(vop2->vo1_grf))
- return dev_err_probe(dev, PTR_ERR(vop2->vo1_grf), "cannot get vo1_grf");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->vo1_grf),
+ "cannot get vo1_grf\n");
}
if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_PMU) {
vop2->sys_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,pmu");
if (IS_ERR(vop2->sys_pmu))
- return dev_err_probe(dev, PTR_ERR(vop2->sys_pmu), "cannot get sys_pmu");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_pmu),
+ "cannot get sys_pmu\n");
}
vop2->hclk = devm_clk_get(vop2->dev, "hclk");
- if (IS_ERR(vop2->hclk)) {
- drm_err(vop2->drm, "failed to get hclk source\n");
- return PTR_ERR(vop2->hclk);
- }
+ if (IS_ERR(vop2->hclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->hclk),
+ "failed to get hclk source\n");
vop2->aclk = devm_clk_get(vop2->dev, "aclk");
- if (IS_ERR(vop2->aclk)) {
- drm_err(vop2->drm, "failed to get aclk source\n");
- return PTR_ERR(vop2->aclk);
- }
+ if (IS_ERR(vop2->aclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->aclk),
+ "failed to get aclk source\n");
vop2->pclk = devm_clk_get_optional(vop2->dev, "pclk_vop");
- if (IS_ERR(vop2->pclk)) {
- drm_err(vop2->drm, "failed to get pclk source\n");
- return PTR_ERR(vop2->pclk);
- }
+ if (IS_ERR(vop2->pclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->pclk),
+ "failed to get pclk source\n");
vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0");
- if (IS_ERR(vop2->pll_hdmiphy0)) {
- drm_err(vop2->drm, "failed to get pll_hdmiphy0\n");
- return PTR_ERR(vop2->pll_hdmiphy0);
- }
+ if (IS_ERR(vop2->pll_hdmiphy0))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy0),
+ "failed to get pll_hdmiphy0\n");
+
+ vop2->pll_hdmiphy1 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy1");
+ if (IS_ERR(vop2->pll_hdmiphy1))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy1),
+ "failed to get pll_hdmiphy1\n");
vop2->irq = platform_get_irq(pdev, 0);
- if (vop2->irq < 0) {
- drm_err(vop2->drm, "cannot find irq for vop2\n");
- return vop2->irq;
- }
+ if (vop2->irq < 0)
+ return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n");
mutex_init(&vop2->vop2_lock);
@@ -3749,6 +2733,30 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
+ if (vop2->version >= VOP_VERSION_RK3576) {
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, drm) {
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ int vp_irq;
+ const char *irq_name = devm_kasprintf(dev, GFP_KERNEL, "vp%d", vp->id);
+
+ if (!irq_name)
+ return -ENOMEM;
+
+ vp_irq = platform_get_irq_byname(pdev, irq_name);
+ if (vp_irq < 0)
+ return dev_err_probe(drm->dev, vp_irq,
+ "cannot find irq for vop2 vp%d\n", vp->id);
+
+ ret = devm_request_irq(dev, vp_irq, rk3576_vp_isr, IRQF_SHARED, irq_name,
+ vp);
+ if (ret)
+ return dev_err_probe(drm->dev, ret,
+ "request irq for vop2 vp%d failed\n", vp->id);
+ }
+ }
+
ret = vop2_find_rgb_encoder(vop2);
if (ret >= 0) {
vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 29cc7fb8f6d8..680bedbb770e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -9,9 +9,19 @@
#include <linux/regmap.h>
#include <drm/drm_modes.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
+#define VOP2_VERSION(major, minor, build) ((major) << 24 | (minor) << 16 | (build))
+
+/* The VOP version of a newer SoC is greater than that of an older one */
+#define VOP_VERSION_RK3568 VOP2_VERSION(0x40, 0x15, 0x8023)
+#define VOP_VERSION_RK3588 VOP2_VERSION(0x40, 0x17, 0x6786)
+#define VOP_VERSION_RK3528 VOP2_VERSION(0x50, 0x17, 0x1263)
+#define VOP_VERSION_RK3562 VOP2_VERSION(0x50, 0x17, 0x4350)
+#define VOP_VERSION_RK3576 VOP2_VERSION(0x50, 0x19, 0x9765)
+
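
The packing makes hardware generations comparable as plain integers; for example, with the values above:

	VOP2_VERSION(0x40, 0x15, 0x8023)	/* 0x40158023, RK3568 */
	VOP2_VERSION(0x50, 0x19, 0x9765)	/* 0x50199765, RK3576 */

	/* so checks like this one select RK3576 and anything newer: */
	if (vop2->version >= VOP_VERSION_RK3576)
		/* RK3576 or a later design */;
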
#define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0)
#define VOP2_FEATURE_HAS_SYS_GRF BIT(0)
@@ -34,6 +44,13 @@ enum win_dly_mode {
VOP2_DLY_MODE_MAX,
};
+enum vop2_dly_module {
+ VOP2_DLY_WIN, /** Win delay cycle for this VP */
+ VOP2_DLY_LAYER_MIX, /** Layer Mix delay cycle for this VP */
+ VOP2_DLY_HDR_MIX, /** HDR delay cycle for this VP */
+ VOP2_DLY_MAX,
+};
+
enum vop2_scale_up_mode {
VOP2_SCALE_UP_NRST_NBOR,
VOP2_SCALE_UP_BIL,
@@ -58,6 +75,23 @@ enum vop2_scale_down_mode {
#define VOP2_PD_DSC_4K BIT(6)
#define VOP2_PD_ESMART BIT(7)
+#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
+ (x) == ROCKCHIP_VOP2_EP_HDMI1)
+
+#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \
+ (x) == ROCKCHIP_VOP2_EP_DP1)
+
+#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \
+ (x) == ROCKCHIP_VOP2_EP_EDP1)
+
+#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \
+ (x) == ROCKCHIP_VOP2_EP_MIPI1)
+
+#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \
+ (x) == ROCKCHIP_VOP2_EP_LVDS1)
+
+#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+
enum vop2_win_regs {
VOP2_WIN_ENABLE,
VOP2_WIN_FORMAT,
@@ -113,16 +147,22 @@ enum vop2_win_regs {
VOP2_WIN_AFBC_UV_SWAP,
VOP2_WIN_AFBC_AUTO_GATING_EN,
VOP2_WIN_AFBC_BLOCK_SPLIT_EN,
+ VOP2_WIN_AFBC_PLD_OFFSET_EN,
VOP2_WIN_AFBC_PIC_VIR_WIDTH,
VOP2_WIN_AFBC_TILE_NUM,
VOP2_WIN_AFBC_PIC_OFFSET,
VOP2_WIN_AFBC_PIC_SIZE,
VOP2_WIN_AFBC_DSP_OFFSET,
- VOP2_WIN_AFBC_TRANSFORM_OFFSET,
+ VOP2_WIN_AFBC_PLD_OFFSET,
+ VOP2_WIN_TRANSFORM_OFFSET,
VOP2_WIN_AFBC_HDR_PTR,
VOP2_WIN_AFBC_HALF_BLOCK_EN,
VOP2_WIN_AFBC_ROTATE_270,
VOP2_WIN_AFBC_ROTATE_90,
+
+ VOP2_WIN_VP_SEL,
+ VOP2_WIN_DLY_NUM,
+
VOP2_WIN_MAX_REG,
};
@@ -140,6 +180,7 @@ struct vop2_win_data {
unsigned int phys_id;
u32 base;
+ u32 possible_vp_mask;
enum drm_plane_type type;
u32 nformats;
@@ -148,9 +189,9 @@ struct vop2_win_data {
const unsigned int supported_rotations;
/**
- * @layer_sel_id: defined by register OVERLAY_LAYER_SEL of VOP2
+ * @layer_sel_id: defined by register OVERLAY_LAYER_SEL or PORTn_LAYER_SEL
*/
- unsigned int layer_sel_id;
+ unsigned int layer_sel_id[ROCKCHIP_MAX_CRTC];
uint64_t feature;
uint8_t axi_bus_id;
@@ -162,6 +203,23 @@ struct vop2_win_data {
const u8 dly[VOP2_DLY_MODE_MAX];
};
+struct vop2_win {
+ struct vop2 *vop2;
+ struct drm_plane base;
+ const struct vop2_win_data *data;
+ struct regmap_field *reg[VOP2_WIN_MAX_REG];
+
+ /**
+ * @win_id: graphics window id; a cluster may be split into two
+ * graphics windows.
+ */
+ u8 win_id;
+ u8 delay;
+ u32 offset;
+
+ enum drm_plane_type type;
+};
+
struct vop2_video_port_data {
unsigned int id;
u32 feature;
@@ -170,22 +228,116 @@ struct vop2_video_port_data {
struct vop_rect max_output;
const u8 pre_scan_max_dly[4];
unsigned int offset;
+ /**
+ * @pixel_rate: pixel per cycle
+ */
+ u8 pixel_rate;
+};
+
+struct vop2_video_port {
+ struct drm_crtc crtc;
+ struct vop2 *vop2;
+ struct clk *dclk;
+ struct clk *dclk_src;
+ unsigned int id;
+ const struct vop2_video_port_data *data;
+
+ struct completion dsp_hold_completion;
+
+ /**
+ * @win_mask: Bitmask of windows attached to the video port.
+ */
+ u32 win_mask;
+
+ struct vop2_win *primary_plane;
+ struct drm_pending_vblank_event *event;
+
+ unsigned int nlayers;
+};
+
+/**
+ * struct vop2_ops - helper operations for vop2 hardware
+ *
+ * These hooks are used by the common part of the vop2 driver to
+ * implement the proper behaviour of different variants.
+ */
+struct vop2_ops {
+ unsigned long (*setup_intf_mux)(struct vop2_video_port *vp, int ep_id, u32 polflags);
+ void (*setup_bg_dly)(struct vop2_video_port *vp);
+ void (*setup_overlay)(struct vop2_video_port *vp);
};
struct vop2_data {
u8 nr_vps;
u64 feature;
+ u32 version;
+ const struct vop2_ops *ops;
const struct vop2_win_data *win;
const struct vop2_video_port_data *vp;
+ const struct reg_field *cluster_reg;
+ const struct reg_field *smart_reg;
const struct vop2_regs_dump *regs_dump;
struct vop_rect max_input;
struct vop_rect max_output;
+ unsigned int nr_cluster_regs;
+ unsigned int nr_smart_regs;
unsigned int win_size;
unsigned int regs_dump_size;
unsigned int soc_id;
};
+struct vop2 {
+ u32 version;
+ struct device *dev;
+ struct drm_device *drm;
+ struct vop2_video_port vps[ROCKCHIP_MAX_CRTC];
+
+ const struct vop2_data *data;
+ const struct vop2_ops *ops;
+ /*
+ * Number of windows registered as planes; may be less than the
+ * total number of hardware windows.
+ */
+ u32 registered_num_wins;
+
+ struct resource *res;
+ void __iomem *regs;
+ struct regmap *map;
+
+ struct regmap *sys_grf;
+ struct regmap *vop_grf;
+ struct regmap *vo1_grf;
+ struct regmap *sys_pmu;
+
+ /* physical map length of vop2 register */
+ u32 len;
+
+ void __iomem *lut_regs;
+
+ /* protects crtc enable/disable */
+ struct mutex vop2_lock;
+
+ int irq;
+
+ /*
+ * Some global resources are shared between all video ports(crtcs), so
+ * we need a ref counter here.
+ */
+ unsigned int enable_count;
+ struct clk *hclk;
+ struct clk *aclk;
+ struct clk *pclk;
+ struct clk *pll_hdmiphy0;
+ struct clk *pll_hdmiphy1;
+
+ /* optional internal rgb encoder */
+ struct rockchip_rgb *rgb;
+
+ /* must be put at the end of the struct */
+ struct vop2_win win[];
+};
+
/* interrupt define */
#define FS_NEW_INTR BIT(4)
#define ADDR_SAME_INTR BIT(5)
@@ -240,10 +392,13 @@ enum dst_factor_mode {
#define RK3568_REG_CFG_DONE 0x000
#define RK3568_VERSION_INFO 0x004
#define RK3568_SYS_AUTO_GATING_CTRL 0x008
+#define RK3576_SYS_MMU_CTRL_IMD 0x020
#define RK3568_SYS_AXI_LUT_CTRL 0x024
#define RK3568_DSP_IF_EN 0x028
+#define RK3576_SYS_PORT_CTRL_IMD 0x028
#define RK3568_DSP_IF_CTRL 0x02c
#define RK3568_DSP_IF_POL 0x030
+#define RK3576_SYS_CLUSTER_PD_CTRL_IMD 0x030
#define RK3588_SYS_PD_CTRL 0x034
#define RK3568_WB_CTRL 0x40
#define RK3568_WB_XSCAL_FACTOR 0x44
@@ -263,6 +418,55 @@ enum dst_factor_mode {
#define RK3568_VP_INT_CLR(vp) (0xA4 + (vp) * 0x10)
#define RK3568_VP_INT_STATUS(vp) (0xA8 + (vp) * 0x10)
#define RK3568_VP_INT_RAW_STATUS(vp) (0xAC + (vp) * 0x10)
+#define RK3576_WB_CTRL 0x100
+#define RK3576_WB_XSCAL_FACTOR 0x104
+#define RK3576_WB_YRGB_MST 0x108
+#define RK3576_WB_CBR_MST 0x10C
+#define RK3576_WB_VIR_STRIDE 0x110
+#define RK3576_WB_TIMEOUT_CTRL 0x114
+#define RK3576_MIPI0_IF_CTRL 0x180
+#define RK3576_HDMI0_IF_CTRL 0x184
+#define RK3576_EDP0_IF_CTRL 0x188
+#define RK3576_DP0_IF_CTRL 0x18C
+#define RK3576_RGB_IF_CTRL 0x194
+#define RK3576_DP1_IF_CTRL 0x1A4
+#define RK3576_DP2_IF_CTRL 0x1B0
+
+/* Extra OVL register definition */
+#define RK3576_SYS_EXTRA_ALPHA_CTRL 0x500
+#define RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL 0x530
+#define RK3576_CLUSTER0_MIX_DST_COLOR_CTRL 0x534
+#define RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL 0x538
+#define RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL 0x53c
+#define RK3576_CLUSTER1_MIX_SRC_COLOR_CTRL 0x540
+#define RK3576_CLUSTER1_MIX_DST_COLOR_CTRL 0x544
+#define RK3576_CLUSTER1_MIX_SRC_ALPHA_CTRL 0x548
+#define RK3576_CLUSTER1_MIX_DST_ALPHA_CTRL 0x54c
+
+/* OVL registers for Video Port definition */
+#define RK3576_OVL_CTRL(vp) (0x600 + (vp) * 0x100)
+#define RK3576_OVL_LAYER_SEL(vp) (0x604 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp) (0x620 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_DST_COLOR_CTRL(vp) (0x624 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp) (0x628 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp) (0x62C + (vp) * 0x100)
+#define RK3576_OVL_MIX1_SRC_COLOR_CTRL(vp) (0x630 + (vp) * 0x100)
+#define RK3576_OVL_MIX1_DST_COLOR_CTRL(vp) (0x634 + (vp) * 0x100)
+#define RK3576_OVL_MIX1_SRC_ALPHA_CTRL(vp) (0x638 + (vp) * 0x100)
+#define RK3576_OVL_MIX1_DST_ALPHA_CTRL(vp) (0x63C + (vp) * 0x100)
+#define RK3576_OVL_MIX2_SRC_COLOR_CTRL(vp) (0x640 + (vp) * 0x100)
+#define RK3576_OVL_MIX2_DST_COLOR_CTRL(vp) (0x644 + (vp) * 0x100)
+#define RK3576_OVL_MIX2_SRC_ALPHA_CTRL(vp) (0x648 + (vp) * 0x100)
+#define RK3576_OVL_MIX2_DST_ALPHA_CTRL(vp) (0x64C + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_SRC_COLOR_CTRL(vp) (0x650 + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_DST_COLOR_CTRL(vp) (0x654 + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_SRC_ALPHA_CTRL(vp) (0x658 + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_DST_ALPHA_CTRL(vp) (0x65C + (vp) * 0x100)
+#define RK3576_OVL_HDR_SRC_COLOR_CTRL(vp) (0x660 + (vp) * 0x100)
+#define RK3576_OVL_HDR_DST_COLOR_CTRL(vp) (0x664 + (vp) * 0x100)
+#define RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp) (0x668 + (vp) * 0x100)
+#define RK3576_OVL_HDR_DST_ALPHA_CTRL(vp) (0x66C + (vp) * 0x100)
+#define RK3576_OVL_BG_MIX_CTRL(vp) (0x670 + (vp) * 0x100)
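
Unlike the single shared OVL block used on RK3568/RK3588, these RK3576 OVL registers are instanced per video port with a 0x100 stride, so the same macro addresses a different block for each VP:

	RK3576_OVL_CTRL(0)	/* 0x600 */
	RK3576_OVL_CTRL(1)	/* 0x700 */
	RK3576_OVL_LAYER_SEL(2)	/* 0x804 */
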
/* Video Port registers definition */
#define RK3568_VP0_CTRL_BASE 0x0C00
@@ -335,7 +539,7 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_WIN_DSP_INFO 0x24
#define RK3568_CLUSTER_WIN_DSP_ST 0x28
#define RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB 0x30
-#define RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET 0x3C
+#define RK3568_CLUSTER_WIN_TRANSFORM_OFFSET 0x3C
#define RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL 0x50
#define RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE 0x54
#define RK3568_CLUSTER_WIN_AFBCD_HDR_PTR 0x58
@@ -345,7 +549,11 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET 0x68
#define RK3568_CLUSTER_WIN_AFBCD_CTRL 0x6C
+#define RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET 0x78
+
#define RK3568_CLUSTER_CTRL 0x100
+#define RK3576_CLUSTER_PORT_SEL_IMD 0x1F4
+#define RK3576_CLUSTER_DLY_NUM 0x1F8
/* (E)smart register definition, offset relative to window base */
#define RK3568_SMART_CTRL0 0x00
@@ -396,6 +604,9 @@ enum dst_factor_mode {
#define RK3568_SMART_REGION3_SCL_FACTOR_CBR 0xC8
#define RK3568_SMART_REGION3_SCL_OFFSET 0xCC
#define RK3568_SMART_COLOR_KEY_CTRL 0xD0
+#define RK3576_SMART_ALPHA_MAP 0xD8
+#define RK3576_SMART_PORT_SEL_IMD 0xF4
+#define RK3576_SMART_DLY_NUM 0xF8
/* HDR register definition */
#define RK3568_HDR_LUT_CTRL 0x2000
@@ -544,6 +755,17 @@ enum dst_factor_mode {
#define POLFLAG_DCLK_INV BIT(3)
+#define RK3576_OVL_CTRL__YUV_MODE BIT(0)
+#define RK3576_OVL_BG_MIX_CTRL__BG_DLY GENMASK(31, 24)
+
+#define RK3576_DSP_IF_CFG_DONE_IMD BIT(31)
+#define RK3576_DSP_IF_DCLK_SEL_OUT BIT(21)
+#define RK3576_DSP_IF_PCLK_DIV BIT(20)
+#define RK3576_DSP_IF_PIN_POL GENMASK(5, 4)
+#define RK3576_DSP_IF_MUX GENMASK(3, 2)
+#define RK3576_DSP_IF_CLK_OUT_EN BIT(1)
+#define RK3576_DSP_IF_EN BIT(0)
+
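
These per-interface bitfields replace the shared RK3568_DSP_IF_EN/CTRL/POL registers. A hedged sketch of composing one interface control word (the register choice and field values are illustrative, not taken from this patch):

	/* Illustrative only: route vp->id to HDMI0 and enable the interface. */
	u32 val = RK3576_DSP_IF_EN |
		  RK3576_DSP_IF_CLK_OUT_EN |
		  FIELD_PREP(RK3576_DSP_IF_MUX, vp->id) |
		  RK3576_DSP_IF_CFG_DONE_IMD;

	vop2_writel(vop2, RK3576_HDMI0_IF_CTRL, val);
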
enum vop2_layer_phy_id {
ROCKCHIP_VOP2_CLUSTER0 = 0,
ROCKCHIP_VOP2_CLUSTER1,
@@ -560,4 +782,52 @@ enum vop2_layer_phy_id {
extern const struct component_ops vop2_component_ops;
+static inline void vop2_writel(struct vop2 *vop2, u32 offset, u32 v)
+{
+ regmap_write(vop2->map, offset, v);
+}
+
+static inline void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v)
+{
+ regmap_write(vp->vop2->map, vp->data->offset + offset, v);
+}
+
+static inline u32 vop2_readl(struct vop2 *vop2, u32 offset)
+{
+ u32 val;
+
+ regmap_read(vop2->map, offset, &val);
+
+ return val;
+}
+
+static inline u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset)
+{
+ u32 val;
+
+ regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
+
+ return val;
+}
+
+static inline void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
+{
+ regmap_field_write(win->reg[reg], v);
+}
+
+static inline bool vop2_cluster_window(const struct vop2_win *win)
+{
+ return win->data->feature & WIN_FEATURE_CLUSTER;
+}
+
+static inline struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct vop2_video_port, crtc);
+}
+
+static inline struct vop2_win *to_vop2_win(struct drm_plane *p)
+{
+ return container_of(p, struct vop2_win, base);
+}
+
#endif /* _ROCKCHIP_DRM_VOP2_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 385cf6881504..a673779de3d2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -448,17 +448,14 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
- int ret;
-
lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
- lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds");
- if (IS_ERR(lvds->pclk)) {
- DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n");
- return PTR_ERR(lvds->pclk);
- }
+ lvds->pclk = devm_clk_get_prepared(lvds->dev, "pclk_lvds");
+ if (IS_ERR(lvds->pclk))
+ return dev_err_probe(lvds->dev, PTR_ERR(lvds->pclk),
+ "could not get or prepare pclk_lvds\n");
lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
GFP_KERNEL);
@@ -467,25 +464,19 @@ static int rk3288_lvds_probe(struct platform_device *pdev,
lvds->pins->p = devm_pinctrl_get(lvds->dev);
if (IS_ERR(lvds->pins->p)) {
- DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n");
+ dev_warn(lvds->dev, "no pinctrl handle\n");
devm_kfree(lvds->dev, lvds->pins);
lvds->pins = NULL;
} else {
lvds->pins->default_state =
pinctrl_lookup_state(lvds->pins->p, "lcdc");
if (IS_ERR(lvds->pins->default_state)) {
- DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n");
+ dev_warn(lvds->dev, "no default pinctrl state\n");
devm_kfree(lvds->dev, lvds->pins);
lvds->pins = NULL;
}
}
- ret = clk_prepare(lvds->pclk);
- if (ret < 0) {
- DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n");
- return ret;
- }
-
return 0;
}
@@ -555,11 +546,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
lvds->drm_dev = drm_dev;
port = of_graph_get_port_by_id(dev->of_node, 1);
- if (!port) {
- DRM_DEV_ERROR(dev,
- "can't found port point, please init lvds panel port!\n");
- return -EINVAL;
- }
+ if (!port)
+ return dev_err_probe(dev, -EINVAL,
+ "can't found port point, please init lvds panel port!\n");
+
for_each_child_of_node(port, endpoint) {
child_count++;
of_property_read_u32(endpoint, "reg", &endpoint_id);
@@ -571,8 +561,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
}
if (!child_count) {
- DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "lvds port does not have any children\n");
goto err_put_port;
} else if (ret) {
dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
@@ -589,8 +578,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
lvds->output = rockchip_lvds_name_to_output(name);
if (lvds->output < 0) {
- DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
- ret = lvds->output;
+ ret = dev_err_probe(dev, lvds->output, "invalid output type [%s]\n", name);
goto err_put_remote;
}
@@ -601,8 +589,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
lvds->format = rockchip_lvds_name_to_format(name);
if (lvds->format < 0) {
- DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
- ret = lvds->format;
+ ret = dev_err_probe(dev, lvds->format,
+ "invalid data-mapping format [%s]\n", name);
goto err_put_remote;
}
@@ -612,8 +600,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to initialize encoder: %d\n", ret);
+ drm_err(drm_dev,
+ "failed to initialize encoder: %d\n", ret);
goto err_put_remote;
}
@@ -626,8 +614,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
&rockchip_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to initialize connector: %d\n", ret);
+ drm_err(drm_dev,
+ "failed to initialize connector: %d\n", ret);
goto err_free_encoder;
}
@@ -641,9 +629,9 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
if (IS_ERR(connector)) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to initialize bridge connector: %pe\n",
- connector);
+ drm_err(drm_dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
ret = PTR_ERR(connector);
goto err_free_encoder;
}
@@ -651,8 +639,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to attach encoder: %d\n", ret);
+ drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
goto err_free_connector;
}
@@ -714,34 +701,25 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- if (IS_ERR(lvds->grf)) {
- DRM_DEV_ERROR(dev, "missing rockchip,grf property\n");
- return PTR_ERR(lvds->grf);
- }
+ if (IS_ERR(lvds->grf))
+ return dev_err_probe(dev, PTR_ERR(lvds->grf), "missing rockchip,grf property\n");
ret = lvds->soc_data->probe(pdev, lvds);
- if (ret) {
- DRM_DEV_ERROR(dev, "Platform initialization failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Platform initialization failed\n");
dev_set_drvdata(dev, lvds);
ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to add component\n");
- clk_unprepare(lvds->pclk);
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add component\n");
- return ret;
+ return 0;
}
static void rockchip_lvds_remove(struct platform_device *pdev)
{
- struct rockchip_lvds *lvds = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &rockchip_lvds_component_ops);
- clk_unprepare(lvds->pclk);
}
struct platform_driver rockchip_lvds_driver = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 65a88f489693..14958d6b3d2e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -4,17 +4,56 @@
* Author: Andy Yan <andy.yan@rock-chips.com>
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "rockchip_drm_vop2.h"
+union vop2_alpha_ctrl {
+ u32 val;
+ struct {
+ /* [0:1] */
+ u32 color_mode:1;
+ u32 alpha_mode:1;
+ /* [2:3] */
+ u32 blend_mode:2;
+ u32 alpha_cal_mode:1;
+ /* [5:7] */
+ u32 factor_mode:3;
+ /* [8:9] */
+ u32 alpha_en:1;
+ u32 src_dst_swap:1;
+ u32 reserved:6;
+ /* [16:23] */
+ u32 glb_alpha:8;
+ } bits;
+};
+
+struct vop2_alpha {
+ union vop2_alpha_ctrl src_color_ctrl;
+ union vop2_alpha_ctrl dst_color_ctrl;
+ union vop2_alpha_ctrl src_alpha_ctrl;
+ union vop2_alpha_ctrl dst_alpha_ctrl;
+};
+
+struct vop2_alpha_config {
+ bool src_premulti_en;
+ bool dst_premulti_en;
+ bool src_pixel_alpha_en;
+ bool dst_pixel_alpha_en;
+ u16 src_glb_alpha_value;
+ u16 dst_glb_alpha_value;
+};
+
static const uint32_t formats_cluster[] = {
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
@@ -32,6 +71,37 @@ static const uint32_t formats_cluster[] = {
DRM_FORMAT_Y210, /* yuv422_10bit non-Linear mode only */
};
+/*
+ * The cluster windows on rk3576 support:
+ * RGB: linear mode and afbc
+ * YUV: linear mode and rfbc
+ * rfbc is a Rockchip-defined non-linear mode produced by the
+ * video decoder.
+ */
+static const uint32_t formats_rk3576_cluster[] = {
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
+};
+
static const uint32_t formats_esmart[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -78,6 +148,41 @@ static const uint32_t formats_rk356x_esmart[] = {
DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
};
+/*
+ * Add XRGB2101010/ARGB2101010/ARGB1555/XRGB1555
+ */
+static const uint32_t formats_rk3576_esmart[] = {
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */
+ DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
+ DRM_FORMAT_YUYV, /* yuv422_8bit[YUYV] linear mode */
+ DRM_FORMAT_UYVY, /* yuv422_8bit[UYVY] linear mode */
+};
+
static const uint32_t formats_smart[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -131,6 +236,321 @@ static const uint64_t format_modifiers_afbc[] = {
DRM_FORMAT_MOD_INVALID,
};
+/* Used from rk3576 onwards: AFBC 32x8 half mode */
+static const uint64_t format_modifiers_rk3576_afbc[] = {
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+
+	/* SPLIT mandates SPARSE, RGB modes mandate YTR */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const struct reg_field rk3568_vop_cluster_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
+	/* RK3588 only, reserved bit on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29),
+
+ /* cluster regs */
+ [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1),
+ [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0),
+ [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7),
+
+ /* afbc regs */
+ [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6),
+ [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9),
+ [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10),
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4),
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7),
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8),
+ [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31),
+ [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31),
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15),
+ [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31),
+ [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31),
+ [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0),
+ [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1),
+ [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3),
+ [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff },
+ [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff },
+};
+
+static const struct reg_field rk3568_vop_smart_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14),
+ [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
+ [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
+ [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
+ /* RK3588 only, reserved register on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15),
+ [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1),
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5),
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7),
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9),
+ [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11),
+ [VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13),
+ [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9),
+ [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10),
+ [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11),
+ [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff },
+ [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff },
+};
+
+static const struct reg_field rk3576_vop_cluster_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14),
+ [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 17, 17),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 4),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 9),
+	/* Read-only bit on rk3576; writing to this bit has no effect. */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
+
+ [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_CLUSTER_PORT_SEL_IMD, 0, 1),
+ [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_CLUSTER_DLY_NUM, 0, 7),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 22, 23),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29),
+
+ /* cluster regs */
+ [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1),
+ [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0),
+ [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7),
+
+ /* afbc regs */
+ [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6),
+ [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9),
+ [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10),
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4),
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7),
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8),
+ [VOP2_WIN_AFBC_PLD_OFFSET_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 16, 16),
+ [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31),
+ [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31),
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15),
+ [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31),
+ [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_PLD_OFFSET] = REG_FIELD(RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET, 0, 31),
+ [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0),
+ [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1),
+ [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3),
+ [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff },
+ [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff },
+};
+
+static const struct reg_field rk3576_vop_smart_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14),
+ [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
+ [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
+ [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
+ [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_SMART_PORT_SEL_IMD, 0, 1),
+ [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_SMART_DLY_NUM, 0, 7),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1),
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5),
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9),
+ [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10),
+ [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11),
+ [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff },
+
+	/* CBCR shares the same scale factor as YRGB */
+ [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
+	[VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+	[VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
+	[VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+
+ [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff },
+};
+
static const struct vop2_video_port_data rk3568_vop_video_ports[] = {
{
.id = 0,
@@ -177,10 +597,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
.name = "Smart0-win0",
.phys_id = ROCKCHIP_VOP2_SMART0,
.base = 0x1c00,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_smart,
.nformats = ARRAY_SIZE(formats_smart),
.format_modifiers = format_modifiers,
- .layer_sel_id = 3,
+	/* 0xf means this layer can't be attached to this VP */
+ .layer_sel_id = { 3, 3, 3, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -189,11 +611,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
}, {
.name = "Smart1-win0",
.phys_id = ROCKCHIP_VOP2_SMART1,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_smart,
.nformats = ARRAY_SIZE(formats_smart),
.format_modifiers = format_modifiers,
.base = 0x1e00,
- .layer_sel_id = 7,
+ .layer_sel_id = { 7, 7, 7, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -202,11 +625,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
}, {
.name = "Esmart1-win0",
.phys_id = ROCKCHIP_VOP2_ESMART1,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_rk356x_esmart,
.nformats = ARRAY_SIZE(formats_rk356x_esmart),
.format_modifiers = format_modifiers,
.base = 0x1a00,
- .layer_sel_id = 6,
+ .layer_sel_id = { 6, 6, 6, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -215,11 +639,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
}, {
.name = "Esmart0-win0",
.phys_id = ROCKCHIP_VOP2_ESMART0,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_rk356x_esmart,
.nformats = ARRAY_SIZE(formats_rk356x_esmart),
.format_modifiers = format_modifiers,
.base = 0x1800,
- .layer_sel_id = 2,
+ .layer_sel_id = { 2, 2, 2, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -229,10 +654,11 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
.name = "Cluster0-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER0,
.base = 0x1000,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 0,
+ .layer_sel_id = { 0, 0, 0, 0xf },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.max_upscale_factor = 4,
@@ -244,10 +670,11 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
.name = "Cluster1-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER1,
.base = 0x1200,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 1,
+ .layer_sel_id = { 1, 1, 1, 0xf },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
@@ -340,6 +767,292 @@ static const struct vop2_regs_dump rk3568_regs_dump[] = {
},
};
+static const struct vop2_video_port_data rk3576_vop_video_ports[] = {
+ {
+ .id = 0,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 9 * 9 * 9, /* 9x9x9 */
+ .max_output = { 4096, 2304 },
+ /* win layer_mix hdr */
+ .pre_scan_max_dly = { 10, 8, 2, 0 },
+ .offset = 0xc00,
+ .pixel_rate = 2,
+ }, {
+ .id = 1,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 729, /* 9x9x9 */
+ .max_output = { 2560, 1600 },
+ /* win layer_mix hdr */
+ .pre_scan_max_dly = { 10, 6, 0, 0 },
+ .offset = 0xd00,
+ .pixel_rate = 1,
+ }, {
+ .id = 2,
+ .gamma_lut_len = 1024,
+ .max_output = { 1920, 1080 },
+ /* win layer_mix hdr */
+ .pre_scan_max_dly = { 10, 6, 0, 0 },
+ .offset = 0xe00,
+ .pixel_rate = 1,
+ },
+};
+
+/*
+ * The rk3576 VOP has 2 cluster windows and 4 esmart windows.
+ * Every cluster can work as one 4K win or be split into two 2K wins.
+ * All wins in a cluster support AFBCD.
+ *
+ * Every esmart win supports 4 multi-regions.
+ *
+ * VP0 can use Cluster0/1 and Esmart0/2
+ * VP1 can use Cluster0/1 and Esmart1/3
+ * VP2 can use Esmart0/1/2/3
+ *
+ * Scale filter mode:
+ *
+ * * Cluster:
+ * * Support prescale down:
+ * * H/V: gt2/avg2 or gt4/avg4
+ * * After prescale down:
+ * * nearest-neighbor/bilinear/multi-phase filter for scale up
+ * * nearest-neighbor/bilinear/multi-phase filter for scale down
+ *
+ * * Esmart:
+ * * Support prescale down:
+ * * H: gt2/avg2 or gt4/avg4
+ * * V: gt2 or gt4
+ * * After prescale down:
+ * * nearest-neighbor/bilinear/bicubic for scale up
+ * * nearest-neighbor/bilinear for scale down
+ *
+ * AXI config:
+ *
+ * * Cluster0 win0: 0xa, 0xb [AXI0]
+ * * Cluster0 win1: 0xc, 0xd [AXI0]
+ * * Cluster1 win0: 0x6, 0x7 [AXI0]
+ * * Cluster1 win1: 0x8, 0x9 [AXI0]
+ * * Esmart0: 0x10, 0x11 [AXI0]
+ * * Esmart1: 0x12, 0x13 [AXI0]
+ * * Esmart2: 0xa, 0xb [AXI1]
+ * * Esmart3: 0xc, 0xd [AXI1]
+ * * Lut dma rid: 0x1, 0x2, 0x3 [AXI0]
+ * * DCI dma rid: 0x4 [AXI0]
+ * * Metadata rid: 0x5 [AXI0]
+ *
+ * * Limits:
+ * * (1) Cluster0/1 are fixed on AXI0 by IC design
+ * * (2) 0x0 and 0xf can't be used
+ * * (3) IDs are 5 bits wide on each AXI bus
+ * * (4) Cluster and lut/dci/metadata RIDs must be smaller than 0xf;
+ * *     if a cluster RID is bigger than 0xf, the VOP can hang when
+ * *     system bandwidth pressure is very high.
+ */
+static const struct vop2_win_data rk3576_vop_win_data[] = {
+ {
+ .name = "Cluster0-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER0,
+ .base = 0x1000,
+ .possible_vp_mask = BIT(0) | BIT(1),
+ .formats = formats_rk3576_cluster,
+ .nformats = ARRAY_SIZE(formats_rk3576_cluster),
+ .format_modifiers = format_modifiers_rk3576_afbc,
+ .layer_sel_id = { 0, 0, 0xf, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0xa,
+ .axi_uv_r_id = 0xb,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster1-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER1,
+ .base = 0x1200,
+ .possible_vp_mask = BIT(0) | BIT(1),
+ .formats = formats_rk3576_cluster,
+ .nformats = ARRAY_SIZE(formats_rk3576_cluster),
+ .format_modifiers = format_modifiers_rk3576_afbc,
+ .layer_sel_id = { 1, 1, 0xf, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Esmart0-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART0,
+ .base = 0x1800,
+ .possible_vp_mask = BIT(0) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 2, 0xf, 0, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x10,
+ .axi_uv_r_id = 0x11,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ }, {
+ .name = "Esmart1-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART1,
+ .base = 0x1a00,
+ .possible_vp_mask = BIT(1) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 0xf, 2, 1, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x12,
+ .axi_uv_r_id = 0x13,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ }, {
+ .name = "Esmart2-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART2,
+ .base = 0x1c00,
+ .possible_vp_mask = BIT(0) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 3, 0xf, 2, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ }, {
+ .name = "Esmart3-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART3,
+ .base = 0x1e00,
+ .possible_vp_mask = BIT(1) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 0xf, 3, 3, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x0d,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ },
+};
+
+static const struct vop2_regs_dump rk3576_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x200,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL_SYS",
+ .base = RK3576_SYS_EXTRA_ALPHA_CTRL,
+ .size = 0x50,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "OVL_VP0",
+ .base = RK3576_OVL_CTRL(0),
+ .size = 0x80,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "OVL_VP1",
+ .base = RK3576_OVL_CTRL(1),
+ .size = 0x80,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "OVL_VP2",
+ .base = RK3576_OVL_CTRL(2),
+ .size = 0x80,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x200,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x200,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart2",
+ .base = RK3588_ESMART2_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart3",
+ .base = RK3588_ESMART3_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
{
.id = 0,
@@ -409,10 +1122,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster0-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER0,
.base = 0x1000,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 0,
+ .layer_sel_id = { 0, 0, 0, 0 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.axi_bus_id = 0,
@@ -427,10 +1141,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster1-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER1,
.base = 0x1200,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 1,
+ .layer_sel_id = { 1, 1, 1, 1 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
@@ -445,10 +1160,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster2-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER2,
.base = 0x1400,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 4,
+ .layer_sel_id = { 4, 4, 4, 4 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
@@ -463,10 +1179,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster3-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER3,
.base = 0x1600,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 5,
+ .layer_sel_id = { 5, 5, 5, 5 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
@@ -480,11 +1197,12 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
}, {
.name = "Esmart0-win0",
.phys_id = ROCKCHIP_VOP2_ESMART0,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
.base = 0x1800,
- .layer_sel_id = 2,
+ .layer_sel_id = { 2, 2, 2, 2 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 0,
@@ -496,11 +1214,12 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
}, {
.name = "Esmart1-win0",
.phys_id = ROCKCHIP_VOP2_ESMART1,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
.base = 0x1a00,
- .layer_sel_id = 3,
+ .layer_sel_id = { 3, 3, 3, 3 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 0,
@@ -513,10 +1232,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Esmart2-win0",
.phys_id = ROCKCHIP_VOP2_ESMART2,
.base = 0x1c00,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
- .layer_sel_id = 6,
+ .layer_sel_id = { 6, 6, 6, 6 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 1,
@@ -528,11 +1248,12 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
}, {
.name = "Esmart3-win0",
.phys_id = ROCKCHIP_VOP2_ESMART3,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
.base = 0x1e00,
- .layer_sel_id = 7,
+ .layer_sel_id = { 7, 7, 7, 7 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 1,
@@ -647,7 +1368,1017 @@ static const struct vop2_regs_dump rk3588_regs_dump[] = {
},
};
+static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ u32 die, dip;
+
+ die = vop2_readl(vop2, RK3568_DSP_IF_EN);
+ dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_RGB0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_RGB |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
+ if (polflags & POLFLAG_DCLK_INV)
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
+ else
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
+ break;
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_EDP |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI1:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_LVDS0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_LVDS1:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
+
+ vop2_writel(vop2, RK3568_DSP_IF_EN, die);
+ vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
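+	/* crtc_clock is in kHz; return the dclk rate in Hz */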
+ return crtc->state->adjusted_mode.crtc_clock * 1000LL;
+}
+
+static unsigned long rk3576_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+ u8 port_pix_rate = vp->data->pixel_rate;
+ int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_sel;
+ u32 ctrl, vp_clk_div, reg, dclk_div;
+ unsigned long dclk_in_rate, dclk_core_rate;
+
+ if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420 || adjusted_mode->crtc_clock > 600000)
+ dclk_div = 2;
+ else
+ dclk_div = 1;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ dclk_core_rate = adjusted_mode->crtc_clock / 2;
+ else
+ dclk_core_rate = adjusted_mode->crtc_clock / port_pix_rate;
+
+ dclk_in_rate = adjusted_mode->crtc_clock / dclk_div;
+
+ dclk_core_div = dclk_in_rate > dclk_core_rate ? 1 : 0;
+
+ if (vop2_output_if_is_edp(id))
+ if_pixclk_div = port_pix_rate == 2 ? RK3576_DSP_IF_PCLK_DIV : 0;
+ else
+ if_pixclk_div = port_pix_rate == 1 ? RK3576_DSP_IF_PCLK_DIV : 0;
+
+ if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420) {
+ if_dclk_sel = RK3576_DSP_IF_DCLK_SEL_OUT;
+ dclk_out_div = 1;
+ } else {
+ if_dclk_sel = 0;
+ dclk_out_div = 0;
+ }
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ reg = RK3576_HDMI0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ reg = RK3576_EDP0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ reg = RK3576_MIPI0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_DP0:
+ reg = RK3576_DP0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_DP1:
+ reg = RK3576_DP1_IF_CTRL;
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ ctrl = vop2_readl(vop2, reg);
+ ctrl &= ~RK3576_DSP_IF_DCLK_SEL_OUT;
+ ctrl &= ~RK3576_DSP_IF_PCLK_DIV;
+ ctrl &= ~RK3576_DSP_IF_MUX;
+ ctrl |= RK3576_DSP_IF_CFG_DONE_IMD;
+ ctrl |= if_dclk_sel | if_pixclk_div;
+ ctrl |= RK3576_DSP_IF_CLK_OUT_EN | RK3576_DSP_IF_EN;
+ ctrl |= FIELD_PREP(RK3576_DSP_IF_MUX, vp->id);
+ ctrl |= FIELD_PREP(RK3576_DSP_IF_PIN_POL, polflags);
+ vop2_writel(vop2, reg, ctrl);
+
+ vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
+ vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
+
+ vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
+
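+	/* dclk_in_rate was derived from crtc_clock in kHz; return Hz */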
+ return dclk_in_rate * 1000LL;
+}
+
+/*
+ * Calculate the dclk on rk3588.
+ * The available dclk dividers are 1, 2 and 4.
+ */
+static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk)
+{
+ if (child_clk * 4 <= max_dclk)
+ return child_clk * 4;
+ else if (child_clk * 2 <= max_dclk)
+ return child_clk * 2;
+ else if (child_clk <= max_dclk)
+ return child_clk;
+ else
+ return 0;
+}
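+
+/*
+ * For example (illustrative): with child_clk = 148500000 (148.5 MHz)
+ * and max_dclk = 600000000, 148.5 MHz * 4 = 594 MHz <= 600 MHz, so the
+ * largest fitting multiple, 594000000, is returned.
+ */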
+
+/*
+ * 4 pixels per dclk cycle on rk3588
+ * RGB/eDP/HDMI: if_pixclk >= dclk_core
+ * DP: dp_pixclk = dclk_out <= dclk_core
+ * DSI: mipi_pixclk <= dclk_out <= dclk_core
+ */
+static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ int *dclk_core_div, int *dclk_out_div,
+ int *if_pixclk_div, int *if_dclk_div)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+ int output_mode = vcstate->output_mode;
+ unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */
+ unsigned long dclk_core_rate = v_pixclk >> 2;
+ unsigned long dclk_rate = v_pixclk;
+ unsigned long dclk_out_rate;
+ unsigned long if_pixclk_rate;
+ int K = 1;
+
+ if (vop2_output_if_is_hdmi(id)) {
+ /*
+ * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate
+		 * K = 1: dclk_core = hdmi_edp_dclk > if_pixclk_rate
+ */
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420) {
+ dclk_rate = dclk_rate >> 1;
+ K = 2;
+ }
+
+ /*
+ * if_pixclk_rate = (dclk_core_rate << 1) / K;
+ * if_dclk_rate = dclk_core_rate / K;
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = dclk_rate / if_dclk_rate;
+ */
+ *if_pixclk_div = 2;
+ *if_dclk_div = 4;
+ } else if (vop2_output_if_is_edp(id)) {
+ /*
+ * edp_pixclk = edp_dclk > dclk_core
+ */
+ if_pixclk_rate = v_pixclk / K;
+ dclk_rate = if_pixclk_rate * K;
+ /*
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = *if_pixclk_div;
+ */
+ *if_pixclk_div = K;
+ *if_dclk_div = K;
+ } else if (vop2_output_if_is_dp(id)) {
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420)
+ dclk_out_rate = v_pixclk >> 3;
+ else
+ dclk_out_rate = v_pixclk >> 2;
+
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ } else if (vop2_output_if_is_mipi(id)) {
+ if_pixclk_rate = dclk_core_rate / K;
+ /*
+ * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4
+ */
+ dclk_out_rate = if_pixclk_rate;
+ /*
+		 * dclk_rate = N * dclk_core_rate, where N = 1, 2 or 4;
+		 * rk3588_calc_dclk() picks the largest factor that fits.
+ */
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ /*
+ * mipi pixclk == dclk_out
+ */
+ *if_pixclk_div = 1;
+ } else if (vop2_output_if_is_dpi(id)) {
+ dclk_rate = v_pixclk;
+ }
+
+ *dclk_core_div = dclk_rate / dclk_core_rate;
+ *if_pixclk_div = ilog2(*if_pixclk_div);
+ *if_dclk_div = ilog2(*if_dclk_div);
+ *dclk_core_div = ilog2(*dclk_core_div);
+ *dclk_out_div = ilog2(*dclk_out_div);
+
+ drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n",
+ dclk_rate, *if_pixclk_div, *if_dclk_div);
+
+ return dclk_rate;
+}
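+
+/*
+ * Worked example (illustrative): 4K@60 HDMI with YUV420 output,
+ * crtc_clock = 594000 kHz:
+ *   v_pixclk = 594 MHz, dclk_core_rate = 148.5 MHz
+ *   YUV420 halves dclk_rate to 297 MHz and sets K = 2
+ *   if_pixclk_div = 2, if_dclk_div = 4
+ *   dclk_core_div = 297 MHz / 148.5 MHz = 2
+ * After the ilog2() conversion the fields written to hardware are
+ * if_pixclk_div = 1, if_dclk_div = 2, dclk_core_div = 1.
+ */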
+
+/*
+ * MIPI port mux on rk3588:
+ * 0: Video Port 2
+ * 1: Video Port 3
+ * 3: Video Port 1 (MIPI1 only)
+ */
+static u32 rk3588_get_mipi_port_mux(int vp_id)
+{
+ if (vp_id == 1)
+ return 3;
+ else if (vp_id == 3)
+ return 1;
+ else
+ return 0;
+}
+
+static u32 rk3588_get_hdmi_pol(u32 flags)
+{
+ u32 val;
+
+ val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0;
+ val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0;
+
+ return val;
+}
+
+static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div;
+ unsigned long clock;
+ u32 die, dip, div, vp_clk_div, val;
+
+ clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div,
+ &if_pixclk_div, &if_dclk_div);
+ if (!clock)
+ return 0;
+
+ vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
+ vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
+
+ die = vop2_readl(vop2, RK3568_DSP_IF_EN);
+ dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
+ div = vop2_readl(vop2, RK3568_DSP_IF_CTRL);
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
+ break;
+ case ROCKCHIP_VOP2_EP_HDMI1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI1:
+ div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val);
+ break;
+ case ROCKCHIP_VOP2_EP_DP0:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_DP1:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
+
+ vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
+ vop2_writel(vop2, RK3568_DSP_IF_EN, die);
+ vop2_writel(vop2, RK3568_DSP_IF_CTRL, div);
+ vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
+ return clock;
+}
+
+static bool is_opaque(u16 alpha)
+{
+ return (alpha >> 8) == 0xff;
+}
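+
+/*
+ * DRM plane alpha is 16 bits wide (DRM_BLEND_ALPHA_OPAQUE == 0xffff)
+ * while the hardware uses 8 bits, so only the top byte is checked:
+ * e.g. is_opaque(0xffff) is true, is_opaque(0x8000) is false.
+ */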
+
+static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config,
+ struct vop2_alpha *alpha)
+{
+ int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1;
+ int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1;
+ int src_color_mode = alpha_config->src_premulti_en ?
+ ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
+ int dst_color_mode = alpha_config->dst_premulti_en ?
+ ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
+
+ alpha->src_color_ctrl.val = 0;
+ alpha->dst_color_ctrl.val = 0;
+ alpha->src_alpha_ctrl.val = 0;
+ alpha->dst_alpha_ctrl.val = 0;
+
+ if (!alpha_config->src_pixel_alpha_en)
+ alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
+ else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en)
+ alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX;
+ else
+ alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
+
+ alpha->src_color_ctrl.bits.alpha_en = 1;
+
+ if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) {
+ alpha->src_color_ctrl.bits.color_mode = src_color_mode;
+ alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
+ } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) {
+ alpha->src_color_ctrl.bits.color_mode = src_color_mode;
+ alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE;
+ } else {
+ alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL;
+ alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
+ }
+ alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8;
+ alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
+
+ alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
+ alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
+ alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8;
+ alpha->dst_color_ctrl.bits.color_mode = dst_color_mode;
+ alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
+
+ alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode;
+ alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
+ alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE;
+
+ alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en)
+ alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX;
+ else
+ alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
+ alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION;
+ alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
+}
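+
+/*
+ * The values produced here implement the usual coverage blend used by
+ * the mixers below, e.g. for a premultiplied source over the bottom
+ * layer:
+ *
+ *   Cd' = Cs + (1 - As) * Cd
+ *
+ * with the global alpha factors applied on top when blend_mode is
+ * ALPHA_PER_PIX_GLOBAL (see the per-zpos setup in vop2_setup_alpha()).
+ */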
+
+static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
+{
+ struct vop2_video_port *vp;
+ int used_layer = 0;
+ int i;
+
+ for (i = 0; i < port_id; i++) {
+ vp = &vop2->vps[i];
+ used_layer += hweight32(vp->win_mask);
+ }
+
+ return used_layer;
+}
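+
+/*
+ * Layer mixers are allocated linearly across the ports in VP order,
+ * e.g. (illustrative) if VP0 owns two windows and VP1 owns one, the
+ * mixer ids for VP2 start at 3.
+ */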
+
+static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
+{
+ struct vop2_alpha_config alpha_config;
+ struct vop2_alpha alpha;
+ struct drm_plane_state *bottom_win_pstate;
+ bool src_pixel_alpha_en = false;
+ u16 src_glb_alpha_val, dst_glb_alpha_val;
+ u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg;
+ u32 offset = 0;
+ bool premulti_en = false;
+ bool swap = false;
+
+	/* In one-win mode, win0 is the dst/bottom win and win1 is an all-zero src/top win */
+ bottom_win_pstate = main_win->base.state;
+ src_glb_alpha_val = 0;
+ dst_glb_alpha_val = main_win->base.state->alpha;
+
+ if (!bottom_win_pstate->fb)
+ return;
+
+ alpha_config.src_premulti_en = premulti_en;
+ alpha_config.dst_premulti_en = false;
+ alpha_config.src_pixel_alpha_en = src_pixel_alpha_en;
+	alpha_config.dst_pixel_alpha_en = true; /* the alpha value needs to be passed on to the next mix */
+ alpha_config.src_glb_alpha_value = src_glb_alpha_val;
+ alpha_config.dst_glb_alpha_value = dst_glb_alpha_val;
+ vop2_parse_alpha(&alpha_config, &alpha);
+
+ alpha.src_color_ctrl.bits.src_dst_swap = swap;
+
+ switch (main_win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ offset = 0x0;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ offset = 0x10;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ offset = 0x20;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ offset = 0x30;
+ break;
+ }
+
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ src_color_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3568_CLUSTER0_MIX_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL;
+ } else {
+ src_color_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3576_CLUSTER0_MIX_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL;
+ }
+
+ vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val);
+ vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val);
+ vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val);
+ vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val);
+}
+
+static void vop2_setup_alpha(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_framebuffer *fb;
+ struct vop2_alpha_config alpha_config;
+ struct vop2_alpha alpha;
+ struct drm_plane *plane;
+ int pixel_alpha_en;
+ int premulti_en, gpremulti_en = 0;
+ int mixer_id;
+ u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg;
+ u32 offset;
+ bool bottom_layer_alpha_en = false;
+ u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;
+
+ if (vop2->version <= VOP_VERSION_RK3588)
+ mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
+ else
+ mixer_id = 0;
+
+	alpha_config.dst_pixel_alpha_en = true; /* the alpha value needs to be passed on to the next mix */
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ if (plane->state->normalized_zpos == 0 &&
+ !is_opaque(plane->state->alpha) &&
+ !vop2_cluster_window(win)) {
+ /*
+			 * If the bottom layer has a global alpha effect (except for
+			 * cluster layers, which already handle the bottom layer's
+			 * global alpha at the cluster mix), the bottom layer mix
+			 * needs to handle the global alpha.
+ */
+ bottom_layer_alpha_en = true;
+ dst_global_alpha = plane->state->alpha;
+ }
+ }
+
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ src_color_ctrl_reg = RK3568_MIX0_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3568_MIX0_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3568_MIX0_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3568_MIX0_DST_ALPHA_CTRL;
+ } else {
+ src_color_ctrl_reg = RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp->id);
+ dst_color_ctrl_reg = RK3576_OVL_MIX0_DST_COLOR_CTRL(vp->id);
+ src_alpha_ctrl_reg = RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp->id);
+ dst_alpha_ctrl_reg = RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp->id);
+ }
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+ int zpos = plane->state->normalized_zpos;
+
+ /*
+ * Alpha only needs to be configured from the second layer onwards.
+ */
+ if (zpos == 0)
+ continue;
+
+ if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ premulti_en = 1;
+ else
+ premulti_en = 0;
+
+ plane = &win->base;
+ fb = plane->state->fb;
+
+ pixel_alpha_en = fb->format->has_alpha;
+
+ alpha_config.src_premulti_en = premulti_en;
+
+ if (bottom_layer_alpha_en && zpos == 1) {
+ gpremulti_en = premulti_en;
+ /* Cd = Cs + (1 - As) * Cd * Agd */
+ alpha_config.dst_premulti_en = false;
+ alpha_config.src_pixel_alpha_en = pixel_alpha_en;
+ alpha_config.src_glb_alpha_value = plane->state->alpha;
+ alpha_config.dst_glb_alpha_value = dst_global_alpha;
+ } else if (vop2_cluster_window(win)) {
+ /* Mix output data only has pixel alpha */
+ alpha_config.dst_premulti_en = true;
+ alpha_config.src_pixel_alpha_en = true;
+ alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ } else {
+ /* Cd = Cs + (1 - As) * Cd */
+ alpha_config.dst_premulti_en = true;
+ alpha_config.src_pixel_alpha_en = pixel_alpha_en;
+ alpha_config.src_glb_alpha_value = plane->state->alpha;
+ alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ }
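+
+ /*
+ * Summary of the three cases above (Cs/Cd = src/dst colour,
+ * As = src pixel alpha, Agd = dst global alpha):
+ *   bottom layer with global alpha: Cd = Cs + (1 - As) * Cd * Agd
+ *   cluster window: global alphas forced opaque, mix output carries
+ *                   pixel alpha only
+ *   otherwise: Cd = Cs + (1 - As) * Cd
+ */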
+
+ vop2_parse_alpha(&alpha_config, &alpha);
+
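+ /*
+ * Layer mixers are laid out 0x10 bytes apart: the layer at zpos 1
+ * uses mixer "mixer_id", zpos 2 uses mixer_id + 1, and so on. The
+ * layer at zpos 0 feeds the first mixer as its bottom/dst input, so
+ * it needs no mixer of its own.
+ */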
+ offset = (mixer_id + zpos - 1) * 0x10;
+
+ vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val);
+ vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val);
+ vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val);
+ vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val);
+ }
+
+ if (vp->id == 0) {
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ src_color_ctrl_reg = RK3568_HDR0_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3568_HDR0_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3568_HDR0_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3568_HDR0_DST_ALPHA_CTRL;
+ } else {
+ src_color_ctrl_reg = RK3576_OVL_HDR_SRC_COLOR_CTRL(vp->id);
+ dst_color_ctrl_reg = RK3576_OVL_HDR_DST_COLOR_CTRL(vp->id);
+ src_alpha_ctrl_reg = RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp->id);
+ dst_alpha_ctrl_reg = RK3576_OVL_HDR_DST_ALPHA_CTRL(vp->id);
+ }
+
+ if (bottom_layer_alpha_en) {
+ /* Transfer pixel alpha to hdr mix */
+ alpha_config.src_premulti_en = gpremulti_en;
+ alpha_config.dst_premulti_en = true;
+ alpha_config.src_pixel_alpha_en = true;
+ alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+
+ vop2_parse_alpha(&alpha_config, &alpha);
+
+ vop2_writel(vop2, src_color_ctrl_reg, alpha.src_color_ctrl.val);
+ vop2_writel(vop2, dst_color_ctrl_reg, alpha.dst_color_ctrl.val);
+ vop2_writel(vop2, src_alpha_ctrl_reg, alpha.src_alpha_ctrl.val);
+ vop2_writel(vop2, dst_alpha_ctrl_reg, alpha.dst_alpha_ctrl.val);
+ } else {
+ vop2_writel(vop2, src_color_ctrl_reg, 0);
+ }
+ }
+}
+
+static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_plane *plane;
+ u32 layer_sel = 0;
+ u32 port_sel;
+ u8 layer_id;
+ u8 old_layer_id;
+ u8 layer_sel_id;
+ unsigned int ofs;
+ u32 ovl_ctrl;
+ int i;
+ struct vop2_video_port *vp0 = &vop2->vps[0];
+ struct vop2_video_port *vp1 = &vop2->vps[1];
+ struct vop2_video_port *vp2 = &vop2->vps[2];
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
+
+ ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
+ ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ if (vcstate->yuv_overlay)
+ ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
+ else
+ ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
+
+ vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
+
+ port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
+
+ if (vp0->nlayers)
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX,
+ vp0->nlayers - 1);
+ else
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8);
+
+ if (vp1->nlayers)
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX,
+ (vp0->nlayers + vp1->nlayers - 1));
+ else
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8);
+
+ if (vp2->nlayers)
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX,
+ (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1));
+ else
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
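+
+ /*
+ * Illustrative reading of the muxes above: PORTn_MUX holds the id of
+ * the last layer mixer routed to video port n (the layer counts of
+ * the preceding ports summed up, minus one); writing 8 marks the
+ * port as unused.
+ */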
+
+ layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
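+ /*
+ * LAYER_SEL packs one 4-bit window id per layer: layer N occupies
+ * bits [4 * N + 3 : 4 * N], which is why the lookups below shift by
+ * 4 * layer_id and mask with 0xf.
+ */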
+
+ ofs = 0;
+ for (i = 0; i < vp->id; i++)
+ ofs += vop2->vps[i].nlayers;
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+ struct vop2_win *old_win;
+
+ layer_id = (u8)(plane->state->normalized_zpos + ofs);
+ /*
+ * Find the layer this win was bound to in the old state.
+ */
+ for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
+ layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
+ if (layer_sel_id == win->data->layer_sel_id[vp->id])
+ break;
+ }
+
+ /*
+ * Find the win that was bound to this layer in the old state.
+ */
+ for (i = 0; i < vop2->data->win_size; i++) {
+ old_win = &vop2->win[i];
+ layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
+ if (layer_sel_id == old_win->data->layer_sel_id[vp->id])
+ break;
+ }
+
+ switch (win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART0:
+ port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART1:
+ port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id);
+ break;
+ case ROCKCHIP_VOP2_SMART0:
+ port_sel &= ~RK3568_OVL_PORT_SEL__SMART0;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id);
+ break;
+ case ROCKCHIP_VOP2_SMART1:
+ port_sel &= ~RK3568_OVL_PORT_SEL__SMART1;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id);
+ break;
+ }
+
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id[vp->id]);
+ /*
+ * When we bind a window from layerM to layerN, we also need to move the old
+ * window on layerN to layerM to avoid one window being selected by two or
+ * more layers.
+ */
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id,
+ old_win->data->layer_sel_id[vp->id]);
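+ /*
+ * Hypothetical example of the swap above: if this win moves from
+ * layer 2 to layer 0, the window that previously occupied layer 0
+ * is written back to layer 2, so no window ends up selected by two
+ * layers at once.
+ */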
+ }
+
+ vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
+ vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+}
+
+static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct vop2_win *win;
+ int i = 0;
+ u32 cdly = 0, sdly = 0;
+
+ for (i = 0; i < vop2->data->win_size; i++) {
+ u32 dly;
+
+ win = &vop2->win[i];
+ dly = win->delay;
+
+ switch (win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly);
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly);
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly);
+ break;
+ case ROCKCHIP_VOP2_ESMART0:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly);
+ break;
+ case ROCKCHIP_VOP2_ESMART1:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
+ break;
+ case ROCKCHIP_VOP2_SMART0:
+ case ROCKCHIP_VOP2_ESMART2:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
+ break;
+ case ROCKCHIP_VOP2_SMART1:
+ case ROCKCHIP_VOP2_ESMART3:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
+ break;
+ }
+ }
+
+ vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly);
+ vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly);
+}
+
+static void rk3568_vop2_setup_overlay(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_plane *plane;
+
+ vp->win_mask = 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT];
+
+ vp->win_mask |= BIT(win->data->phys_id);
+
+ if (vop2_cluster_window(win))
+ vop2_setup_cluster_alpha(vop2, win);
+ }
+
+ if (!vp->win_mask)
+ return;
+
+ rk3568_vop2_setup_layer_mixer(vp);
+ vop2_setup_alpha(vp);
+ rk3568_vop2_setup_dly_for_windows(vp);
+}
+
+static void rk3576_vop2_setup_layer_mixer(struct vop2_video_port *vp)
+{
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_plane *plane;
+ u32 layer_sel = 0xffff; /* a nibble of 0xf disables the corresponding layer */
+ u32 ovl_ctrl;
+
+ ovl_ctrl = vop2_readl(vop2, RK3576_OVL_CTRL(vp->id));
+ if (vcstate->yuv_overlay)
+ ovl_ctrl |= RK3576_OVL_CTRL__YUV_MODE;
+ else
+ ovl_ctrl &= ~RK3576_OVL_CTRL__YUV_MODE;
+
+ vop2_writel(vop2, RK3576_OVL_CTRL(vp->id), ovl_ctrl);
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos,
+ 0xf);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos,
+ win->data->layer_sel_id[vp->id]);
+ }
+
+ vop2_writel(vop2, RK3576_OVL_LAYER_SEL(vp->id), layer_sel);
+}
+
+static void rk3576_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
+{
+ struct drm_plane *plane;
+ struct vop2_win *win;
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ win = to_vop2_win(plane);
+ vop2_win_write(win, VOP2_WIN_DLY_NUM, 0);
+ }
+}
+
+static void rk3576_vop2_setup_overlay(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_plane *plane;
+
+ vp->win_mask = 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT];
+ vp->win_mask |= BIT(win->data->phys_id);
+
+ if (vop2_cluster_window(win))
+ vop2_setup_cluster_alpha(vop2, win);
+ }
+
+ if (!vp->win_mask)
+ return;
+
+ rk3576_vop2_setup_layer_mixer(vp);
+ vop2_setup_alpha(vp);
+ rk3576_vop2_setup_dly_for_windows(vp);
+}
+
+static void rk3568_vop2_setup_bg_dly(struct vop2_video_port *vp)
+{
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ u16 hdisplay = mode->crtc_hdisplay;
+ u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ u32 bg_dly;
+ u32 pre_scan_dly;
+
+ bg_dly = vp->data->pre_scan_max_dly[3];
+ vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
+ FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
+
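+ /*
+ * PRE_SCAN_HTIMING packs two fields: bits [31:16] carry the pre-scan
+ * delay (background delay plus half the active width), bits [15:0]
+ * the hsync length, matching the shift/or below.
+ */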
+ pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
+ vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+}
+
+static void rk3576_vop2_setup_bg_dly(struct vop2_video_port *vp)
+{
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ u16 hdisplay = mode->crtc_hdisplay;
+ u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ u32 bg_dly;
+ u32 pre_scan_dly;
+
+ bg_dly = vp->data->pre_scan_max_dly[VOP2_DLY_WIN] +
+ vp->data->pre_scan_max_dly[VOP2_DLY_LAYER_MIX] +
+ vp->data->pre_scan_max_dly[VOP2_DLY_HDR_MIX];
+
+ vop2_writel(vp->vop2, RK3576_OVL_BG_MIX_CTRL(vp->id),
+ FIELD_PREP(RK3576_OVL_BG_MIX_CTRL__BG_DLY, bg_dly));
+
+ pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
+ vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+}
+
+static const struct vop2_ops rk3568_vop_ops = {
+ .setup_intf_mux = rk3568_set_intf_mux,
+ .setup_bg_dly = rk3568_vop2_setup_bg_dly,
+ .setup_overlay = rk3568_vop2_setup_overlay,
+};
+
+static const struct vop2_ops rk3576_vop_ops = {
+ .setup_intf_mux = rk3576_set_intf_mux,
+ .setup_bg_dly = rk3576_vop2_setup_bg_dly,
+ .setup_overlay = rk3576_vop2_setup_overlay,
+};
+
+static const struct vop2_ops rk3588_vop_ops = {
+ .setup_intf_mux = rk3588_set_intf_mux,
+ .setup_bg_dly = rk3568_vop2_setup_bg_dly,
+ .setup_overlay = rk3568_vop2_setup_overlay,
+};
+
static const struct vop2_data rk3566_vop = {
+ .version = VOP_VERSION_RK3568,
.feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
@@ -655,12 +2386,18 @@ static const struct vop2_data rk3566_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .cluster_reg = rk3568_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs),
+ .smart_reg = rk3568_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs),
.regs_dump = rk3568_regs_dump,
.regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
+ .ops = &rk3568_vop_ops,
.soc_id = 3566,
};
static const struct vop2_data rk3568_vop = {
+ .version = VOP_VERSION_RK3568,
.feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
@@ -668,12 +2405,37 @@ static const struct vop2_data rk3568_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .cluster_reg = rk3568_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs),
+ .smart_reg = rk3568_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs),
.regs_dump = rk3568_regs_dump,
.regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
+ .ops = &rk3568_vop_ops,
.soc_id = 3568,
};
+static const struct vop2_data rk3576_vop = {
+ .version = VOP_VERSION_RK3576,
+ .feature = VOP2_FEATURE_HAS_SYS_PMU,
+ .nr_vps = 3,
+ .max_input = { 4096, 4320 },
+ .max_output = { 4096, 4320 },
+ .vp = rk3576_vop_video_ports,
+ .win = rk3576_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3576_vop_win_data),
+ .cluster_reg = rk3576_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3576_vop_cluster_regs),
+ .smart_reg = rk3576_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3576_vop_smart_regs),
+ .regs_dump = rk3576_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3576_regs_dump),
+ .ops = &rk3576_vop_ops,
+ .soc_id = 3576,
+};
+
static const struct vop2_data rk3588_vop = {
+ .version = VOP_VERSION_RK3588,
.feature = VOP2_FEATURE_HAS_SYS_GRF | VOP2_FEATURE_HAS_VO1_GRF |
VOP2_FEATURE_HAS_VOP_GRF | VOP2_FEATURE_HAS_SYS_PMU,
.nr_vps = 4,
@@ -682,8 +2444,13 @@ static const struct vop2_data rk3588_vop = {
.vp = rk3588_vop_video_ports,
.win = rk3588_vop_win_data,
.win_size = ARRAY_SIZE(rk3588_vop_win_data),
+ .cluster_reg = rk3568_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs),
+ .smart_reg = rk3568_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs),
.regs_dump = rk3588_regs_dump,
.regs_dump_size = ARRAY_SIZE(rk3588_regs_dump),
+ .ops = &rk3588_vop_ops,
.soc_id = 3588,
};
@@ -695,6 +2462,9 @@ static const struct of_device_id vop2_dt_match[] = {
.compatible = "rockchip,rk3568-vop",
.data = &rk3568_vop,
}, {
+ .compatible = "rockchip,rk3576-vop",
+ .data = &rk3576_vop,
+ }, {
.compatible = "rockchip,rk3588-vop",
.data = &rk3588_vop
}, {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..a6d2a4722d82 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -28,10 +28,9 @@
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
-#include "gpu_scheduler_trace.h"
+#include "sched_internal.h"
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+#include "gpu_scheduler_trace.h"
/**
* drm_sched_entity_init - Init a context entity used by scheduler when
@@ -92,7 +91,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
* the lowest priority available.
*/
if (entity->priority >= sched_list[0]->num_rqs) {
- drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+ dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
entity->priority, sched_list[0]->num_rqs);
entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
(s32) DRM_SCHED_PRIORITY_KERNEL);
@@ -152,18 +151,6 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
return false;
}
-/* Return true if entity could provide a job. */
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
-{
- if (spsc_queue_peek(&entity->job_queue) == NULL)
- return false;
-
- if (READ_ONCE(entity->dependency))
- return false;
-
- return true;
-}
-
/**
* drm_sched_entity_error - return error of last scheduled job
* @entity: scheduler entity to check
@@ -255,7 +242,7 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
/* The entity is guaranteed to not be used by the scheduler */
prev = rcu_dereference_check(entity->last_scheduled, true);
dma_fence_get(prev);
- while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ while ((job = drm_sched_entity_queue_pop(entity))) {
struct drm_sched_fence *s_fence = job->s_fence;
dma_fence_get(&s_fence->finished);
@@ -477,7 +464,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
struct drm_sched_job *sched_job;
- sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ sched_job = drm_sched_entity_queue_peek(entity);
if (!sched_job)
return NULL;
@@ -513,7 +500,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
struct drm_sched_job *next;
- next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ next = drm_sched_entity_queue_peek(entity);
if (next) {
struct drm_sched_rq *rq;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 0f35f009b9d3..e971528504a5 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -29,6 +29,8 @@
#include <drm/gpu_scheduler.h>
+#include "sched_internal.h"
+
static struct kmem_cache *sched_fence_slab;
static int __init drm_sched_fence_slab_init(void)
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
new file mode 100644
index 000000000000..599cf6e1bb74
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_GPU_SCHEDULER_INTERNAL_H_
+#define _DRM_GPU_SCHEDULER_INTERNAL_H_
+
+
+/* Used to choose between FIFO and RR job-scheduling */
+extern int drm_sched_policy;
+
+#define DRM_SCHED_POLICY_RR 0
+#define DRM_SCHED_POLICY_FIFO 1
+
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
+
+void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq, ktime_t ts);
+
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
+
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
+ void *owner);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+ struct dma_fence *parent);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
+
+/**
+ * drm_sched_entity_queue_pop - Low level helper for popping queued jobs
+ *
+ * @entity: scheduler entity
+ *
+ * Low level helper for popping queued jobs.
+ *
+ * Returns: The job dequeued or NULL.
+ */
+static inline struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
+
+/**
+ * drm_sched_entity_queue_peek - Low level helper for peeking at the job queue
+ *
+ * @entity: scheduler entity
+ *
+ * Low level helper for peeking at the job queue.
+ *
+ * Returns: The job at the head of the queue or NULL.
+ */
+static inline struct drm_sched_job *
+drm_sched_entity_queue_peek(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_peek(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
+
+/* Return true if entity could provide a job. */
+static inline bool
+drm_sched_entity_is_ready(struct drm_sched_entity *entity)
+{
+ if (!spsc_queue_count(&entity->job_queue))
+ return false;
+
+ if (READ_ONCE(entity->dependency))
+ return false;
+
+ return true;
+}
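+
+/*
+ * Minimal usage sketch (illustrative, not taken from this patch's callers):
+ *
+ *	struct drm_sched_job *job;
+ *
+ *	if (drm_sched_entity_is_ready(entity))
+ *		job = drm_sched_entity_queue_pop(entity);
+ */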
+
+#endif
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 8c36a59afb72..bfea608a7106 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -78,6 +78,8 @@
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
+#include "sched_internal.h"
+
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
@@ -87,9 +89,6 @@ static struct lockdep_map drm_sched_lockdep_map = {
};
#endif
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
-
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
/**
@@ -103,9 +102,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
{
u32 credits;
- drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
- atomic_read(&sched->credit_count),
- &credits));
+ WARN_ON(check_sub_overflow(sched->credit_limit,
+ atomic_read(&sched->credit_count),
+ &credits));
return credits;
}
@@ -123,16 +122,18 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
{
struct drm_sched_job *s_job;
- s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ s_job = drm_sched_entity_queue_peek(entity);
if (!s_job)
return false;
/* If a job exceeds the credit limit, truncate it to the credit limit
* itself to guarantee forward progress.
*/
- if (drm_WARN(sched, s_job->credits > sched->credit_limit,
- "Jobs may not exceed the credit limit, truncate.\n"))
+ if (s_job->credits > sched->credit_limit) {
+ dev_WARN(sched->dev,
+ "Jobs may not exceed the credit limit, truncate.\n");
s_job->credits = sched->credit_limit;
+ }
return drm_sched_available_credits(sched) >= s_job->credits;
}
@@ -790,7 +791,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
* or worse--a blank screen--leave a trail in the
* logs, so this can be debugged easier.
*/
- drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+ dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
return -ENOENT;
}
@@ -1014,11 +1015,13 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
- * before drm_sched_job_arm() is called.
+ * before it was submitted to an entity with drm_sched_entity_push_job().
+ *
+ * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
+ * it is up to the driver to ensure that fences that were exposed to external
+ * parties get signaled. drm_sched_job_cleanup() does not ensure this.
*
- * After that point of no return @job is committed to be executed by the
- * scheduler, and this function should be called from the
- * &drm_sched_backend_ops.free_job callback.
+ * This function must also be called in &struct drm_sched_backend_ops.free_job
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1029,7 +1032,7 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
/* drm_sched_job_arm() has been called */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before committing to run it */
+ /* aborted job before arming */
drm_sched_fence_free(job->s_fence);
}
@@ -1264,7 +1267,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
/* This is a gross violation--tell drivers what the problem is.
*/
- drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
+ dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
__func__);
return -EINVAL;
} else if (sched->sched_rq) {
@@ -1272,7 +1275,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
* fine-tune their DRM calling order, and return all
* is good.
*/
- drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
+ dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
return 0;
}
@@ -1327,7 +1330,7 @@ Out_unroll:
Out_check_own:
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
- drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
+ dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 54a73753eff9..ba315c66a04d 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1900,7 +1900,6 @@ int ltdc_load(struct drm_device *ddev)
struct drm_panel *panel;
struct drm_crtc *crtc;
struct reset_control *rstc;
- struct resource *res;
int irq, i, nb_endpoints;
int ret = -ENODEV;
@@ -1966,8 +1965,7 @@ int ltdc_load(struct drm_device *ddev)
reset_control_deassert(rstc);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ldev->regs = devm_ioremap_resource(dev, res);
+ ldev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->regs)) {
DRM_ERROR("Unable to get ltdc registers\n");
ret = PTR_ERR(ldev->regs);
diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c
index 5945c3298901..2f6ac7a09f44 100644
--- a/drivers/gpu/drm/tests/drm_atomic_state_test.c
+++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c
@@ -189,7 +189,7 @@ static int set_up_atomic_state(struct kunit *test,
static void drm_test_check_connector_changed_modeset(struct kunit *test)
{
struct drm_atomic_test_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector *old_conn, *new_conn;
struct drm_atomic_state *state;
struct drm_device *drm;
@@ -203,14 +203,13 @@ static void drm_test_check_connector_changed_modeset(struct kunit *test)
old_conn = &priv->connectors[0];
new_conn = &priv->connectors[1];
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
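+ /*
+ * The ctx now lives on the stack instead of being KUnit-managed, so
+ * the test must drop its locks and call drm_modeset_acquire_fini()
+ * before returning (see the end of this test).
+ */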
// first modeset to enable
- ret = set_up_atomic_state(test, priv, old_conn, ctx);
+ ret = set_up_atomic_state(test, priv, old_conn, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, new_conn);
@@ -231,6 +230,9 @@ static void drm_test_check_connector_changed_modeset(struct kunit *test)
ret = drm_atomic_commit(state);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_EQ(test, modeset_counter, initial_modeset_count + 1);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -263,7 +265,7 @@ static void drm_test_check_valid_clones(struct kunit *test)
int ret;
const struct drm_clone_mode_test *param = test->param_value;
struct drm_atomic_test_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_device *drm;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
@@ -273,13 +275,12 @@ static void drm_test_check_valid_clones(struct kunit *test)
drm = &priv->drm;
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = set_up_atomic_state(test, priv, NULL, ctx);
+ ret = set_up_atomic_state(test, priv, NULL, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
crtc_state = drm_atomic_get_crtc_state(state, priv->crtc);
@@ -292,6 +293,9 @@ static void drm_test_check_valid_clones(struct kunit *test)
ret = drm_atomic_helper_check_modeset(drm, state);
KUNIT_ASSERT_EQ(test, ret, param->expected_result);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static void drm_check_in_clone_mode_desc(const struct drm_clone_mode_test *t,
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 08992636ec05..35cd3405d045 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -60,6 +60,11 @@ struct convert_to_rgb888_result {
const u8 expected[TEST_BUF_SIZE];
};
+struct convert_to_bgr888_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
struct convert_to_argb8888_result {
unsigned int dst_pitch;
const u32 expected[TEST_BUF_SIZE];
@@ -107,6 +112,7 @@ struct convert_xrgb8888_case {
struct convert_to_argb1555_result argb1555_result;
struct convert_to_rgba5551_result rgba5551_result;
struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_bgr888_result bgr888_result;
struct convert_to_argb8888_result argb8888_result;
struct convert_to_xrgb2101010_result xrgb2101010_result;
struct convert_to_argb2101010_result argb2101010_result;
@@ -151,6 +157,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0x00, 0x00, 0xFF },
},
+ .bgr888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0xFF, 0x00, 0x00 },
+ },
.argb8888_result = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0xFFFF0000 },
@@ -217,6 +227,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0x00, 0x00, 0xFF },
},
+ .bgr888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0xFF, 0x00, 0x00 },
+ },
.argb8888_result = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0xFFFF0000 },
@@ -330,6 +344,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
},
},
+ .bgr888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = {
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+ 0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00,
+ 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF,
+ 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF,
+ },
+ },
.argb8888_result = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = {
@@ -468,6 +491,17 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .bgr888_result = {
+ .dst_pitch = 15,
+ .expected = {
+ 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 0xA8, 0xF3, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xA8, 0x03, 0x03, 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
.argb8888_result = {
.dst_pitch = 20,
.expected = {
@@ -914,6 +948,52 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
+static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_bgr888_result *result = &params->bgr888_result;
+ size_t dst_size;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_BGR888, result->dst_pitch,
+ &params->clip, 0);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ /*
+ * BGR888 expected results are already in little-endian
+ * order, so there's no need to convert the test output.
+ */
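+ /*
+ * Illustrative byte order: DRM_FORMAT_BGR888 stores R, G, B at
+ * ascending memory addresses, so opaque red (XRGB8888 0xFFFF0000)
+ * becomes the bytes { 0xFF, 0x00, 0x00 } in the expected data.
+ */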
+ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+ int blit_result = 0;
+
+ blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip,
+ &fmtcnv_state);
+
+ KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+}
+
static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
@@ -1851,6 +1931,7 @@ static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_bgr888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 23ecc00accb2..e97efd3af9ed 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -273,7 +273,7 @@ drm_kunit_helper_connector_hdmi_init(struct kunit *test,
static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -296,13 +296,12 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -327,6 +326,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -337,7 +339,7 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -360,13 +362,12 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -393,6 +394,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -403,7 +407,7 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
@@ -426,13 +430,12 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -449,6 +452,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -459,7 +465,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *mode;
@@ -477,17 +483,16 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -504,6 +509,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -514,7 +522,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
@@ -537,13 +545,12 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -562,6 +569,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
DRM_HDMI_BROADCAST_RGB_FULL);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -572,7 +582,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *mode;
@@ -590,17 +600,16 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -619,6 +628,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
DRM_HDMI_BROADCAST_RGB_FULL);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -629,7 +641,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
@@ -652,13 +664,12 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -677,6 +688,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
DRM_HDMI_BROADCAST_RGB_LIMITED);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -687,7 +701,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *mode;
@@ -705,17 +719,16 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -734,6 +747,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
DRM_HDMI_BROADCAST_RGB_LIMITED);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -744,7 +760,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -771,13 +787,12 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -808,6 +823,9 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -818,7 +836,7 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -845,13 +863,12 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -880,6 +897,9 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -889,7 +909,7 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
static void drm_test_check_output_bpc_dvi(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -919,10 +939,9 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -930,6 +949,9 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -939,7 +961,7 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_mode *preferred;
struct drm_connector *conn;
@@ -964,10 +986,9 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -976,6 +997,9 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1000);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -986,7 +1010,7 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_mode *preferred;
struct drm_connector *conn;
@@ -1011,10 +1035,9 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1023,6 +1046,9 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 10);
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1033,7 +1059,7 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_mode *preferred;
struct drm_connector *conn;
@@ -1058,10 +1084,9 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1070,6 +1095,9 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 12);
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1500);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1083,7 +1111,7 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
struct drm_crtc_state *crtc_state;
@@ -1104,16 +1132,15 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
conn->hdmi.funcs = &reject_connector_hdmi_funcs;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1123,6 +1150,9 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1139,7 +1169,7 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1176,10 +1206,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1188,6 +1217,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1206,7 +1238,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1248,10 +1280,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1259,6 +1290,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1269,7 +1303,7 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *mode;
@@ -1310,11 +1344,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
rate = mode->clock * 1500;
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1322,6 +1355,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1331,7 +1367,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1376,10 +1412,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1387,6 +1422,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1396,7 +1434,7 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1443,10 +1481,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1454,6 +1491,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1464,7 +1504,7 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1501,10 +1541,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1512,6 +1551,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1522,7 +1564,7 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1561,10 +1603,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1572,13 +1613,16 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/* Test that atomic check succeeds when disabling a connector. */
static void drm_test_check_disable_connector(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
struct drm_atomic_state *state;
@@ -1593,8 +1637,7 @@ static void drm_test_check_disable_connector(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
conn = &priv->connector;
preferred = find_preferred_mode(conn);
@@ -1602,10 +1645,10 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1623,6 +1666,9 @@ static void drm_test_check_disable_connector(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
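/*
 * All of the tests above follow the same locking pattern; a minimal sketch
 * (light_up_connector() is the helper used throughout this file):
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
 *	KUNIT_ASSERT_EQ(test, ret, 0);
 *
 *	// ... assertions on conn->state ...
 *
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */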
static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 3c0b7824c0be..a4eb68f0decc 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -80,47 +80,6 @@ __drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
}
EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver);
-static void action_drm_release_context(void *ptr)
-{
- struct drm_modeset_acquire_ctx *ctx = ptr;
-
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
-}
-
-/**
- * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context
- * @test: The test context object
- *
- * Allocates and initializes a modeset acquire context.
- *
- * The context is tied to the kunit test context, so we must not call
- * drm_modeset_acquire_fini() on it, it will be done so automatically.
- *
- * Returns:
- * An ERR_PTR on error, a pointer to the newly allocated context otherwise
- */
-struct drm_modeset_acquire_ctx *
-drm_kunit_helper_acquire_ctx_alloc(struct kunit *test)
-{
- struct drm_modeset_acquire_ctx *ctx;
- int ret;
-
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ctx);
-
- drm_modeset_acquire_init(ctx, 0);
-
- ret = kunit_add_action_or_reset(test,
- action_drm_release_context,
- ctx);
- if (ret)
- return ERR_PTR(ret);
-
- return ctx;
-}
-EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc);
-
static void kunit_action_drm_atomic_state_put(void *ptr)
{
struct drm_atomic_state *state = ptr;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 94cbdb1337c0..54c84c9801c1 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,5 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_APPLETBDRM
+ tristate "DRM support for Apple Touch Bars"
+ depends on DRM && USB && MMU
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+ Say Y here if you want support for the display of Touch Bars on x86
+ MacBook Pros.
+
+ To compile this driver as a module, choose M here: the
+ module will be called appletbdrm.
+
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 60816d2eb4ff..0a3a7837a58b 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_APPLETBDRM) += appletbdrm.o
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
new file mode 100644
index 000000000000..394c8f9bd41a
--- /dev/null
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -0,0 +1,841 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Touch Bar DRM Driver
+ *
+ * Copyright (c) 2023 Kerem Karabay <kekrby@gmail.com>
+ */
+
+#include <linux/align.h>
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <linux/usb.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#define APPLETBDRM_PIXEL_FORMAT cpu_to_le32(0x52474241) /* RGBA, the actual format is BGR888 */
+#define APPLETBDRM_BITS_PER_PIXEL 24
+
+#define APPLETBDRM_MSG_CLEAR_DISPLAY cpu_to_le32(0x434c5244) /* CLRD */
+#define APPLETBDRM_MSG_GET_INFORMATION cpu_to_le32(0x47494e46) /* GINF */
+#define APPLETBDRM_MSG_UPDATE_COMPLETE cpu_to_le32(0x5544434c) /* UDCL */
+#define APPLETBDRM_MSG_SIGNAL_READINESS cpu_to_le32(0x52454459) /* REDY */
+
+#define APPLETBDRM_BULK_MSG_TIMEOUT 1000
+
+#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm)
+#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev))
+
+struct appletbdrm_msg_request_header {
+ __le16 unk_00;
+ __le16 unk_02;
+ __le32 unk_04;
+ __le32 unk_08;
+ __le32 size;
+} __packed;
+
+struct appletbdrm_msg_response_header {
+ u8 unk_00[16];
+ __le32 msg;
+} __packed;
+
+struct appletbdrm_msg_simple_request {
+ struct appletbdrm_msg_request_header header;
+ __le32 msg;
+ u8 unk_14[8];
+ __le32 size;
+} __packed;
+
+struct appletbdrm_msg_information {
+ struct appletbdrm_msg_response_header header;
+ u8 unk_14[12];
+ __le32 width;
+ __le32 height;
+ u8 bits_per_pixel;
+ __le32 bytes_per_row;
+ __le32 orientation;
+ __le32 bitmap_info;
+ __le32 pixel_format;
+ __le32 width_inches; /* floating point */
+ __le32 height_inches; /* floating point */
+} __packed;
+
+struct appletbdrm_frame {
+ __le16 begin_x;
+ __le16 begin_y;
+ __le16 width;
+ __le16 height;
+ __le32 buf_size;
+ u8 buf[];
+} __packed;
+
+struct appletbdrm_fb_request_footer {
+ u8 unk_00[12];
+ __le32 unk_0c;
+ u8 unk_10[12];
+ __le32 unk_1c;
+ __le64 timestamp;
+ u8 unk_28[12];
+ __le32 unk_34;
+ u8 unk_38[20];
+ __le32 unk_4c;
+} __packed;
+
+struct appletbdrm_fb_request {
+ struct appletbdrm_msg_request_header header;
+ __le16 unk_10;
+ u8 msg_id;
+ u8 unk_13[29];
+ /*
+ * Contents of `data`:
+ * - struct appletbdrm_frame frames[];
+ * - struct appletbdrm_fb_request_footer footer;
+ * - padding to make the total size a multiple of 16
+ */
+ u8 data[];
+} __packed;
+
+struct appletbdrm_fb_request_response {
+ struct appletbdrm_msg_response_header header;
+ u8 unk_14[12];
+ __le64 timestamp;
+} __packed;
+
+struct appletbdrm_device {
+ struct device *dmadev;
+
+ unsigned int in_ep;
+ unsigned int out_ep;
+
+ unsigned int width;
+ unsigned int height;
+
+ struct drm_device drm;
+ struct drm_display_mode mode;
+ struct drm_connector connector;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+};
+
+struct appletbdrm_plane_state {
+ struct drm_shadow_plane_state base;
+ struct appletbdrm_fb_request *request;
+ struct appletbdrm_fb_request_response *response;
+ size_t request_size;
+ size_t frames_size;
+};
+
+static inline struct appletbdrm_plane_state *to_appletbdrm_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct appletbdrm_plane_state, base.base);
+}
+
+static int appletbdrm_send_request(struct appletbdrm_device *adev,
+ struct appletbdrm_msg_request_header *request, size_t size)
+{
+ struct usb_device *udev = adev_to_udev(adev);
+ struct drm_device *drm = &adev->drm;
+ int ret, actual_size;
+
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, adev->out_ep),
+ request, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT);
+ if (ret) {
+ drm_err(drm, "Failed to send message (%d)\n", ret);
+ return ret;
+ }
+
+ if (actual_size != size) {
+ drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n",
+ actual_size, size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int appletbdrm_read_response(struct appletbdrm_device *adev,
+ struct appletbdrm_msg_response_header *response,
+ size_t size, __le32 expected_response)
+{
+ struct usb_device *udev = adev_to_udev(adev);
+ struct drm_device *drm = &adev->drm;
+ int ret, actual_size;
+ bool readiness_signal_received = false;
+
+retry:
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, adev->in_ep),
+ response, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT);
+ if (ret) {
+ drm_err(drm, "Failed to read response (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * The device responds to the first request sent in a particular
+ * timeframe after the USB device configuration is set with a readiness
+ * signal, in which case the response should be read again
+ */
+ if (response->msg == APPLETBDRM_MSG_SIGNAL_READINESS) {
+ if (!readiness_signal_received) {
+ readiness_signal_received = true;
+ goto retry;
+ }
+
+ drm_err(drm, "Encountered unexpected readiness signal\n");
+ return -EINTR;
+ }
+
+ if (actual_size != size) {
+ drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n",
+ actual_size, size);
+ return -EBADMSG;
+ }
+
+ if (response->msg != expected_response) {
+ drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n",
+ &expected_response, &response->msg);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int appletbdrm_send_msg(struct appletbdrm_device *adev, __le32 msg)
+{
+ struct appletbdrm_msg_simple_request *request;
+ int ret;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->header.unk_00 = cpu_to_le16(2);
+ request->header.unk_02 = cpu_to_le16(0x1512);
+ request->header.size = cpu_to_le32(sizeof(*request) - sizeof(request->header));
+ request->msg = msg;
+ request->size = request->header.size;
+
+ ret = appletbdrm_send_request(adev, &request->header, sizeof(*request));
+
+ kfree(request);
+
+ return ret;
+}
+
+static int appletbdrm_clear_display(struct appletbdrm_device *adev)
+{
+ return appletbdrm_send_msg(adev, APPLETBDRM_MSG_CLEAR_DISPLAY);
+}
+
+static int appletbdrm_signal_readiness(struct appletbdrm_device *adev)
+{
+ return appletbdrm_send_msg(adev, APPLETBDRM_MSG_SIGNAL_READINESS);
+}
+
+static int appletbdrm_get_information(struct appletbdrm_device *adev)
+{
+ struct appletbdrm_msg_information *info;
+ struct drm_device *drm = &adev->drm;
+ u8 bits_per_pixel;
+ __le32 pixel_format;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = appletbdrm_send_msg(adev, APPLETBDRM_MSG_GET_INFORMATION);
+	if (ret)
+		goto free_info;
+
+ ret = appletbdrm_read_response(adev, &info->header, sizeof(*info),
+ APPLETBDRM_MSG_GET_INFORMATION);
+ if (ret)
+ goto free_info;
+
+ bits_per_pixel = info->bits_per_pixel;
+ pixel_format = get_unaligned(&info->pixel_format);
+
+ adev->width = get_unaligned_le32(&info->width);
+ adev->height = get_unaligned_le32(&info->height);
+
+ if (bits_per_pixel != APPLETBDRM_BITS_PER_PIXEL) {
+ drm_err(drm, "Encountered unexpected bits per pixel value (%d)\n", bits_per_pixel);
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ if (pixel_format != APPLETBDRM_PIXEL_FORMAT) {
+ drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format);
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+free_info:
+ kfree(info);
+
+ return ret;
+}
+
+static u32 rect_size(struct drm_rect *rect)
+{
+ return drm_rect_width(rect) * drm_rect_height(rect) *
+ (BITS_TO_BYTES(APPLETBDRM_BITS_PER_PIXEL));
+}
+
+static int appletbdrm_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct appletbdrm_device *adev = drm_to_adev(connector->dev);
+
+ return drm_connector_helper_get_modes_fixed(connector, &adev->mode);
+}
+
+static const u32 appletbdrm_primary_plane_formats[] = {
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB8888, /* emulated */
+};
+
+static int appletbdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(new_plane_state);
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ size_t frames_size = 0;
+ size_t request_size;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, new_plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ frames_size += struct_size((struct appletbdrm_frame *)0, buf, rect_size(&damage));
+ }
+
+ if (!frames_size)
+ return 0;
+
+ request_size = ALIGN(sizeof(struct appletbdrm_fb_request) +
+ frames_size +
+ sizeof(struct appletbdrm_fb_request_footer), 16);
+
+	appletbdrm_state->request = kzalloc(request_size, GFP_KERNEL);
+	if (!appletbdrm_state->request)
+ return -ENOMEM;
+
+	appletbdrm_state->response = kzalloc(sizeof(*appletbdrm_state->response), GFP_KERNEL);
+	if (!appletbdrm_state->response)
+ return -ENOMEM;
+
+ appletbdrm_state->request_size = request_size;
+ appletbdrm_state->frames_size = frames_size;
+
+ return 0;
+}
+
+static int appletbdrm_flush_damage(struct appletbdrm_device *adev,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state)
+{
+ struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct appletbdrm_fb_request_response *response = appletbdrm_state->response;
+ struct appletbdrm_fb_request_footer *footer;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_framebuffer *fb = state->fb;
+ struct appletbdrm_fb_request *request = appletbdrm_state->request;
+ struct drm_device *drm = &adev->drm;
+ struct appletbdrm_frame *frame;
+ u64 timestamp = ktime_get_ns();
+ struct drm_rect damage;
+ size_t frames_size = appletbdrm_state->frames_size;
+ size_t request_size = appletbdrm_state->request_size;
+ int ret;
+
+ if (!frames_size)
+ return 0;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret) {
+ drm_err(drm, "Failed to start CPU framebuffer access (%d)\n", ret);
+		return ret;
+ }
+
+ request->header.unk_00 = cpu_to_le16(2);
+ request->header.unk_02 = cpu_to_le16(0x12);
+ request->header.unk_04 = cpu_to_le32(9);
+ request->header.size = cpu_to_le32(request_size - sizeof(request->header));
+ request->unk_10 = cpu_to_le16(1);
+ request->msg_id = timestamp;
+
+ frame = (struct appletbdrm_frame *)request->data;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct drm_rect dst_clip = state->dst;
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR(frame->buf);
+ u32 buf_size = rect_size(&damage);
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ /*
+ * The coordinates need to be translated to the coordinate
+ * system the device expects, see the comment in
+ * appletbdrm_setup_mode_config
+ */
+ frame->begin_x = cpu_to_le16(damage.y1);
+ frame->begin_y = cpu_to_le16(adev->height - damage.x2);
+ frame->width = cpu_to_le16(drm_rect_height(&damage));
+ frame->height = cpu_to_le16(drm_rect_width(&damage));
+ frame->buf_size = cpu_to_le32(buf_size);
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ drm_fb_xrgb8888_to_bgr888(&dst, NULL, &shadow_plane_state->data[0], fb, &damage, &shadow_plane_state->fmtcnv_state);
+ break;
+ default:
+ drm_fb_memcpy(&dst, NULL, &shadow_plane_state->data[0], fb, &damage);
+ break;
+ }
+
+ frame = (void *)frame + struct_size(frame, buf, buf_size);
+ }
+
+ footer = (struct appletbdrm_fb_request_footer *)&request->data[frames_size];
+
+ footer->unk_0c = cpu_to_le32(0xfffe);
+ footer->unk_1c = cpu_to_le32(0x80001);
+ footer->unk_34 = cpu_to_le32(0x80002);
+ footer->unk_4c = cpu_to_le32(0xffff);
+ footer->timestamp = cpu_to_le64(timestamp);
+
+ ret = appletbdrm_send_request(adev, &request->header, request_size);
+ if (ret)
+ goto end_fb_cpu_access;
+
+ ret = appletbdrm_read_response(adev, &response->header, sizeof(*response),
+ APPLETBDRM_MSG_UPDATE_COMPLETE);
+ if (ret)
+ goto end_fb_cpu_access;
+
+ if (response->timestamp != footer->timestamp) {
+ drm_err(drm, "Response timestamp (%llu) doesn't match request timestamp (%llu)\n",
+ le64_to_cpu(response->timestamp), timestamp);
+		ret = -EBADMSG;
+		goto end_fb_cpu_access;
+ }
+
+end_fb_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+ return ret;
+}
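+/*
+ * A worked example of the coordinate translation above, with hypothetical
+ * numbers: for adev->height == 2170 and a damage rect spanning
+ * (x1, y1)-(x2, y2) = (100, 0)-(200, 60), the device is sent
+ * begin_x = 0, begin_y = 2170 - 200 = 1970, width = 60 and height = 100.
+ */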
+
+static void appletbdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *old_state)
+{
+ struct appletbdrm_device *adev = drm_to_adev(plane->dev);
+ struct drm_device *drm = plane->dev;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ appletbdrm_flush_damage(adev, old_plane_state, plane_state);
+
+ drm_dev_exit(idx);
+}
+
+static void appletbdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct appletbdrm_device *adev = drm_to_adev(dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ appletbdrm_clear_display(adev);
+
+ drm_dev_exit(idx);
+}
+
+static void appletbdrm_primary_plane_reset(struct drm_plane *plane)
+{
+ struct appletbdrm_plane_state *appletbdrm_state;
+
+ WARN_ON(plane->state);
+
+ appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL);
+ if (!appletbdrm_state)
+ return;
+
+ __drm_gem_reset_shadow_plane(plane, &appletbdrm_state->base);
+}
+
+static struct drm_plane_state *appletbdrm_primary_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_shadow_plane_state *new_shadow_plane_state;
+ struct appletbdrm_plane_state *appletbdrm_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL);
+ if (!appletbdrm_state)
+ return NULL;
+
+ /* Request and response are not duplicated and are allocated in .atomic_check */
+ appletbdrm_state->request = NULL;
+ appletbdrm_state->response = NULL;
+
+ appletbdrm_state->request_size = 0;
+ appletbdrm_state->frames_size = 0;
+
+ new_shadow_plane_state = &appletbdrm_state->base;
+
+ __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+
+ return &new_shadow_plane_state->base;
+}
+
+static void appletbdrm_primary_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state);
+
+ kfree(appletbdrm_state->request);
+ kfree(appletbdrm_state->response);
+
+ __drm_gem_destroy_shadow_plane_state(&appletbdrm_state->base);
+
+ kfree(appletbdrm_state);
+}
+
+static const struct drm_plane_helper_funcs appletbdrm_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = appletbdrm_primary_plane_helper_atomic_check,
+ .atomic_update = appletbdrm_primary_plane_helper_atomic_update,
+ .atomic_disable = appletbdrm_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs appletbdrm_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = appletbdrm_primary_plane_reset,
+ .atomic_duplicate_state = appletbdrm_primary_plane_duplicate_state,
+ .atomic_destroy_state = appletbdrm_primary_plane_destroy_state,
+ .destroy = drm_plane_cleanup,
+};
+
+static enum drm_mode_status appletbdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct appletbdrm_device *adev = drm_to_adev(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &adev->mode);
+}
+
+static const struct drm_mode_config_funcs appletbdrm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_connector_funcs appletbdrm_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .destroy = drm_connector_cleanup,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs appletbdrm_connector_helper_funcs = {
+ .get_modes = appletbdrm_connector_helper_get_modes,
+};
+
+static const struct drm_crtc_helper_funcs appletbdrm_crtc_helper_funcs = {
+ .mode_valid = appletbdrm_crtc_helper_mode_valid,
+};
+
+static const struct drm_crtc_funcs appletbdrm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_encoder_funcs appletbdrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct appletbdrm_device *adev = drm_to_adev(dev);
+
+ if (!adev->dmadev)
+ return ERR_PTR(-ENODEV);
+
+ return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev);
+}
+
+DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops);
+
+static const struct drm_driver appletbdrm_drm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .gem_prime_import = appletbdrm_driver_gem_prime_import,
+ .name = "appletbdrm",
+ .desc = "Apple Touch Bar DRM Driver",
+ .major = 1,
+ .minor = 0,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ .fops = &appletbdrm_drm_fops,
+};
+
+static int appletbdrm_setup_mode_config(struct appletbdrm_device *adev)
+{
+ struct drm_connector *connector = &adev->connector;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_device *drm = &adev->drm;
+ int ret;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret) {
+ drm_err(drm, "Failed to initialize mode configuration\n");
+ return ret;
+ }
+
+ primary_plane = &adev->primary_plane;
+ ret = drm_universal_plane_init(drm, primary_plane, 0,
+ &appletbdrm_primary_plane_funcs,
+ appletbdrm_primary_plane_formats,
+ ARRAY_SIZE(appletbdrm_primary_plane_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(drm, "Failed to initialize universal plane object\n");
+ return ret;
+ }
+
+ drm_plane_helper_add(primary_plane, &appletbdrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ crtc = &adev->crtc;
+ ret = drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ &appletbdrm_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(drm, "Failed to initialize CRTC object\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &appletbdrm_crtc_helper_funcs);
+
+ encoder = &adev->encoder;
+ ret = drm_encoder_init(drm, encoder, &appletbdrm_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(drm, "Failed to initialize encoder\n");
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /*
+ * The coordinate system used by the device is different from the
+ * coordinate system of the framebuffer in that the x and y axes are
+ * swapped, and that the y axis is inverted; so what the device reports
+ * as the height is actually the width of the framebuffer and vice
+ * versa.
+ */
+ drm->mode_config.max_width = max(adev->height, DRM_SHADOW_PLANE_MAX_WIDTH);
+ drm->mode_config.max_height = max(adev->width, DRM_SHADOW_PLANE_MAX_HEIGHT);
+ drm->mode_config.preferred_depth = APPLETBDRM_BITS_PER_PIXEL;
+ drm->mode_config.funcs = &appletbdrm_mode_config_funcs;
+
+ adev->mode = (struct drm_display_mode) {
+ DRM_MODE_INIT(60, adev->height, adev->width,
+ DRM_MODE_RES_MM(adev->height, 218),
+ DRM_MODE_RES_MM(adev->width, 218))
+ };
+
+ ret = drm_connector_init(drm, connector,
+ &appletbdrm_connector_funcs, DRM_MODE_CONNECTOR_USB);
+ if (ret) {
+ drm_err(drm, "Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &appletbdrm_connector_helper_funcs);
+
+ ret = drm_connector_set_panel_orientation(connector,
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP);
+ if (ret) {
+ drm_err(drm, "Failed to set panel orientation\n");
+ return ret;
+ }
+
+ connector->display_info.non_desktop = true;
+ ret = drm_object_property_set_value(&connector->base,
+ drm->mode_config.non_desktop_property, true);
+ if (ret) {
+ drm_err(drm, "Failed to set non-desktop property\n");
+ return ret;
+ }
+
+	ret = drm_connector_attach_encoder(connector, encoder);
+	if (ret) {
+		drm_err(drm, "Failed to attach encoder to connector\n");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ return 0;
+}
+
+static int appletbdrm_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct device *dev = &intf->dev;
+ struct appletbdrm_device *adev;
+	struct drm_device *drm;
+ int ret;
+
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL);
+ if (ret) {
+		dev_err(dev, "Failed to find bulk endpoints\n");
+ return ret;
+ }
+
+ adev = devm_drm_dev_alloc(dev, &appletbdrm_drm_driver, struct appletbdrm_device, drm);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ adev->in_ep = bulk_in->bEndpointAddress;
+ adev->out_ep = bulk_out->bEndpointAddress;
+ adev->dmadev = dev;
+
+ drm = &adev->drm;
+
+ usb_set_intfdata(intf, adev);
+
+ ret = appletbdrm_get_information(adev);
+ if (ret) {
+ drm_err(drm, "Failed to get display information\n");
+ return ret;
+ }
+
+ ret = appletbdrm_signal_readiness(adev);
+ if (ret) {
+ drm_err(drm, "Failed to signal readiness\n");
+ return ret;
+ }
+
+ ret = appletbdrm_setup_mode_config(adev);
+ if (ret) {
+ drm_err(drm, "Failed to setup mode config\n");
+ return ret;
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret) {
+ drm_err(drm, "Failed to register DRM device\n");
+ return ret;
+ }
+
+ ret = appletbdrm_clear_display(adev);
+ if (ret) {
+ drm_err(drm, "Failed to clear display\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void appletbdrm_disconnect(struct usb_interface *intf)
+{
+ struct appletbdrm_device *adev = usb_get_intfdata(intf);
+ struct drm_device *drm = &adev->drm;
+
+ put_device(adev->dmadev);
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static void appletbdrm_shutdown(struct usb_interface *intf)
+{
+ struct appletbdrm_device *adev = usb_get_intfdata(intf);
+
+ /*
+ * The framebuffer needs to be cleared on shutdown since its content
+ * persists across boots
+ */
+ drm_atomic_helper_shutdown(&adev->drm);
+}
+
+static const struct usb_device_id appletbdrm_usb_id_table[] = {
+ { USB_DEVICE_INTERFACE_CLASS(0x05ac, 0x8302, USB_CLASS_AUDIO_VIDEO) },
+ {}
+};
+MODULE_DEVICE_TABLE(usb, appletbdrm_usb_id_table);
+
+static struct usb_driver appletbdrm_usb_driver = {
+ .name = "appletbdrm",
+ .probe = appletbdrm_probe,
+ .disconnect = appletbdrm_disconnect,
+ .shutdown = appletbdrm_shutdown,
+ .id_table = appletbdrm_usb_id_table,
+};
+module_usb_driver(appletbdrm_usb_driver);
+
+MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>");
+MODULE_DESCRIPTION("Apple Touch Bar DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 2748d1f21d86..7cf0f0ea1bfe 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -253,7 +253,6 @@ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct drm_connector *connector = NULL;
struct drm_device *drm = &arcpgu->drm;
- struct resource *res;
int ret;
arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
@@ -270,8 +269,7 @@ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
drm->mode_config.max_height = 1080;
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
+ arcpgu->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 52ba6c699bc8..5c3b51eb0a97 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -456,7 +456,7 @@ static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value,
enum repaper_stage stage)
{
u64 start = local_clock();
- u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+ u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
do {
repaper_frame_fixed(epd, fixed_value, stage);
@@ -467,7 +467,7 @@ static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image,
const u8 *mask, enum repaper_stage stage)
{
u64 start = local_clock();
- u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+ u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
do {
repaper_frame_data(epd, image, mask, stage);
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index dad298127226..40d07a35293a 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,7 +4,7 @@
ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
- ttm_device.o ttm_sys_manager.o
+ ttm_device.o ttm_sys_manager.o ttm_backup.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
new file mode 100644
index 000000000000..93c007f18855
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_backup.h>
+#include <linux/page-flags.h>
+#include <linux/swap.h>
+
+/*
+ * Casting from randomized struct file * to struct ttm_backup * is fine since
+ * struct ttm_backup is never defined nor dereferenced.
+ */
+static struct file *ttm_backup_to_file(struct ttm_backup *backup)
+{
+ return (void *)backup;
+}
+
+static struct ttm_backup *ttm_file_to_backup(struct file *file)
+{
+ return (void *)file;
+}
+
+/*
+ * Need to map shmem indices to handle since a handle value
+ * of 0 means error, following the swp_entry_t convention.
+ */
+static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
+{
+ return (unsigned long)idx + 1;
+}
+
+static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
+{
+ return handle - 1;
+}
+
+/**
+ * ttm_backup_drop() - release memory associated with a handle
+ * @backup: The struct backup pointer used to obtain the handle
+ * @handle: The handle obtained from ttm_backup_backup_page().
+ */
+void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+{
+ loff_t start = ttm_backup_handle_to_shmem_idx(handle);
+
+ start <<= PAGE_SHIFT;
+ shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+ start + PAGE_SIZE - 1);
+}
+
+/**
+ * ttm_backup_copy_page() - Copy the contents of a previously backed
+ * up page
+ * @backup: The struct backup pointer used to back up the page.
+ * @dst: The struct page to copy into.
+ * @handle: The handle returned when the page was backed up.
+ * @intr: Whether waits should be interruptible, or at least killable.
+ *
+ * Return: 0 on success, Negative error code on failure, notably
+ * -EINTR if @intr was set to true and a signal is pending.
+ */
+int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+ pgoff_t handle, bool intr)
+{
+ struct file *filp = ttm_backup_to_file(backup);
+ struct address_space *mapping = filp->f_mapping;
+ struct folio *from_folio;
+ pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
+
+ from_folio = shmem_read_folio(mapping, idx);
+ if (IS_ERR(from_folio))
+ return PTR_ERR(from_folio);
+
+ copy_highpage(dst, folio_file_page(from_folio, idx));
+ folio_put(from_folio);
+
+ return 0;
+}
+
+/**
+ * ttm_backup_backup_page() - Backup a page
+ * @backup: The struct backup pointer to use.
+ * @page: The page to back up.
+ * @writeback: Whether to perform immediate writeback of the page.
+ * This may have performance implications.
+ * @idx: A unique integer for each page and each struct backup.
+ * This allows the backup implementation to avoid managing
+ * its address space separately.
+ * @page_gfp: The gfp value used when the page was allocated.
+ * This is used for accounting purposes.
+ * @alloc_gfp: The gfp to be used when allocating memory.
+ *
+ * Context: If called from reclaim context, the caller needs to
+ * assert that the shrinker gfp has __GFP_FS set, to avoid
+ * deadlocking on lock_page(). If @writeback is set to true and
+ * called from reclaim context, the caller also needs to assert
+ * that the shrinker gfp has __GFP_IO set, since without it,
+ * we're not allowed to start backup IO.
+ *
+ * Return: A handle on success. Negative error code on failure.
+ *
+ * Note: This function could be extended to back up a folio and
+ * implementations would then split the folio internally if needed.
+ * Drawback is that the caller would then have to keep track of
+ * the folio size and usage.
+ */
+s64
+ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp)
+{
+ struct file *filp = ttm_backup_to_file(backup);
+ struct address_space *mapping = filp->f_mapping;
+ unsigned long handle = 0;
+ struct folio *to_folio;
+ int ret;
+
+ to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
+ if (IS_ERR(to_folio))
+ return PTR_ERR(to_folio);
+
+ folio_mark_accessed(to_folio);
+ folio_lock(to_folio);
+ folio_mark_dirty(to_folio);
+ copy_highpage(folio_file_page(to_folio, idx), page);
+ handle = ttm_backup_shmem_idx_to_handle(idx);
+
+ if (writeback && !folio_mapped(to_folio) &&
+ folio_clear_dirty_for_io(to_folio)) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .for_reclaim = 1,
+ };
+ folio_set_reclaim(to_folio);
+ ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+ if (!folio_test_writeback(to_folio))
+ folio_clear_reclaim(to_folio);
+ /*
+ * If writepage succeeds, it unlocks the folio.
+ * writepage() errors are otherwise dropped, since writepage()
+ * is only best effort here.
+ */
+ if (ret)
+ folio_unlock(to_folio);
+ } else {
+ folio_unlock(to_folio);
+ }
+
+ folio_put(to_folio);
+
+ return handle;
+}
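+/*
+ * A minimal usage sketch of the backup/restore cycle (error handling
+ * elided; @backup, @page and @idx are assumed to exist):
+ *
+ *	s64 handle = ttm_backup_backup_page(backup, page, false, idx,
+ *					    GFP_KERNEL, GFP_KERNEL);
+ *	if (handle > 0) {
+ *		ttm_backup_copy_page(backup, page, handle, true);
+ *		ttm_backup_drop(backup, handle);
+ *	}
+ */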
+
+/**
+ * ttm_backup_fini() - Free the struct backup resources after last use.
+ * @backup: Pointer to the struct backup whose resources to free.
+ *
+ * After a call to this function, it's illegal to use the @backup pointer.
+ */
+void ttm_backup_fini(struct ttm_backup *backup)
+{
+ fput(ttm_backup_to_file(backup));
+}
+
+/**
+ * ttm_backup_bytes_avail() - Report the approximate number of bytes of
+ * backup space left.
+ *
+ * This function is also intended for driver use, to indicate whether a
+ * backup attempt is meaningful.
+ *
+ * Return: An approximate size of backup space available.
+ */
+u64 ttm_backup_bytes_avail(void)
+{
+ /*
+ * The idea behind backing up to shmem is that shmem objects may
+ * eventually be swapped out. So no point swapping out if there
+ * is no or low swap-space available. But the accuracy of this
+ * number also depends on shmem actually swapping out backed-up
+ * shmem objects without too much buffering.
+ */
+ return (u64)get_nr_swap_pages() << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
+
+/**
+ * ttm_backup_shmem_create() - Create a shmem-based struct backup.
+ * @size: The maximum size (in bytes) to back up.
+ *
+ * Create a backup utilizing shmem objects.
+ *
+ * Return: A pointer to a struct ttm_backup on success,
+ * an error pointer on error.
+ */
+struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+{
+ struct file *filp;
+
+ filp = shmem_file_setup("ttm shmem backup", size, 0);
+
+ return ttm_file_to_backup(filp);
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 917096bd5f68..15cab9bda17f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,7 +28,7 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-
+#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <drm/ttm/ttm_bo.h>
@@ -769,12 +769,10 @@ error_destroy_tt:
return ret;
}
-static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
bool *needs_unlock)
{
- struct ttm_operation_ctx *ctx = walk->ctx;
-
*needs_unlock = false;
if (dma_resv_trylock(bo->base.resv)) {
@@ -877,7 +875,7 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
* since if we do it the other way around, and the trylock fails,
* we need to drop the lru lock to put the bo.
*/
- if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
bo_locked = true;
else if (!walk->ticket || walk->ctx->no_wait_gpu ||
walk->trylock_only)
@@ -920,3 +918,242 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
return progress;
}
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = curs->bo;
+
+ if (bo) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+ curs->bo = NULL;
+ }
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ spin_lock(lru_lock);
+ ttm_resource_cursor_fini(&curs->res_curs);
+ spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @ctx: The ttm_operation_ctx to govern the locking.
+ *
+ * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
+ * or prelocked buffer objects are available as detailed by
+ * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
+ * supported.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
+{
+ memset(curs, 0, sizeof(*curs));
+ ttm_resource_cursor_init(&curs->res_curs, man);
+ curs->ctx = ctx;
+
+ return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = res->bo;
+
+ if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
+ return NULL;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ return NULL;
+ }
+
+ curs->bo = bo;
+ return bo;
+}
+
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+
+ spin_lock(lru_lock);
+ for (;;) {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ if (!res)
+ break;
+
+ bo = ttm_bo_from_res_reserved(res, curs);
+ if (bo)
+ break;
+ }
+
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+
+ spin_lock(lru_lock);
+ res = ttm_resource_manager_first(&curs->res_curs);
+ if (!res) {
+ spin_unlock(lru_lock);
+ return NULL;
+ }
+
+ bo = ttm_bo_from_res_reserved(res, curs);
+ spin_unlock(lru_lock);
+
+ return bo ? bo : ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
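+/*
+ * A minimal iteration sketch using the cursor API above (@man and @ctx
+ * are assumed to be set up already):
+ *
+ *	struct ttm_bo_lru_cursor curs;
+ *	struct ttm_buffer_object *bo;
+ *
+ *	ttm_bo_lru_cursor_init(&curs, man, ctx);
+ *	for (bo = ttm_bo_lru_cursor_first(&curs); bo;
+ *	     bo = ttm_bo_lru_cursor_next(&curs)) {
+ *		// bo is locked and reference-counted here
+ *	}
+ *	ttm_bo_lru_cursor_fini(&curs);
+ */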
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_back_up functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system memory, it is
+ * first moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags)
+{
+ static const struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = 0,
+ };
+ static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ };
+ struct ttm_tt *tt = bo->ttm;
+ long lret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+ int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret) {
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
+ return ret;
+ }
+ }
+
+ ttm_bo_unmap_virtual(bo);
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret < 0)
+ return lret;
+
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+ {.purge = flags.purge,
+ .writeback = flags.writeback});
+
+ if (lret <= 0 && bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ if (lret < 0 && lret != -EINTR)
+ return -EBUSY;
+
+ return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ * @bo: The candidate for shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shrinking. This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable, false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+ (!ctx->no_wait_gpu ||
+ dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, like direct reclaim, waiting (in particular GPU
+ * waits) should be avoided, since it may stall a system that could
+ * otherwise make progress by shrinking something else that is less
+ * time-consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+ return !current_is_kswapd();
+}
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
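+/*
+ * Taken together, a shrinker might use the helpers above roughly as
+ * follows (a sketch; @bo must already be locked, e.g. by the LRU cursor):
+ *
+ *	struct ttm_operation_ctx ctx = {
+ *		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
+ *	};
+ *	const struct ttm_bo_shrink_flags flags = {
+ *		.writeback = true,
+ *		.allow_move = true,
+ *	};
+ *	long shrunken = 0;
+ *
+ *	if (ttm_bo_shrink_suitable(bo, &ctx))
+ *		shrunken = ttm_bo_shrink(&ctx, bo, flags);
+ */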
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index c9eba76d5143..83b10706ba89 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -41,12 +41,20 @@
#include <asm/set_memory.h>
#endif
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h>
+static DECLARE_FAULT_ATTR(backup_fault_inject);
+#else
+#define should_fail(...) false
+#endif
+
/**
* struct ttm_pool_dma - Helper object for coherent DMA mappings
*
@@ -75,6 +83,35 @@ struct ttm_pool_alloc_state {
enum ttm_caching tt_caching;
};
+/**
+ * struct ttm_pool_tt_restore - State representing restore from backup
+ * @pool: The pool used for page allocation while restoring.
+ * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
+ * @alloced_page: Pointer to the page most recently allocated from a pool or system.
+ * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
+ * is requested.
+ * @alloced_pages: The number of allocated pages present in the struct ttm_tt
+ * page vector from this restore session.
+ * @restored_pages: The number of 4K pages restored for @alloced_page (which
+ * is typically a multi-order page).
+ * @page_caching: The struct ttm_tt requested caching
+ * @order: The order of @alloced_page.
+ *
+ * Recovery from backup might fail when we've recovered less than the
+ * full ttm_tt. In order not to lose any data (yet), keep information
+ * around that allows us to restart a failed ttm backup recovery.
+ */
+struct ttm_pool_tt_restore {
+ struct ttm_pool *pool;
+ struct ttm_pool_alloc_state snapshot_alloc;
+ struct page *alloced_page;
+ dma_addr_t first_dma;
+ pgoff_t alloced_pages;
+ pgoff_t restored_pages;
+ enum ttm_caching page_caching;
+ unsigned int order;
+};
+
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
@@ -199,12 +236,11 @@ static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
return 0;
}
-/* Map pages of 1 << order size and fill the DMA address array */
+/* DMA Map pages of 1 << order size and return the resulting dma_address. */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr)
+ struct page *p, dma_addr_t *dma_addr)
{
dma_addr_t addr;
- unsigned int i;
if (pool->use_dma_alloc) {
struct ttm_pool_dma *dma = (void *)p->private;
@@ -218,10 +254,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
return -EFAULT;
}
- for (i = 1 << order; i ; --i) {
- *(*dma_addr)++ = addr;
- addr += PAGE_SIZE;
- }
+ *dma_addr = addr;
return 0;
}
@@ -372,6 +405,196 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
}
/*
+ * Split larger pages so that we can free each PAGE_SIZE page as soon
+ * as it has been backed up, in order to avoid memory pressure during
+ * reclaim.
+ */
+static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
+{
+ unsigned int order = ttm_pool_page_order(pool, p);
+ pgoff_t nr;
+
+ if (!order)
+ return;
+
+ split_page(p, order);
+ nr = 1UL << order;
+ while (nr--)
+ (p++)->private = 0;
+}
+
+/**
+ * DOC: Partial backup and restoration of a struct ttm_tt.
+ *
+ * Swapout using ttm_backup_backup_page() and swapin using
+ * ttm_backup_copy_page() may fail.
+ * The former most likely due to lack of swap-space or memory, the latter due
+ * to lack of memory or because of signal interruption during waits.
+ *
+ * Backup failure is easily handled by using a ttm_tt pages vector that holds
+ * both backup handles and page pointers. This has to be taken into account when
+ * restoring such a ttm_tt from backup, and when freeing it while backed up.
+ * When restoring, for simplicity, new pages are actually allocated from the
+ * pool and the contents of any old pages are copied in and then the old pages
+ * are released.
+ *
+ * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
+ * to be able to resume an interrupted restore, and that structure is freed once
+ * the restoration is complete. If the struct ttm_tt is destroyed while there
+ * is a valid struct ttm_pool_tt_restore attached, that is also properly taken
+ * care of.
+ */
+
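A hedged caller-side sketch of the resume behaviour described in the DOC block above; the wrapper name is hypothetical, and the retry condition mirrors the pattern used by the xe shrinker test later in this series:

    /* Retry an interruptible restore; progress is kept in tt->restore. */
    static int example_resume_restore(struct ttm_pool *pool, struct ttm_tt *tt,
                                      struct ttm_operation_ctx *ctx)
    {
            int ret;

            do {
                    /* Each call resumes where the previous one stopped. */
                    ret = ttm_pool_restore_and_alloc(pool, tt, ctx);
            } while (ret == -EINTR && !signal_pending(current));

            return ret;
    }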
+/* Is restore ongoing for the currently allocated page? */
+static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
+{
+ return restore && restore->restored_pages < (1 << restore->order);
+}
+
+/* DMA unmap and free a multi-order page, either to the relevant pool or to the system. */
+static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
+ const dma_addr_t *dma_addr, enum ttm_caching caching)
+{
+ struct ttm_pool_type *pt = NULL;
+ unsigned int order;
+ pgoff_t nr;
+
+ if (pool) {
+ order = ttm_pool_page_order(pool, page);
+ nr = (1UL << order);
+ if (dma_addr)
+ ttm_pool_unmap(pool, *dma_addr, nr);
+
+ pt = ttm_pool_select_type(pool, caching, order);
+ } else {
+ order = page->private;
+ nr = (1UL << order);
+ }
+
+ if (pt)
+ ttm_pool_type_give(pt, page);
+ else
+ ttm_pool_free_page(pool, caching, order, page);
+
+ return nr;
+}
+
+/* Populate the page-array using the most recently allocated multi-order page. */
+static void ttm_pool_allocated_page_commit(struct page *allocated,
+ dma_addr_t first_dma,
+ struct ttm_pool_alloc_state *alloc,
+ pgoff_t nr)
+{
+ pgoff_t i;
+
+ for (i = 0; i < nr; ++i)
+ *alloc->pages++ = allocated++;
+
+ alloc->remaining_pages -= nr;
+
+ if (!alloc->dma_addr)
+ return;
+
+ for (i = 0; i < nr; ++i) {
+ *alloc->dma_addr++ = first_dma;
+ first_dma += PAGE_SIZE;
+ }
+}
+
+/*
+ * When restoring, restore backed-up content to the newly allocated page and
+ * if successful, populate the page-table and dma-address arrays.
+ */
+static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
+ struct ttm_backup *backup,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc)
+
+{
+ pgoff_t i, nr = 1UL << restore->order;
+ struct page **first_page = alloc->pages;
+ struct page *p;
+ int ret = 0;
+
+ for (i = restore->restored_pages; i < nr; ++i) {
+ p = first_page[i];
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
+ should_fail(&backup_fault_inject, 1)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (handle == 0) {
+ restore->restored_pages++;
+ continue;
+ }
+
+ ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
+ handle, ctx->interruptible);
+ if (ret)
+ break;
+
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ /*
+ * We could probably avoid splitting the old page
+ * using clever logic, but ATM we don't care, as
+ * we prioritize releasing memory ASAP. Note that
+ * here, the old retained page is always write-back
+ * cached.
+ */
+ ttm_pool_split_for_swap(restore->pool, p);
+ copy_highpage(restore->alloced_page + i, p);
+ __free_pages(p, 0);
+ }
+
+ restore->restored_pages++;
+ first_page[i] = ttm_backup_handle_to_page_ptr(0);
+ }
+
+ if (ret) {
+ if (!restore->restored_pages) {
+ dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = nr;
+ }
+ return ret;
+ }
+
+ ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
+ alloc, nr);
+ if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
+ alloc->caching_divide = alloc->pages;
+ restore->snapshot_alloc = *alloc;
+ restore->alloced_pages += nr;
+
+ return 0;
+}
+
+/* If restoring, save information needed for ttm_pool_restore_commit(). */
+static void
+ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
+ struct page *p,
+ enum ttm_caching page_caching,
+ dma_addr_t first_dma,
+ struct ttm_pool_tt_restore *restore,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ restore->pool = pool;
+ restore->order = order;
+ restore->restored_pages = 0;
+ restore->page_caching = page_caching;
+ restore->first_dma = first_dma;
+ restore->alloced_page = p;
+ restore->snapshot_alloc = *alloc;
+}
+
+/*
* Called when we got a page, either from a pool or newly allocated.
* if needed, dma map the page and populate the dma address array.
* Populate the page address array.
@@ -380,10 +603,11 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
*/
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
struct page *p, enum ttm_caching page_caching,
- struct ttm_pool_alloc_state *alloc)
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- pgoff_t i, nr = 1UL << order;
bool caching_consistent;
+ dma_addr_t first_dma;
int r = 0;
caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
@@ -395,17 +619,20 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
}
if (alloc->dma_addr) {
- r = ttm_pool_map(pool, order, p, &alloc->dma_addr);
+ r = ttm_pool_map(pool, order, p, &first_dma);
if (r)
return r;
}
- alloc->remaining_pages -= nr;
- for (i = 0; i < nr; ++i)
- *alloc->pages++ = p++;
+ if (restore) {
+ ttm_pool_page_allocated_restore(pool, order, p, page_caching,
+ first_dma, restore, alloc);
+ } else {
+ ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
- if (caching_consistent)
- alloc->caching_divide = alloc->pages;
+ if (caching_consistent)
+ alloc->caching_divide = alloc->pages;
+ }
return 0;
}
@@ -428,22 +655,24 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
- unsigned int order;
+ struct ttm_backup *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
- struct ttm_pool_type *pt = NULL;
+ struct page *p = *pages;
- order = ttm_pool_page_order(pool, *pages);
- nr = (1UL << order);
- if (tt->dma_address)
- ttm_pool_unmap(pool, tt->dma_address[i], nr);
+ nr = 1;
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
- pt = ttm_pool_select_type(pool, caching, order);
- if (pt)
- ttm_pool_type_give(pt, *pages);
- else
- ttm_pool_free_page(pool, caching, order, *pages);
+ if (handle != 0)
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ dma_addr_t *dma_addr = tt->dma_address ?
+ tt->dma_address + i : NULL;
+
+ nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
+ }
}
}
@@ -467,22 +696,11 @@ static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
}
-/**
- * ttm_pool_alloc - Fill a ttm_tt object
- *
- * @pool: ttm_pool to use
- * @tt: ttm_tt object to fill
- * @ctx: operation context
- *
- * Fill the ttm_tt object with pages and also make sure to DMA map them when
- * necessary.
- *
- * Returns: 0 on successe, negative error code otherwise.
- */
-int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
- struct ttm_operation_ctx *ctx)
+static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- struct ttm_pool_alloc_state alloc;
enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
pgoff_t caching_divide;
@@ -491,10 +709,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
struct page *p;
int r;
- ttm_pool_alloc_state_init(tt, &alloc);
-
- WARN_ON(!alloc.remaining_pages || ttm_tt_is_populated(tt));
- WARN_ON(alloc.dma_addr && !pool->dev);
+ WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
+ WARN_ON(alloc->dma_addr && !pool->dev);
if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
@@ -509,9 +725,9 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
page_caching = tt->caching;
allow_pools = true;
- for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, &alloc);
- alloc.remaining_pages;
- order = ttm_pool_alloc_find_order(order, &alloc)) {
+ for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
+ alloc->remaining_pages;
+ order = ttm_pool_alloc_find_order(order, alloc)) {
struct ttm_pool_type *pt;
/* First, try to allocate a page from a pool if one exists. */
@@ -541,31 +757,121 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
r = -ENOMEM;
goto error_free_all;
}
- r = ttm_pool_page_allocated(pool, order, p, page_caching, &alloc);
+ r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
+ restore);
if (r)
goto error_free_page;
+
+ if (ttm_pool_restore_valid(restore)) {
+ r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
+ if (r)
+ goto error_free_all;
+ }
}
- r = ttm_pool_apply_caching(&alloc);
+ r = ttm_pool_apply_caching(alloc);
if (r)
goto error_free_all;
+ kfree(tt->restore);
+ tt->restore = NULL;
+
return 0;
error_free_page:
ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
- caching_divide = alloc.caching_divide - tt->pages;
+ if (tt->restore)
+ return r;
+
+ caching_divide = alloc->caching_divide - tt->pages;
ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
- tt->num_pages - alloc.remaining_pages);
+ tt->num_pages - alloc->remaining_pages);
return r;
}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
+}
EXPORT_SYMBOL(ttm_pool_alloc);
/**
+ * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
+ * content.
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary. Read in backed-up content.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(!ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if (!tt->restore) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+ if (ctx->gfp_retry_mayfail)
+ gfp |= __GFP_RETRY_MAYFAIL;
+
+ tt->restore = kzalloc(sizeof(*tt->restore), gfp);
+ if (!tt->restore)
+ return -ENOMEM;
+
+ tt->restore->snapshot_alloc = alloc;
+ tt->restore->pool = pool;
+ tt->restore->restored_pages = 1;
+ } else {
+ struct ttm_pool_tt_restore *restore = tt->restore;
+ int ret;
+
+ alloc = restore->snapshot_alloc;
+ if (ttm_pool_restore_valid(tt->restore)) {
+ ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
+ if (ret)
+ return ret;
+ }
+ if (!alloc.remaining_pages)
+ return 0;
+ }
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
+}
+
+/**
* ttm_pool_free - Free the backing pages from a ttm_tt object
*
* @pool: Pool to give pages back to.
@@ -583,6 +889,169 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
EXPORT_SYMBOL(ttm_pool_free);
/**
+ * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
+ * @tt: The struct ttm_tt.
+ *
+ * Release handles with associated content or any remaining pages of
+ * a backed-up struct ttm_tt.
+ */
+void ttm_pool_drop_backed_up(struct ttm_tt *tt)
+{
+ struct ttm_pool_tt_restore *restore;
+ pgoff_t start_page = 0;
+
+ WARN_ON(!ttm_tt_is_backed_up(tt));
+
+ restore = tt->restore;
+
+ /*
+ * Unmap and free any uncommitted restore page.
+ * Any tt page-array backup entries that have already been read back
+ * have been cleared already.
+ */
+ if (ttm_pool_restore_valid(restore)) {
+ dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = 1UL << restore->order;
+ }
+
+ /*
+ * If a restore is ongoing, part of the tt pages may have a
+ * caching different than writeback.
+ */
+ if (restore) {
+ pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
+
+ start_page = restore->alloced_pages;
+ WARN_ON(mid > start_page);
+ /* Pages that might be dma-mapped and non-cached */
+ ttm_pool_free_range(restore->pool, tt, tt->caching,
+ 0, mid);
+ /* Pages that might be dma-mapped but cached */
+ ttm_pool_free_range(restore->pool, tt, ttm_cached,
+ mid, restore->alloced_pages);
+ kfree(restore);
+ tt->restore = NULL;
+ }
+
+ ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
+}
+
+/**
+ * ttm_pool_backup() - Back up or purge a struct ttm_tt
+ * @pool: The pool used when allocating the struct ttm_tt.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags to govern the backup behaviour.
+ *
+ * Back up or purge a struct ttm_tt. If the purge flag in @flags is set,
+ * all pages will be freed directly to the system rather than to the pool
+ * they were allocated from, making the function behave similarly to
+ * ttm_pool_free(). If the purge flag is clear, the pages will be backed up
+ * instead, exchanged for handles.
+ * A subsequent call to ttm_pool_restore_and_alloc() will then read back the
+ * content and a subsequent call to ttm_pool_drop_backed_up() will drop it.
+ * If backup of a page fails for whatever reason, @tt will still be
+ * partially backed up, retaining those pages for which backup failed.
+ * In that case, this function can be retried, possibly after freeing up
+ * memory resources.
+ *
+ * Return: Number of pages actually backed up or freed, or negative
+ * error code on failure.
+ */
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_backup_flags *flags)
+{
+ struct ttm_backup *backup = tt->backup;
+ struct page *page;
+ unsigned long handle;
+ gfp_t alloc_gfp;
+ gfp_t gfp;
+ int ret = 0;
+ pgoff_t shrunken = 0;
+ pgoff_t i, num_pages;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if ((!ttm_backup_bytes_avail() && !flags->purge) ||
+ pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
+ return -EBUSY;
+
+#ifdef CONFIG_X86
+ /* Anything returned to the system needs to be cached. */
+ if (tt->caching != ttm_cached)
+ set_pages_array_wb(tt->pages, tt->num_pages);
+#endif
+
+ if (tt->dma_address || flags->purge) {
+ for (i = 0; i < tt->num_pages; i += num_pages) {
+ unsigned int order;
+
+ page = tt->pages[i];
+ if (unlikely(!page)) {
+ num_pages = 1;
+ continue;
+ }
+
+ order = ttm_pool_page_order(pool, page);
+ num_pages = 1UL << order;
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i],
+ num_pages);
+ if (flags->purge) {
+ shrunken += num_pages;
+ page->private = 0;
+ __free_pages(page, order);
+ memset(tt->pages + i, 0,
+ num_pages * sizeof(*tt->pages));
+ }
+ }
+ }
+
+ if (flags->purge)
+ return shrunken;
+
+ if (pool->use_dma32)
+ gfp = GFP_DMA32;
+ else
+ gfp = GFP_HIGHUSER;
+
+ alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;
+
+ num_pages = tt->num_pages;
+
+	/*
+	 * Under fault injection, back up only half of the pages to
+	 * simulate a partial shrink.
+	 */
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
+ num_pages = DIV_ROUND_UP(num_pages, 2);
+
+ for (i = 0; i < num_pages; ++i) {
+ s64 shandle;
+
+ page = tt->pages[i];
+ if (unlikely(!page))
+ continue;
+
+ ttm_pool_split_for_swap(pool, page);
+
+ shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
+ gfp, alloc_gfp);
+ if (shandle < 0) {
+ /* We allow partially shrunken tts */
+ ret = shandle;
+ break;
+ }
+ handle = shandle;
+ tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
+ put_page(page);
+ shrunken++;
+ }
+
+ return shrunken ? shrunken : ret;
+}
+
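A minimal usage sketch for the backup entry point just added; the helper below is hypothetical and simply shows how the two flag bits select between swap-out and purge:

    /* Hypothetical helper: swap out a tt, or purge it without copying. */
    static long example_backup_tt(struct ttm_pool *pool, struct ttm_tt *tt,
                                  bool purge_only)
    {
            struct ttm_backup_flags backup_flags = {
                    .purge = purge_only,      /* free pages instead of backing up */
                    .writeback = !purge_only, /* push content to swap immediately */
            };

            /* Returns the number of pages backed up or freed, or an error. */
            return ttm_pool_backup(pool, tt, &backup_flags);
    }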
+/**
* ttm_pool_init - Initialize a pool
*
* @pool: the pool to initialize
@@ -843,6 +1312,10 @@ int ttm_pool_mgr_init(unsigned long num_pages)
&ttm_pool_debugfs_globals_fops);
debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
&ttm_pool_debugfs_shrink_fops);
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
+ &backup_fault_inject);
+#endif
#endif
mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3baf215eca23..df0aa6c4b8b8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -40,6 +40,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -158,6 +159,8 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
ttm->caching = caching;
+ ttm->restore = NULL;
+ ttm->backup = NULL;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
@@ -182,6 +185,13 @@ void ttm_tt_fini(struct ttm_tt *ttm)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
+ if (ttm_tt_is_backed_up(ttm))
+ ttm_pool_drop_backed_up(ttm);
+ if (ttm->backup) {
+ ttm_backup_fini(ttm->backup);
+ ttm->backup = NULL;
+ }
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -254,6 +264,49 @@ out_err:
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
/**
+ * ttm_tt_backup() - Helper to back up a struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags that govern the backup behaviour.
+ *
+ * Update the page accounting and call ttm_pool_backup() to free pages
+ * or back them up.
+ *
+ * Return: Number of pages freed or swapped out, or negative error code on
+ * error.
+ */
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags)
+{
+ long ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
+ return 0;
+
+ ret = ttm_pool_backup(&bdev->pool, tt, &flags);
+ if (ret > 0) {
+ tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+ tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
+ }
+
+ return ret;
+}
+
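+/**
+ * ttm_tt_restore() - Helper to restore a backed-up struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @ctx: The struct ttm_operation_ctx used for the restore.
+ *
+ * Allocate new pages, read back any backed-up content and clear the
+ * backed-up state on success.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */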
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
+
+ if (ret)
+ return ret;
+
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_restore);
+
+/**
* ttm_tt_swapout - swap out tt object
*
* @bdev: TTM device structure.
@@ -348,6 +401,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
goto error;
ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
@@ -477,3 +531,32 @@ unsigned long ttm_tt_pages_limit(void)
return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);
+
+/**
+ * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
+ * @tt: The ttm_tt for which to allocate and assign a backup structure.
+ *
+ * Assign a backup structure to be used for tt backup. This should
+ * typically be done at bo creation, to avoid allocations at shrinking
+ * time.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ttm_tt_setup_backup(struct ttm_tt *tt)
+{
+ struct ttm_backup *backup =
+ ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
+
+ if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
+ return -EINVAL;
+
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ if (tt->backup)
+ ttm_backup_fini(tt->backup);
+
+ tt->backup = backup;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_setup_backup);
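A sketch of the expected driver-side wiring, mirroring the xe conversion further down in this patch; note that the backup can only be attached to a tt created with the external-mappable flag:

    page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

    err = ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
    if (err)
            return err;

    /* Allocate the shmem backup at creation time, not at shrink time. */
    err = ttm_tt_setup_backup(tt);
    if (err) {
            ttm_tt_fini(tt);
            return err;
    }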
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 87dccaecc3e5..db994aeaa0f9 100644
--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -181,40 +181,3 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
return rc;
}
-
-/**
- * hgsmi_cursor_position - Report the guest cursor position. The host may
- * wish to use this information to re-position its
- * own cursor (though this is currently unlikely).
- * The current host cursor position is returned.
- * Return: 0 or negative errno value.
- * @ctx: The context containing the heap used.
- * @report_position: Are we reporting a position?
- * @x: Guest cursor X position.
- * @y: Guest cursor Y position.
- * @x_host: Host cursor X position is stored here. Optional.
- * @y_host: Host cursor Y position is stored here. Optional.
- */
-int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
- u32 x, u32 y, u32 *x_host, u32 *y_host)
-{
- struct vbva_cursor_position *p;
-
- p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
- VBVA_CURSOR_POSITION);
- if (!p)
- return -ENOMEM;
-
- p->report_position = report_position;
- p->x = x;
- p->y = y;
-
- hgsmi_buffer_submit(ctx, p);
-
- *x_host = p->x;
- *y_host = p->y;
-
- hgsmi_buffer_free(ctx, p);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
index 55fcee3a6470..643c4448bdcb 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
@@ -34,8 +34,6 @@ int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
u32 hot_x, u32 hot_y, u32 width, u32 height,
u8 *pixels, u32 len);
-int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
- u32 x, u32 y, u32 *x_host, u32 *y_host);
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
struct vbva_buffer *vbva, s32 screen);
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 40a05869a50e..992e8f5c5c6e 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -724,7 +724,7 @@ static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct pv_muxing_priv *priv;
struct drm_device *drm;
struct vc4_dev *vc4;
@@ -737,13 +737,15 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return 0;
}
@@ -782,7 +784,7 @@ static struct kunit_suite vc5_pv_muxing_test_suite = {
*/
static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
{
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct vc4_hvs_state *new_hvs_state;
@@ -795,11 +797,10 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -822,7 +823,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
@@ -843,6 +844,9 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -854,7 +858,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
*/
static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
{
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct vc4_hvs_state *new_hvs_state;
@@ -867,11 +871,10 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -905,7 +908,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -929,6 +932,9 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
}
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -949,7 +955,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
static void
drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
{
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct drm_device *drm;
@@ -959,11 +965,10 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -975,7 +980,7 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
@@ -987,6 +992,9 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
VC4_ENCODER_TYPE_HDMI0);
KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index e878eddc9c3f..37238a12baa5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2926,15 +2926,16 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
struct resource *res;
int ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->hdmicore_regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!vc4_hdmi->hdmicore_regs)
- return -ENOMEM;
+ vc4_hdmi->hdmicore_regs = devm_platform_ioremap_resource_byname(pdev,
+ "hdmi");
+ if (IS_ERR(vc4_hdmi->hdmicore_regs))
+ return PTR_ERR(vc4_hdmi->hdmicore_regs);
+	/*
+	 * This region is shared between both HDMI controllers and cannot
+	 * be claimed by both instances, so don't convert it to
+	 * devm_platform_ioremap_resource_byname() like the rest.
+	 */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hd");
if (!res)
return -ENODEV;
@@ -2943,53 +2944,35 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
if (!vc4_hdmi->hd_regs)
return -ENOMEM;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cec");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->cec_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->cec_regs)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csc");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->csc_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->csc_regs)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvp");
- if (!res)
- return -ENODEV;
+ vc4_hdmi->cec_regs = devm_platform_ioremap_resource_byname(pdev,
+ "cec");
+ if (IS_ERR(vc4_hdmi->cec_regs))
+ return PTR_ERR(vc4_hdmi->cec_regs);
- vc4_hdmi->dvp_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->dvp_regs)
- return -ENOMEM;
+ vc4_hdmi->csc_regs = devm_platform_ioremap_resource_byname(pdev,
+ "csc");
+ if (IS_ERR(vc4_hdmi->csc_regs))
+ return PTR_ERR(vc4_hdmi->csc_regs);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (!res)
- return -ENODEV;
+ vc4_hdmi->dvp_regs = devm_platform_ioremap_resource_byname(pdev,
+ "dvp");
+ if (IS_ERR(vc4_hdmi->dvp_regs))
+ return PTR_ERR(vc4_hdmi->dvp_regs);
- vc4_hdmi->phy_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->phy_regs)
- return -ENOMEM;
+ vc4_hdmi->phy_regs = devm_platform_ioremap_resource_byname(pdev,
+ "phy");
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "packet");
- if (!res)
- return -ENODEV;
+ if (IS_ERR(vc4_hdmi->phy_regs))
+ return PTR_ERR(vc4_hdmi->phy_regs);
- vc4_hdmi->ram_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->ram_regs)
- return -ENOMEM;
+ vc4_hdmi->ram_regs = devm_platform_ioremap_resource_byname(pdev,
+ "packet");
+ if (IS_ERR(vc4_hdmi->ram_regs))
+ return PTR_ERR(vc4_hdmi->ram_regs);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rm");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->rm_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->rm_regs)
- return -ENOMEM;
+ vc4_hdmi->rm_regs = devm_platform_ioremap_resource_byname(pdev, "rm");
+ if (IS_ERR(vc4_hdmi->rm_regs))
+ return PTR_ERR(vc4_hdmi->rm_regs);
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 75a79390a0e3..9699b08585f7 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -100,6 +100,7 @@ xe-y += xe_bb.o \
xe_rtp.o \
xe_sa.o \
xe_sched_job.o \
+ xe_shrinker.o \
xe_step.o \
xe_survivability_mode.o \
xe_sync.o \
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 6795d1d916e4..9fde67ca989f 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -514,8 +514,13 @@ static int shrink_test_run_device(struct xe_device *xe)
* other way around, they may not be subject to swapping...
*/
if (alloced < purgeable) {
+ xe_ttm_tt_account_subtract(&xe_tt->ttm);
xe_tt->purgeable = true;
+ xe_ttm_tt_account_add(&xe_tt->ttm);
bo->ttm.priority = 0;
+ spin_lock(&bo->ttm.bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->ttm);
+ spin_unlock(&bo->ttm.bdev->lru_lock);
} else {
int ret = shrink_test_fill_random(bo, &prng, link);
@@ -570,7 +575,6 @@ static int shrink_test_run_device(struct xe_device *xe)
if (ret == -EINTR)
intr = true;
} while (ret == -EINTR && !signal_pending(current));
-
if (!ret && !purgeable)
failed = shrink_test_verify(test, bo, count, &prng, link);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 0c7a7f5e5596..64f9c936eea0 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -11,6 +11,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
@@ -29,6 +30,7 @@
#include "xe_preempt_fence.h"
#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_shrinker.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -315,9 +317,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
}
}
+/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
struct ttm_tt ttm;
- struct device *dev;
+	/** @xe: The xe device */
+ struct xe_device *xe;
struct sg_table sgt;
struct sg_table *sg;
/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
@@ -330,7 +334,8 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
unsigned long num_pages = tt->num_pages;
int ret;
- XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+ XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));
if (xe_tt->sg)
return 0;
@@ -338,13 +343,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
num_pages, 0,
(u64)num_pages << PAGE_SHIFT,
- xe_sg_segment_size(xe_tt->dev),
+ xe_sg_segment_size(xe_tt->xe->drm.dev),
GFP_KERNEL);
if (ret)
return ret;
xe_tt->sg = &xe_tt->sgt;
- ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(xe_tt->sg);
@@ -360,7 +365,7 @@ static void xe_tt_unmap_sg(struct ttm_tt *tt)
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
+ dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -375,21 +380,47 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
return xe_tt->sg;
}
+/*
+ * Account ttm pages against the device shrinker's shrinkable and
+ * purgeable counts.
+ */
+static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+ else
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+}
+
+static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+ else
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+}
+
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
u32 page_flags)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = xe_bo_device(bo);
- struct xe_ttm_tt *tt;
+ struct xe_ttm_tt *xe_tt;
+ struct ttm_tt *tt;
unsigned long extra_pages;
enum ttm_caching caching = ttm_cached;
int err;
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
+ xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
+ if (!xe_tt)
return NULL;
- tt->dev = xe->drm.dev;
+ tt = &xe_tt->ttm;
+ xe_tt->xe = xe;
extra_pages = 0;
if (xe_bo_needs_ccs_pages(bo))
@@ -435,42 +466,66 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
caching = ttm_uncached;
}
- err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
+ if (ttm_bo->type != ttm_bo_type_sg)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
if (err) {
- kfree(tt);
+ kfree(xe_tt);
return NULL;
}
- return &tt->ttm;
+ if (ttm_bo->type != ttm_bo_type_sg) {
+ err = ttm_tt_setup_backup(tt);
+ if (err) {
+ ttm_tt_fini(tt);
+ kfree(xe_tt);
+ return NULL;
+ }
+ }
+
+ return tt;
}
static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
int err;
/*
* dma-bufs are not populated with pages, and the dma-
* addresses are set up when moved to XE_PL_TT.
*/
- if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return 0;
- err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+ if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
+ err = ttm_tt_restore(ttm_dev, tt, ctx);
+ } else {
+ ttm_tt_clear_backed_up(tt);
+ err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+ }
if (err)
return err;
- return err;
+ xe_tt->purgeable = false;
+ xe_ttm_tt_account_add(tt);
+
+ return 0;
}
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
- if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return;
xe_tt_unmap_sg(tt);
- return ttm_pool_free(&ttm_dev->pool, tt);
+ ttm_pool_free(&ttm_dev->pool, tt);
+ xe_ttm_tt_account_subtract(tt);
}
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -934,6 +989,111 @@ out:
return ret;
}
+static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
+ struct ttm_buffer_object *bo,
+ unsigned long *scanned)
+{
+ long lret;
+
+ /* Fake move to system, without copying data. */
+ if (bo->resource->mem_type != XE_PL_SYSTEM) {
+ struct ttm_resource *new_resource;
+
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret)
+ return lret;
+
+ lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
+ if (lret)
+ return lret;
+
+ xe_tt_unmap_sg(bo->ttm);
+ ttm_bo_move_null(bo, new_resource);
+ }
+
+ *scanned += bo->ttm->num_pages;
+ lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+ {.purge = true,
+ .writeback = false,
+ .allow_move = false});
+
+ if (lret > 0)
+ xe_ttm_tt_account_subtract(bo->ttm);
+
+ return lret;
+}
+
+/**
+ * xe_bo_shrink() - Try to shrink an xe bo.
+ * @ctx: The struct ttm_operation_ctx used for shrinking.
+ * @bo: The TTM buffer object whose pages to shrink.
+ * @flags: Flags governing the shrink behaviour.
+ * @scanned: Pointer to a counter of the number of pages the function
+ * attempted to shrink.
+ *
+ * Try to shrink or purge a bo, and if that succeeds, unmap its dma mappings.
+ * Note that we also need to be able to handle non-xe bos
+ * (ghost bos), but only if their struct ttm_tt is embedded in
+ * a struct xe_ttm_tt. Whenever the function attempts to shrink
+ * the pages of a buffer object, the value pointed to by @scanned
+ * is updated.
+ *
+ * Return: The number of pages shrunken or purged, or negative error
+ * code on failure.
+ */
+long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long *scanned)
+{
+ struct ttm_tt *tt = bo->ttm;
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
+ struct xe_device *xe = xe_tt->xe;
+ bool needs_rpm;
+ long lret = 0L;
+
+ if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
+ (flags.purge && !xe_tt->purgeable))
+ return -EBUSY;
+
+ if (!ttm_bo_eviction_valuable(bo, &place))
+ return -EBUSY;
+
+ if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
+ return xe_bo_shrink_purge(ctx, bo, scanned);
+
+ if (xe_tt->purgeable) {
+ if (bo->resource->mem_type != XE_PL_SYSTEM)
+ lret = xe_bo_move_notify(xe_bo, ctx);
+ if (!lret)
+ lret = xe_bo_shrink_purge(ctx, bo, scanned);
+ goto out_unref;
+ }
+
+ /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
+ needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
+ xe_bo_needs_ccs_pages(xe_bo));
+ if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+ goto out_unref;
+
+ *scanned += tt->num_pages;
+ lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+ {.purge = false,
+ .writeback = flags.writeback,
+ .allow_move = true});
+ if (needs_rpm)
+ xe_pm_runtime_put(xe);
+
+ if (lret > 0)
+ xe_ttm_tt_account_subtract(tt);
+
+out_unref:
+ xe_bo_put(xe_bo);
+
+ return lret;
+}
+
/**
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
@@ -1951,6 +2111,8 @@ int xe_bo_pin_external(struct xe_bo *bo)
}
ttm_bo_pin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_subtract(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2010,6 +2172,8 @@ int xe_bo_pin(struct xe_bo *bo)
}
ttm_bo_pin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_subtract(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2044,6 +2208,8 @@ void xe_bo_unpin_external(struct xe_bo *bo)
spin_unlock(&xe->pinned.lock);
ttm_bo_unpin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_add(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2067,6 +2233,8 @@ void xe_bo_unpin(struct xe_bo *bo)
spin_unlock(&xe->pinned.lock);
}
ttm_bo_unpin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_add(bo->ttm.ttm);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 2b9c0b0778a2..bda3fdd408da 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -149,6 +149,28 @@ static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
void xe_bo_put(struct xe_bo *bo);
+/**
+ * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
+ * xe bo
+ * @bo: The bo for which we want to obtain a refcount.
+ *
+ * There is a short window between where the bo's GEM object refcount reaches
+ * zero and where we put the final ttm_bo reference. Code in the eviction- and
+ * shrinking path should therefore attempt to grab a gem object reference before
+ * trying to use members outside of the base class ttm object. This function is
+ * intended for that purpose. On successful return, this function must be paired
+ * with an xe_bo_put().
+ *
+ * Return: @bo on success, NULL on failure.
+ */
+static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
+{
+ if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
+ return NULL;
+
+ return bo;
+}
+
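A usage sketch under the contract above; the candidate bo and the error handling are illustrative:

    struct xe_bo *xe_bo = xe_bo_get_unless_zero(candidate);

    if (!xe_bo)
            return -EBUSY; /* bo is already on its way to final release */

    /* Now safe to use xe_bo members beyond the base ttm object. */
    xe_bo_put(xe_bo);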
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
if (bo)
@@ -369,6 +391,20 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
+/**
+ * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
+ * @purge: Only purging allowed. Don't shrink if bo not purgeable.
+ * @writeback: Attempt to immediately move content to swap.
+ */
+struct xe_bo_shrink_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+};
+
+long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long *scanned);
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index a7dd9c7b95e5..5d79b439dd62 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -52,6 +52,7 @@
#include "xe_pmu.h"
#include "xe_pxp.h"
#include "xe_query.h"
+#include "xe_shrinker.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
@@ -398,6 +399,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
+ if (!IS_ERR_OR_NULL(xe->mem.shrinker))
+ xe_shrinker_destroy(xe->mem.shrinker);
+
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -431,6 +435,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
+ xe->mem.shrinker = xe_shrinker_create(xe);
+ if (IS_ERR(xe->mem.shrinker))
+ return ERR_CAST(xe->mem.shrinker);
+
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index fac488942316..72ef0b6fc425 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -386,6 +386,8 @@ struct xe_device {
struct xe_vram_region vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
+		/** @mem.shrinker: system memory shrinker. */
+ struct xe_shrinker *shrinker;
} mem;
/** @sriov: device level virtualization data */
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
new file mode 100644
index 000000000000..8184390f9c7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/shrinker.h>
+
+#include <drm/ttm/ttm_backup.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "xe_bo.h"
+#include "xe_pm.h"
+#include "xe_shrinker.h"
+
+/**
+ * struct xe_shrinker - per-device shrinker
+ * @xe: Back pointer to the device.
+ * @lock: Lock protecting accounting.
+ * @shrinkable_pages: Number of pages that are currently shrinkable.
+ * @purgeable_pages: Number of pages that are currently purgeable.
+ * @shrink: Pointer to the mm shrinker.
+ * @pm_worker: Worker to wake up the device if required.
+ */
+struct xe_shrinker {
+ struct xe_device *xe;
+ rwlock_t lock;
+ long shrinkable_pages;
+ long purgeable_pages;
+ struct shrinker *shrink;
+ struct work_struct pm_worker;
+};
+
+static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink)
+{
+ return shrink->private_data;
+}
+
+/**
+ * xe_shrinker_mod_pages() - Modify shrinker page accounting
+ * @shrinker: Pointer to the struct xe_shrinker.
+ * @shrinkable: Shrinkable pages delta. May be negative.
+ * @purgeable: Purgeable page delta. May be negative.
+ *
+ * Modifies the shrinkable and purgeable pages accounting.
+ */
+void
+xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable)
+{
+ write_lock(&shrinker->lock);
+ shrinker->shrinkable_pages += shrinkable;
+ shrinker->purgeable_pages += purgeable;
+ write_unlock(&shrinker->lock);
+}
+
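For example, flipping a populated tt from shrinkable to purgeable moves its pages between the two buckets in a single call; callers in this patch do the equivalent via the account helpers in xe_bo.c:

    /* Move tt->num_pages from the shrinkable to the purgeable bucket. */
    xe_shrinker_mod_pages(shrinker, -(long)tt->num_pages, tt->num_pages);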
+static s64 xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
+{
+ unsigned int mem_type;
+ s64 freed = 0, lret;
+
+ for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) {
+ struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
+ struct ttm_bo_lru_cursor curs;
+ struct ttm_buffer_object *ttm_bo;
+
+ if (!man || !man->use_tt)
+ continue;
+
+ ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
+ if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
+ continue;
+
+ lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned);
+ if (lret < 0)
+ return lret;
+
+ freed += lret;
+ if (*scanned >= to_scan)
+ break;
+ }
+ }
+
+ return freed;
+}
+
+static unsigned long
+xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
+ unsigned long num_pages;
+ bool can_backup = !!(sc->gfp_mask & __GFP_FS);
+
+ num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;
+ read_lock(&shrinker->lock);
+
+ if (can_backup)
+ num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);
+ else
+ num_pages = 0;
+
+ num_pages += shrinker->purgeable_pages;
+ read_unlock(&shrinker->lock);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+/*
+ * Check if we need runtime pm, and if so try to grab a reference if
+ * already active. If grabbing a reference fails, queue a worker that
+ * does it for us outside of reclaim, but don't wait for it to complete.
+ * If bo shrinking needs an rpm reference and we don't have it (yet),
+ * that bo will be skipped anyway.
+ */
+static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force,
+ unsigned long nr_to_scan, bool can_backup)
+{
+ struct xe_device *xe = shrinker->xe;
+
+ if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) ||
+ !ttm_backup_bytes_avail())
+ return false;
+
+ if (!force) {
+ read_lock(&shrinker->lock);
+ force = (nr_to_scan > shrinker->purgeable_pages && can_backup);
+ read_unlock(&shrinker->lock);
+ if (!force)
+ return false;
+ }
+
+ if (!xe_pm_runtime_get_if_active(xe)) {
+ if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) {
+ xe_pm_runtime_get(xe);
+ return true;
+ }
+ queue_work(xe->unordered_wq, &shrinker->pm_worker);
+ return false;
+ }
+
+ return true;
+}
+
+static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm)
+{
+ if (runtime_pm)
+ xe_pm_runtime_put(shrinker->xe);
+}
+
+static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = ttm_bo_shrink_avoid_wait(),
+ };
+ unsigned long nr_to_scan, nr_scanned = 0, freed = 0;
+ struct xe_bo_shrink_flags shrink_flags = {
+ .purge = true,
+ /* Don't request writeback without __GFP_IO. */
+ .writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO),
+ };
+ bool runtime_pm;
+ bool purgeable;
+ bool can_backup = !!(sc->gfp_mask & __GFP_FS);
+ s64 lret;
+
+ nr_to_scan = sc->nr_to_scan;
+
+ read_lock(&shrinker->lock);
+ purgeable = !!shrinker->purgeable_pages;
+ read_unlock(&shrinker->lock);
+
+ /* Might need runtime PM. Try to wake early if it looks like it. */
+ runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup);
+
+ if (purgeable && nr_scanned < nr_to_scan) {
+ lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
+ nr_to_scan, &nr_scanned);
+ if (lret >= 0)
+ freed += lret;
+ }
+
+ sc->nr_scanned = nr_scanned;
+ if (nr_scanned >= nr_to_scan || !can_backup)
+ goto out;
+
+ /* If we didn't wake before, try to do it now if needed. */
+ if (!runtime_pm)
+ runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
+
+ shrink_flags.purge = false;
+ lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
+ nr_to_scan, &nr_scanned);
+ if (lret >= 0)
+ freed += lret;
+
+ sc->nr_scanned = nr_scanned;
+out:
+ xe_shrinker_runtime_pm_put(shrinker, runtime_pm);
+ return nr_scanned ? freed : SHRINK_STOP;
+}
+
+/* Wake up the device for shrinking. */
+static void xe_shrinker_pm(struct work_struct *work)
+{
+ struct xe_shrinker *shrinker =
+ container_of(work, typeof(*shrinker), pm_worker);
+
+ xe_pm_runtime_get(shrinker->xe);
+ xe_pm_runtime_put(shrinker->xe);
+}
+
+/**
+ * xe_shrinker_create() - Create an xe per-device shrinker
+ * @xe: Pointer to the xe device.
+ *
+ * Return: A pointer to the created shrinker on success,
+ * negative error code on failure.
+ */
+struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
+{
+ struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
+
+ if (!shrinker)
+ return ERR_PTR(-ENOMEM);
+
+ shrinker->shrink = shrinker_alloc(0, "xe system shrinker");
+ if (!shrinker->shrink) {
+ kfree(shrinker);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
+ shrinker->xe = xe;
+ rwlock_init(&shrinker->lock);
+ shrinker->shrink->count_objects = xe_shrinker_count;
+ shrinker->shrink->scan_objects = xe_shrinker_scan;
+ shrinker->shrink->private_data = shrinker;
+ shrinker_register(shrinker->shrink);
+
+ return shrinker;
+}
+
+/**
+ * xe_shrinker_destroy() - Destroy an xe per-device shrinker
+ * @shrinker: Pointer to the shrinker to destroy.
+ */
+void xe_shrinker_destroy(struct xe_shrinker *shrinker)
+{
+ xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
+ xe_assert(shrinker->xe, !shrinker->purgeable_pages);
+ shrinker_free(shrinker->shrink);
+ flush_work(&shrinker->pm_worker);
+ kfree(shrinker);
+}
diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h
new file mode 100644
index 000000000000..28a038f4fcbf
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_shrinker.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_SHRINKER_H_
+#define _XE_SHRINKER_H_
+
+struct xe_shrinker;
+struct xe_device;
+
+void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable);
+
+struct xe_shrinker *xe_shrinker_create(struct xe_device *xe);
+
+void xe_shrinker_destroy(struct xe_shrinker *shrinker);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
index 9844a8edbfe1..d38b91872da3 100644
--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -108,9 +108,8 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe)
u64 gtt_size;
si_meminfo(&si);
+ /* Potentially restrict amount of TT memory here. */
gtt_size = (u64)si.totalram * si.mem_unit;
- /* TTM limits allocation of all TTM devices by 50% of system memory */
- gtt_size /= 2;
man->use_tt = true;
man->func = &xe_ttm_sys_mgr_func;
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index a18cc8d8caf5..6433c00d5d7e 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -216,12 +216,3 @@ void host1x_debug_dump(struct host1x *host1x)
show_all(host1x, &o, true);
}
-
-void host1x_debug_dump_syncpts(struct host1x *host1x)
-{
- struct output o = {
- .fn = write_to_printk
- };
-
- show_syncpts(host1x, &o, false);
-}
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
index 62bd8a091fa7..c43c61d876a9 100644
--- a/drivers/gpu/host1x/debug.h
+++ b/drivers/gpu/host1x/debug.h
@@ -41,6 +41,5 @@ extern unsigned int host1x_debug_trace_cmdbuf;
void host1x_debug_init(struct host1x *host1x);
void host1x_debug_deinit(struct host1x *host1x);
void host1x_debug_dump(struct host1x *host1x);
-void host1x_debug_dump_syncpts(struct host1x *host1x);
#endif
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 947323f4a234..fa77e4e64f12 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -165,38 +165,6 @@ int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
}
EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
-int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
- bool hflip, bool vflip)
-{
- u32 r90, vf, hf;
-
- r90 = ((u32)mode >> 2) & 0x1;
- hf = ((u32)mode >> 1) & 0x1;
- vf = ((u32)mode >> 0) & 0x1;
- hf ^= (u32)hflip;
- vf ^= (u32)vflip;
-
- switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
- case IPU_ROTATE_NONE:
- *degrees = 0;
- break;
- case IPU_ROTATE_90_RIGHT:
- *degrees = 90;
- break;
- case IPU_ROTATE_180:
- *degrees = 180;
- break;
- case IPU_ROTATE_90_LEFT:
- *degrees = 270;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
-
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
struct ipuv3_channel *channel;
@@ -516,12 +484,6 @@ int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
-bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
-{
- return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
-}
-EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
-
int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
struct ipu_soc *ipu = channel->ipu;
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 82b244cb313e..07866b1369c6 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -337,12 +337,6 @@ void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
-int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
-{
- return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
-
void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
{
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
@@ -452,23 +446,6 @@ int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
-void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
-{
- switch (pixel_format) {
- case V4L2_PIX_FMT_UYVY:
- ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
- ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */
- ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
- break;
- case V4L2_PIX_FMT_YUYV:
- ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
- ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */
- ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
- break;
- }
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
-
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset, unsigned int v_offset)
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 778bc26d3ba5..d576b7d28437 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -186,32 +186,6 @@ static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
}
/*
- * Set mclk division ratio for generating test mode mclk. Only used
- * for test generator.
- */
-static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
- u32 ipu_clk)
-{
- u32 temp;
- int div_ratio;
-
- div_ratio = (ipu_clk / pixel_clk) - 1;
-
- if (div_ratio > 0xFF || div_ratio < 0) {
- dev_err(csi->ipu->dev,
- "value of pixel_clk extends normal range\n");
- return -EINVAL;
- }
-
- temp = ipu_csi_read(csi, CSI_SENS_CONF);
- temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
- ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
- CSI_SENS_CONF);
-
- return 0;
-}
-
-/*
* Find the CSI data format and data width for the given V4L2 media
* bus pixel format code.
*/
@@ -538,56 +512,6 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
-bool ipu_csi_is_interlaced(struct ipu_csi *csi)
-{
- unsigned long flags;
- u32 sensor_protocol;
-
- spin_lock_irqsave(&csi->lock, flags);
- sensor_protocol =
- (ipu_csi_read(csi, CSI_SENS_CONF) &
- CSI_SENS_CONF_SENS_PRTCL_MASK) >>
- CSI_SENS_CONF_SENS_PRTCL_SHIFT;
- spin_unlock_irqrestore(&csi->lock, flags);
-
- switch (sensor_protocol) {
- case IPU_CSI_CLK_MODE_GATED_CLK:
- case IPU_CSI_CLK_MODE_NONGATED_CLK:
- case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
- case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
- case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
- return false;
- case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
- case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
- case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
- return true;
- default:
- dev_err(csi->ipu->dev,
- "CSI %d sensor protocol unsupported\n", csi->id);
- return false;
- }
-}
-EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
-
-void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
-{
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&csi->lock, flags);
-
- reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
- w->width = (reg & 0xFFFF) + 1;
- w->height = (reg >> 16 & 0xFFFF) + 1;
-
- reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
- w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
- w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
-
- spin_unlock_irqrestore(&csi->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ipu_csi_get_window);
-
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
{
unsigned long flags;
@@ -624,38 +548,6 @@ void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
}
EXPORT_SYMBOL_GPL(ipu_csi_set_downsize);
-void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
- u32 r_value, u32 g_value, u32 b_value,
- u32 pix_clk)
-{
- unsigned long flags;
- u32 ipu_clk = clk_get_rate(csi->clk_ipu);
- u32 temp;
-
- spin_lock_irqsave(&csi->lock, flags);
-
- temp = ipu_csi_read(csi, CSI_TST_CTRL);
-
- if (!active) {
- temp &= ~CSI_TEST_GEN_MODE_EN;
- ipu_csi_write(csi, temp, CSI_TST_CTRL);
- } else {
- /* Set sensb_mclk div_ratio */
- ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
-
- temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
- CSI_TEST_GEN_B_MASK);
- temp |= CSI_TEST_GEN_MODE_EN;
- temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
- (g_value << CSI_TEST_GEN_G_SHIFT) |
- (b_value << CSI_TEST_GEN_B_SHIFT);
- ipu_csi_write(csi, temp, CSI_TST_CTRL);
- }
-
- spin_unlock_irqrestore(&csi->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
-
int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
struct v4l2_mbus_framefmt *mbus_fmt)
{
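
The removed ipu_csi_set_testgen_mclk() derived the test-generator mclk by dividing the IPU clock: the 8-bit DIVRATIO field stores (ipu_clk / pixel_clk) - 1, so e.g. ipu_clk = 270 MHz with pixel_clk = 27 MHz (hypothetical example rates) yields a field value of 9 - 1 = 8. A sketch of just that math and its range check, with testgen_div_ratio() a hypothetical name:

#include <linux/errno.h>

static int testgen_div_ratio(u32 ipu_clk, u32 pixel_clk)
{
        int div_ratio = (ipu_clk / pixel_clk) - 1;

        if (div_ratio < 0 || div_ratio > 0xFF) /* must fit the 8-bit field */
                return -EINVAL;
        return div_ratio;
}
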
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 846461bac70d..acd76ecc5221 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -321,79 +321,6 @@ void ipu_ic_task_disable(struct ipu_ic *ic)
}
EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
-int ipu_ic_task_graphics_init(struct ipu_ic *ic,
- const struct ipu_ic_colorspace *g_in_cs,
- bool galpha_en, u32 galpha,
- bool colorkey_en, u32 colorkey)
-{
- struct ipu_ic_priv *priv = ic->priv;
- struct ipu_ic_csc csc2;
- unsigned long flags;
- u32 reg, ic_conf;
- int ret = 0;
-
- if (ic->task == IC_TASK_ENCODER)
- return -EINVAL;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- ic_conf = ipu_ic_read(ic, IC_CONF);
-
- if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
- struct ipu_ic_csc csc1;
-
- ret = ipu_ic_calc_csc(&csc1,
- V4L2_YCBCR_ENC_601,
- V4L2_QUANTIZATION_FULL_RANGE,
- IPUV3_COLORSPACE_RGB,
- V4L2_YCBCR_ENC_601,
- V4L2_QUANTIZATION_FULL_RANGE,
- IPUV3_COLORSPACE_RGB);
- if (ret)
- goto unlock;
-
- /* need transparent CSC1 conversion */
- ret = init_csc(ic, &csc1, 0);
- if (ret)
- goto unlock;
- }
-
- ic->g_in_cs = *g_in_cs;
- csc2.in_cs = ic->g_in_cs;
- csc2.out_cs = ic->out_cs;
-
- ret = __ipu_ic_calc_csc(&csc2);
- if (ret)
- goto unlock;
-
- ret = init_csc(ic, &csc2, 1);
- if (ret)
- goto unlock;
-
- if (galpha_en) {
- ic_conf |= IC_CONF_IC_GLB_LOC_A;
- reg = ipu_ic_read(ic, IC_CMBP_1);
- reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
- reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
- ipu_ic_write(ic, reg, IC_CMBP_1);
- } else
- ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
-
- if (colorkey_en) {
- ic_conf |= IC_CONF_KEY_COLOR_EN;
- ipu_ic_write(ic, colorkey, IC_CMBP_2);
- } else
- ic_conf &= ~IC_CONF_KEY_COLOR_EN;
-
- ipu_ic_write(ic, ic_conf, IC_CONF);
-
- ic->graphics = true;
-unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
-
int ipu_ic_task_init_rsc(struct ipu_ic *ic,
const struct ipu_ic_csc *csc,
int in_width, int in_height,
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 841316582ea9..3c33b4defab5 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -355,20 +355,6 @@ static void dump_format(struct ipu_image_convert_ctx *ctx,
(ic_image->fmt->fourcc >> 24) & 0xff);
}
-int ipu_image_convert_enum_format(int index, u32 *fourcc)
-{
- const struct ipu_image_pixfmt *fmt;
-
- if (index >= (int)ARRAY_SIZE(image_convert_formats))
- return -EINVAL;
-
- /* Format found */
- fmt = &image_convert_formats[index];
- *fourcc = fmt->fourcc;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
-
static void free_dma_buf(struct ipu_image_convert_priv *priv,
struct ipu_image_convert_dma_buf *buf)
{
@@ -2437,40 +2423,6 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
}
EXPORT_SYMBOL_GPL(ipu_image_convert);
-/* "Canned" synchronous single image conversion */
-static void image_convert_sync_complete(struct ipu_image_convert_run *run,
- void *data)
-{
- struct completion *comp = data;
-
- complete(comp);
-}
-
-int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
- struct ipu_image *in, struct ipu_image *out,
- enum ipu_rotate_mode rot_mode)
-{
- struct ipu_image_convert_run *run;
- struct completion comp;
- int ret;
-
- init_completion(&comp);
-
- run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
- image_convert_sync_complete, &comp);
- if (IS_ERR(run))
- return PTR_ERR(run);
-
- ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
- ret = (ret == 0) ? -ETIMEDOUT : 0;
-
- ipu_image_convert_unprepare(run->ctx);
- kfree(run);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
-
int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
struct ipu_image_convert_priv *priv;
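
The removed ipu_image_convert_sync() was a thin completion-based wrapper over the asynchronous ipu_image_convert(); callers that still need synchronous behaviour can reproduce it. A minimal sketch of that pattern under the same 10 s timeout, with convert_done() a hypothetical callback name and error-path cleanup trimmed:

#include <linux/completion.h>
#include <linux/err.h>

static void convert_done(struct ipu_image_convert_run *run, void *data)
{
        complete(data);                 /* wake the synchronous waiter */
}

/* caller side: */
        int ret = 0;
        DECLARE_COMPLETION_ONSTACK(comp);
        struct ipu_image_convert_run *run;

        run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
                                convert_done, &comp);
        if (IS_ERR(run))
                return PTR_ERR(run);
        if (!wait_for_completion_timeout(&comp, msecs_to_jiffies(10000)))
                ret = -ETIMEDOUT;
        ipu_image_convert_unprepare(run->ctx);
        kfree(run);
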
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index 3884acb7995a..16322b2137f8 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -216,8 +216,6 @@ void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync);
int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
-bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
-
int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
unsigned long base, u32 module, struct clk *clk_ipu);
void ipu_csi_exit(struct ipu_soc *ipu, int id);
diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c
index a593b232b6d3..af9631fd4256 100644
--- a/drivers/gpu/ipu-v3/ipu-vdi.c
+++ b/drivers/gpu/ipu-v3/ipu-vdi.c
@@ -150,17 +150,6 @@ void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
}
EXPORT_SYMBOL_GPL(ipu_vdi_setup);
-void ipu_vdi_unsetup(struct ipu_vdi *vdi)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&vdi->lock, flags);
- ipu_vdi_write(vdi, 0, VDI_FSIZE);
- ipu_vdi_write(vdi, 0, VDI_C);
- spin_unlock_irqrestore(&vdi->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
-
int ipu_vdi_enable(struct ipu_vdi *vdi)
{
unsigned long flags;
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 4cfa494243b9..da9c64152a60 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -337,9 +337,7 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflis
list_for_each_entry(pageref, pagereflist, list) {
y_low = pageref->offset / info->fix.line_length;
y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length;
- dev_dbg(info->device,
- "page->index=%lu y_low=%d y_high=%d\n",
- pageref->page->index, y_low, y_high);
+ dev_dbg(info->device, "y_low=%d y_high=%d\n", y_low, y_high);
if (y_high > info->var.yres - 1)
y_high = info->var.yres - 1;
if (y_low < dirty_lines_start)
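
With page->index gone from the message, the dirty line range is still computed purely from the pageref byte offset. A worked example, assuming PAGE_SIZE = 4096 and a hypothetical 320-byte line_length:

        /*
         * pageref->offset = 8192, line_length = 320:
         *   y_low  = 8192 / 320          = 25
         *   y_high = (8192 + 4095) / 320 = 38
         * so this page dirties display lines 25..38, later clamped to
         * yres - 1 and folded into dirty_lines_start/end.
         */
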
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 89a34dff85a4..5ae4241959f2 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -630,6 +630,8 @@ int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable);
+int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count);
int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index a1347e47e9d5..d8539174ca11 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -96,6 +96,9 @@ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index fdae947682cd..2bf893eabb4b 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,6 +35,7 @@
*/
#include <linux/kref.h>
+#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -575,6 +576,19 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje
return (obj->handle_count > 1) || obj->dma_buf;
}
+/**
+ * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
+ * @obj: the GEM object
+ *
+ * Returns:
+ * True if the GEM object's buffer has been imported, false otherwise
+ */
+static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
+{
+ /* The dma-buf's priv field points to the original GEM object. */
+ return obj->dma_buf && (obj->dma_buf->priv != obj);
+}
+
#ifdef CONFIG_LOCKDEP
/**
* drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
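
drm_gem_is_imported() distinguishes an imported buffer from a self-exported one: exporting also sets obj->dma_buf, but then dma_buf->priv points back at the same GEM object. A minimal usage sketch, with my_driver_evict() a hypothetical driver function:

static int my_driver_evict(struct drm_gem_object *obj)
{
        if (drm_gem_is_imported(obj))
                return -EBUSY;  /* backing storage lives in the exporter */

        /* ... evict locally owned backing pages ... */
        return 0;
}
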
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index d22e3fb53631..cef5a6b5a4d6 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -120,7 +120,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
{
return (shmem->madv > 0) &&
!shmem->vmap_use_count && shmem->sgt &&
- !shmem->base.dma_buf && !shmem->base.import_attach;
+ !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}
void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index afdd46ef04f7..11d59ce0bac0 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -95,8 +95,6 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
sizeof(_type), \
offsetof(_type, _member), \
_feat))
-struct drm_modeset_acquire_ctx *
-drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
struct drm_atomic_state *
drm_kunit_helper_atomic_state_alloc(struct kunit *test,
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 94400a78031f..bd40a443385c 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -346,7 +346,6 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
-int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
@@ -379,6 +378,7 @@ void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
u16 start, u16 end);
void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
u16 scanline);
+void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
/**
* mipi_dsi_generic_write_seq - transmit data using a generic write packet
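
mipi_dsi_dcs_set_tear_off() loses its single-shot form in favour of the multi-context style, where struct mipi_dsi_multi_context accumulates the first error so calls can be chained without per-call checks. A sketch of a panel disable path using it; panel_disable() is a hypothetical function:

static void panel_disable(struct mipi_dsi_device *dsi)
{
        struct mipi_dsi_multi_context ctx = { .dsi = dsi };

        mipi_dsi_dcs_set_tear_off_multi(&ctx);
        mipi_dsi_dcs_set_display_off_multi(&ctx);

        if (ctx.accum_err)
                dev_err(&dsi->dev, "disable failed: %d\n", ctx.accum_err);
}
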
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 9732f514566d..f31eba1c7cab 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -584,9 +584,15 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
* Prefer drm_device based logging over device or prink based logging.
*/
+/* Helper to enforce struct drm_device type */
+static inline struct device *__drm_to_dev(const struct drm_device *drm)
+{
+ return drm ? drm->dev : NULL;
+}
+
/* Helper for struct drm_device based logging. */
#define __drm_printk(drm, level, type, fmt, ...) \
- dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__)
+ dev_##level##type(__drm_to_dev(drm), "[drm] " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
@@ -620,25 +626,25 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
#define drm_dbg_core(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define drm_dbg_driver(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg_driver(drm, fmt, ...) \
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define drm_dbg_prime(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define drm_dbg_atomic(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define drm_dbg_vbl(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define drm_dbg_state(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_STATE, fmt, ##__VA_ARGS__)
#define drm_dbg_lease(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define drm_dbg_dp(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DP, fmt, ##__VA_ARGS__)
#define drm_dbg_drmres(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__)
@@ -727,10 +733,9 @@ void __drm_err(const char *format, ...);
#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
- const struct drm_device *drm_ = (drm); \
\
if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \
- drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
+ drm_dev_printk(__drm_to_dev(drm), KERN_DEBUG, fmt, ## __VA_ARGS__); \
})
#define drm_dbg_ratelimited(drm, fmt, ...) \
@@ -752,13 +757,13 @@ void __drm_err(const char *format, ...);
/* Helper for struct drm_device based WARNs */
#define drm_WARN(drm, condition, format, arg...) \
WARN(condition, "%s %s: [drm] " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ONCE(drm, condition, format, arg...) \
WARN_ONCE(condition, "%s %s: [drm] " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ON(drm, x) \
drm_WARN((drm), (x), "%s", \
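
The inline helper changes nothing at runtime but adds type enforcement: the old ternary expanded for any pointer whose type has a struct device *dev member, while __drm_to_dev() only accepts a struct drm_device *, turning mismatched pointer types into compile errors. The NULL tolerance of the old expression is preserved, as in this small sketch:

        /*
         * __drm_to_dev(NULL) returns NULL, so device-less logging still
         * works, e.g. before a drm_device has been allocated.
         */
        drm_dbg_kms(NULL, "no drm_device yet");
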
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 6bf458dbce84..50928a7ae98e 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -71,12 +71,6 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_COUNT
};
-/* Used to choose between FIFO and RR job-scheduling */
-extern int drm_sched_policy;
-
-#define DRM_SCHED_POLICY_RR 0
-#define DRM_SCHED_POLICY_FIFO 1
-
/**
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
@@ -338,8 +332,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- struct spsc_node queue_node;
- struct list_head list;
+ u64 id;
+
+ /**
+ * @submit_ts:
+ *
+ * When the job was pushed into the entity queue.
+ */
+ ktime_t submit_ts;
/**
* @sched:
@@ -349,24 +349,30 @@ struct drm_sched_job {
* has finished.
*/
struct drm_gpu_scheduler *sched;
+
struct drm_sched_fence *s_fence;
+ struct drm_sched_entity *entity;
+ enum drm_sched_priority s_priority;
u32 credits;
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned int last_dependency;
+ atomic_t karma;
+
+ struct spsc_node queue_node;
+ struct list_head list;
/*
* work is used only after finish_cb has been used and will not be
* accessed anymore.
*/
union {
- struct dma_fence_cb finish_cb;
- struct work_struct work;
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
};
- uint64_t id;
- atomic_t karma;
- enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+
/**
* @dependencies:
*
@@ -375,24 +381,8 @@ struct drm_sched_job {
* drm_sched_job_add_implicit_dependencies().
*/
struct xarray dependencies;
-
- /** @last_dependency: tracks @dependencies as they signal */
- unsigned long last_dependency;
-
- /**
- * @submit_ts:
- *
- * When the job was pushed into the entity queue.
- */
- ktime_t submit_ts;
};
-static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
- int threshold)
-{
- return s_job && atomic_inc_return(&s_job->karma) > threshold;
-}
-
enum drm_gpu_sched_stat {
DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
DRM_GPU_SCHED_STAT_NOMINAL,
@@ -570,14 +560,36 @@ struct drm_sched_init_args {
struct device *dev;
};
+/* Scheduler operations */
+
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_init_args *args);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
+/* Jobs */
+
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
u32 credits, void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
@@ -592,30 +604,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
bool write);
bool drm_sched_job_has_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
-
-void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
- struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
-
-void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
-void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
-void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
-void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
+}
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
- struct drm_sched_rq *rq, ktime_t ts);
+/* Entities */
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
@@ -625,29 +623,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);
-
-struct drm_sched_fence *drm_sched_fence_alloc(
- struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_init(struct drm_sched_fence *fence,
- struct drm_sched_entity *entity);
-void drm_sched_fence_free(struct drm_sched_fence *fence);
-
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
- struct dma_fence *parent);
-void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
-
-unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
-void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
- unsigned long remaining);
-struct drm_gpu_scheduler *
-drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
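
drm_sched_invalidate_job() both bumps the job's karma and compares it against a driver-chosen threshold. A sketch of how a timeout handler might use it together with the stop/resubmit/start entry points grouped above; my_timedout_job() and MY_HANG_LIMIT are hypothetical:

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
{
        struct drm_gpu_scheduler *sched = job->sched;

        drm_sched_stop(sched, job);

        if (drm_sched_invalidate_job(job, MY_HANG_LIMIT)) {
                /* guilty beyond the hang limit: don't resubmit */
        } else {
                drm_sched_resubmit_jobs(sched);
        }

        drm_sched_start(sched, 0);
        return DRM_GPU_SCHED_STAT_NOMINAL;
}
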
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
new file mode 100644
index 000000000000..24ad120b8827
--- /dev/null
+++ b/include/drm/ttm/ttm_backup.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _TTM_BACKUP_H_
+#define _TTM_BACKUP_H_
+
+#include <linux/mm_types.h>
+#include <linux/shmem_fs.h>
+
+struct ttm_backup;
+
+/**
+ * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
+ * @handle: The handle to convert.
+ *
+ * Converts an opaque handle received from the
+ * struct ttm_backup_ops::backup_page() function to an (invalid)
+ * struct page pointer suitable for a struct page array.
+ *
+ * Return: An (invalid) struct page pointer.
+ */
+static inline struct page *
+ttm_backup_handle_to_page_ptr(unsigned long handle)
+{
+ return (struct page *)(handle << 1 | 1);
+}
+
+/**
+ * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle
+ * @page: The struct page pointer to check.
+ *
+ * Return: true if the struct page pointer is a handle returned from
+ * ttm_backup_handle_to_page_ptr(). False otherwise.
+ */
+static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
+{
+ return (unsigned long)page & 1;
+}
+
+/**
+ * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle
+ * @page: The struct page pointer to convert
+ *
+ * Return: The handle that was previously used in
+ * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
+ * for use as an argument to the struct ttm_backup_ops drop() or
+ * copy_backed_up_page() functions.
+ */
+static inline unsigned long
+ttm_backup_page_ptr_to_handle(const struct page *page)
+{
+ WARN_ON(!ttm_backup_page_ptr_is_handle(page));
+ return (unsigned long)page >> 1;
+}
+
+void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle);
+
+int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+ pgoff_t handle, bool intr);
+
+s64
+ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp);
+
+void ttm_backup_fini(struct ttm_backup *backup);
+
+u64 ttm_backup_bytes_avail(void);
+
+struct ttm_backup *ttm_backup_shmem_create(loff_t size);
+
+#endif
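
The handle/pointer conversion above relies on low-bit tagging: real struct page pointers are at least word aligned, so bit 0 is never set for them, which lets backed-up handles share a struct page * array with live page pointers. A round-trip sketch:

        unsigned long handle = 42;
        struct page *p = ttm_backup_handle_to_page_ptr(handle);

        /* (42 << 1) | 1 == 85: tagged, never a valid page pointer */
        WARN_ON(!ttm_backup_page_ptr_is_handle(p));
        WARN_ON(ttm_backup_page_ptr_to_handle(p) != handle);
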
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 8ea11cd8df39..903cd1030110 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -226,6 +226,27 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target);
/**
+ * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
+ * @purge: Purge the content rather than backing it up.
+ * @writeback: Attempt to immediately write content to swap space.
+ * @allow_move: Allow moving to system before shrinking. This is typically
+ * not desired for zombie- or ghost objects (with zombie object meaning
+ * objects with a zero gem object refcount)
+ */
+struct ttm_bo_shrink_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+ u32 allow_move : 1;
+};
+
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags);
+
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);
+
+bool ttm_bo_shrink_avoid_wait(void);
+
+/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
@@ -467,4 +488,76 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
int ttm_bo_populate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
+/* Driver LRU walk helpers initially targeted for shrinking. */
+
+/**
+ * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
+ */
+struct ttm_bo_lru_cursor {
+ /** @res_curs: Embedded struct ttm_resource_cursor. */
+ struct ttm_resource_cursor res_curs;
+ /**
+ * @ctx: The struct ttm_operation_ctx used while looping.
+ * It governs the locking mode.
+ */
+ struct ttm_operation_ctx *ctx;
+ /**
+ * @bo: Buffer object pointer if a buffer object is refcounted,
+ * NULL otherwise.
+ */
+ struct ttm_buffer_object *bo;
+ /**
+ * @needs_unlock: Valid iff @bo != NULL. The bo resv needs
+ * unlock before the next iteration or after loop exit.
+ */
+ bool needs_unlock;
+};
+
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);
+
+/*
+ * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
+ */
+DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
+ if (_T) {ttm_bo_lru_cursor_fini(_T); },
+ ttm_bo_lru_cursor_init(curs, man, ctx),
+ struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
+static inline void *
+class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
+{ return *_T; }
+#define class_ttm_bo_lru_cursor_is_conditional false
+
+/**
+ * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
+ * resources on LRU lists.
+ * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
+ * @_man: The resource manager whose LRU lists to iterate over.
+ * @_ctx: The struct ttm_operation_context to govern the @_bo locking.
+ * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
+ * for the current iteration.
+ *
+ * Iterate over all resources of @_man and for each resource, attempt to
+ * reference and lock (using the locking mode detailed in @_ctx) the buffer
+ * object it points to. If successful, assign @_bo to the address of the
+ * buffer object and update @_cursor. The iteration is guarded in the
+ * sense that @_cursor will be initialized before the loop starts and
+ * cleaned up when the loop terminates, even if it is terminated
+ * prematurely by, for example, a return or break statement. Exiting the
+ * loop will also unlock
+ * (if needed) and unreference @_bo.
+ */
+#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo) \
+ scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx) \
+ for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo); \
+ (_bo) = ttm_bo_lru_cursor_next(_cursor))
+
#endif
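
A usage sketch for the guarded iterator, e.g. inside a shrinker scan; my_shrink_one() and the stop condition are hypothetical. Because the body runs inside scoped_guard(), cursor cleanup and the final unlock/unreference happen even when leaving via break:

        struct ttm_bo_lru_cursor curs;
        struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
        struct ttm_buffer_object *bo;
        long freed = 0;

        ttm_bo_lru_for_each_reserved_guarded(&curs, man, &ctx, bo) {
                freed += my_shrink_one(bo);     /* bo is locked + referenced */
                if (freed >= target)
                        break;                  /* cleanup still runs */
        }
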
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 160d954a261e..54cd34a6e4c0 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -33,6 +33,7 @@
struct device;
struct seq_file;
+struct ttm_backup_flags;
struct ttm_operation_ctx;
struct ttm_pool;
struct ttm_tt;
@@ -89,6 +90,13 @@ void ttm_pool_fini(struct ttm_pool *pool);
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+void ttm_pool_drop_backed_up(struct ttm_tt *tt);
+
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *ttm,
+ const struct ttm_backup_flags *flags);
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
int ttm_pool_mgr_init(unsigned long num_pages);
void ttm_pool_mgr_fini(void);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 991edafdb2dd..13cf47f3322f 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -32,11 +32,13 @@
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
+struct ttm_backup;
struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
+struct ttm_pool_tt_restore;
/**
* struct ttm_tt - This is a structure holding the pages, caching- and aperture
@@ -85,17 +87,22 @@ struct ttm_tt {
* fault handling abuses the DMA api a bit and dma_map_attrs can't be
* used to assure pgprot always matches.
*
+ * TTM_TT_FLAG_BACKED_UP: TTM internal only. This is set if the
+ * struct ttm_tt has been (possibly partially) backed up.
+ *
* TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
* set by TTM after ttm_tt_populate() has successfully returned, and is
* then unset when TTM calls ttm_tt_unpopulate().
+ *
*/
#define TTM_TT_FLAG_SWAPPED BIT(0)
#define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
#define TTM_TT_FLAG_EXTERNAL BIT(2)
#define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
#define TTM_TT_FLAG_DECRYPTED BIT(4)
+#define TTM_TT_FLAG_BACKED_UP BIT(5)
-#define TTM_TT_FLAG_PRIV_POPULATED BIT(5)
+#define TTM_TT_FLAG_PRIV_POPULATED BIT(6)
uint32_t page_flags;
/** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
@@ -106,10 +113,19 @@ struct ttm_tt {
/** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
/**
+ * @backup: Pointer to backup struct for backed up tts.
+ * Could be unified with @swap_storage. Meanwhile, the driver's
+ * ttm_tt_create() callback is responsible for assigning
+ * this field.
+ */
+ struct ttm_backup *backup;
+ /**
* @caching: The current caching state of the pages, see enum
* ttm_caching.
*/
enum ttm_caching caching;
+ /** @restore: Partial restoration from backup state. TTM private */
+ struct ttm_pool_tt_restore *restore;
};
/**
@@ -129,9 +145,38 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
}
+/**
+ * ttm_tt_is_swapped() - Whether the ttm_tt is swapped out or backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if swapped or backed up, false otherwise.
+ */
static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt)
{
- return tt->page_flags & TTM_TT_FLAG_SWAPPED;
+ return tt->page_flags & (TTM_TT_FLAG_SWAPPED | TTM_TT_FLAG_BACKED_UP);
+}
+
+/**
+ * ttm_tt_is_backed_up() - Whether the ttm_tt is backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_backed_up(const struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_TT_FLAG_BACKED_UP;
+}
+
+/**
+ * ttm_tt_clear_backed_up() - Clear the ttm_tt backed-up status
+ * @tt: The struct ttm_tt.
+ *
+ * Drivers can use this function to clear the backed-up status,
+ * for example before destroying or re-validating a purged tt.
+ */
+static inline void ttm_tt_clear_backed_up(struct ttm_tt *tt)
+{
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
}
/**
@@ -235,6 +280,26 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
struct ttm_tt *tt);
unsigned long ttm_tt_pages_limit(void);
+
+/**
+ * struct ttm_backup_flags - Flags to govern backup behaviour.
+ * @purge: Free pages without backing up. Bypass pools.
+ * @writeback: Attempt to copy contents directly to swap space, even
+ * if that means blocking on writes to external memory.
+ */
+struct ttm_backup_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+};
+
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags);
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
+int ttm_tt_setup_backup(struct ttm_tt *tt);
+
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
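
A sketch of a driver-side backup call using the new flags, assuming the driver's ttm_tt_create() already assigned tt->backup (e.g. via ttm_backup_shmem_create()); purge frees pages without copying, writeback pushes content straight to swap:

        struct ttm_backup_flags flags = { .writeback = 1 };
        long ret;

        ret = ttm_tt_backup(bdev, tt, flags);
        if (ret < 0)
                return ret;     /* negative errno on failure */
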
diff --git a/include/dt-bindings/clock/qcom,dsi-phy-28nm.h b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
new file mode 100644
index 000000000000..ab94d58377a1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+#define _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+
+#endif
diff --git a/include/linux/component.h b/include/linux/component.h
index df4aa75c9e7c..9d6c66401280 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -3,7 +3,7 @@
#define COMPONENT_H
#include <linux/stddef.h>
-
+#include <linux/types.h>
struct device;
@@ -90,6 +90,8 @@ int component_compare_dev_name(struct device *dev, void *data);
void component_master_del(struct device *,
const struct component_master_ops *);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops);
struct component_match;
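
component_master_is_bound() lets a parent driver query whether its aggregate device has finished binding before touching shared state. A hedged sketch, with my_master_ops standing in for the driver's existing struct component_master_ops:

        if (!component_master_is_bound(parent_dev, &my_master_ops))
                return -EPROBE_DEFER;   /* aggregate not assembled yet */
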
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index fa9f9846b88e..1e59344c5673 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -43,9 +43,10 @@
* - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
* - 1.16 - Add contiguous VRAM allocation flag
* - 1.17 - Add SDMA queue creation with target SDMA engine ID
+ * - 1.18 - Rename pad in set_memory_policy_args to misc_process_flag
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 17
+#define KFD_IOCTL_MINOR_VERSION 18
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -62,6 +63,8 @@ struct kfd_ioctl_get_version_args {
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
+#define KFD_MIN_QUEUE_RING_SIZE 1024
+
struct kfd_ioctl_create_queue_args {
__u64 ring_base_address; /* to KFD */
__u64 write_pointer_address; /* from KFD */
@@ -148,6 +151,9 @@ struct kfd_dbg_device_info_entry {
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
+/* Misc. per process flags */
+#define KFD_PROC_FLAG_MFMA_HIGH_PRECISION (1 << 0)
+
struct kfd_ioctl_set_memory_policy_args {
__u64 alternate_aperture_base; /* to KFD */
__u64 alternate_aperture_size; /* to KFD */
@@ -155,7 +161,7 @@ struct kfd_ioctl_set_memory_policy_args {
__u32 gpu_id; /* to KFD */
__u32 default_policy; /* to KFD */
__u32 alternate_policy; /* to KFD */
- __u32 pad;
+ __u32 misc_process_flag; /* to KFD */
};
/*
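
From userspace, the renamed field carries per-process flags where there used to be padding. A minimal sketch of requesting high-precision MFMA through the existing memory-policy ioctl; kfd_fd and the zeroed aperture fields are assumptions:

        struct kfd_ioctl_set_memory_policy_args args = {
                .gpu_id = gpu_id,
                .default_policy = KFD_IOC_CACHE_POLICY_COHERENT,
                .alternate_policy = KFD_IOC_CACHE_POLICY_NONCOHERENT,
                .misc_process_flag = KFD_PROC_FLAG_MFMA_HIGH_PRECISION,
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_SET_MEMORY_POLICY, &args))
                perror("set_memory_policy");
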
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
index 859b8e91d4d3..1125fe47959f 100644
--- a/include/uapi/linux/kfd_sysfs.h
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -63,6 +63,9 @@
#define HSA_CAP_PER_QUEUE_RESET_SUPPORTED 0x80000000
#define HSA_CAP_RESERVED 0x000f8000
+#define HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED 0x00000001
+#define HSA_CAP2_RESERVED 0xfffffffe
+
/* debug_prop bits in node properties */
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT 0
diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h
index 3c71b8b94b33..003b3927ede5 100644
--- a/include/video/imx-ipu-image-convert.h
+++ b/include/video/imx-ipu-image-convert.h
@@ -41,19 +41,6 @@ typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run,
void *ctx);
/**
- * ipu_image_convert_enum_format() - enumerate the image converter's
- * supported input and output pixel formats.
- *
- * @index: pixel format index
- * @fourcc: v4l2 fourcc for this index
- *
- * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise.
- *
- * In V4L2, drivers can call ipu_image_enum_format() in .enum_fmt.
- */
-int ipu_image_convert_enum_format(int index, u32 *fourcc);
-
-/**
* ipu_image_convert_adjust() - adjust input/output images to IPU restrictions.
*
* @in: input image format, adjusted on return
@@ -176,23 +163,4 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
ipu_image_convert_cb_t complete,
void *complete_context);
-/**
- * ipu_image_convert_sync() - synchronous single image conversion request
- *
- * @ipu: the IPU handle to use for the conversion
- * @ic_task: the IC task to use for the conversion
- * @in: input image format
- * @out: output image format
- * @rot_mode: rotation mode
- *
- * Carry out a single image conversion. Returns when the conversion
- * completes. The input/output formats and rotation mode must already
- * meet IPU retrictions. The created context is automatically unprepared
- * and the run freed on return.
- */
-int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
- struct ipu_image *in, struct ipu_image *out,
- enum ipu_rotate_mode rot_mode);
-
-
#endif /* __IMX_IPU_IMAGE_CONVERT_H__ */
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index c422a403c099..c89574b6f527 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -262,7 +262,6 @@ void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off);
void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride,
u32 pixelformat);
void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
-int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch);
void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch);
void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
@@ -270,7 +269,6 @@ void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
const struct ipu_rgb *rgb);
int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
-void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset,
@@ -361,13 +359,8 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
const struct v4l2_mbus_config *mbus_cfg,
const struct v4l2_mbus_framefmt *infmt,
const struct v4l2_mbus_framefmt *outfmt);
-bool ipu_csi_is_interlaced(struct ipu_csi *csi);
-void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w);
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w);
void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert);
-void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
- u32 r_value, u32 g_value, u32 b_value,
- u32 pix_clk);
int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
struct v4l2_mbus_framefmt *mbus_fmt);
int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
@@ -445,10 +438,6 @@ int ipu_ic_task_init_rsc(struct ipu_ic *ic,
int in_width, int in_height,
int out_width, int out_height,
u32 rsc);
-int ipu_ic_task_graphics_init(struct ipu_ic *ic,
- const struct ipu_ic_colorspace *g_in_cs,
- bool galpha_en, u32 galpha,
- bool colorkey_en, u32 colorkey);
void ipu_ic_task_enable(struct ipu_ic *ic);
void ipu_ic_task_disable(struct ipu_ic *ic);
int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
@@ -467,7 +456,6 @@ struct ipu_vdi;
void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field);
void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel);
void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres);
-void ipu_vdi_unsetup(struct ipu_vdi *vdi);
int ipu_vdi_enable(struct ipu_vdi *vdi);
int ipu_vdi_disable(struct ipu_vdi *vdi);
struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu);
@@ -488,8 +476,6 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
bool hflip, bool vflip);
-int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
- bool hflip, bool vflip);
struct ipu_client_platformdata {
int csi;