-rw-r--r--  .clang-format | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml | 90
-rw-r--r--  Documentation/gpu/automated_testing.rst | 4
-rw-r--r--  Documentation/gpu/driver-uapi.rst | 5
-rw-r--r--  Documentation/gpu/drm-kms-helpers.rst | 15
-rw-r--r--  Documentation/gpu/nouveau.rst | 3
-rw-r--r--  Documentation/gpu/todo.rst | 13
-rw-r--r--  Documentation/gpu/vgaarbiter.rst | 6
-rw-r--r--  MAINTAINERS | 12
-rw-r--r--  drivers/accel/amdxdna/TODO | 1
-rw-r--r--  drivers/accel/amdxdna/aie2_ctx.c | 65
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.c | 411
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.h | 24
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.c | 11
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.h | 2
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.c | 6
-rw-r--r--  drivers/accel/qaic/qaic_debugfs.c | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 167
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c | 3
-rw-r--r--  drivers/dma-buf/sw_sync.c | 16
-rw-r--r--  drivers/dma-buf/sync_debug.c | 21
-rw-r--r--  drivers/dma-buf/udmabuf.c | 1
-rw-r--r--  drivers/firmware/efi/efi.c | 1
-rw-r--r--  drivers/firmware/sysfb_simplefb.c | 31
-rw-r--r--  drivers/gpu/drm/Kconfig | 111
-rw-r--r--  drivers/gpu/drm/Kconfig.debug | 116
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/adp/adp-mipi.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 8
-rw-r--r--  drivers/gpu/drm/ast/ast_cursor.c | 45
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 17
-rw-r--r--  drivers/gpu/drm/ast/ast_mm.c | 26
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 77
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 24
-rw-r--r--  drivers/gpu/drm/ast/ast_reg.h | 2
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 33
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 33
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/aux-bridge.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/aux-hpd-bridge.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c | 207
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h | 2
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 77
-rw-r--r--  drivers/gpu/drm/bridge/chipone-icn6211.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/chrontel-ch7033.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/fsl-ldb.c | 9
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx-ldb-helper.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx-ldb-helper.h | 2
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c | 9
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6263.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c | 47
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9211.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/lvds-codec.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/microchip-lvds.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/nwl-dsi.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8640.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/samsung-dsim.c | 10
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/simple-bridge.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/tc358762.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/tc358764.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/tc358768.c | 41
-rw-r--r--  drivers/gpu/drm/bridge/tc358775.c | 45
-rw-r--r--  drivers/gpu/drm/bridge/tda998x_drv.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/thc63lvd1024.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/ti-dlpc3433.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi83.c | 38
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 108
-rw-r--r--  drivers/gpu/drm/bridge/ti-tdp158.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/ti-tpd12s015.c | 3
-rw-r--r--  drivers/gpu/drm/ci/arm64.config | 2
-rw-r--r--  drivers/gpu/drm/ci/build.sh | 16
-rw-r--r--  drivers/gpu/drm/ci/build.yml | 14
-rw-r--r--  drivers/gpu/drm/ci/container.yml | 24
-rw-r--r--  drivers/gpu/drm/ci/gitlab-ci.yml | 51
-rwxr-xr-x  drivers/gpu/drm/ci/igt_runner.sh | 11
-rw-r--r--  drivers/gpu/drm/ci/image-tags.yml | 4
-rwxr-xr-x  drivers/gpu/drm/ci/lava-submit.sh | 3
-rw-r--r--  drivers/gpu/drm/ci/test.yml | 76
-rw-r--r--  drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt | 8
-rw-r--r--  drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-amly-fails.txt | 23
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-amly-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-apl-fails.txt | 8
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-apl-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-cml-fails.txt | 20
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-cml-skips.txt | 2
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-glk-fails.txt | 32
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-glk-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt | 13
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt | 5
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt | 9
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-whl-fails.txt | 22
-rw-r--r--  drivers/gpu/drm/ci/xfails/i915-whl-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt | 20
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt | 7
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt | 28
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt | 21
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt | 4
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt | 7
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt | 4
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt | 7
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt | 7
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt | 313
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt | 2
-rw-r--r--  drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt | 30
-rw-r--r--  drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt | 1
-rw-r--r--  drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt | 28
-rw-r--r--  drivers/gpu/drm/ci/xfails/vkms-none-skips.txt | 2
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c | 160
-rw-r--r--  drivers/gpu/drm/display/drm_dp_cec.c | 37
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 376
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 116
-rw-r--r--  drivers/gpu/drm/display/drm_dp_tunnel.c | 20
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_state_helper.c | 294
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 59
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 167
-rw-r--r--  drivers/gpu/drm/drm_bridge_helper.c | 58
-rw-r--r--  drivers/gpu/drm/drm_client.c | 10
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 257
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 38
-rw-r--r--  drivers/gpu/drm/drm_draw.c | 100
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 23
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 10
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c | 378
-rw-r--r--  drivers/gpu/drm/drm_format_internal.h | 160
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 26
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 147
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 4
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 37
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 92
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 6
-rw-r--r--  drivers/gpu/drm/drm_panic_qr.rs | 6
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 7
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 11
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 47
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 41
-rw-r--r--  drivers/gpu/drm/gma500/mmu.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_modes.c | 31
-rw-r--r--  drivers/gpu/drm/gud/gud_drv.c | 33
-rw-r--r--  drivers/gpu/drm/gud/gud_internal.h | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c | 4
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_free_list.c | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw.c | 12
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_meta.c | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_mips.c | 6
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_trace.c | 23
-rw-r--r--  drivers/gpu/drm/imagination/pvr_gem.c | 8
-rw-r--r--  drivers/gpu/drm/imagination/pvr_hwrt.c | 12
-rw-r--r--  drivers/gpu/drm/imagination/pvr_stream.c | 12
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm_mips.c | 3
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/parallel-display.c | 3
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 5
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 4
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.c | 4
-rw-r--r--  drivers/gpu/drm/mcde/mcde_dsi.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.c | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_dsi.c | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 24
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.c | 18
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 3
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 1
-rw-r--r--  drivers/gpu/drm/mxsfb/lcdif_drv.c | 4
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/conn.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/outp.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-abt-y030xx067a.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-arm-versatile.c | 11
-rw-r--r--  drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c | 11
-rw-r--r--  drivers/gpu/drm/panel/panel-auo-a030jtn01.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c | 11
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c | 11
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-dsi-cm.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-ebbg-ft8719.c | 11
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-himax-hx8394.c | 441
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c | 1683
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c | 238
-rw-r--r--  drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c | 41
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 38
-rw-r--r--  drivers/gpu/drm/panel/panel-synaptics-r63353.c | 68
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h | 16
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_dump.c | 4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_features.h | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 152
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 6
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_regs.h | 36
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gem.h | 4
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c | 4
-rw-r--r--  drivers/gpu/drm/pl111/pl111_versatile.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 8
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c | 3
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c | 2
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c | 120
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h | 1
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c | 3
-rw-r--r--  drivers/gpu/drm/scheduler/.kunitconfig | 12
-rw-r--r--  drivers/gpu/drm/scheduler/Makefile | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 35
-rw-r--r--  drivers/gpu/drm/scheduler/tests/Makefile | 7
-rw-r--r--  drivers/gpu/drm/scheduler/tests/mock_scheduler.c | 359
-rw-r--r--  drivers/gpu/drm/scheduler/tests/sched_tests.h | 226
-rw-r--r--  drivers/gpu/drm/scheduler/tests/tests_basic.c | 476
-rw-r--r--  drivers/gpu/drm/sprd/sprd_dpu.c | 13
-rw-r--r--  drivers/gpu/drm/sprd/sprd_dsi.c | 13
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 15
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 15
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 14
-rw-r--r--  drivers/gpu/drm/stm/lvds.c | 11
-rw-r--r--  drivers/gpu/drm/sysfb/Kconfig | 76
-rw-r--r--  drivers/gpu/drm/sysfb/Makefile | 8
-rw-r--r--  drivers/gpu/drm/sysfb/drm_sysfb_helper.c | 324
-rw-r--r--  drivers/gpu/drm/sysfb/drm_sysfb_helper.h | 136
-rw-r--r--  drivers/gpu/drm/sysfb/efidrm.c | 495
-rw-r--r--  drivers/gpu/drm/sysfb/ofdrm.c (renamed from drivers/gpu/drm/tiny/ofdrm.c) | 364
-rw-r--r--  drivers/gpu/drm/sysfb/simpledrm.c (renamed from drivers/gpu/drm/tiny/simpledrm.c) | 244
-rw-r--r--  drivers/gpu/drm/sysfb/vesadrm.c | 662
-rw-r--r--  drivers/gpu/drm/tegra/dp.c | 67
-rw-r--r--  drivers/gpu/drm/tegra/dp.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 4
-rw-r--r--  drivers/gpu/drm/tests/Makefile | 2
-rw-r--r--  drivers/gpu/drm/tests/drm_atomic_test.c | 153
-rw-r--r--  drivers/gpu/drm/tests/drm_bridge_test.c | 417
-rw-r--r--  drivers/gpu/drm/tests/drm_client_modeset_test.c | 3
-rw-r--r--  drivers/gpu/drm/tests/drm_gem_shmem_test.c | 28
-rw-r--r--  drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c | 158
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c | 61
-rw-r--r--  drivers/gpu/drm/tidss/tidss_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/tiny/Kconfig | 32
-rw-r--r--  drivers/gpu/drm/tiny/Makefile | 2
-rw-r--r--  drivers/gpu/drm/tiny/appletbdrm.c | 26
-rw-r--r--  drivers/gpu/drm/tiny/cirrus-qemu.c | 143
-rw-r--r--  drivers/gpu/drm/tiny/gm12u320.c | 46
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_test.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 20
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 16
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_debugfs.c | 126
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 62
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 22
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 27
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c | 6
-rw-r--r--  drivers/gpu/drm/v3d/v3d_perfmon.c | 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_regs.h | 26
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 6
-rw-r--r--  drivers/gpu/drm/vc4/tests/vc4_mock_output.c | 62
-rw-r--r--  drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c | 154
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c | 3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h | 7
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 2
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c | 15
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 16
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c | 1
-rw-r--r--  drivers/gpu/drm/vkms/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/vkms/Makefile | 5
-rw-r--r--  drivers/gpu/drm/vkms/tests/.kunitconfig | 4
-rw-r--r--  drivers/gpu/drm/vkms/tests/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vkms/tests/vkms_config_test.c | 951
-rw-r--r--  drivers/gpu/drm/vkms/vkms_config.c | 640
-rw-r--r--  drivers/gpu/drm/vkms/vkms_config.h | 437
-rw-r--r--  drivers/gpu/drm/vkms/vkms_connector.c | 61
-rw-r--r--  drivers/gpu/drm/vkms/vkms_connector.h | 26
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 45
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h | 17
-rw-r--r--  drivers/gpu/drm/vkms/vkms_output.c | 176
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c | 844
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h | 81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 27
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 52
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 874
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 71
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 63
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 85
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 7
-rw-r--r--  drivers/gpu/drm/xlnx/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dp.c | 3
-rw-r--r--  drivers/platform/arm64/acer-aspire1-ec.c | 3
-rw-r--r--  drivers/video/screen_info_generic.c | 36
-rw-r--r--  include/drm/display/drm_dp_helper.h | 94
-rw-r--r--  include/drm/drm_atomic.h | 3
-rw-r--r--  include/drm/drm_bridge.h | 195
-rw-r--r--  include/drm/drm_bridge_helper.h | 12
-rw-r--r--  include/drm/drm_device.h | 41
-rw-r--r--  include/drm/drm_edid.h | 2
-rw-r--r--  include/drm/drm_gem.h | 15
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 45
-rw-r--r--  include/drm/drm_kunit_helpers.h | 8
-rw-r--r--  include/drm/drm_mipi_dsi.h | 1
-rw-r--r--  include/drm/drm_mode_config.h | 4
-rw-r--r--  include/drm/drm_panel.h | 41
-rw-r--r--  include/drm/drm_probe_helper.h | 2
-rw-r--r--  include/drm/gpu_scheduler.h | 112
-rw-r--r--  include/linux/dma-buf.h | 27
-rw-r--r--  include/linux/dma-fence.h | 25
-rw-r--r--  include/linux/screen_info.h | 9
-rw-r--r--  include/uapi/drm/asahi_drm.h | 1194
-rw-r--r--  include/uapi/drm/drm.h | 4
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 45
-rw-r--r--  include/uapi/drm/virtgpu_drm.h | 6
-rw-r--r--  include/uapi/linux/virtio_gpu.h | 3
-rw-r--r--  include/video/pixel_format.h | 41
396 files changed, 14850 insertions, 6293 deletions
diff --git a/.clang-format b/.clang-format
index fe1aa1a30d40..7630990aa07a 100644
--- a/.clang-format
+++ b/.clang-format
@@ -690,6 +690,13 @@ ForEachMacros:
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
+ - 'vkms_config_for_each_connector'
+ - 'vkms_config_for_each_crtc'
+ - 'vkms_config_for_each_encoder'
+ - 'vkms_config_for_each_plane'
+ - 'vkms_config_connector_for_each_possible_encoder'
+ - 'vkms_config_encoder_for_each_possible_crtc'
+ - 'vkms_config_plane_for_each_possible_crtc'
- 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
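Listing these iterators under ForEachMacros makes clang-format lay out their bodies like ordinary for loops rather than as function calls. A minimal sketch of the effect, assuming the (config, cursor) signature used by the new vkms_config helpers elsewhere in this series; the loop body is purely illustrative:

	struct vkms_config_plane *plane_cfg;

	/* Indented as a loop because the macro is listed in ForEachMacros. */
	vkms_config_for_each_plane(config, plane_cfg) {
		pr_debug("plane configured\n");
	}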
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index b0de4fd6f3d4..b5c8eb4fa2d1 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -246,6 +246,8 @@ properties:
- osddisplays,osd070t1718-19ts
# One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2045-53ts
+ # POWERTIP PH128800T004-ZZA01 10.1" WXGA TFT LCD panel
+ - powertip,ph128800t004-zza01
# POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel
- powertip,ph128800t006-zhc01
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
index 684c2896d238..31f0c0f038e4 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
@@ -19,6 +19,8 @@ properties:
- const: samsung,atna33xc20
- items:
- enum:
+ # Samsung 14" WQXGA+ (2880×1800 pixels) eDP AMOLED panel
+ - samsung,atna40yk20
# Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
- samsung,atna45af01
# Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
index dc078ceeca9a..43c6d2d72456 100644
--- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
+++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Broadcom V3D GPU
maintainers:
- - Eric Anholt <eric@anholt.net>
+ - Maíra Canal <mcanal@igalia.com>
- Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
properties:
@@ -22,20 +22,12 @@ properties:
- brcm,7278-v3d
reg:
- items:
- - description: hub register (required)
- - description: core0 register (required)
- - description: GCA cache controller register (if GCA controller present)
- - description: bridge register (if no external reset controller)
minItems: 2
+ maxItems: 4
reg-names:
- items:
- - const: hub
- - const: core0
- - enum: [ bridge, gca ]
- - enum: [ bridge, gca ]
minItems: 2
+ maxItems: 4
interrupts:
items:
@@ -58,6 +50,76 @@ required:
- reg-names
- interrupts
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,2711-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,2712-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ - description: SMS state manager register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - const: sms
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,7268-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ - description: GCA cache controller register
+ - description: bridge register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - const: gca
+ - const: bridge
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,7278-v3d
+ then:
+ properties:
+ reg:
+ items:
+ - description: hub register
+ - description: core0 register
+ - description: bridge register
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - const: bridge
+
additionalProperties: false
examples:
@@ -66,9 +128,9 @@ examples:
compatible = "brcm,7268-v3d";
reg = <0xf1200000 0x4000>,
<0xf1208000 0x4000>,
- <0xf1204000 0x100>,
- <0xf1204100 0x100>;
- reg-names = "hub", "core0", "bridge", "gca";
+ <0xf1204100 0x100>,
+ <0xf1204000 0x100>;
+ reg-names = "hub", "core0", "gca", "bridge";
interrupts = <0 78 4>,
<0 77 4>;
};
diff --git a/Documentation/gpu/automated_testing.rst b/Documentation/gpu/automated_testing.rst
index 6d7c6086034d..62aa3ede02a5 100644
--- a/Documentation/gpu/automated_testing.rst
+++ b/Documentation/gpu/automated_testing.rst
@@ -115,6 +115,10 @@ created (eg. https://gitlab.freedesktop.org/janedoe/linux/-/pipelines)
5. The various jobs will be run and when the pipeline is finished, all jobs
should be green unless a regression has been found.
+6. Warnings in the pipeline indicate that lockdep
+(see Documentation/locking/lockdep-design.rst) issues have been detected
+during the tests.
+
How to update test expectations
===============================
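For background on the lockdep note added above: the locking correctness validator most commonly reports an inconsistent lock ordering between two code paths. A purely illustrative C sketch of such an inversion, not taken from any driver in this pull:

	static DEFINE_MUTEX(lock_a);
	static DEFINE_MUTEX(lock_b);

	static void path_one(void)
	{
		mutex_lock(&lock_a);
		mutex_lock(&lock_b);	/* records the ordering a -> b */
		mutex_unlock(&lock_b);
		mutex_unlock(&lock_a);
	}

	static void path_two(void)
	{
		mutex_lock(&lock_b);
		mutex_lock(&lock_a);	/* b -> a: lockdep warns of a possible deadlock */
		mutex_unlock(&lock_a);
		mutex_unlock(&lock_b);
	}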
diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst
index 971cdb4816fc..1f15a8ca1265 100644
--- a/Documentation/gpu/driver-uapi.rst
+++ b/Documentation/gpu/driver-uapi.rst
@@ -27,3 +27,8 @@ drm/xe uAPI
===========
.. kernel-doc:: include/uapi/drm/xe_drm.h
+
+drm/asahi uAPI
+================
+
+.. kernel-doc:: include/uapi/drm/asahi_drm.h
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index b4ee25af1702..5139705089f2 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -233,6 +233,21 @@ Panel Self Refresh Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_self_refresh_helper.c
:export:
+HDMI Atomic State Helpers
+=========================
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c
+ :doc: hdmi helpers
+
+Functions Reference
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c
+ :export:
+
HDCP Helper Functions Reference
===============================
diff --git a/Documentation/gpu/nouveau.rst b/Documentation/gpu/nouveau.rst
index 0f34131ccc27..b8c801e0068c 100644
--- a/Documentation/gpu/nouveau.rst
+++ b/Documentation/gpu/nouveau.rst
@@ -27,3 +27,6 @@ GSP Support
.. kernel-doc:: drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
:doc: GSP message queue element
+
+.. kernel-doc:: drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+ :doc: GSP message handling policy
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 256d0d1cb216..c57777a24e03 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -441,14 +441,15 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>
Level: Intermediate
-Request memory regions in all drivers
--------------------------------------
+Request memory regions in all fbdev drivers
+--------------------------------------------
-Go through all drivers and add code to request the memory regions that the
-driver uses. This requires adding calls to request_mem_region(),
+Old/ancient fbdev drivers do not request their memory properly.
+Go through these drivers and add code to request the memory regions
+that the driver uses. This requires adding calls to request_mem_region(),
pci_request_region() or similar functions. Use helpers for managed cleanup
-where possible.
-
+where possible. Problematic areas include hardware that has exclusive ranges
+like VGA. VGA16fb does not request the range as it is expected.
Drivers are pretty bad at doing this and there used to be conflicts among
DRM and fbdev drivers. Still, it's the correct thing to do.
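As a concrete illustration of what the reworked TODO entry asks for, here is a minimal sketch of claiming a framebuffer aperture before touching it, using the managed helper so the region is released automatically on driver detach; the region name and the use of fb_info fields are illustrative assumptions, not code from this series:

	#include <linux/device.h>
	#include <linux/ioport.h>

	/* Claim the aperture before ioremapping it; this fails if another
	 * driver (for example a VGA driver) already owns the range. */
	if (!devm_request_mem_region(dev, info->fix.smem_start,
				     info->fix.smem_len, "examplefb"))
		return -EBUSY;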
diff --git a/Documentation/gpu/vgaarbiter.rst b/Documentation/gpu/vgaarbiter.rst
index bde3c0afb059..d1e953712cc2 100644
--- a/Documentation/gpu/vgaarbiter.rst
+++ b/Documentation/gpu/vgaarbiter.rst
@@ -11,9 +11,9 @@ Section 7, Legacy Devices.
The Resource Access Control (RAC) module inside the X server [0] existed for
the legacy VGA arbitration task (besides other bus management tasks) when more
-than one legacy device co-exists on the same machine. But the problem happens
+than one legacy device co-exist on the same machine. But the problem happens
when these devices are trying to be accessed by different userspace clients
-(e.g. two server in parallel). Their address assignments conflict. Moreover,
+(e.g. two servers in parallel). Their address assignments conflict. Moreover,
ideally, being a userspace application, it is not the role of the X server to
control bus resources. Therefore an arbitration scheme outside of the X server
is needed to control the sharing of these resources. This document introduces
@@ -106,7 +106,7 @@ In-kernel interface
libpciaccess
------------
-To use the vga arbiter char device it was implemented an API inside the
+To use the vga arbiter char device, an API was implemented inside the
libpciaccess library. One field was added to struct pci_device (each device
on the system)::
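For orientation, userspace normally drives this interface through libpciaccess rather than by writing to /dev/vga_arbiter directly. A hedged sketch of the usual call sequence, with error handling omitted; it is not part of this patch:

	#include <pciaccess.h>

	pci_system_init();
	pci_device_vgaarb_init();

	/* 'dev' is the struct pci_device the client is about to program. */
	pci_device_vgaarb_set_target(dev);
	pci_device_vgaarb_lock();
	/* ... access legacy VGA resources ... */
	pci_device_vgaarb_unlock();

	pci_device_vgaarb_fini();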
diff --git a/MAINTAINERS b/MAINTAINERS
index c59316109e3f..877b2408d0ee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2306,6 +2306,7 @@ F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
F: include/linux/soc/apple/*
+F: include/uapi/drm/asahi_drm.h
ARM/ARTPEC MACHINE SUPPORT
M: Jesper Nilsson <jesper.nilsson@axis.com>
@@ -7370,8 +7371,7 @@ M: Javier Martinez Canillas <javierm@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
-F: drivers/gpu/drm/tiny/ofdrm.c
-F: drivers/gpu/drm/tiny/simpledrm.c
+F: drivers/gpu/drm/sysfb/
F: drivers/video/aperture.c
F: drivers/video/nomodeset.c
F: include/linux/aperture.h
@@ -7853,8 +7853,8 @@ F: drivers/gpu/drm/ci/xfails/meson*
F: drivers/gpu/drm/meson/
DRM DRIVERS FOR ATMEL HLCDC
-M: Sam Ravnborg <sam@ravnborg.org>
-M: Boris Brezillon <bbrezillon@kernel.org>
+M: Manikandan Muralidharan <manikandan.m@microchip.com>
+M: Dharma Balasubiramani <dharma.b@microchip.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -8182,7 +8182,8 @@ F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
DRM AUTOMATED TESTING
-M: Helen Koike <helen.koike@collabora.com>
+M: Helen Koike <helen.fornazier@gmail.com>
+M: Vignesh Raman <vignesh.raman@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -25586,6 +25587,7 @@ F: include/uapi/linux/virtio_gpio.h
VIRTIO GPU DRIVER
M: David Airlie <airlied@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
+M: Dmitry Osipenko <dmitry.osipenko@collabora.com>
R: Gurchetan Singh <gurchetansingh@chromium.org>
R: Chia-I Wu <olvaffe@gmail.com>
L: dri-devel@lists.freedesktop.org
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
index 5119bccd1917..ad8ac6e315b6 100644
--- a/drivers/accel/amdxdna/TODO
+++ b/drivers/accel/amdxdna/TODO
@@ -1,3 +1,2 @@
-- Add import and export BO support
- Add debugfs support
- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 00d215ac866e..e04549f64d69 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -758,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
- struct mm_struct *mm = abo->mem.notifier.mm;
- struct hmm_range range = { 0 };
+ struct amdxdna_umap *mapp;
unsigned long timeout;
+ struct mm_struct *mm;
+ bool found;
int ret;
- XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
- abo->mem.userptr, abo->mem.size);
- range.notifier = &abo->mem.notifier;
- range.start = abo->mem.userptr;
- range.end = abo->mem.userptr + abo->mem.size;
- range.hmm_pfns = abo->mem.pfns;
- range.default_flags = HMM_PFN_REQ_FAULT;
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+again:
+ found = false;
+ down_write(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (mapp->invalid) {
+ found = true;
+ break;
+ }
+ }
- if (!mmget_not_zero(mm))
+ if (!found) {
+ abo->mem.map_invalid = false;
+ up_write(&xdna->notifier_lock);
+ return 0;
+ }
+ kref_get(&mapp->refcnt);
+ up_write(&xdna->notifier_lock);
+
+ XDNA_DBG(xdna, "populate memory range %lx %lx",
+ mapp->vma->vm_start, mapp->vma->vm_end);
+ mm = mapp->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ amdxdna_umap_put(mapp);
return -EFAULT;
+ }
- timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
-again:
- range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+ mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
mmap_read_lock(mm);
- ret = hmm_range_fault(&range);
+ ret = hmm_range_fault(&mapp->range);
mmap_read_unlock(mm);
if (ret) {
if (time_after(jiffies, timeout)) {
@@ -786,21 +801,27 @@ again:
goto put_mm;
}
- if (ret == -EBUSY)
+ if (ret == -EBUSY) {
+ amdxdna_umap_put(mapp);
goto again;
+ }
goto put_mm;
}
- down_read(&xdna->notifier_lock);
- if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
- up_read(&xdna->notifier_lock);
+ down_write(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
goto again;
}
- abo->mem.map_invalid = false;
- up_read(&xdna->notifier_lock);
+ mapp->invalid = false;
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
put_mm:
+ amdxdna_umap_put(mapp);
mmput(mm);
return ret;
}
@@ -908,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
struct drm_gem_object *gobj = to_gobj(abo);
long ret;
- down_write(&xdna->notifier_lock);
- abo->mem.map_invalid = true;
- mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
- up_write(&xdna->notifier_lock);
ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
true, MAX_SCHEDULE_TIMEOUT);
if (!ret || ret == -ERESTARTSYS)
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 606433d73236..26831ec69f89 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -9,7 +9,10 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
+#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include "amdxdna_ctx.h"
@@ -18,6 +21,8 @@
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+MODULE_IMPORT_NS("DMA_BUF");
+
static int
amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
{
@@ -55,57 +60,38 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
return 0;
}
-static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
-{
- struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
- struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
-
- XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
- if (abo->pinned)
- amdxdna_gem_unpin(abo);
-
- if (abo->type == AMDXDNA_BO_DEV) {
- mutex_lock(&abo->client->mm_lock);
- drm_mm_remove_node(&abo->mm_node);
- mutex_unlock(&abo->client->mm_lock);
-
- vunmap(abo->mem.kva);
- drm_gem_object_put(to_gobj(abo->dev_heap));
- drm_gem_object_release(gobj);
- mutex_destroy(&abo->lock);
- kfree(abo);
- return;
- }
-
- if (abo->type == AMDXDNA_BO_DEV_HEAP)
- drm_mm_takedown(&abo->mm);
-
- drm_gem_vunmap_unlocked(gobj, &map);
- mutex_destroy(&abo->lock);
- drm_gem_shmem_free(&abo->base);
-}
-
-static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
- .free = amdxdna_gem_obj_free,
-};
-
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
- mem.notifier);
- struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
+ struct amdxdna_gem_obj *abo = mapp->abo;
+ struct amdxdna_dev *xdna;
- XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
- abo->mem.userptr, abo->mem.size, abo->type);
+ xdna = to_xdna_dev(to_gobj(abo)->dev);
+ XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
+ mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
if (!mmu_notifier_range_blockable(range))
return false;
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mapp->invalid = true;
+ mmu_interval_set_seq(&mapp->notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+
xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ down_write(&xdna->notifier_lock);
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ up_write(&xdna->notifier_lock);
+ }
+
return true;
}
@@ -113,102 +99,310 @@ static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
.invalidate = amdxdna_hmm_invalidate,
};
-static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+
+ down_read(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (!vma || mapp->vma == vma) {
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ if (vma)
+ break;
+ }
+ }
+ up_read(&xdna->notifier_lock);
+}
- if (!xdna->dev_info->ops->hmm_invalidate)
- return;
+static void amdxdna_umap_release(struct kref *ref)
+{
+ struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
+ struct vm_area_struct *vma = mapp->vma;
+ struct amdxdna_dev *xdna;
+
+ mmu_interval_notifier_remove(&mapp->notifier);
+ if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_clear_unevictable(vma->vm_file->f_mapping);
+
+ xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
+ down_write(&xdna->notifier_lock);
+ list_del(&mapp->node);
+ up_write(&xdna->notifier_lock);
+
+ kvfree(mapp->range.hmm_pfns);
+ kfree(mapp);
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp)
+{
+ kref_put(&mapp->refcnt, amdxdna_umap_release);
+}
+
+static void amdxdna_hmm_unreg_work(struct work_struct *work)
+{
+ struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
+ hmm_unreg_work);
- mmu_interval_notifier_remove(&abo->mem.notifier);
- kvfree(abo->mem.pfns);
- abo->mem.pfns = NULL;
+ amdxdna_umap_put(mapp);
}
-static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
- size_t len)
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long len = vma->vm_end - vma->vm_start;
+ unsigned long addr = vma->vm_start;
+ struct amdxdna_umap *mapp;
u32 nr_pages;
int ret;
if (!xdna->dev_info->ops->hmm_invalidate)
return 0;
- if (abo->mem.pfns)
- return -EEXIST;
+ mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
+ if (!mapp)
+ return -ENOMEM;
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
- abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
- GFP_KERNEL);
- if (!abo->mem.pfns)
- return -ENOMEM;
+ mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
+ GFP_KERNEL);
+ if (!mapp->range.hmm_pfns) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
- ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
+ ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
current->mm,
addr,
len,
&amdxdna_hmm_ops);
if (ret) {
XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
- kvfree(abo->mem.pfns);
+ goto free_pfns;
}
- abo->mem.userptr = addr;
+ mapp->range.notifier = &mapp->notifier;
+ mapp->range.start = vma->vm_start;
+ mapp->range.end = vma->vm_end;
+ mapp->range.default_flags = HMM_PFN_REQ_FAULT;
+ mapp->vma = vma;
+ mapp->abo = abo;
+ kref_init(&mapp->refcnt);
+
+ if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
+ abo->mem.userptr = addr;
+ INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
+ if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_set_unevictable(vma->vm_file->f_mapping);
+
+ down_write(&xdna->notifier_lock);
+ list_add_tail(&mapp->node, &abo->mem.umap_list);
+ up_write(&xdna->notifier_lock);
+
+ return 0;
+
+free_pfns:
+ kvfree(mapp->range.hmm_pfns);
+free_map:
+ kfree(mapp);
return ret;
}
+static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long num_pages = vma_pages(vma);
+ unsigned long offset = 0;
+ int ret;
+
+ if (!is_import_bo(abo)) {
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
+ return ret;
+ }
+
+ /* The buffer is based on memory pages. Fix the flag. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages %d", ret);
+ vma->vm_ops->close(vma);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+ ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
+ return ret;
+ }
+
+ do {
+ vm_fault_t fault_ret;
+
+ fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
+ FAULT_FLAG_WRITE, NULL);
+ if (fault_ret & VM_FAULT_ERROR) {
+ vma->vm_ops->close(vma);
+ XDNA_ERR(xdna, "Fault in page failed");
+ return -EFAULT;
+ }
+
+ offset += PAGE_SIZE;
+ } while (--num_pages);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ drm_gem_object_put(to_gobj(abo));
+
+ return 0;
+}
+
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
struct vm_area_struct *vma)
{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- unsigned long num_pages;
int ret;
- ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
+ ret = amdxdna_hmm_register(abo, vma);
if (ret)
return ret;
+ ret = amdxdna_insert_pages(abo, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
+ goto hmm_unreg;
+ }
+
+ XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
+ drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
+ vma->vm_start, gobj->size);
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo, vma);
+ return ret;
+}
+
+static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages = vma_pages(vma);
+ int ret;
+
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
+ vma->vm_private_data = gobj;
+
+ drm_gem_object_get(gobj);
ret = drm_gem_shmem_mmap(&abo->base, vma);
if (ret)
- goto hmm_unreg;
+ goto put_obj;
- num_pages = gobj->size >> PAGE_SHIFT;
- /* Try to insert the pages */
+ /* The buffer is based on memory pages. Fix the flag. */
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
- ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
if (ret)
- XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
+ goto close_vma;
return 0;
-hmm_unreg:
- amdxdna_hmm_unregister(abo);
+close_vma:
+ vma->vm_ops->close(vma);
+put_obj:
+ drm_gem_object_put(gobj);
return ret;
}
-static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
+static const struct dma_buf_ops amdxdna_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = amdxdna_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
- return drm_gem_shmem_vm_ops.fault(vmf);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &amdxdna_dmabuf_ops;
+ exp_info.size = gobj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gobj;
+ exp_info.resv = gobj->resv;
+
+ return drm_gem_dmabuf_export(gobj->dev, &exp_info);
}
-static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
+static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
{
- drm_gem_shmem_vm_ops.open(vma);
+ dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(abo->dma_buf, abo->attach);
+ dma_buf_put(abo->dma_buf);
+ drm_gem_object_release(to_gobj(abo));
+ kfree(abo);
}
-static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
- struct drm_gem_object *gobj = vma->vm_private_data;
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+
+ amdxdna_hmm_unregister(abo, NULL);
+ flush_workqueue(xdna->notifier_wq);
+
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
- amdxdna_hmm_unregister(to_xdna_obj(gobj));
- drm_gem_shmem_vm_ops.close(vma);
+ if (abo->type == AMDXDNA_BO_DEV) {
+ mutex_lock(&abo->client->mm_lock);
+ drm_mm_remove_node(&abo->mm_node);
+ mutex_unlock(&abo->client->mm_lock);
+
+ vunmap(abo->mem.kva);
+ drm_gem_object_put(to_gobj(abo->dev_heap));
+ drm_gem_object_release(gobj);
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+ return;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap(gobj, &map);
+ mutex_destroy(&abo->lock);
+
+ if (is_import_bo(abo)) {
+ amdxdna_imported_obj_free(abo);
+ return;
+ }
+
+ drm_gem_shmem_free(&abo->base);
}
-static const struct vm_operations_struct amdxdna_gem_vm_ops = {
- .fault = amdxdna_gem_vm_fault,
- .open = amdxdna_gem_vm_open,
- .close = amdxdna_gem_vm_close,
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_obj_free,
};
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
- .vm_ops = &amdxdna_gem_vm_ops,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+ .export = amdxdna_gem_prime_export,
};
static struct amdxdna_gem_obj *
@@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
abo->mem.userptr = AMDXDNA_INVALID_ADDR;
abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
abo->mem.size = size;
+ INIT_LIST_HEAD(&abo->mem.umap_list);
return abo;
}
@@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ struct sg_table *sgt;
+ int ret;
+
+ get_dma_buf(dma_buf);
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto put_buf;
+ }
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(gobj)) {
+ ret = PTR_ERR(gobj);
+ goto fail_unmap;
+ }
+
+ abo = to_xdna_obj(gobj);
+ abo->attach = attach;
+ abo->dma_buf = dma_buf;
+
+ return gobj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+put_buf:
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
@@ -417,7 +658,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
+ ret = drm_gem_vmap(to_gobj(abo), &map);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
@@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
int ret;
+ if (is_import_bo(abo))
+ return 0;
+
switch (abo->type) {
case AMDXDNA_BO_SHMEM:
case AMDXDNA_BO_DEV_HEAP:
@@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
+ if (is_import_bo(abo))
+ return;
+
if (abo->type == AMDXDNA_BO_DEV)
abo = abo->dev_heap;
@@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
goto put_obj;
}
- if (abo->type == AMDXDNA_BO_DEV)
+ if (is_import_bo(abo))
+ drm_clflush_sg(abo->base.sgt);
+ else if (abo->type == AMDXDNA_BO_DEV)
drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
else
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index 8ccc0375dd9d..aee97e971d6d 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -6,6 +6,20 @@
#ifndef _AMDXDNA_GEM_H_
#define _AMDXDNA_GEM_H_
+#include <linux/hmm.h>
+
+struct amdxdna_umap {
+ struct vm_area_struct *vma;
+ struct mmu_interval_notifier notifier;
+ struct hmm_range range;
+ struct work_struct hmm_unreg_work;
+ struct amdxdna_gem_obj *abo;
+ struct list_head node;
+ struct kref refcnt;
+ bool invalid;
+ bool unmapped;
+};
+
struct amdxdna_mem {
u64 userptr;
void *kva;
@@ -13,8 +27,7 @@ struct amdxdna_mem {
size_t size;
struct page **pages;
u32 nr_pages;
- struct mmu_interval_notifier notifier;
- unsigned long *pfns;
+ struct list_head umap_list;
bool map_invalid;
};
@@ -31,9 +44,12 @@ struct amdxdna_gem_obj {
struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
u32 assigned_hwctx;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
};
#define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
{
@@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
drm_gem_object_put(to_gobj(abo));
}
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f5b8497cf5ad..f2bf1d374cc7 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = {
.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
.gem_create_object = amdxdna_gem_create_object_cb,
+ .gem_prime_import = amdxdna_gem_prime_import,
};
static const struct amdxdna_dev_info *
@@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fs_reclaim_release(GFP_KERNEL);
}
+ xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+ if (!xdna->notifier_wq)
+ return -ENOMEM;
+
mutex_lock(&xdna->dev_lock);
ret = xdna->dev_info->ops->init(xdna);
mutex_unlock(&xdna->dev_lock);
if (ret) {
XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
- return ret;
+ goto destroy_notifier_wq;
}
ret = amdxdna_sysfs_init(xdna);
@@ -301,6 +306,8 @@ failed_dev_fini:
mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->fini(xdna);
mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+ destroy_workqueue(xdna->notifier_wq);
return ret;
}
@@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct amdxdna_client *client;
+ destroy_workqueue(xdna->notifier_wq);
+
pm_runtime_get_noresume(dev);
pm_runtime_forbid(dev);
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index 37848a8d8031..ab79600911aa 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -6,6 +6,7 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
+#include <linux/workqueue.h>
#include <linux/xarray.h>
#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
@@ -98,6 +99,7 @@ struct amdxdna_dev {
struct list_head client_list;
struct amdxdna_fw_ver fw_ver;
struct rw_semaphore notifier_lock; /* for mmu notifier*/
+ struct workqueue_struct *notifier_wq;
};
/*
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 8741c73b92ce..212d21ad2bbd 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -282,7 +282,7 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_bo_unbind_locked(bo);
mutex_destroy(&bo->lock);
- drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
drm_gem_shmem_free(&bo->base);
}
@@ -362,7 +362,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
if (flags & DRM_IVPU_BO_MAPPABLE) {
dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
+ ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
dma_resv_unlock(bo->base.base.resv);
if (ret)
@@ -387,7 +387,7 @@ void ivpu_bo_free(struct ivpu_bo *bo)
if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
+ drm_gem_shmem_vunmap_locked(&bo->base, &map);
dma_resv_unlock(bo->base.base.resv);
}
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index ba0cf2f94732..a991b8198dc4 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -240,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
mhi_unprepare:
mhi_unprepare_from_transfer(mhi_dev);
destroy_workqueue:
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
out:
return ret;
@@ -253,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
qdev = dev_get_drvdata(&mhi_dev->dev);
mhi_unprepare_from_transfer(qdev->bootlog_ch);
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
qdev->bootlog_ch = NULL;
}
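The qaic change above works because destroy_workqueue() already drains queued and running work before tearing the queue down, so a flush_workqueue() immediately before it is redundant. A minimal sketch of the resulting teardown idiom (helper name illustrative):

	#include <linux/workqueue.h>

	static void bootlog_teardown(struct workqueue_struct *wq)
	{
		/* Drains all pending work items, then frees the workqueue. */
		destroy_workqueue(wq);
	}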
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5baa83b85515..0c48d41dd5eb 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -636,10 +636,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
- return ERR_PTR(-EINVAL);
-
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
@@ -782,7 +778,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers mix
* up the bits, but take care to preserve the low SG_ bits to
- * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * not corrupt the sgt. The mixing is undone on unmap
* before passing the sgt back to the exporter.
*/
for_each_sgtable_sg(sg_table, sg, i)
@@ -790,29 +786,19 @@ static void mangle_sg_table(struct sg_table *sg_table)
#endif
}
-static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction direction)
-{
- struct sg_table *sg_table;
- signed long ret;
-
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
- if (IS_ERR_OR_NULL(sg_table))
- return sg_table;
- if (!dma_buf_attachment_is_dynamic(attach)) {
- ret = dma_resv_wait_timeout(attach->dmabuf->resv,
- DMA_RESV_USAGE_KERNEL, true,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0) {
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
- direction);
- return ERR_PTR(ret);
- }
- }
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
- mangle_sg_table(sg_table);
- return sg_table;
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+ return attach->dmabuf->ops->pin &&
+ (!dma_buf_attachment_is_dynamic(attach) ||
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}
/**
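With the cached-sgt path gone, an attachment counts as dynamic purely by the presence of importer_ops, and an exporter that implements pin() is pinned at map time for non-dynamic importers (or whenever move notification is compiled out). A hedged sketch of the importer-facing flow this rework assumes, using the unlocked wrappers; it is not part of the patch:

	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(dmabuf, dev);	/* static importer: no importer_ops */
	if (IS_ERR(att))
		return PTR_ERR(att);

	/* Pinning (when the exporter supports it) now happens inside the map call. */
	sgt = dma_buf_map_attachment_unlocked(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, att);
		return PTR_ERR(sgt);
	}

	/* ... program the device with sgt ... */

	dma_buf_unmap_attachment_unlocked(att, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, att);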
@@ -935,48 +921,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
- /* When either the importer or the exporter can't handle dynamic
- * mappings we cache the mapping here to avoid issues with the
- * reservation object lock.
- */
- if (dma_buf_attachment_is_dynamic(attach) !=
- dma_buf_is_dynamic(dmabuf)) {
- struct sg_table *sgt;
-
- dma_resv_lock(attach->dmabuf->resv, NULL);
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- ret = dmabuf->ops->pin(attach);
- if (ret)
- goto err_unlock;
- }
-
- sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
- if (!sgt)
- sgt = ERR_PTR(-ENOMEM);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_unpin;
- }
- dma_resv_unlock(attach->dmabuf->resv);
- attach->sgt = sgt;
- attach->dir = DMA_BIDIRECTIONAL;
- }
-
return attach;
err_attach:
kfree(attach);
return ERR_PTR(ret);
-
-err_unpin:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
-
-err_unlock:
- dma_resv_unlock(attach->dmabuf->resv);
-
- dma_buf_detach(dmabuf, attach);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
@@ -995,16 +944,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
-static void __unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sg_table,
- enum dma_data_direction direction)
-{
- /* uses XOR, hence this unmangles */
- mangle_sg_table(sg_table);
-
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
-}
-
/**
* dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
@@ -1020,16 +959,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
return;
dma_resv_lock(dmabuf->resv, NULL);
-
- if (attach->sgt) {
-
- __unmap_dma_buf(attach, attach->sgt, attach->dir);
-
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
- }
list_del(&attach->node);
-
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
@@ -1058,7 +988,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1081,7 +1011,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1115,7 +1045,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
- int r;
+ signed long ret;
might_sleep();
@@ -1124,41 +1054,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt) {
+ if (dma_buf_pin_on_map(attach)) {
+ ret = attach->dmabuf->ops->pin(attach);
/*
- * Two mappings with different directions for the same
- * attachment are not allowed.
+	 * Catch exporters that make the buffer inaccessible even though
+	 * attachments preventing that exist.
*/
- if (attach->dir != direction &&
- attach->dir != DMA_BIDIRECTIONAL)
- return ERR_PTR(-EBUSY);
-
- return attach->sgt;
- }
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = attach->dmabuf->ops->pin(attach);
- if (r)
- return ERR_PTR(r);
- }
+	WARN_ON_ONCE(ret == -EBUSY);
+ if (ret)
+ return ERR_PTR(ret);
}
- sg_table = __map_dma_buf(attach, direction);
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table))
+ goto error_unpin;
- if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- attach->dmabuf->ops->unpin(attach);
-
- if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
- attach->sgt = sg_table;
- attach->dir = direction;
+ /*
+	 * Static importers don't wait for fences; wait here on their behalf.
+ */
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ goto error_unmap;
}
+ mangle_sg_table(sg_table);
#ifdef CONFIG_DMA_API_DEBUG
- if (!IS_ERR(sg_table)) {
+ {
struct scatterlist *sg;
u64 addr;
int len;
@@ -1175,6 +1101,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
+
+error_unmap:
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ sg_table = ERR_PTR(ret);
+
+error_unpin:
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
+
+ return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
@@ -1227,14 +1163,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt == sg_table)
- return;
-
- __unmap_dma_buf(attach, sg_table, direction);
+ mangle_sg_table(sg_table);
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
- if (dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
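The dma-buf rework above drops the cached sg_table path entirely: whether map/unmap also pins the buffer is now decided per attachment by dma_buf_pin_on_map(). The standalone C sketch below is a userspace model, not kernel code; struct model_attach and pin_on_map() are invented names for illustration. It reproduces the decision rule: pin around map when the exporter implements pin() and either the importer attached statically (no importer_ops) or move notification is compiled out.

/* Userspace model of the new pin-on-map rule; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct model_attach {
	bool exporter_has_pin;    /* dmabuf->ops->pin != NULL */
	bool importer_is_dynamic; /* attach->importer_ops != NULL */
};

/* Mirrors dma_buf_pin_on_map(): pin around map/unmap unless both sides are
 * dynamic and CONFIG_DMABUF_MOVE_NOTIFY is enabled. */
static bool pin_on_map(const struct model_attach *a, bool move_notify)
{
	return a->exporter_has_pin &&
	       (!a->importer_is_dynamic || !move_notify);
}

int main(void)
{
	const struct model_attach cases[] = {
		{ .exporter_has_pin = true,  .importer_is_dynamic = false },
		{ .exporter_has_pin = true,  .importer_is_dynamic = true  },
		{ .exporter_has_pin = false, .importer_is_dynamic = false },
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("case %u: pin_on_map = %d\n", i,
		       pin_on_map(&cases[i], true));
	return 0;
}
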
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 26d5dc89ea16..82b1b714300d 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -21,8 +21,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-static struct dma_heap *sys_heap;
-
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
@@ -424,6 +422,7 @@ static const struct dma_heap_ops system_heap_ops = {
static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ struct dma_heap *sys_heap;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f5905d67dedb..849280ae79a9 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
-static void timeline_fence_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- snprintf(str, size, "%lld", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- snprintf(str, size, "%d", parent->value);
-}
-
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
@@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
- .fence_value_str = timeline_fence_value_str,
- .timeline_value_str = timeline_fence_timeline_value_str,
.set_deadline = timeline_fence_set_deadline,
};
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 237bce21d1e7..270daae7d89a 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -82,25 +82,8 @@ static void sync_print_fence(struct seq_file *s,
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = strlen(value);
-
- if (success) {
- seq_printf(s, ": %s", value);
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
+ seq_printf(s, ": %lld", fence->seqno);
+ seq_printf(s, " / %d", parent->value);
seq_putc(s, '\n');
}
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index e74e36a8ecda..7eee3eb47a8e 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -285,7 +285,6 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
}
static const struct dma_buf_ops udmabuf_ops = {
- .cache_sgt_mapping = true,
.map_dma_buf = map_udmabuf,
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 7309394b8fc9..e57bff702b5f 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -558,6 +558,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
__weak __alias(__efi_mem_desc_lookup);
+EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
/*
* Calculate the highest address of an efi memory descriptor.
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 75a186bf8f8e..592d8a644619 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
- /*
- * The meaning of depth and bpp for direct-color formats is
- * inconsistent:
- *
- * - DRM format info specifies depth as the number of color
- * bits; including alpha, but not including filler bits.
- * - Linux' EFI platform code computes lfb_depth from the
- * individual color channels, including the reserved bits.
- * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
- * versions use 15.
- * - On the kernel command line, 'bpp' of 32 is usually
- * XRGB8888 including the filler bits, but 15 is XRGB1555
- * not including the filler bit.
- *
- * It's not easily possible to fix this in struct screen_info,
- * as this could break UAPI. The best solution is to compute
- * bits_per_pixel from the color bits, reserved bits and
- * reported lfb_depth, whichever is highest. In the loop below,
- * ignore simplefb formats with alpha bits, as EFI and VESA
- * don't specify alpha channels.
- */
- if (si->lfb_depth > 8) {
- bits_per_pixel = max(max3(si->red_size + si->red_pos,
- si->green_size + si->green_pos,
- si->blue_size + si->blue_pos),
- si->rsvd_size + si->rsvd_pos);
- bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
- } else {
- bits_per_pixel = si->lfb_depth;
- }
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
const struct simplefb_format *f = &formats[i];
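The open-coded bits-per-pixel derivation is replaced by a call to __screen_info_lfb_bits_per_pixel(); the logic described in the removed comment is unchanged. Below is a minimal standalone sketch of that computation, where struct si_model and lfb_bits_per_pixel() are made-up stand-ins for struct screen_info and the new helper.

#include <stdio.h>

struct si_model {
	unsigned int lfb_depth;
	unsigned int red_size, red_pos;
	unsigned int green_size, green_pos;
	unsigned int blue_size, blue_pos;
	unsigned int rsvd_size, rsvd_pos;
};

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Take the highest of the channel extents, the reserved bits and the
 * reported lfb_depth; at 8 bpp or below, trust lfb_depth as-is. */
static unsigned int lfb_bits_per_pixel(const struct si_model *si)
{
	unsigned int bpp;

	if (si->lfb_depth <= 8)
		return si->lfb_depth;

	bpp = max_u(max_u(si->red_size + si->red_pos,
			  si->green_size + si->green_pos),
		    max_u(si->blue_size + si->blue_pos,
			  si->rsvd_size + si->rsvd_pos));
	return max_u(bpp, si->lfb_depth);
}

int main(void)
{
	/* XRGB8888 reported with lfb_depth == 24 still yields 32 bpp. */
	struct si_model xrgb8888 = {
		.lfb_depth = 24,
		.red_size = 8, .red_pos = 16,
		.green_size = 8, .green_pos = 8,
		.blue_size = 8, .blue_pos = 0,
		.rsvd_size = 8, .rsvd_pos = 24,
	};

	printf("bits_per_pixel = %u\n", lfb_bits_per_pixel(&xrgb8888));
	return 0;
}
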
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2cba2b6ebe1c..3921772ae612 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -26,6 +26,11 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+menu "DRM debugging options"
+depends on DRM
+source "drivers/gpu/drm/Kconfig.debug"
+endmenu
+
if DRM
config DRM_MIPI_DBI
@@ -37,65 +42,6 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_DEBUG_MM
- bool "Insert extra checks and debug info into the DRM range managers"
- default n
- depends on DRM
- depends on STACKTRACE_SUPPORT
- select STACKDEPOT
- help
- Enable allocation tracking of memory manager and leak detection on
- shutdown.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
-
-config DRM_USE_DYNAMIC_DEBUG
- bool "use dynamic debug to implement drm.debug"
- default n
- depends on BROKEN
- depends on DRM
- depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
- depends on JUMP_LABEL
- help
- Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
- Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
- bytes per callsite, the .data costs can be substantial, and
- are therefore configurable.
-
-config DRM_KUNIT_TEST_HELPERS
- tristate
- depends on DRM && KUNIT
- select DRM_KMS_HELPER
- help
- KUnit Helpers for KMS drivers.
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
- select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_STATE_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_EXEC
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_GEM_SHMEM_HELPER
- select DRM_KUNIT_TEST_HELPERS
- select DRM_LIB_RANDOM
- select PRIME_NUMBERS
- default KUNIT_ALL_TESTS
- help
- This builds unit tests for DRM. This option is not useful for
- distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- For more information on KUnit and unit tests in general,
- please refer to the KUnit documentation in
- Documentation/dev-tools/kunit/.
-
- If in doubt, say "N".
-
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -247,23 +193,6 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_KUNIT_TEST
- tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
- default n
- depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
- select DRM_TTM
- select DRM_BUDDY
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
- default KUNIT_ALL_TESTS
- help
- Enables unit tests for TTM, a GPU memory manager subsystem used
- to manage memory buffers. This option is mostly useful for kernel
- developers. It depends on (UML || COMPILE_TEST) since no other driver
- which uses TTM can be loaded while running the tests.
-
- If in doubt, say "N".
-
config DRM_EXEC
tristate
depends on DRM
@@ -335,6 +264,8 @@ config DRM_SCHED
tristate
depends on DRM
+source "drivers/gpu/drm/sysfb/Kconfig"
+
source "drivers/gpu/drm/arm/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
@@ -474,9 +405,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-config DRM_EXPORT_FOR_TESTS
- bool
-
# Separate option as not all DRM drivers use it
config DRM_PANEL_BACKLIGHT_QUIRKS
tristate
@@ -489,31 +417,6 @@ config DRM_PRIVACY_SCREEN
bool
default n
-config DRM_WERROR
- bool "Compile the drm subsystem with warnings as errors"
- depends on DRM && EXPERT
- depends on !WERROR
- default n
- help
- A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule in the drm subsystem.
-
- The drm subsystem enables more warnings than the kernel default, so
- this config option is disabled by default.
-
- If in doubt, say N.
-
-config DRM_HEADER_TEST
- bool "Ensure DRM headers are self-contained and pass kernel-doc"
- depends on DRM && EXPERT && BROKEN
- default n
- help
- Ensure the DRM subsystem headers both under drivers/gpu/drm and
- include/drm compile, are self-contained, have header guards, and have
- no kernel-doc warnings.
-
- If in doubt, say N.
-
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
new file mode 100644
index 000000000000..c493743e8aca
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -0,0 +1,116 @@
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default n
+ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ depends on !WERROR
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT
+ default n
+ help
+ Ensure the DRM subsystem headers both under drivers/gpu/drm and
+ include/drm compile, are self-contained, have header guards, and have
+ no kernel-doc warnings.
+
+ If in doubt, say N.
+
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ select DRM_KMS_HELPER
+ help
+ KUnit Helpers for KMS drivers.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT && MMU
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_BUDDY
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_EXEC
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KUNIT_TEST_HELPERS
+ select DRM_LIB_RANDOM
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_BUDDY
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+config DRM_SCHED_KUNIT_TEST
+ tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
+ select DRM_SCHED
+ depends on DRM && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Choose this option to build unit tests for the DRM scheduler.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_EXPORT_FOR_TESTS
+ bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ed54a546bbe2..b5d5561bbe5f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
+ drm_bridge_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_flip_work.o \
@@ -204,6 +205,7 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
+obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index ad80542b60ed..2b60128e2c69 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
};
static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adp_mipi_drv_private *adp =
container_of(bridge, struct adp_mipi_drv_private, bridge);
- return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 521b9faab180..492813ab1b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
u8 link_status[DP_LINK_STATUS_SIZE];
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 139ab00dee8f..2d3ad7610c2e 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -37,6 +37,7 @@
*/
/* define for signature structure */
+#define AST_HWC_SIGNATURE_SIZE SZ_32
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
#define AST_HWC_SIGNATURE_SizeY 0x08
@@ -45,6 +46,21 @@
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+static unsigned long ast_cursor_vram_size(void)
+{
+ return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;
+}
+
+long ast_cursor_vram_offset(struct ast_device *ast)
+{
+ unsigned long size = ast_cursor_vram_size();
+
+ if (size > ast->vram_size)
+ return -EINVAL;
+
+ return ALIGN_DOWN(ast->vram_size - size, SZ_8);
+}
+
static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
{
u32 csum = 0;
@@ -75,7 +91,7 @@ static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, un
static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
unsigned int width, unsigned int height)
{
- u8 __iomem *dst = ast->cursor_plane.base.vaddr;
+ u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base);
u32 csum;
csum = ast_cursor_calculate_checksum(src, width, height);
@@ -177,7 +193,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct ast_device *ast = to_ast_device(plane->dev);
struct drm_rect damage;
u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
@@ -274,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast)
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
struct ast_plane *ast_plane = &ast_cursor_plane->base;
struct drm_plane *cursor_plane = &ast_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
+ unsigned long size;
+ long offset;
int ret;
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+ size = ast_cursor_vram_size();
+ offset = ast_cursor_vram_offset(ast);
+ if (offset < 0)
+ return offset;
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_plane, offset, size,
0x01, &ast_cursor_plane_funcs,
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR);
@@ -303,7 +310,5 @@ int ast_cursor_plane_init(struct ast_device *ast)
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(cursor_plane);
- ast->vram_fb_available -= size;
-
return 0;
}
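With vram_fb_available gone, the cursor location is computed on demand: ast_cursor_vram_offset() places the HWC image plus its 32-byte signature at the top end of VRAM, aligned down to 8 bytes. A self-contained sketch of that arithmetic follows; the VRAM size used in main() is an arbitrary example, and cursor_vram_offset() is a simplified stand-in for the driver function.

#include <stdio.h>

#define SZ_8   8UL
#define SZ_32  32UL

/* Cursor geometry as defined in ast_drv.h/ast_cursor.c. */
#define AST_MAX_HWC_WIDTH	64UL
#define AST_MAX_HWC_HEIGHT	64UL
#define AST_HWC_PITCH		(AST_MAX_HWC_WIDTH * 2UL)
#define AST_HWC_SIZE		(AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
#define AST_HWC_SIGNATURE_SIZE	SZ_32

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1UL))

/* The cursor buffer sits at the top of VRAM, aligned down to 8 bytes;
 * fail if VRAM is too small to hold it. */
static long cursor_vram_offset(unsigned long vram_size)
{
	unsigned long size = AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;

	if (size > vram_size)
		return -1; /* -EINVAL in the driver */

	return ALIGN_DOWN(vram_size - size, SZ_8);
}

int main(void)
{
	unsigned long vram = 16UL << 20; /* assume 16 MiB of VRAM */

	printf("cursor offset = %ld (VRAM ends at %lu)\n",
	       cursor_vram_offset(vram), vram);
	return 0;
}
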
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d2c2605d2728..2ee402096cd9 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -112,12 +112,9 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-
#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
-#define AST_HWC_SIGNATURE_SIZE 32
-
/*
* Planes
*/
@@ -125,7 +122,6 @@ enum ast_config_mode {
struct ast_plane {
struct drm_plane base;
- void __iomem *vaddr;
u64 offset;
unsigned long size;
};
@@ -183,7 +179,6 @@ struct ast_device {
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
- unsigned long vram_fb_available;
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
@@ -340,14 +335,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i
__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
-#define AST_VIDMEM_SIZE_8M 0x00800000
-#define AST_VIDMEM_SIZE_16M 0x01000000
-#define AST_VIDMEM_SIZE_32M 0x02000000
-#define AST_VIDMEM_SIZE_64M 0x04000000
-#define AST_VIDMEM_SIZE_128M 0x08000000
-
-#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
@@ -440,6 +427,7 @@ int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
/* ast_cursor.c */
+long ast_cursor_vram_offset(struct ast_device *ast);
int ast_cursor_plane_init(struct ast_device *ast);
/* ast dp501 */
@@ -454,11 +442,12 @@ int ast_astdp_output_init(struct ast_device *ast);
/* ast_mode.c */
int ast_mode_config_init(struct ast_device *ast);
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type);
+void __iomem *ast_plane_vaddr(struct ast_plane *ast);
#endif
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 6dfe6d9777d4..0bc140319464 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -35,36 +35,35 @@
static u32 ast_get_vram_size(struct ast_device *ast)
{
- u8 jreg;
u32 vram_size;
+ u8 vgacr99, vgacraa;
- vram_size = AST_VIDMEM_DEFAULT_SIZE;
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
- switch (jreg & 3) {
+ vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa);
+ switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) {
case 0:
- vram_size = AST_VIDMEM_SIZE_8M;
+ vram_size = SZ_8M;
break;
case 1:
- vram_size = AST_VIDMEM_SIZE_16M;
+ vram_size = SZ_16M;
break;
case 2:
- vram_size = AST_VIDMEM_SIZE_32M;
+ vram_size = SZ_32M;
break;
case 3:
- vram_size = AST_VIDMEM_SIZE_64M;
+ vram_size = SZ_64M;
break;
}
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
- switch (jreg & 0x03) {
+ vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99);
+ switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) {
case 1:
- vram_size -= 0x100000;
+ vram_size -= SZ_1M;
break;
case 2:
- vram_size -= 0x200000;
+ vram_size -= SZ_2M;
break;
case 3:
- vram_size -= 0x400000;
+ vram_size -= SZ_4M;
break;
}
@@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast)
ast->vram_base = base;
ast->vram_size = vram_size;
- ast->vram_fb_available = vram_size;
return 0;
}
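ast_get_vram_size() now decodes named register fields instead of the magic AST_VIDMEM_* constants: VGACRAA[1:0] selects the installed memory and VGACR99[1:0] the amount reserved from it. A standalone model of that decode is shown below; the register values passed in main() are made up for the example.

#include <stdio.h>

#define SZ_1M  (1UL << 20)

/* VGACRAA[1:0] selects the installed VRAM, VGACR99[1:0] reserves part of
 * it for the firmware. */
static unsigned long vram_size(unsigned char vgacraa, unsigned char vgacr99)
{
	static const unsigned long installed[] = {
		8 * SZ_1M, 16 * SZ_1M, 32 * SZ_1M, 64 * SZ_1M
	};
	static const unsigned long reserved[] = {
		0, 1 * SZ_1M, 2 * SZ_1M, 4 * SZ_1M
	};

	return installed[vgacraa & 0x03] - reserved[vgacr99 & 0x03];
}

int main(void)
{
	/* e.g. 32 MiB installed with 2 MiB reserved -> 30 MiB usable */
	printf("usable VRAM: %lu MiB\n", vram_size(0x02, 0x02) / SZ_1M);
	return 0;
}
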
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c3b950675485..1de832964e92 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -51,6 +51,26 @@
#define AST_LUT_SIZE 256
+#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1)
+
+static unsigned long ast_fb_vram_offset(void)
+{
+ return 0; // with shmem, the primary plane is always at offset 0
+}
+
+static unsigned long ast_fb_vram_size(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ unsigned long offset = ast_fb_vram_offset(); // starts at offset
+ long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset
+
+ if (cursor_offset < 0)
+ cursor_offset = ast->vram_size; // no cursor; it's all ours
+ if (drm_WARN_ON_ONCE(dev, offset > cursor_offset))
+ return 0; // cannot legally happen; signal error
+ return cursor_offset - offset;
+}
+
static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
@@ -439,7 +459,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
*/
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
@@ -448,7 +468,6 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
{
struct drm_plane *plane = &ast_plane->base;
- ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
@@ -457,6 +476,13 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
type, NULL);
}
+void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane)
+{
+ struct ast_device *ast = to_ast_device(ast_plane->base.dev);
+
+ return ast->vram + ast_plane->offset;
+}
+
/*
* Primary plane
*/
@@ -503,7 +529,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane));
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -576,12 +602,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
{
struct ast_plane *ast_plane = to_ast_plane(plane);
- if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ if (plane->state && plane->state->fb) {
sb->format = plane->state->fb->format;
sb->width = plane->state->fb->width;
sb->height = plane->state->fb->height;
sb->pitch[0] = plane->state->fb->pitches[0];
- iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
return 0;
}
return -ENODEV;
@@ -608,13 +634,11 @@ static int ast_primary_plane_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
- void __iomem *vaddr = ast->vram;
- u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
- unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- unsigned long size = ast->vram_fb_available - cursor_size;
+ u64 offset = ast_fb_vram_offset();
+ unsigned long size = ast_fb_vram_size(ast);
int ret;
- ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_primary_plane, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
@@ -922,9 +946,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
/*
* Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock. Released in atomic_flush().
+ * drm_connector_helper_funcs.get_modes by reading the display
+ * modes. Protect access to registers by acquiring the modeset
+ * lock.
*/
mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail(state);
@@ -938,16 +962,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
struct ast_device *ast = to_ast_device(dev);
- unsigned long fbsize, fbpages, max_fbpages;
-
- max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
+ unsigned long max_fb_size = ast_fb_vram_size(ast);
+ u64 pitch;
+
+ if (drm_WARN_ON_ONCE(dev, !info))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > max_fb_size / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -1018,10 +1046,7 @@ int ast_mode_config_init(struct ast_device *ast)
return ret;
drm_mode_config_reset(dev);
-
- ret = drmm_kms_helper_poll_init(dev);
- if (ret)
- return ret;
+ drmm_kms_helper_poll_init(dev);
return 0;
}
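Mode validation no longer reasons in pages of vram_fb_available; it checks the XRGB8888 scanline pitch against the hardware's 16-bit offset limit and against the framebuffer region returned by ast_fb_vram_size(). The following is a hedged standalone sketch of that check; mode_valid() and the fb_size parameter are illustrative stand-ins for the driver code.

#include <stdio.h>

#define AST_PRIMARY_PLANE_MAX_OFFSET ((1UL << 16) - 1)

enum mode_status { MODE_OK, MODE_BAD_WIDTH, MODE_MEM };

/* Validate against the scanline pitch (XRGB8888, 4 bytes per pixel)
 * instead of page counts. */
static enum mode_status mode_valid(unsigned int hdisplay, unsigned int vdisplay,
				   unsigned long fb_size)
{
	unsigned long pitch = (unsigned long)hdisplay * 4; /* XRGB8888 */

	if (!pitch || pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
		return MODE_BAD_WIDTH;
	if (pitch > fb_size / vdisplay)
		return MODE_MEM;
	return MODE_OK;
}

int main(void)
{
	/* 1920x1080 fits in 8 MiB of framebuffer memory. */
	printf("1920x1080: %d\n", mode_valid(1920, 1080, 8UL << 20));
	/* 4096x4096 would need 64 MiB and is rejected with MODE_MEM. */
	printf("4096x4096: %d\n", mode_valid(4096, 4096, 8UL << 20));
	return 0;
}
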
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 91e85e457bdf..37568cf3822c 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -1075,16 +1075,16 @@ static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1446,16 +1446,16 @@ static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1635,19 +1635,19 @@ static void ast_post_chip_2300(struct ast_device *ast)
switch (temp & 0x0c) {
default:
case 0x00:
- param.vram_size = AST_VIDMEM_SIZE_8M;
+ param.vram_size = SZ_8M;
break;
case 0x04:
- param.vram_size = AST_VIDMEM_SIZE_16M;
+ param.vram_size = SZ_16M;
break;
case 0x08:
- param.vram_size = AST_VIDMEM_SIZE_32M;
+ param.vram_size = SZ_32M;
break;
case 0x0c:
- param.vram_size = AST_VIDMEM_SIZE_64M;
+ param.vram_size = SZ_64M;
break;
}
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index bb2cc1d8b84e..e15adaf3a80e 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -30,9 +30,11 @@
#define AST_IO_VGACRI (0x54)
#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
+#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 050dae338ffe..1257009e850c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -948,13 +948,14 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
if (adv->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, adv->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 83d711ee3a2e..f2bafa6cf779 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
@@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
};
static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
@@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx6345->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index f74694bb9c50..a83020d6576f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
@@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
};
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
@@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 071168aa0c3b..042154e2d8cc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1113,10 +1113,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
};
static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 0b97b66de577..866806e908cd 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2141,6 +2141,7 @@ static void hdcp_check_work_func(struct work_struct *work)
}
static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
@@ -2159,7 +2160,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
}
if (ctx->pdata.panel_bridge) {
- err = drm_bridge_attach(bridge->encoder,
+ err = drm_bridge_attach(encoder,
ctx->pdata.panel_bridge,
&ctx->bridge, flags);
if (err)
@@ -2771,7 +2772,6 @@ static void anx7625_i2c_remove(struct i2c_client *client)
if (platform->hdcp_workqueue) {
cancel_delayed_work(&platform->hdcp_work);
- flush_workqueue(platform->hdcp_workqueue);
destroy_workqueue(platform->hdcp_workqueue);
}
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index 015983c015e5..c179b86d208f 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -86,6 +86,7 @@ struct drm_aux_bridge_data {
};
static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct drm_aux_bridge_data *data;
@@ -95,7 +96,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge,
data = container_of(bridge, struct drm_aux_bridge_data, bridge);
- return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ return drm_bridge_attach(encoder, data->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 48f297c78ee6..b3f588b71a7d 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -156,6 +156,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta
EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index c7a0247e06ad..b022dd6e6b6e 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -425,6 +425,17 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+struct cdns_dsi_bridge_state {
+ struct drm_bridge_state base;
+ struct cdns_dsi_cfg dsi_cfg;
+};
+
+static inline struct cdns_dsi_bridge_state *
+to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
+{
+ return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
+}
+
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -568,15 +579,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
+ int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
- phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
- mipi_dsi_pixel_format_to_bpp(output->dev->format),
- nlanes, phy_cfg);
+ ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(output->dev->format),
+ nlanes, phy_cfg);
+ if (ret)
+ return ret;
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
@@ -605,6 +619,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
}
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
@@ -617,7 +632,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ return drm_bridge_attach(encoder, output->bridge, bridge,
flags);
}
@@ -655,7 +670,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -675,11 +691,17 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->base.dev);
}
-static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
+ dsi->phy_initialized = false;
+ dsi->link_initialized = false;
+ phy_power_off(dsi->dphy);
+ phy_exit(dsi->dphy);
+
pm_runtime_put(dsi->base.dev);
}
@@ -752,32 +774,59 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct cdns_dsi_bridge_state *dsi_state;
+ struct drm_bridge_state *new_bridge_state;
struct drm_display_mode *mode;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ struct drm_connector *connector;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
- u32 tmp, reg_wakeup, div;
+ u32 tmp, reg_wakeup, div, status;
int nlanes;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ if (WARN_ON(!new_bridge_state))
+ return;
+
+ dsi_state = to_cdns_dsi_bridge_state(new_bridge_state);
+ dsi_cfg = dsi_state->dsi_cfg;
+
if (dsi->platform_ops && dsi->platform_ops->enable)
dsi->platform_ops->enable(dsi);
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
-
cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ /*
+ * Now that the DSI Link and DSI Phy are initialized,
+ * wait for the CLK and Data Lanes to be ready.
+ */
+ tmp = CLK_LANE_RDY;
+ for (int i = 0; i < nlanes; i++)
+ tmp |= DATA_LANE_RDY(i);
+
+ if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+ (tmp == (status & tmp)), 100, 500000))
+ dev_err(dsi->base.dev,
+ "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
+
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
@@ -892,7 +941,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -904,13 +954,109 @@ static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
cdns_dsi_hs_init(dsi);
}
+static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_output *output = &dsi->output;
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
+ if (!input_fmts[0])
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+
+ return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state;
+ struct drm_bridge_state *bridge_state;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ bridge_state = drm_priv_to_bridge_state(bridge->base.state);
+ old_dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base);
+
+ memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg,
+ sizeof(dsi_state->dsi_cfg));
+
+ return &dsi_state->base;
+}
+
+static void
+cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = to_cdns_dsi_bridge_state(state);
+
+ kfree(dsi_state);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+	/* kzalloc() already zeroed the state, only the bridge needs setting */

+ dsi_state->base.bridge = bridge;
+
+ return &dsi_state->base;
+}
+
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .disable = cdns_dsi_bridge_disable,
- .pre_enable = cdns_dsi_bridge_pre_enable,
- .enable = cdns_dsi_bridge_enable,
- .post_disable = cdns_dsi_bridge_post_disable,
+ .atomic_disable = cdns_dsi_bridge_atomic_disable,
+ .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
+ .atomic_enable = cdns_dsi_bridge_atomic_enable,
+ .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
+ .atomic_check = cdns_dsi_bridge_atomic_check,
+ .atomic_reset = cdns_dsi_bridge_atomic_reset,
+ .atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state,
+ .atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts,
};
static int cdns_dsi_attach(struct mipi_dsi_host *host,
@@ -920,8 +1066,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
struct drm_bridge *bridge;
- struct drm_panel *panel;
- struct device_node *np;
int ret;
/*
@@ -939,26 +1083,10 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
- * this representation, otherwise we use dsidev->dev.of_node which
- * should have been filled by the core.
+ * this representation.
*/
- np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
- dev->channel);
- if (!np)
- np = of_node_get(dev->dev.of_node);
-
- panel = of_drm_find_panel(np);
- if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DSI);
- } else {
- bridge = of_drm_find_bridge(dev->dev.of_node);
- if (!bridge)
- bridge = ERR_PTR(-EINVAL);
- }
-
- of_node_put(np);
-
+ bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node,
+ DSI_OUTPUT_PORT, dev->channel);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
dev_err(host->dev, "failed to add DSI device %s (err = %d)",
@@ -968,7 +1096,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
output->dev = dev;
output->bridge = bridge;
- output->panel = panel;
/*
* The DSI output has been properly configured, we can now safely
@@ -984,12 +1111,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
struct cdns_dsi *dsi = to_cdns_dsi(host);
- struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
drm_bridge_remove(&input->bridge);
- if (output->panel)
- drm_panel_bridge_remove(output->bridge);
return 0;
}
@@ -1152,7 +1276,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
- dsi->link_initialized = false;
return 0;
}
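The cdns-dsi bridge is converted to the atomic callbacks and now subclasses drm_bridge_state, so the dsi_cfg computed in atomic_check() can be carried into atomic_enable() instead of being recomputed from bridge->encoder->crtc. Below is a minimal standalone model of that container_of()-based subclassing; the type and field names are simplified stand-ins, not the DRM definitions.

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the DRM types; only the embedding relationship matters. */
struct bridge_state {
	int dummy;
};

struct dsi_cfg {
	unsigned long hfp, hbp, hsa, hact;
};

/* Driver state wraps the base state and carries the configuration computed
 * during atomic_check for later use in atomic_enable. */
struct dsi_bridge_state {
	struct bridge_state base;
	struct dsi_cfg cfg;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct dsi_bridge_state *to_dsi_state(struct bridge_state *base)
{
	return container_of(base, struct dsi_bridge_state, base);
}

int main(void)
{
	struct dsi_bridge_state state = { .cfg = { .hact = 1920 } };
	struct bridge_state *base = &state.base; /* what the core hands back */

	printf("hact = %lu\n", to_dsi_state(base)->cfg.hact);
	return 0;
}
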
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
index ca7ea2da635c..5db5dbbbcaad 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -10,7 +10,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/bits.h>
#include <linux/completion.h>
@@ -21,7 +20,6 @@ struct reset_control;
struct cdns_dsi_output {
struct mipi_dsi_device *dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
union phy_configure_opts phy_opts;
};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 81fad14c2cd5..b431e7efd1f0 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -546,76 +546,6 @@ out:
}
/**
- * cdns_mhdp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * cdns_mhdp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
- struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* cdns_mhdp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
@@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
- cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
cdns_mhdp_fill_sink_caps(mhdp, dpcd);
@@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
if (mhdp->plugged)
- cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);
mhdp->link_up = false;
}
@@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
}
static int cdns_mhdp_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2305,7 +2236,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
* If everything looks fine, just return, as we don't handle
* DP IRQs.
*/
- if (ret > 0 &&
+ if (!ret &&
drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
goto out;
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 81f7c701961f..634c5b030667 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn)
return ret;
}
-static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int chipone_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
- return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags);
}
#define MAX_INPUT_SEL_FORMATS 1
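
The chipone hunk above shows the signature change that repeats across every bridge driver in this series: the .attach callback now receives the encoder from the DRM core instead of each driver dereferencing bridge->encoder. A minimal sketch of the updated callback shape, using a hypothetical "foo" driver (the struct and field names are illustrative, not taken from any file in this diff):

static int foo_bridge_attach(struct drm_bridge *bridge,
			     struct drm_encoder *encoder,
			     enum drm_bridge_attach_flags flags)
{
	struct foo *ctx = container_of(bridge, struct foo, bridge);

	/* Chain to the next bridge using the encoder handed in by the core. */
	return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}

Drivers that create their own connector follow the same rule and pass the received encoder to drm_connector_attach_encoder(), as the ch7033 and it6263 hunks below do.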
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index da17f0978a79..210c45c1efd4 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
}
static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
struct drm_connector *connector = &priv->connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, priv->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+ return drm_connector_attach_encoder(&priv->connector, encoder);
}
static void ch7033_bridge_detach(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 72bc508d4e6e..09c08a53d5bd 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -34,6 +34,7 @@ to_display_connector(struct drm_bridge *bridge)
}
static int display_connector_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 26ae1ab5237f..2cb6dfc7a6d3 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
}
static int fsl_ldb_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
+ return drm_bridge_attach(encoder, fsl_ldb->panel_bridge,
bridge, flags);
}
@@ -180,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
configured_link_freq = clk_get_rate(fsl_ldb->clk);
if (configured_link_freq != requested_link_freq)
- dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
- configured_link_freq,
- requested_link_freq);
+ dev_warn(fsl_ldb->dev,
+ "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
+ fsl_ldb->clk, configured_link_freq, requested_link_freq);
clk_prepare_enable(fsl_ldb->clk);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
index 9b5bebbe357d..61347f6ec33d 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
@@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
}
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
@@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
- ldb_ch->next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
index a0a5cde27fbc..38a8a54b37a6 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
@@ -81,7 +81,7 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge);
void ldb_bridge_disable_helper(struct drm_bridge *bridge);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
int ldb_init_helper(struct ldb *ldb);
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index 3ebf0b9866de..f072c6ed39ef 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -23,7 +23,8 @@ struct imx_legacy_bridge {
#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
@@ -76,9 +77,9 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
- ret = devm_drm_bridge_add(dev, &imx_bridge->base);
- if (ret)
- return ERR_PTR(ret);
+ ret = devm_drm_bridge_add(dev, &imx_bridge->base);
+ if (ret)
+ return ERR_PTR(ret);
return &imx_bridge->base;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index a17433a7c755..8a4fd7d77a8d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
}
static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
- return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ return drm_bridge_attach(encoder, pvi->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 3cb484773ddf..d4f3492ca5ab 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -662,7 +662,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
- return ret;
+ return 0;
}
static void imx8qxp_ldb_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1d9529dc7f2a..1f6fd488e703 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -108,6 +108,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
}
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
@@ -119,7 +120,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index cd6818db0fd3..e092c9ea99b0 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
}
static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
pl->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 49dd4f96d52c..da138ab51b3b 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi {
#define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)
static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
@@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
p2d->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 21152a1c28f7..a3a63a977b0a 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge,
}
static int it6263_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6263 *it = bridge_to_it6263(bridge);
struct drm_connector *connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, it->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
- connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ connector = drm_bridge_connector_init(bridge->dev, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
dev_err(it->dev, "failed to initialize bridge connector: %d\n",
@@ -687,7 +688,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(connector, bridge->encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 8a607558ac89..1383d1e21afe 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_MODE_ARG(&it6505->video_info));
}
-static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
- struct it6505_drm_dp_link *link,
- u8 mode)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < DPCD_V_1_1)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= mode;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- if (mode == DP_SET_POWER_D0) {
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void it6505_clear_int(struct it6505 *it6505)
{
it6505_write(it6505, INT_STATUS_01, 0xFF);
@@ -2578,8 +2544,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
}
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
@@ -2910,8 +2875,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
}
if (it6505->hpd_state) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
@@ -3124,6 +3088,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
}
static int it6505_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3233,8 +3198,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -3246,8 +3210,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D3);
+ drm_dp_link_power_down(&it6505->aux, it6505->link.revision);
it6505_video_disable(it6505);
}
}
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index b9f90f32145d..7b110ae53291 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
}
static int it66121_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 52da204f5740..3e49d855b364 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -543,12 +543,13 @@ exit:
}
static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
+ ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 0fc5ea18fe6a..9b2dac9bd63c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
}
static int lt9211_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 026803034231..a35a8b8ca89c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -740,11 +740,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
}
static int lt9611_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ return drm_bridge_attach(encoder, lt9611->next_bridge,
bridge, flags);
}
@@ -1130,7 +1131,7 @@ static int lt9611_probe(struct i2c_client *client)
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
- DRM_BRIDGE_OP_HDMI;
+ DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4c3ff1fdc69..20bf1a3c786d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
}
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ return drm_bridge_attach(encoder, lt9611uxc->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 389af0233fcd..1646e454e0b0 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
}
static int lvds_codec_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
- return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ return drm_bridge_attach(encoder, lvds_codec->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a47aabf134fd..15a5a1f644fc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -190,6 +190,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
}
static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct i2c_client *stdp4028_i2c
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 53dd140a1b8d..1d4ae0097df8 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds)
}
static int mchp_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mchp_lvds *lvds = bridge_to_lvds(bridge);
- return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ return drm_bridge_attach(encoder, lvds->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index d04c62a0cb9f..55912ae11f46 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
@@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
- return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, panel_bridge, bridge, flags);
}
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 27261b2ac9c8..25d7c415478b 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
};
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
/* Let this driver create connector if requested */
- ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge,
bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
drm_connector_attach_encoder(&ptn_bridge->connector,
- bridge->encoder);
+ encoder);
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 258c85c83a28..79b009ab9396 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -58,6 +58,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
};
static int panel_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
@@ -81,7 +82,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_panel_bridge_set_orientation(connector, bridge);
drm_connector_attach_encoder(&panel_bridge->connector,
- bridge->encoder);
+ encoder);
if (bridge->dev->registered) {
if (connector->funcs->reset)
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 13ada42a5514..8726fefc5c65 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
}
static int ps8622_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index a42138b33258..2422ff68c104 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the panel-bridge to the dsi bridge */
- ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
if (ret)
goto err_bridge_attach;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 54de6ed2fae8..0014c497e3fe 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1640,11 +1640,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge,
}
static int samsung_dsim_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge, bridge,
flags);
}
@@ -1935,9 +1936,9 @@ int samsung_dsim_probe(struct platform_device *pdev)
struct samsung_dsim *dsi;
int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
@@ -2007,7 +2008,6 @@ int samsung_dsim_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
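
The samsung-dsim hunks above (and the ti-sn65dsi83 hunks later in this diff) replace devm_kzalloc() plus a manual bridge.funcs assignment with devm_drm_bridge_alloc(), which allocates the driver structure around its embedded drm_bridge and wires up the funcs pointer in one call. A minimal sketch of the pattern, again with a hypothetical "foo" driver whose struct foo embeds a struct drm_bridge named "bridge":

	struct foo *ctx;

	ctx = devm_drm_bridge_alloc(dev, struct foo, bridge, &foo_bridge_funcs);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* The funcs pointer is already set; only of_node, type, ops, etc. remain. */
	ctx->bridge.of_node = dev->of_node;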
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 914a2609a685..6de61d9fe064 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -416,6 +416,7 @@ out:
}
static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ return drm_bridge_attach(encoder, sii902x->next_bridge,
bridge, flags);
drm_connector_helper_add(&sii902x->connector,
@@ -452,7 +453,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, encoder);
return 0;
}
@@ -1138,6 +1139,7 @@ static int sii902x_init(struct sii902x *sii902x)
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
if (sii902x->i2c->irq > 0)
sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 28a2e1ee04b2..3af650dc92a1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
}
static int sii8620_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index ab0b0e36e97a..70db5b99e5bb 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = {
};
static int simple_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sbridge->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 6166f197e37b..5e5f8c2f95be 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -1077,6 +1077,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_HPD;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0890add5f707..b1cdf806b3c4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2889,12 +2889,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
}
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ return drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
return dw_hdmi_connector_create(hdmi);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 2b6e70a49f43..b08ada920a50 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -1072,15 +1072,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index 5fd7a459efdd..c76f5f2e74d1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -870,15 +870,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 49c76027f831..edf01476f2ef 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge,
}
static int tc358762_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3d3d135b4348..3f76c890fad9 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
}
static int tc358764_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 39e2d3a7a27d..7e5449fb86a3 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1795,6 +1795,7 @@ static const struct drm_connector_funcs tc_connector_funcs = {
};
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1807,6 +1808,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
}
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index ec79b0dd0e2c..063f217a17b6 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
};
static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
flags);
}
@@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void tc358768_bridge_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge)
dev_warn(priv->dev, "Software disable failed: %d\n", ret);
}
-static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
return (u32)div_u64(m, n);
}
-static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
struct mipi_dsi_device *dsi_dev = priv->output.dev;
unsigned long mode_flags = dsi_dev->mode_flags;
u32 val, val2, lptxcnt, hact, data_type;
s32 raw_val;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
const struct drm_display_mode *mode;
u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
u32 dsiclk, hsbyteclk;
@@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
ret = tc358768_setup_pll(priv, mode);
if (ret) {
dev_err(dev, "PLL setup failed: %d\n", ret);
@@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_DSI_CONFW, val);
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
-static void tc358768_bridge_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
.attach = tc358768_bridge_attach,
.mode_valid = tc358768_bridge_mode_valid,
.mode_fixup = tc358768_mode_fixup,
- .pre_enable = tc358768_bridge_pre_enable,
- .enable = tc358768_bridge_enable,
- .disable = tc358768_bridge_disable,
- .post_disable = tc358768_bridge_post_disable,
+ .atomic_pre_enable = tc358768_bridge_atomic_pre_enable,
+ .atomic_enable = tc358768_bridge_atomic_enable,
+ .atomic_disable = tc358768_bridge_atomic_disable,
+ .atomic_post_disable = tc358768_bridge_atomic_post_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index c89757bec4e6..1b10e6ee1724 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
return container_of(b, struct tc_data, bridge);
}
-static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge)
usleep_range(10, 20);
}
-static void tc_bridge_post_disable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
ret, addr);
}
-/* helper function to access bus_formats */
-static struct drm_connector *get_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector;
-
- return NULL;
-}
-
-static void tc_bridge_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
u32 val = 0;
u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
- struct drm_display_mode *mode;
- struct drm_connector *connector = get_connector(bridge->encoder);
-
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
hback_porch = mode->htotal - mode->hsync_end;
hsync_len = mode->hsync_end - mode->hsync_start;
@@ -589,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
}
static int tc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ return drm_bridge_attach(encoder, tc->panel_bridge,
&tc->bridge, flags);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
- .pre_enable = tc_bridge_pre_enable,
- .enable = tc_bridge_enable,
+ .atomic_pre_enable = tc_bridge_atomic_pre_enable,
+ .atomic_enable = tc_bridge_atomic_enable,
.mode_valid = tc_mode_valid,
- .post_disable = tc_bridge_post_disable,
+ .atomic_post_disable = tc_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
static int tc_attach_host(struct tc_data *tc)
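
The tc358768 and tc358775 conversions above stop dereferencing bridge->encoder->crtc->state and instead resolve the adjusted mode from the drm_atomic_state passed to the atomic hooks. The lookup chain used in both drivers (and wrapped into get_new_adjusted_display_mode() in the ti-sn65dsi86 hunks below) is, in isolation:

	struct drm_connector *connector =
		drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
	struct drm_connector_state *conn_state =
		drm_atomic_get_new_connector_state(state, connector);
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, conn_state->crtc);
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;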
diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 20658258fb51..ac87033ba537 100644
--- a/drivers/gpu/drm/bridge/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -1365,6 +1365,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index bba10cf9b4f9..e2fc78adebcf 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
}
static int thc63_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
+ return drm_bridge_attach(encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 85f2a0e74a1c..47638d1c96ec 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&dlpc->mode, adjusted_mode);
}
-static int dlpc_attach(struct drm_bridge *bridge,
+static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
- return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs dlpc_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 95563aa1b450..033c44326552 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -40,7 +40,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
+#include <drm/drm_bridge_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
}
static int sn65dsi83_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -370,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
{
- struct drm_device *dev = sn65dsi83->bridge.dev;
struct drm_modeset_acquire_ctx ctx;
int err;
@@ -385,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
* Keep the lock during the whole operation to be atomic.
*/
- DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
-
- if (!sn65dsi83->bridge.encoder->crtc) {
- /*
- * No CRTC attached -> No CRTC active outputs to reset
- * This can happen when the SN65DSI83 is reset. Simply do
- * nothing without returning any errors.
- */
- err = 0;
- goto end;
- }
+ drm_modeset_acquire_init(&ctx, 0);
dev_warn(sn65dsi83->dev, "reset the pipe\n");
- err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+retry:
+ err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx);
+ if (err == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
-end:
- DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
- return err;
+ return 0;
}
static void sn65dsi83_reset_work(struct work_struct *ws)
@@ -946,9 +941,9 @@ static int sn65dsi83_probe(struct i2c_client *client)
struct sn65dsi83 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
@@ -988,7 +983,6 @@ static int sn65dsi83_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
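
The sn65dsi83 reset path above also drops DRM_MODESET_LOCK_ALL_BEGIN()/END() in favour of an explicit acquire context around the new drm_bridge_helper_reset_crtc() helper. The backoff/retry protocol, shown here as a sketch of the same steps rather than additional code from the patch, looks like:

	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	err = drm_bridge_helper_reset_crtc(bridge, &ctx);
	if (err == -EDEADLK) {
		/* Another thread holds a needed lock: drop, wait, try again. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);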
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 01d456b955ab..f72675766e01 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -35,6 +35,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */
#define SN_DEVICE_REV_REG 0x08
#define SN_DPPLL_SRC_REG 0x0A
#define DPPLL_CLK_SRC_DSICLK BIT(0)
@@ -243,11 +244,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}
-static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
+static struct drm_display_mode *
+get_new_adjusted_display_mode(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ return &crtc_state->adjusted_mode;
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
u32 bit_rate_khz, clk_freq_khz;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
bit_rate_khz = mode->clock *
mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
@@ -274,7 +290,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = {
460800000,
};
-static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
int i;
u32 refclk_rate;
@@ -287,7 +304,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
clk_prepare_enable(pdata->refclk);
} else {
- refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000;
refclk_lut = ti_sn_bridge_dsiclk_lut;
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
}
@@ -311,12 +328,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}
-static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
mutex_lock(&pdata->comms_mutex);
/* configure bridge ref_clk */
- ti_sn_bridge_set_refclk_freq(pdata);
+ ti_sn_bridge_set_refclk_freq(pdata, state);
/*
* HPD on this bridge chip is a bit useless. This is an eDP bridge
@@ -376,7 +394,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
* clock so reading early doesn't work.
*/
if (pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, NULL);
return ret;
}
@@ -423,36 +441,8 @@ static int status_show(struct seq_file *s, void *data)
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(status);
-static void ti_sn65dsi86_debugfs_remove(void *data)
-{
- debugfs_remove_recursive(data);
-}
-
-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
-{
- struct device *dev = pdata->dev;
- struct dentry *debugfs;
- int ret;
-
- debugfs = debugfs_create_dir(dev_name(dev), NULL);
-
- /*
- * We might get an error back if debugfs wasn't enabled in the kernel
- * so let's just silently return upon failure.
- */
- if (IS_ERR_OR_NULL(debugfs))
- return;
-
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
- if (ret)
- return;
-
- debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
-}
-
/* -----------------------------------------------------------------------------
* Auxiliary Devices (*not* AUX)
*/
@@ -732,6 +722,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -748,7 +739,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
* Attach the next bridge.
* We never want the next bridge to *also* create a connector.
*/
- ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
+ ret = drm_bridge_attach(encoder, pdata->next_bridge,
&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
@@ -821,12 +812,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
}
-static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
unsigned int bit_rate_mhz, clk_freq_mhz;
unsigned int val;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* set DSIA clk frequency */
bit_rate_mhz = (mode->clock / 1000) *
@@ -856,12 +848,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state,
+ unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* Calculate minimum bit rate based on our pixel clock. */
bit_rate_khz = mode->clock * bpp;
@@ -960,10 +954,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
return valid_rates;
}
-static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
u8 hsync_polarity = 0, vsync_polarity = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1105,7 +1100,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
pdata->ln_polrs << LN_POLRS_OFFSET);
/* set dsi clk frequency value */
- ti_sn_bridge_set_dsi_rate(pdata);
+ ti_sn_bridge_set_dsi_rate(pdata, state);
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
@@ -1140,7 +1135,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1156,7 +1151,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
/* config video parameters */
- ti_sn_bridge_set_video_timings(pdata);
+ ti_sn_bridge_set_video_timings(pdata, state);
/* enable video stream */
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
@@ -1171,7 +1166,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(pdata->dev);
if (!pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, state);
/* td7: min 100 us after enable before DSI data */
usleep_range(100, 110);
@@ -1216,6 +1211,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
+static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct dentry *debugfs;
+
+ debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
+ debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@@ -1229,6 +1233,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .debugfs_init = ti_sn65dsi86_debugfs_init,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@@ -1894,6 +1899,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
+ u8 id_buf[8];
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -1937,7 +1943,15 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
- ti_sn65dsi86_debugfs_init(pdata);
+ pm_runtime_get_sync(dev);
+ ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf));
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read device id\n");
+
+ /* The ID string is stored backwards */
+ if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
+ return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
/*
* Break ourselves up into a collection of aux devices. The only real
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 22316382451f..cca75443f012 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge,
regulator_disable(tdp158->vcc);
}
-static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int tdp158_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct tdp158 *tdp158 = bridge->driver_private;
- return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
+ return drm_bridge_attach(encoder, tdp158->next, bridge, flags);
}
static const struct drm_bridge_funcs tdp158_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 79ab5da827e1..e15d232ddbac 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
}
static int tfp410_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge,
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 47b74cb25b14..1c289051a598 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
}
static int tpd12s015_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ ret = drm_bridge_attach(encoder, tpd->next_bridge,
bridge, flags);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index a8fca079921b..fddfbd4d2493 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
CONFIG_DRM_ANALOGIX_ANX7625=y
+CONFIG_PHY_MTK_HDMI=y
+CONFIG_PHY_MTK_MIPI_DSI=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 19fe01257ab9..284873e94d8d 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -98,14 +98,14 @@ done
make ${KERNEL_IMAGE_NAME}
-mkdir -p /lava-files/
+mkdir -p /kernel/
for image in ${KERNEL_IMAGE_NAME}; do
- cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+ cp arch/${KERNEL_ARCH}/boot/${image} /kernel/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
- cp ${DEVICE_TREES} /lava-files/.
+ cp ${DEVICE_TREES} /kernel/.
fi
make modules
@@ -121,11 +121,11 @@ if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
-d arch/arm64/boot/Image.lzma \
-C lzma\
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /lava-files/cheza-kernel
+ /kernel/cheza-kernel
KERNEL_IMAGE_NAME+=" cheza-kernel"
# Make a gzipped copy of the Image for db410c.
- gzip -k /lava-files/Image
+ gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
@@ -139,7 +139,7 @@ cp -rfv drivers/gpu/drm/ci/* install/.
. .gitlab-ci/container/container_post_build.sh
if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
- xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz
FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
if [[ -n $DEVICE_TREES ]]; then
@@ -148,7 +148,7 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \
+ ci-fairy s3cp --token-file "${S3_JWT_FILE}" /kernel/$f \
https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
done
@@ -165,7 +165,7 @@ ln -s common artifacts/install/ci-common
cp .config artifacts/${CI_JOB_NAME}_config
for image in ${KERNEL_IMAGE_NAME}; do
- cp /lava-files/$image artifacts/install/.
+ cp /kernel/$image artifacts/install/.
done
tar -C artifacts -cf artifacts/install.tar install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 274f118533a7..8eb56ebcf4aa 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -67,7 +67,7 @@ testing:arm32:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm.config
@@ -79,7 +79,7 @@ testing:arm64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm64.config
@@ -91,7 +91,7 @@ testing:x86_64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: x86_64.config
@@ -143,6 +143,10 @@ debian-arm64-release:
rules:
- when: never
+debian-arm64-ubsan:
+ rules:
+ - when: never
+
debian-build-testing:
rules:
- when: never
@@ -183,6 +187,10 @@ debian-testing-msan:
rules:
- when: never
+debian-testing-ubsan:
+ rules:
+ - when: never
+
debian-vulkan:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 07dc13ff865d..56c95c2f91ae 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -24,6 +24,18 @@ alpine/x86_64_build:
rules:
- when: never
+debian/arm32_test-base:
+ rules:
+ - when: never
+
+debian/arm32_test-gl:
+ rules:
+ - when: never
+
+debian/arm32_test-vk:
+ rules:
+ - when: never
+
debian/arm64_test-gl:
rules:
- when: never
@@ -32,6 +44,10 @@ debian/arm64_test-vk:
rules:
- when: never
+debian/baremetal_arm32_test:
+ rules:
+ - when: never
+
debian/ppc64el_build:
rules:
- when: never
@@ -40,6 +56,14 @@ debian/s390x_build:
rules:
- when: never
+debian/x86_32_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-android:
+ rules:
+ - when: never
+
debian/x86_64_test-vk:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index f04aabe8327c..65adcd97e06b 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 82ab58f6c6f94fa80ca7e1615146f08356e3ba69
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
+ IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
@@ -143,11 +143,11 @@ stages:
# Pre-merge pipeline
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
+ - if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
# nightly pipeline
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
# pipeline for direct pushes that bypassed the CI
- - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+ - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
# Rules applied to every job in the pipeline
@@ -170,29 +170,48 @@ stages:
- !reference [.disable-farm-mr-rules, rules]
# Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
- # Build everything in merge pipelines, if any files affecting the pipeline
- # were changed
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- changes: &all_paths
- - drivers/gpu/drm/ci/**/*
when: on_success
# Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- changes:
- *all_paths
when: manual
- # Skip everything for pre-merge and merge pipelines which don't change
- # anything in the build
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: manual
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
+ - when: manual
+
+
+# Repeat of the above but with `when: on_success` replaced with
+# `when: delayed` + `start_in:`, for build-only jobs.
+# Note: make sure the branches in this list are the same as in
+# `.container+build-rules` above.
+.build-only-delayed-rules:
+ rules:
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
+ - !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- when: never
+ when: delayed
+ start_in: &build-delay 5 minutes
+ # Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- when: never
+ when: manual
# Build everything after someone bypassed the CI
- if: *is-direct-push
- when: on_success
+ when: manual
# Build everything in scheduled pipelines
- if: *is-scheduled-pipeline
- when: on_success
+ when: delayed
+ start_in: *build-delay
# Allow building everything in fork pipelines, but build nothing unless
# manually triggered
- when: manual
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 68b042e43b7f..2a0599f12c58 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -85,5 +85,16 @@ deqp-runner junit \
--limit 50 \
--template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
+# Check if /proc/lockdep_stats exists
+if [ -f /proc/lockdep_stats ]; then
+ # If debug_locks is 0, lockdep detected an issue and turned itself off.
+ debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}')
+ if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then
+ echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information."
+ cat /proc/lockdep_stats
+ ret=101
+ fi
+fi
+
cd $oldpath
exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 20049f3626b2..c04ba0e69935 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,5 +1,5 @@
variables:
- CONTAINER_TAG: "20250204-mesa-uprev"
+ CONTAINER_TAG: "20250307-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
@@ -20,3 +20,5 @@ variables:
DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
+
+ CONDITIONAL_BUILD_ANGLE_TAG: fec96cc945650c5fe9f7188cabe80d8a
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6e5ac51e8c0a..f22720359b33 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -48,7 +48,8 @@ ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
rm -rf results
mkdir -p results/job-rootfs-overlay/
-artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
+artifacts/ci-common/export-gitlab-job-env-for-dut.sh \
+ > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 6a1e059858e5..84a25f0e783b 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,6 +1,14 @@
+.allow_failure_lockdep:
+ variables:
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ allow_failure:
+ exit_codes:
+ - 101
+
.lava-test:
extends:
- .container+build-rules
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -69,6 +77,7 @@
extends:
- .baremetal-test-arm64
- .use-debian/baremetal_arm64_test
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -89,6 +98,28 @@
tags:
- $RUNNER_TAG
+.software-driver:
+ stage: software-driver
+ extends:
+ - .allow_failure_lockdep
+ timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
+ extends:
+ - .test-gl
+ tags:
+ - kvm
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /kernel/bzImage
+ - mkdir -p /lib/modules
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+
.msm-sc7180:
extends:
- .lava-igt:arm64
@@ -133,7 +164,7 @@ msm:apq8016:
BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:apq8096:
extends:
@@ -147,7 +178,7 @@ msm:apq8096:
GPU_VERSION: apq8096
RUNNER_TAG: google-freedreno-db820c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:sdm845:
extends:
@@ -161,7 +192,7 @@ msm:sdm845:
GPU_VERSION: sdm845
RUNNER_TAG: google-freedreno-cheza
script:
- - ./install/bare-metal/cros-servo.sh
+ - ./install/bare-metal/cros-servo.sh || exit $?
msm:sm8350-hdk:
extends:
@@ -440,47 +471,16 @@ panfrost:g12b:
- .panfrost-gpu
virtio_gpu:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - install/crosvm-runner.sh install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
vkms:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - mkdir -p /lib/modules
- - ./install/crosvm-runner.sh ./install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index 75374085f40f..f44dbce3151a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -14,16 +14,10 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail
amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
-amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash
+amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
-kms_async_flips@alternate-sync-async-flip,Fail
-kms_async_flips@alternate-sync-async-flip-atomic,Fail
-kms_async_flips@test-cursor,Fail
-kms_async_flips@test-cursor-atomic,Fail
-kms_async_flips@test-time-stamp,Fail
-kms_async_flips@test-time-stamp-atomic,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3879c4812a22..902d54027506 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -14,6 +14,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index a29cea4f234c..8e2b5504004e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,22 +1,18 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_flip@dpms-off-confusion-interruptible,Timeout
-kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@@ -31,12 +27,18 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -44,8 +46,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 2ef1dc35a7fa..922327632eff 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index ee11999e3da1..7353ab11e940 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -1,4 +1,3 @@
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -16,6 +15,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
@@ -30,7 +30,6 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
-kms_pm_backlight@brightness-with-dpms,Crash
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
@@ -43,8 +42,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index 4f50e0240ff4..80bf2741866c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 47b3f1d42bb6..6fef7c1e56ea 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -8,8 +8,9 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -34,17 +35,22 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
+kms_pipe_stress@stress-xrgb8888-untiled,Fail
+kms_pipe_stress@stress-xrgb8888-ytiled,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
-kms_psr2_sf@pr-cursor-plane-update-sf,Timeout
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
@@ -57,6 +63,7 @@ kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -65,6 +72,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index c87ff8b40e99..c393a138b8a6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -16,7 +17,6 @@ gem_.*
# Hangs the machine and timeout occurs
i915_pm_rc6_residency.*
i915_suspend.*
-xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 843c363b42f5..8adf5f0a6e80 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,39 +1,47 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
-kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
+kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
+sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 219ae839323a..2e4ef9f35654 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index 0e08fff741aa..57453e340040 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -3,12 +3,13 @@ i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
kms_flip@dpms-off-confusion,Fail
+kms_flip@nonexisting-fb,Fail
kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,UnexpectedImprovement(Skip)
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
@@ -28,7 +29,6 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout
kms_lease@lease-uevent,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_rotation_crc@bad-pixel-format,Fail
@@ -37,13 +37,10 @@ kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
+perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
+prime_busy@before-wait,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
index 1a3d87c0ca6e..8dec57da1bb3 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..117098bc95d9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -21,8 +21,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index dc722d6a774e..e287462a491a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 93d42b146df9..462c050a8b2d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,13 +1,14 @@
api_intel_allocator@reopen,Timeout
api_intel_bb@destroy-bb,Timeout
core_hotunplug@hotrebind-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
+i915_pm_rps@waitboost,Fail
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
@@ -16,6 +17,7 @@ perf_pmu@enable-race,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
perf_pmu@semaphore-wait-idle,Timeout
+prime_busy@before,Fail
prime_mmap@test_refcounting,Timeout
sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
syncobj_basic@illegal-fd-to-handle,Timeout
@@ -26,8 +28,3 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index 938377896841..429dc3c731df 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 1cb6978c86dc..0f167cfd503c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -6,10 +6,9 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -30,13 +29,19 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -45,8 +50,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 29bff8922ae1..7e7374ebf3d1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 4f176c04ec4e..592d7d69e6fc 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -17,8 +17,28 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2956567c3048..443596d9e662 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -46,3 +46,10 @@ kms_prop_blob@invalid-set-prop
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/d25442b9-0b6b-433c-8e23-997840fad305@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@flip-vs-wf_vblank-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 5a063361d7f2..184d0cccc318 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,12 +1,38 @@
-core_setmaster@master-drop-set-user,Fail
dumb_buffer@create-clear,Crash
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
index df7e5ce7a036..0c67fec92450 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -18,3 +18,24 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
fbdev@write
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/a520d1d6-95b3-4573-b8f2-689f05bc2230@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@basic-flip-vs-modeset
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/ca960a82-00fc-4183-b983-998f7ac2fbb5@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/da578eed-224f-4374-853a-1ff0aa20d03b@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-toggle-modeset-transition
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 8198e06344a3..9fd44a4b962a 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 7752adff05c1..72c469021b66 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,8 +1,4 @@
kms_3d,Fail
-kms_cursor_legacy@forked-bo,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
-kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index 1674c8e214d6..87724413174c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 5550be5486ed..a4d2f2a7963a 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 8910afb6acf2..d270af1cca52 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -32,3 +32,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/73
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-position-covered
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 478d7c161616..d4b8ba3a54a9 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -28,3 +29,6 @@ kms_cursor_crc@cursor-random-max-size
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# It causes other tests to fail, so skip it.
+kms_invalid_mode@overflow-vrefresh
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index cd3d3b0befe4..cafc802cecea 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -11,3 +11,10 @@ msm/msm_mapping@shadow
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/74
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_cursor_crc@cursor-random-128x128
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index ef9318afcd89..022db559cc7d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 38ec0305c1f4..e32d73c6c98e 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -130,3 +130,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/75
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@plain-flip-ts-check-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 2ce7f7e23a01..6c86d1953e11 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -35,3 +36,315 @@ kms_content_protection@uevent
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# Kernel panic
+msm/msm_recovery@hangcheck
+# DEBUG - Begin test msm/msm_recovery@hangcheck
+# Console: switching to colour dummy device 80x25
+# [ 489.526286] [IGT] msm_recovery: executing
+# [ 489.531926] [IGT] msm_recovery: starting subtest hangcheck
+# [ 492.808574] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 492.820358] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 492.831154] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 493.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 493.844177] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 493.854971] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 494.824633] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 494.836237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 494.847034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 495.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 495.828170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 495.838966] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 496.804643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 496.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 496.827041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 497.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 497.844170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 497.854963] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 498.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 498.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 498.843024] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 499.816568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 499.828163] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 499.838958] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 500.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 500.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 500.830960] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 501.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 501.844175] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 501.854965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 502.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 502.836171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 502.846965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 503.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 503.828176] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 503.838969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 504.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 504.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 504.827033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 505.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 505.840247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 505.851043] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 506.820637] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 506.832233] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 506.843026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 507.816567] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 507.828171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 507.838965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 508.808568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 508.820173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 508.830969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 509.832568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 509.844173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 509.854967] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 510.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 510.836162] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 510.846954] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 511.816569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 511.828173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 511.838968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 512.804641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 512.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 512.827040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 513.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 513.840239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 513.851035] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 514.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 514.836164] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 514.846959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 515.824235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 515.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.912427] rcu: INFO: rcu_preempt self-detected stall on CPU
+# [ 515.918398] rcu: 0-....: (6452 ticks this GP) idle=6afc/1/0x4000000000000000 softirq=12492/12697 fqs=3179
+# [ 515.929296] rcu: (t=6505 jiffies g=36205 q=58 ncpus=8)
+# [ 515.934709] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 515.934727] Tainted: [W]=WARN
+# [ 515.934732] Hardware name: Google Cheza (rev3+) (DT)
+# [ 515.934739] pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 515.934751] pc : rcu_core+0x59c/0xe68
+# [ 515.934769] lr : rcu_core+0x74/0xe68
+# [ 515.934781] sp : ffff800080003e50
+# [ 515.934785] x29: ffff800080003e50 x28: ffff225d038e9bc0 x27: 0000000000000002
+# [ 515.934805] x26: ffffc171a8ee6108 x25: ffffc171a85bc2c0 x24: ffff60ecd691e000
+# [ 515.934820] x23: ffffc171a85d15c0 x22: ffffc171a8f8d780 x21: ffff225e7eeef5c0
+# [ 515.934835] x20: ffffc171a8ef0e80 x19: ffffc171a85d15d1 x18: ffffc171a9461e70
+# [ 515.934850] x17: ffff60ecd691e000 x16: ffff800080000000 x15: 0000000000000000
+# [ 515.934866] x14: ffffc171a85d0780 x13: 0000000000000400 x12: 0000000000000000
+# [ 515.934880] x11: ffffc171a85ce900 x10: ffffc171a8ef5000 x9 : ffffc171a8ef0000
+# [ 515.934894] x8 : ffff800080003d88 x7 : ffffc171a8ee6100 x6 : ffff800080003de0
+# [ 515.934909] x5 : ffff800080003dc8 x4 : 0000000000000003 x3 : 0000000000000000
+# [ 515.934923] x2 : 0000000000000101 x1 : 0000000000000000 x0 : ffff225d038e9bc0
+# [ 515.934939] Call trace:
+# [ 515.934945] rcu_core+0x59c/0xe68 (P)
+# [ 515.934962] rcu_core_si+0x10/0x1c
+# [ 515.934976] handle_softirqs+0x118/0x4b8
+# [ 515.934994] __do_softirq+0x14/0x20
+# [ 515.935007] ____do_softirq+0x10/0x1c
+# [ 515.935021] call_on_irq_stack+0x24/0x4c
+# [ 515.935034] do_softirq_own_stack+0x1c/0x28
+# [ 515.935048] __irq_exit_rcu+0x174/0x1b4
+# [ 515.935063] irq_exit_rcu+0x10/0x38
+# [ 515.935077] el1_interrupt+0x38/0x64
+# [ 515.935092] el1h_64_irq_handler+0x18/0x24
+# [ 515.935104] el1h_64_irq+0x6c/0x70
+# [ 515.935115] lock_acquire+0x1e0/0x338 (P)
+# [ 515.935129] __mutex_lock+0xa8/0x4b8
+# [ 515.935144] mutex_lock_nested+0x24/0x30
+# [ 515.935159] _find_opp_table_unlocked+0x40/0xfc
+# [ 515.935174] _find_key+0x64/0x16c
+# [ 515.935184] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 515.935197] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 515.935211] __cpufreq_driver_target+0x144/0x29c
+# [ 515.935227] sugov_work+0x58/0x74
+# [ 515.935239] kthread_worker_fn+0xf4/0x324
+# [ 515.935254] kthread+0x12c/0x208
+# [ 515.935266] ret_from_fork+0x10/0x20
+# [ 516.808569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 516.820174] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 516.830968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 517.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 517.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 517.851032] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 518.820642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 518.832237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 518.843030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 519.812636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 519.824231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 519.835026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 520.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 520.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 520.830959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 521.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 521.840238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 521.851033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 522.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 522.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 522.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 523.812639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 523.824239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 523.835034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 524.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 524.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 524.827026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 525.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 525.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 525.851031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 526.820641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 526.832244] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 526.843041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 527.812642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 527.824242] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 527.835038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 528.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 528.816234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 528.827027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 529.832634] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 529.844231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 529.855017] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 530.820646] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 530.832270] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 530.843065] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 531.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 531.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 531.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 532.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 532.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 532.827031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 533.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 533.840243] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 533.851037] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 534.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 534.832245] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 534.843038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 535.812641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 535.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 535.835033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 536.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 536.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 536.827030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 537.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 537.840234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 537.851020] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 538.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 538.832235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 538.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 539.812644] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 539.824247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 539.835040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 540.124426] watchdog: BUG: soft lockup - CPU#0 stuck for 49s! [sugov:0:126]
+# [ 540.124439] Modules linked in:
+# [ 540.124448] irq event stamp: 9912389
+# [ 540.124453] hardirqs last enabled at (9912388): [<ffffc171a767a24c>] exit_to_kernel_mode+0x38/0x130
+# [ 540.124473] hardirqs last disabled at (9912389): [<ffffc171a767a368>] el1_interrupt+0x24/0x64
+# [ 540.124486] softirqs last enabled at (9898068): [<ffffc171a62bc290>] handle_softirqs+0x4a0/0x4b8
+# [ 540.124505] softirqs last disabled at (9898071): [<ffffc171a62105b0>] __do_softirq+0x14/0x20
+# [ 540.124525] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.124540] Tainted: [W]=WARN
+# [ 540.124544] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.124549] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 540.124560] pc : xhci_urb_enqueue+0xbc/0x32c
+# [ 540.124573] lr : xhci_urb_enqueue+0xb4/0x32c
+# [ 540.124581] sp : ffff800080003c20
+# [ 540.124586] x29: ffff800080003c20 x28: 0000000000000000 x27: ffff225d00b1e6a0
+# [ 540.124602] x26: ffff225d01c3d800 x25: 0000000000000001 x24: 0000000000000006
+# [ 540.124617] x23: ffff225d044dc000 x22: ffff225d044dc000 x21: 0000000000000001
+# [ 540.124632] x20: ffff225d002d7280 x19: ffff225d0573a780 x18: ffff225e7eff0f50
+# [ 540.124647] x17: 000000000000cab0 x16: 0000000000000000 x15: ffff225d0353a000
+# [ 540.124661] x14: 0000000000000000 x13: 0000000000000820 x12: 0000000000000000
+# [ 540.124674] x11: ffff800080003a30 x10: 0000000000000001 x9 : 0000000000000000
+# [ 540.124689] x8 : ffff225d002d7300 x7 : 0000000000000000 x6 : 000000000000003f
+# [ 540.124702] x5 : 00000000ffffffff x4 : 0000000000000920 x3 : 0000000000000080
+# [ 540.124716] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff225d002d7280
+# [ 540.124731] Call trace:
+# [ 540.124736] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.124751] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.124766] usb_submit_urb+0x294/0x560
+# [ 540.124780] intr_callback+0x78/0x1fc
+# [ 540.124798] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.124812] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.124825] process_one_work+0x208/0x5e8
+# [ 540.124840] bh_worker+0x1a8/0x20c
+# [ 540.124853] workqueue_softirq_action+0x78/0x88
+# [ 540.124868] tasklet_hi_action+0x14/0x3c
+# [ 540.124883] handle_softirqs+0x118/0x4b8
+# [ 540.124897] __do_softirq+0x14/0x20
+# [ 540.124908] ____do_softirq+0x10/0x1c
+# [ 540.124922] call_on_irq_stack+0x24/0x4c
+# [ 540.124934] do_softirq_own_stack+0x1c/0x28
+# [ 540.124947] __irq_exit_rcu+0x174/0x1b4
+# [ 540.124961] irq_exit_rcu+0x10/0x38
+# [ 540.124976] el1_interrupt+0x38/0x64
+# [ 540.124987] el1h_64_irq_handler+0x18/0x24
+# [ 540.124998] el1h_64_irq+0x6c/0x70
+# [ 540.125009] lock_acquire+0x1e0/0x338 (P)
+# [ 540.125023] __mutex_lock+0xa8/0x4b8
+# [ 540.125038] mutex_lock_nested+0x24/0x30
+# [ 540.125052] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.125067] _find_key+0x64/0x16c
+# [ 540.125078] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.125090] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.125105] __cpufreq_driver_target+0x144/0x29c
+# [ 540.125121] sugov_work+0x58/0x74
+# [ 540.125133] kthread_worker_fn+0xf4/0x324
+# [ 540.125148] kthread+0x12c/0x208
+# [ 540.125160] ret_from_fork+0x10/0x20
+# [ 540.125176] Kernel panic - not syncing: softlockup: hung tasks
+# [ 540.423567] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W L 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.433411] Tainted: [W]=WARN, [L]=SOFTLOCKUP
+# [ 540.437901] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.443022] Call trace:
+# [ 540.445559] show_stack+0x18/0x24 (C)
+# [ 540.449357] dump_stack_lvl+0x38/0xd0
+# [ 540.453157] dump_stack+0x18/0x24
+# [ 540.456599] panic+0x3bc/0x41c
+# [ 540.459767] watchdog_timer_fn+0x254/0x2e4
+# [ 540.464005] __hrtimer_run_queues+0x3c4/0x440
+# [ 540.468508] hrtimer_interrupt+0xe4/0x244
+# [ 540.472662] arch_timer_handler_phys+0x2c/0x44
+# [ 540.477256] handle_percpu_devid_irq+0x90/0x1f0
+# [ 540.481943] handle_irq_desc+0x40/0x58
+# [ 540.485829] generic_handle_domain_irq+0x1c/0x28
+# [ 540.490604] gic_handle_irq+0x4c/0x11c
+# [ 540.494483] do_interrupt_handler+0x50/0x84
+# [ 540.498811] el1_interrupt+0x34/0x64
+# [ 540.502518] el1h_64_irq_handler+0x18/0x24
+# [ 540.506758] el1h_64_irq+0x6c/0x70
+# [ 540.510279] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.514693] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.518932] usb_submit_urb+0x294/0x560
+# [ 540.522901] intr_callback+0x78/0x1fc
+# [ 540.526700] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.531288] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.535614] process_one_work+0x208/0x5e8
+# [ 540.539769] bh_worker+0x1a8/0x20c
+# [ 540.543293] workqueue_softirq_action+0x78/0x88
+# [ 540.547980] tasklet_hi_action+0x14/0x3c
+# [ 540.552038] handle_softirqs+0x118/0x4b8
+# [ 540.556096] __do_softirq+0x14/0x20
+# [ 540.559705] ____do_softirq+0x10/0x1c
+# [ 540.563500] call_on_irq_stack+0x24/0x4c
+# [ 540.567554] do_softirq_own_stack+0x1c/0x28
+# [ 540.571878] __irq_exit_rcu+0x174/0x1b4
+# [ 540.575849] irq_exit_rcu+0x10/0x38
+# [ 540.579462] el1_interrupt+0x38/0x64
+# [ 540.583158] el1h_64_irq_handler+0x18/0x24
+# [ 540.587397] el1h_64_irq+0x6c/0x70
+# [ 540.590918] lock_acquire+0x1e0/0x338 (P)
+# [ 540.595060] __mutex_lock+0xa8/0x4b8
+# [ 540.598760] mutex_lock_nested+0x24/0x30
+# [ 540.602818] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.607503] _find_key+0x64/0x16c
+# [ 540.610940] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.615798] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.620924] __cpufreq_driver_target+0x144/0x29c
+# [ 540.625698] sugov_work+0x58/0x74
+# [ 540.629134] kthread_worker_fn+0xf4/0x324
+# [ 540.633278] kthread+0x12c/0x208
+# [ 540.636619] ret_from_fork+0x10/0x20
+# [ 540.640321] SMP: stopping secondary CPUs
+# [ 540.644518] Kernel Offset: 0x417126200000 from 0xffff800080000000
+# [ 540.650848] PHYS_OFFSET: 0xfff0dda400000000
+# [ 540.655170] CPU features: 0x000,00000100,00901250,8200721b
+# [ 540.660829] Memory Limit: none
+# [ 540.663999] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
index 329770c520d9..9450f2a002fd 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index ba9160d4d8eb..61122ea7f008 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -5,6 +5,5 @@ core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Crash
kms_prop_blob@invalid-set-prop,Crash
kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index eb16b29dee48..71418ea35a17 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 2803d0d80192..45dd8d493f6e 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -2,7 +2,6 @@ dumb_buffer@create-clear,Crash
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_color@gamma,Fail
kms_color@legacy-gamma,Fail
kms_cursor_crc@cursor-alpha-opaque,Fail
@@ -55,6 +54,7 @@ kms_flip@plain-flip-ts-check,Fail
kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 348b4ce7eb4b..b467991d4094 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -75,58 +75,72 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
# Failure Rate: 40
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-2160x1440p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
# Failure Rate: 80
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane_multiple@tiling-none
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-absolute-wf_vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-blocking-wf-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
# Failure Rate: 60
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-2-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
# Failure Rate: 20
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@modeset-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/eece9a80-42f3-41f4-86cc-69d8a51b976a@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/63dfd5b7-8a54-44a3-9530-f8dcd77a21d1@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@linear-tiling-1-displays-3840x2160p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index e8e994d92557..b83ec75161b2 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index c72fee70e739..9749ddb75121 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -157,6 +157,7 @@ kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index adbcdd0f28d2..28e37185bac0 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -19,6 +19,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index 62428f3c8f31..e3ca6da8cde7 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -88,3 +88,31 @@ kms_flip@flip-vs-expired-vblank
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/2364a6bf-e6bc-4741-8c78-cea8bdb06e03@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@modeset-vs-vblank-race
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/f7d72ed9-a783-46d7-b75d-54072bda32a3@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_pipe_crc_basic@suspend-read-crc
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/98d3ba54-bcb9-41ab-adb1-a18ba61ee2e4@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-panning-bottom-right-suspend
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/b58d15eb-094d-4ac2-aad3-83e518c2f55d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_vblank@ts-continuation-dpms-suspend
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index 319789806271..716d2d4e452d 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,5 +1,6 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
+kms_invalid_mode@overflow-vrefresh
# kernel panic seen with kms_cursor_crc tests
kms_cursor_crc.*
@@ -802,6 +803,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 30c736fc0067..7d2e499ea5de 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -98,6 +98,21 @@ struct drm_bridge_connector {
* HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI).
*/
struct drm_bridge *bridge_hdmi;
+ /**
+ * @bridge_hdmi_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * HDMI Audio infrastructure, if any (see &DRM_BRIDGE_OP_HDMI_AUDIO).
+ */
+ struct drm_bridge *bridge_hdmi_audio;
+ /**
+ * @bridge_dp_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * DisplayPort Audio infrastructure, if any (see
+ * &DRM_BRIDGE_OP_DP_AUDIO).
+ */
+ struct drm_bridge *bridge_dp_audio;
};
#define to_drm_bridge_connector(x) \
@@ -433,14 +448,25 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
- if (!bridge->funcs->hdmi_audio_startup)
- return 0;
+ if (!bridge->funcs->dp_audio_startup)
+ return 0;
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ return bridge->funcs->dp_audio_startup(connector, bridge);
+ }
+
+ return -EINVAL;
}
static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
@@ -451,11 +477,19 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ }
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms);
+ }
+
+ return -EINVAL;
}
static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
@@ -464,11 +498,15 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ }
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+ bridge->funcs->dp_audio_shutdown(connector, bridge);
+ }
}
static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
@@ -478,15 +516,27 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_mute_stream)
+ return -ENOTSUPP;
- if (bridge->funcs->hdmi_audio_mute_stream)
return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
enable, direction);
- else
- return -ENOTSUPP;
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ if (!bridge->funcs->dp_audio_mute_stream)
+ return -ENOTSUPP;
+
+ return bridge->funcs->dp_audio_mute_stream(connector, bridge,
+ enable, direction);
+ }
+
+ return -EINVAL;
}
static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
@@ -576,6 +626,42 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc = bridge->max_bpc;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) {
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_hdmi_audio = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->dp_audio_prepare ||
+ !bridge->funcs->dp_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_dp_audio = bridge;
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -611,22 +697,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc);
if (ret)
return ERR_PTR(ret);
-
- if (bridge->hdmi_audio_max_i2s_playback_channels ||
- bridge->hdmi_audio_spdif_playback) {
- if (!bridge->funcs->hdmi_audio_prepare ||
- !bridge->funcs->hdmi_audio_shutdown)
- return ERR_PTR(-EINVAL);
-
- ret = drm_connector_hdmi_audio_init(connector,
- bridge->hdmi_audio_dev,
- &drm_bridge_connector_hdmi_audio_funcs,
- bridge->hdmi_audio_max_i2s_playback_channels,
- bridge->hdmi_audio_spdif_playback,
- bridge->hdmi_audio_dai_port);
- if (ret)
- return ERR_PTR(ret);
- }
} else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
@@ -635,6 +705,24 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_audio ||
+ bridge_connector->bridge_dp_audio) {
+ struct device *dev;
+
+ if (bridge_connector->bridge_hdmi_audio)
+ dev = bridge_connector->bridge_hdmi_audio->hdmi_audio_dev;
+ else
+ dev = bridge_connector->bridge_dp_audio->hdmi_audio_dev;
+
+ ret = drm_connector_hdmi_audio_init(connector, dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 56a4965e518c..ed31471bd0e2 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -96,7 +96,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
@@ -112,7 +112,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
- err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
@@ -123,15 +123,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned int retries = min(5, attempts - 1);
ssize_t err;
- err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
- msg->msg, msg->len);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
if (err < 0)
return err;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
- (msg->len - 1) | (retries << 4) |
- DP_CEC_TX_MESSAGE_SEND);
- return err < 0 ? err : 0;
+ return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
@@ -144,13 +143,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
- err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
- if (err >= 0) {
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (!err) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
@@ -194,7 +193,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
u8 rx_msg_info;
ssize_t err;
- err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
@@ -202,7 +201,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
- err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
@@ -215,7 +214,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
@@ -230,7 +229,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
- drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+ drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
@@ -253,13 +252,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
if (!aux->cec.adap)
goto unlock;
- ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
- &cec_irq);
+ ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
- drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+ drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
@@ -269,7 +268,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index dbce1c3f4969..57828f2b7b5a 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -327,7 +327,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI
if (offset < DP_RECEIVER_CAP_SIZE) {
rd_interval = dpcd[offset];
} else {
- if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) {
drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* arbitrary default delay */
@@ -358,7 +358,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
int unit;
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) {
drm_err(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* default to max */
@@ -704,6 +704,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you will want to use drm_dp_dpcd_read_data() instead.
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -752,6 +754,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you will want to use drm_dp_dpcd_write_data() instead.
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -774,14 +778,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write);
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
- * Returns the number of bytes transferred on success or a negative error
- * code on failure.
+ * Returns a negative error code on failure or 0 on success.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
- return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
- DP_LINK_STATUS_SIZE);
+ return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
@@ -804,30 +807,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
{
int ret;
- if (dp_phy == DP_PHY_DPRX) {
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE);
-
- if (ret < 0)
- return ret;
-
- WARN_ON(ret != DP_LINK_STATUS_SIZE);
-
- return 0;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ return drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
- link_status,
- DP_LINK_STATUS_SIZE - 1);
+ ret = drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
+ link_status,
+ DP_LINK_STATUS_SIZE - 1);
if (ret < 0)
return ret;
- WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
-
/* Convert the LTTPR to the sink PHY link status layout */
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
@@ -838,12 +831,81 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+	 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+	 * Control Field", register 0x600).
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
+
static int read_payload_update_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0)
return ret;
@@ -870,21 +932,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int ret;
int retries = 0;
- drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
+ drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = vcpid;
payload_alloc[1] = start_time_slot;
payload_alloc[2] = time_slot_count;
- ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
+ ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
goto fail;
@@ -1040,15 +1102,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
{
u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
- if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
- if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_TEST_REQUEST);
return false;
@@ -1061,23 +1123,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
return false;
}
- if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
- if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
- &real_edid_checksum, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM,
+ real_edid_checksum) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
- if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_RESPONSE);
return false;
@@ -1114,12 +1176,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
- ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
- sizeof(dpcd_ext));
+ ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext,
+ sizeof(dpcd_ext));
if (ret < 0)
return ret;
- if (ret != sizeof(dpcd_ext))
- return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
drm_dbg_kms(aux->drm_dev,
@@ -1156,10 +1216,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
{
int ret;
- ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
- if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
+ if (dpcd[DP_DPCD_REV] == 0)
return -EIO;
ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
@@ -1209,11 +1269,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
- ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
+ ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
if (ret < 0)
return ret;
- if (ret != len)
- return -EIO;
drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
@@ -1570,7 +1628,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode);
*/
int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
{
- return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+ return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6);
}
EXPORT_SYMBOL(drm_dp_downstream_id);
@@ -1635,13 +1693,13 @@ void drm_dp_downstream_debug(struct seq_file *m,
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+ if (!len)
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2);
+ if (!len)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
@@ -1779,11 +1837,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux)
u8 count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
+ ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count);
if (ret < 0)
return ret;
- if (ret != 1)
- return -EIO;
return DP_GET_SINK_COUNT(count);
}
@@ -2172,13 +2228,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
u8 buf, count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
WARN_ON(!(buf & DP_TEST_SINK_START));
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf);
if (ret < 0)
return ret;
@@ -2192,11 +2248,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
* At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
* per component (RGB or CrYCb).
*/
- ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6);
}
static void drm_dp_aux_crc_work(struct work_struct *work)
@@ -2395,11 +2447,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2422,11 +2474,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2512,11 +2564,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
struct drm_dp_dpcd_ident *ident)
{
- int ret;
-
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
-
- return ret < 0 ? ret : 0;
+ return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident));
}
static void drm_dp_dump_desc(struct drm_dp_aux *aux,
@@ -2774,13 +2822,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
int ret;
for (offset = 0; offset < buf_size; offset += block_size) {
- ret = drm_dp_dpcd_read(aux,
- address + offset,
- &buf[offset], block_size);
+ ret = drm_dp_dpcd_read_data(aux,
+ address + offset,
+ &buf[offset], block_size);
if (ret < 0)
return ret;
-
- WARN_ON(ret != block_size);
}
return 0;
@@ -2995,12 +3041,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
int err;
u8 rate, lanes;
- err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate);
if (err < 0)
return err;
data->link_rate = drm_dp_bw_code_to_link_rate(rate);
- err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes);
if (err < 0)
return err;
data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
@@ -3008,22 +3054,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
if (lanes & DP_ENHANCED_FRAME_CAP)
data->enhanced_frame_cap = true;
- err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
if (err < 0)
return err;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- &data->custom80, sizeof(data->custom80));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
if (err < 0)
return err;
break;
case DP_PHY_TEST_PATTERN_CP2520:
- err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
- &data->hbr2_reset,
- sizeof(data->hbr2_reset));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
if (err < 0)
return err;
}
@@ -3050,15 +3096,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
DP_LINK_QUAL_PATTERN_11_MASK;
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
if (err < 0)
return err;
} else {
for (i = 0; i < data->num_lanes; i++) {
- err = drm_dp_dpcd_writeb(aux,
- DP_LINK_QUAL_LANE0_SET + i,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
if (err < 0)
return err;
}
@@ -3265,8 +3311,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
- &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev,
"Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
return false;
@@ -3290,7 +3336,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
return false;
}
@@ -3421,16 +3467,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
*/
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
{
- int ret;
u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
DP_PCON_ENABLE_LINK_FRL_MODE;
if (enable_frl_ready_hpd)
buf |= DP_PCON_ENABLE_HPD_READY;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
-
- return ret;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
@@ -3445,7 +3488,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3474,7 +3517,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
@@ -3509,11 +3552,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
return -EINVAL;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
@@ -3539,7 +3578,7 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
else
buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED;
-	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
-	if (ret < 0)
-		return ret;
-
-	return 0;
+	return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
@@ -3555,13 +3594,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
*/
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
}
EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
@@ -3576,7 +3609,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
int ret;
u8 buf = 0;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
@@ -3585,11 +3618,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
@@ -3604,7 +3633,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3629,7 +3658,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
int mode;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
if (ret < 0)
return ret;
@@ -3658,7 +3687,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < hdmi->max_lanes; i++) {
- if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
return;
error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
@@ -3793,7 +3822,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3804,11 +3833,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
buf |= pps_buf_config << 2;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
/**
@@ -3820,13 +3845,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
*/
int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_default);
@@ -3842,15 +3861,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
@@ -3867,21 +3882,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
@@ -3897,7 +3908,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3906,11 +3917,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
else
buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
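
The PCON updates above share a read-modify-write idiom on DP_PROTOCOL_CONVERTER_CONTROL_2 and the HDMI link-config registers. A hedged sketch of that idiom with the new byte accessors (pcon_update_bits() is a placeholder, not a helper added by the patch):

static int pcon_update_bits(struct drm_dp_aux *aux, unsigned int reg,
			    u8 mask, u8 val)
{
	u8 buf;
	int ret;

	ret = drm_dp_dpcd_read_byte(aux, reg, &buf);
	if (ret < 0)
		return ret;

	buf = (buf & ~mask) | (val & mask);

	return drm_dp_dpcd_write_byte(aux, reg, buf);
}
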
@@ -3942,12 +3949,12 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
+ if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
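
drm_edp_backlight_set_level() writes a two-byte buffer starting at DP_EDP_BACKLIGHT_BRIGHTNESS_MSB. A simplified sketch of how that buffer is assumed to be filled, depending on whether the panel also uses the LSB register (edp_write_level() is a placeholder):

static int edp_write_level(struct drm_dp_aux *aux, bool lsb_reg_used, u16 level)
{
	u8 buf[2] = { 0 };

	if (lsb_reg_used) {
		buf[0] = (level & 0xff00) >> 8;	/* MSB register comes first */
		buf[1] = level & 0x00ff;
	} else {
		buf[0] = level;
	}

	/* both registers are written in a single AUX transfer */
	return drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
				      buf, sizeof(buf));
}
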
@@ -3965,22 +3972,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
if (!bl->aux_enable)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (enable)
buf |= DP_EDP_BACKLIGHT_ENABLE;
else
buf &= ~DP_EDP_BACKLIGHT_ENABLE;
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -4016,15 +4023,16 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
if (bl->pwmgen_bit_count) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET,
+ bl->pwm_freq_pre_divider);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev,
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
@@ -4032,8 +4040,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4088,8 +4096,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
@@ -4122,14 +4130,14 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is arbitrary value and may need some tweak.
*/
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
aux->name, ret);
return 0;
}
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
aux->name, ret);
return 0;
@@ -4154,8 +4162,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
@@ -4180,8 +4188,8 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 buf[2];
u8 mode_reg;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4194,11 +4202,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret != size) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (bl->lsb_reg_used)
@@ -4343,8 +4351,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
if (!panel || !panel->dev || !aux)
return -EINVAL;
- ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
- EDP_DISPLAY_CTL_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 3a1f1ffc7b55..a89f38fd3218 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2192,24 +2192,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
guid_copy(&mstb->guid, guid);
if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ struct drm_dp_aux *aux;
u8 buf[UUID_SIZE];
export_guid(buf, &mstb->guid);
- if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- mstb->port_parent,
- DP_GUID, sizeof(buf), buf);
- } else {
- ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, buf, sizeof(buf));
- }
- }
+ if (mstb->port_parent)
+ aux = &mstb->port_parent->aux;
+ else
+ aux = mstb->mgr->aux;
- if (ret < 16 && ret > 0)
- return -EPROTO;
+ ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
+ }
- return ret == 16 ? 0 : ret;
+ return ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2744,14 +2740,13 @@ retry:
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
- ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
- &msg[offset],
- tosend);
- if (ret != tosend) {
- if (ret == -EIO && retries < 5) {
- retries++;
- goto retry;
- }
+ ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ } else if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
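
With the new helper, the sideband transmit loop above keys its retry purely off -EIO instead of a short byte count. A condensed sketch of that chunked-write pattern (sideband_msg_write() is a placeholder; the real code also honours mgr->max_dpcd_transaction_bytes):

static int sideband_msg_write(struct drm_dp_aux *aux, unsigned int regbase,
			      u8 *msg, int total)
{
	int offset = 0;
	int retries = 0;

	while (offset < total) {
		int tosend = min(16, total - offset);
		int ret = drm_dp_dpcd_write_data(aux, regbase + offset,
						 &msg[offset], tosend);

		if (ret == -EIO && ++retries < 5)
			continue;	/* retry the same chunk */
		if (ret < 0)
			return ret;

		offset += tosend;
	}

	return 0;
}
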
@@ -3624,7 +3619,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return DRM_DP_SST;
- if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
return DRM_DP_SST;
if (mstm_cap & DP_MST_CAP)
@@ -3679,10 +3674,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
@@ -3697,7 +3692,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
@@ -3763,8 +3758,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
@@ -3813,18 +3808,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
goto out_fail;
}
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3883,8 +3878,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
@@ -3922,9 +3917,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
- replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
@@ -4881,9 +4876,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
- if (drm_dp_dpcd_read(mgr->aux,
- DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
- &buf[i], 16) != 16)
+ if (drm_dp_dpcd_read_data(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) < 0)
return false;
}
return true;
@@ -4972,23 +4967,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret != 2) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret < 0) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret < 0) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
- ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+ DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret < 0) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -6112,14 +6108,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
 	/* Endpoint decompression with DP-to-DP peer device */
@@ -6157,8 +6153,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(immediate_upstream_aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6180,11 +6176,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 90fe07a89260..076edf161048 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -222,7 +222,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r
while ((len = next_reg_area(&offset))) {
int address = DP_TUNNELING_BASE + offset;
- if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
return -EIO;
offset += len;
@@ -913,7 +913,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
goto out_err;
if (enable)
@@ -921,7 +921,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
else
val &= ~mask;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
goto out_err;
tunnel->bw_alloc_enabled = enable;
@@ -1039,7 +1039,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux)
{
u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
- if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
return -EIO;
return 0;
@@ -1052,7 +1052,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
u8 val;
int err;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
*status_changed = val & status_change_mask;
@@ -1095,7 +1095,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
if (err)
goto out;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
err = -EIO;
goto out;
}
@@ -1196,13 +1196,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
goto out_err;
val &= mask;
if (val) {
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
goto out_err;
return 1;
@@ -1215,7 +1215,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
* Check for estimated BW changes explicitly to account for lost
* BW change notifications.
*/
- if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
goto out_err;
if (val * tunnel->bw_granularity != tunnel->estimated_bw)
@@ -1300,7 +1300,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a
{
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index c205f37da1e1..d9d9948b29e9 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -10,6 +10,298 @@
#include <drm/display/drm_hdmi_state_helper.h>
/**
+ * DOC: hdmi helpers
+ *
+ * These functions contain an implementation of the HDMI specification
+ * in the form of KMS helpers.
+ *
+ * They handle TMDS character rate computation, automatic selection of
+ * output formats, infoframe generation, and so on.
+ *
+ * Infoframes Compliance
+ * ~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Drivers using the helpers will expose the various infoframes
+ * generated according to the HDMI specification in debugfs.
+ *
+ * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project
+ * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like:
+ *
+ * .. code-block:: bash
+ *
+ * # edid-decode \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \
+ * /sys/class/drm/card1-HDMI-A-1/edid \
+ * -c
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00
+ * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25
+ * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01
+ * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20
+ * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e
+ * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c
+ * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff
+ * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46
+ *
+ * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f
+ * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00
+ * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03
+ * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52
+ * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b
+ * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: GSM
+ * Model: 23540
+ * Serial Number: 454430 (0x0006ef1e)
+ * Made in: week 7 of 2022
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 47 cm x 52 cm
+ * Gamma: 2.20
+ * DPMS levels: Standby Suspend Off
+ * RGB color display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.6835, 0.3105
+ * Green: 0.2587, 0.6679
+ * Blue : 0.1445, 0.0585
+ * White: 0.3134, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * Standard Timings:
+ * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * Detailed Timing Descriptors:
+ * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 240 Hpol P
+ * Vfront 3 Vsync 10 Vback 199 Vpol N
+ * Display Range Limits:
+ * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz
+ * Display Product Name: 'LG SDQHD'
+ * Display Product Serial Number: '207NTRLDC430'
+ * Extension blocks: 1
+ * Checksum: 0x46
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Basic audio support
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 2
+ * Audio Data Block:
+ * Linear PCM:
+ * Max channels: 2
+ * Supported sample rates (kHz): 48 44.1 32
+ * Supported sample sizes (bits): 24 20 16
+ * Video Data Block:
+ * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz
+ * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz
+ * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz
+ * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz
+ * VIC 34: 1920x1080 30.000000 Hz 16:9 33.750 kHz 74.250000 MHz
+ * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Speaker Allocation Data Block:
+ * FL/FR - Front Left/Right
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * Supports_AI
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 300 MHz
+ * Extended HDMI video details:
+ * HDMI VICs:
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 600 MHz
+ * SCDC Present
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: Always Underscanned
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Colorimetry Data Block:
+ * BT2020YCC
+ * BT2020RGB
+ * HDR Static Metadata Data Block:
+ * Electro optical transfer functions:
+ * Traditional gamma - SDR luminance range
+ * SMPTE ST2084
+ * Supported static metadata descriptors:
+ * Static metadata type 1
+ * Desired content max luminance: 82 (295.365 cd/m^2)
+ * Desired content max frame-average luminance: 82 (295.365 cd/m^2)
+ * Desired content min luminance: 81 (0.298 cd/m^2)
+ * Detailed Timing Descriptors:
+ * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 80 Hpol P
+ * Vfront 3 Vsync 10 Vback 28 Vpol N
+ * Checksum: 0xc3 Unused space in Extension Block: 43 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.29.0-5346
+ * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18
+ *
+ * Warnings:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * IT Video Formats are overscanned by default, but normally this should be underscanned.
+ * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended?
+ * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended?
+ * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues.
+ * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead.
+ * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues.
+ * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0.
+ * EDID:
+ * Base EDID: Some timings are out of range of the Monitor Ranges:
+ * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz)
+ * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz)
+ * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz)
+ *
+ * Failures:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned.
+ * EDID:
+ * CTA-861: Native progressive timings are a mix of several resolutions.
+ *
+ * EDID conformity: FAIL
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00
+ * 00
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x31
+ *
+ * AVI InfoFrame
+ * Version: 2
+ * Length: 13
+ * Y: Color Component Sample Format: RGB
+ * A: Active Format Information Present: Yes
+ * B: Bar Data Present: Bar Data not present
+ * S: Scan Information: Composed for an underscanned display
+ * C: Colorimetry: No Data
+ * M: Picture Aspect Ratio: 16:9
+ * R: Active Portion Aspect Ratio: 8
+ * ITC: IT Content: No Data
+ * EC: Extended Colorimetry: xvYCC601
+ * Q: RGB Quantization Range: Limited Range
+ * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling
+ * YQ: YCC Quantization Range: Limited Range
+ * CN: IT Content Type: Graphics
+ * PR: Pixel Data Repetition Count: 0
+ * Line Number of End of Top Bar: 0
+ * Line Number of Start of Bottom Bar: 0
+ * Pixel Number of End of Left Bar: 0
+ * Pixel Number of Start of Right Bar: 0
+ *
+ * ----------------
+ *
+ * AVI InfoFrame conformity: PASS
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 81 01 05 49 03 0c 00 20 01
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x49
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03
+ * Version: 1
+ * Length: 5
+ * HDMI Video Format: HDMI_VIC is present
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ *
+ * ----------------
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65
+ * 6f 63 6f 72 65 00 00 00 00 00 00 00 09
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x93
+ *
+ * Source Product Description InfoFrame
+ * Version: 1
+ * Length: 25
+ * Vendor Name: 'Broadcom'
+ * Product Description: 'Videocore'
+ * Source Information: PC general
+ *
+ * ----------------
+ *
+ * Source Product Description InfoFrame conformity: PASS
+ *
+ * Testing
+ * ~~~~~~~
+ *
+ * The helpers have unit testing and can be tested using kunit with:
+ *
+ * .. code-block:: bash
+ *
+ * $ ./tools/testing/kunit/kunit.py run \
+ * --kunitconfig=drivers/gpu/drm/tests \
+ * drm_atomic_helper_connector_hdmi_*
+ */
+
+/**
* __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources
* @connector: DRM connector
* @new_conn_state: connector state to reset
@@ -816,7 +1108,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
* @status: Connection status
*
* This function should be called as a part of the .detect() / .detect_ctx()
- * callbacks, updating the HDMI-specific connector's data.
+ * callbacks for all status changes.
*/
void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
enum drm_connector_status status)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ea2611770f4..0138cf0b8b63 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -933,6 +933,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* state). This is especially true in enable hooks because the pipeline has
* changed.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The old connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -967,6 +970,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* attached to @encoder vs ones that do (and to inspect their state). This is
* especially true in disable hooks because the pipeline will change.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The new connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -988,6 +994,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
/**
+ * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder
+ * @encoder: The encoder to find the connector of
+ * @ctx: Modeset locking context
+ *
+ * This function finds and returns the connector currently assigned to
+ * an @encoder.
+ *
+ * It is similar to the drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't
+ * require access to the atomic state. If you have access to it, prefer
+ * using these. This helper is typically useful in situations where you
+ * don't have access to the atomic state, like detect, link repair,
+ * threaded interrupt handlers, or hooks from other frameworks (ALSA,
+ * CEC, etc.).
+ *
+ * Returns:
+ * The connector connected to @encoder, or an error pointer otherwise.
+ * When the error is EDEADLK, a deadlock has been detected and the
+ * sequence must be restarted.
+ */
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *out_connector = ERR_PTR(-EINVAL);
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state)
+ continue;
+
+ if (encoder == connector->state->best_encoder) {
+ out_connector = connector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return out_connector;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder);
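+
+A hedged usage sketch for the new lookup, as it might appear in a handler that has no atomic state to work with (my_handle_hpd() and my_encoder are placeholder names):
+
+static void my_handle_hpd(struct drm_encoder *my_encoder)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_connector *connector;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	connector = drm_atomic_get_connector_for_encoder(my_encoder, &ctx);
+	if (IS_ERR(connector) && PTR_ERR(connector) == -EDEADLK) {
+		drm_modeset_backoff(&ctx);	/* deadlock: back off and restart */
+		goto retry;
+	}
+
+	if (!IS_ERR(connector))
+		drm_dbg_kms(my_encoder->dev, "[CONNECTOR:%d:%s] feeds this encoder\n",
+			    connector->base.id, connector->name);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+}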
+
+
+/**
* drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5302ab324898..ee64ca1b1bec 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3409,6 +3409,9 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
* This implies a reset of all active components available between the CRTC and
* connectors.
*
+ * A variant of this function, drm_bridge_helper_reset_crtc(), exists
+ * for use by bridge drivers.
+ *
* NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
* For drivers which optimize out unnecessary modesets this will result in
* a no-op commit, achieving nothing.
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index fa2794217a90..b4c89ec01998 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -198,10 +199,96 @@
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static void __drm_bridge_free(struct kref *kref)
+{
+ struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+
+ kfree(bridge->container);
+}
+
+/**
+ * drm_bridge_get - Acquire a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function increments the bridge's refcount.
+ *
+ * Returns:
+ * Pointer to @bridge.
+ */
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_get(&bridge->refcount);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drm_bridge_get);
+
+/**
+ * drm_bridge_put - Release a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function decrements the bridge's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_bridge_put(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_put(&bridge->refcount, __drm_bridge_free);
+}
+EXPORT_SYMBOL(drm_bridge_put);
+
+/**
+ * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
+ *
+ * @data: pointer to @struct drm_bridge, cast to a void pointer
+ *
+ * Wrapper of drm_bridge_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_bridge_put_void(void *data)
+{
+ struct drm_bridge *bridge = (struct drm_bridge *)data;
+
+ drm_bridge_put(bridge);
+}
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs)
+{
+ void *container;
+ struct drm_bridge *bridge;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = container + offset;
+ bridge->container = container;
+ bridge->funcs = funcs;
+ kref_init(&bridge->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
+ if (err)
+ return ERR_PTR(err);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_bridge_alloc);
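+
+A hedged sketch of how a driver is expected to use this: the series' devm_drm_bridge_alloc() wrapper (not shown in this hunk) is assumed to forward sizeof()/offsetof() of the driver structure to __devm_drm_bridge_alloc(), so the refcounted bridge lives embedded in the driver's own state. my_bridge, my_bridge_funcs and my_bridge_probe() are placeholder names.
+
+struct my_bridge {
+	struct drm_bridge bridge;	/* embedded, owned by the refcount */
+	void __iomem *regs;
+};
+
+static const struct drm_bridge_funcs my_bridge_funcs = {
+	/* .attach, .atomic_enable, ... */
+};
+
+static int my_bridge_probe(struct platform_device *pdev)
+{
+	struct my_bridge *ctx;
+
+	ctx = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
+				    &my_bridge_funcs);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ctx->bridge.of_node = pdev->dev.of_node;
+	drm_bridge_add(&ctx->bridge);	/* paired with drm_bridge_remove() on teardown */
+
+	return 0;
+}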
+
/**
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
+ *
+ * The bridge to be added must have been allocated by
+ * devm_drm_bridge_alloc().
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
@@ -280,6 +367,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
+static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
+{
+ return bridge->funcs->atomic_reset != NULL;
+}
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
@@ -327,12 +419,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge, flags);
+ ret = bridge->funcs->attach(bridge, encoder, flags);
if (ret < 0)
goto err_reset_bridge;
}
- if (bridge->funcs->atomic_reset) {
+ if (drm_bridge_is_atomic(bridge)) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
@@ -377,7 +469,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
- if (bridge->funcs->atomic_reset)
+ if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_fini(&bridge->base);
if (bridge->funcs->detach)
@@ -1300,6 +1392,75 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
+ struct drm_bridge *bridge,
+ unsigned int idx)
+{
+ drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+ drm_printf(p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+
+ if (bridge->of_node)
+ drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
+
+ drm_printf(p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(p, " modes");
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ drm_puts(p, " hdmi");
+ drm_puts(p, "\n");
+}
+
+static int allbridges_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ mutex_lock(&bridge_lock);
+
+ list_for_each_entry(bridge, &bridge_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ mutex_unlock(&bridge_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(allbridges);
+
+static int encoder_bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
+
+void drm_bridge_debugfs_params(struct dentry *root)
+{
+ debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
+}
+
+void drm_bridge_debugfs_encoder_params(struct dentry *root,
+ struct drm_encoder *encoder)
+{
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
+}
+
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
new file mode 100644
index 000000000000..af80d2496194
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge
+ * @bridge: DRM bridge to reset
+ * @ctx: lock acquisition context
+ *
+ * Reset the @bridge pipeline. It will power-cycle all active components
+ * between the CRTC and the connector that the bridge is connected to.
+ *
+ * As it relies on drm_atomic_helper_reset_crtc(), the same limitations
+ * apply.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. If the error
+ * returned is EDEADLK, the whole atomic sequence must be restarted.
+ */
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector *connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ connector = drm_atomic_get_connector_for_encoder(encoder, ctx);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto out;
+ }
+
+ if (!connector->state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = connector->state->crtc;
+ ret = drm_atomic_helper_reset_crtc(crtc, ctx);
+ if (ret)
+ goto out;
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_helper_reset_crtc);
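+
+A hedged usage sketch, e.g. from a bridge's hotplug or link-failure handler, restarting the sequence on -EDEADLK as documented above (my_bridge_handle_link_loss() is a placeholder):
+
+static void my_bridge_handle_link_loss(struct drm_bridge *bridge)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	int ret;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);	/* back off and restart the sequence */
+		goto retry;
+	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+}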
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 549b28a5918c..f1de7faf9fb4 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(drm_client_release);
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
- drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
@@ -252,7 +252,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
drm_gem_lock(gem);
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap_unlocked;
*map_copy = *map;
@@ -278,7 +278,7 @@ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
@@ -316,7 +316,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
ret = drm_gem_pin_locked(gem);
if (ret)
goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap;
@@ -348,7 +348,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
struct iosys_map *map = &buffer->map;
drm_gem_lock(gem);
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unpin_locked(gem);
drm_gem_unlock(gem);
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index aca442c25209..0f9d5ba36c81 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -39,7 +39,7 @@ int drm_client_modeset_create(struct drm_client_dev *client)
unsigned int max_connector_count = 1;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
- unsigned int i = 0;
+ int i = 0;
/* Add terminating zero entry to enable index less iteration */
client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
@@ -73,9 +73,10 @@ err_free:
static void drm_client_modeset_release(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
- unsigned int i;
drm_client_for_each_modeset(modeset, client) {
+ int i;
+
drm_mode_destroy(client->dev, modeset->mode);
modeset->mode = NULL;
modeset->fb = NULL;
@@ -117,10 +118,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -130,10 +131,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -144,10 +145,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_preferred_mode(struct drm_connector *connector, int width, int height)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay > width ||
@@ -159,16 +160,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei
return NULL;
}
-static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_first_mode(struct drm_connector *connector)
{
return list_first_entry_or_null(&connector->modes,
struct drm_display_mode, head);
}
-static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
- struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode;
+ const struct drm_cmdline_mode *cmdline_mode;
+ const struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
@@ -237,9 +240,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
return enable;
}
-static void drm_client_connectors_enabled(struct drm_connector **connectors,
+static void drm_client_connectors_enabled(struct drm_connector *connectors[],
unsigned int connector_count,
- bool *enabled)
+ bool enabled[])
{
bool any_enabled = false;
struct drm_connector *connector;
@@ -263,16 +266,35 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
enabled[i] = drm_connector_enabled(connectors[i], false);
}
+static void mode_replace(struct drm_device *dev,
+ const struct drm_display_mode **dst,
+ const struct drm_display_mode *src)
+{
+ drm_mode_destroy(dev, (struct drm_display_mode *)*dst);
+
+ *dst = src ? drm_mode_duplicate(dev, src) : NULL;
+}
+
+static void modes_destroy(struct drm_device *dev,
+ const struct drm_display_mode *modes[],
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mode_replace(dev, &modes[i], NULL);
+}
+
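+
+These two helpers change the ownership rule for the modes[] array: each entry is now a duplicate owned by the array rather than a pointer borrowed from the connector's mode list. A hedged caller sketch (example_pick_mode() is a placeholder):
+
+static int example_pick_mode(struct drm_device *dev,
+			     struct drm_connector *connector,
+			     const struct drm_display_mode *modes[],
+			     int count)
+{
+	/* slot 0 takes ownership of a duplicate; any previous entry is freed */
+	mode_replace(dev, &modes[0],
+		     drm_connector_preferred_mode(connector, 1920, 1080));
+	if (!modes[0])
+		return -ENOENT;
+
+	/* ... build the modesets from modes[] ... */
+
+	/* release every remaining duplicate when done */
+	modes_destroy(dev, modes, count);
+	return 0;
+}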
static bool drm_client_target_cloned(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
- int count, i, j;
+ int count, i;
bool can_clone = false;
- struct drm_display_mode *dmt_mode, *mode;
+ struct drm_display_mode *dmt_mode;
/* only contemplate cloning in the single crtc case */
if (dev->mode_config.num_crtc > 1)
@@ -291,9 +313,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
for (i = 0; i < connector_count; i++) {
+ int j;
+
if (!enabled[i])
continue;
- modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
+
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connectors[i]));
if (!modes[i]) {
can_clone = false;
break;
@@ -323,6 +349,8 @@ static bool drm_client_target_cloned(struct drm_device *dev,
goto fail;
for (i = 0; i < connector_count; i++) {
+ const struct drm_display_mode *mode;
+
if (!enabled[i])
continue;
@@ -332,7 +360,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
- modes[i] = mode;
+ mode_replace(dev, &modes[i], mode);
}
if (!modes[i])
can_clone = false;
@@ -349,19 +377,19 @@ fail:
}
static int drm_client_get_tile_offsets(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
int idx,
int h_idx, int v_idx)
{
- struct drm_connector *connector;
int i;
int hoffset = 0, voffset = 0;
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+
if (!connector->has_tile)
continue;
@@ -384,14 +412,13 @@ static int drm_client_get_tile_offsets(struct drm_device *dev,
}
static bool drm_client_target_preferred(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const u64 mask = BIT_ULL(connector_count) - 1;
- struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
@@ -405,7 +432,9 @@ static bool drm_client_target_preferred(struct drm_device *dev,
retry:
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+ const char *mode_type;
+
if (conn_configured & BIT_ULL(i))
continue;
@@ -438,20 +467,23 @@ retry:
modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
- /* got for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
+
if (!modes[i]) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
- connector->base.id, connector->name,
- connector->tile_group ? connector->tile_group->id : 0);
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred modes, pick one off the list */
- if (!modes[i])
- modes[i] = drm_connector_first_mode(connector);
+
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
+ }
+
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
@@ -466,18 +498,24 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
} else {
- modes[i] = drm_connector_get_tiled_mode(connector);
+ mode_type = "tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_get_tiled_mode(connector));
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
- connector->base.id, connector->name,
- modes[i] ? modes[i]->name : "none");
+ if (modes[i])
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n",
+ connector->base.id, connector->name,
+ mode_type, modes[i]->name);
+ else
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n",
+ connector->base.id, connector->name);
+
conn_configured |= BIT_ULL(i);
}
@@ -502,18 +540,17 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
}
static int drm_client_pick_crtcs(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
+ struct drm_crtc *best_crtcs[],
+ const struct drm_display_mode *modes[],
int n, int width, int height)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
+ struct drm_crtc **crtcs;
struct drm_mode_set *modeset;
- int o;
if (n == connector_count)
return 0;
@@ -543,7 +580,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
* remaining connectors
*/
drm_client_for_each_modeset(modeset, client) {
- crtc = modeset->crtc;
+ struct drm_crtc *crtc = modeset->crtc;
+ int o;
if (!connector_has_possible_crtc(connector, crtc))
continue;
@@ -577,17 +615,17 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
/* Try to read the BIOS display configuration and use it for the initial config */
static bool drm_client_firmware_config(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ struct drm_crtc *crtcs[],
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
- int i, j;
+ int i;
bool *save_enabled;
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
@@ -621,11 +659,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
- struct drm_connector *connector;
+ struct drm_connector *connector = connectors[i];
struct drm_encoder *encoder;
- struct drm_crtc *new_crtc;
-
- connector = connectors[i];
+ struct drm_crtc *crtc;
+ const char *mode_type;
+ int j;
if (conn_configured & BIT(i))
continue;
@@ -664,7 +702,7 @@ retry:
num_connectors_enabled++;
- new_crtc = connector->state->crtc;
+ crtc = connector->state->crtc;
/*
* Make sure we're not trying to drive multiple connectors
@@ -672,69 +710,52 @@ retry:
* match the BIOS.
*/
for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- drm_dbg_kms(dev, "fallback: cloned configuration\n");
+ if (crtcs[j] == crtc) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n",
+ connector->base.id, connector->name);
goto bail;
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
- /* try for preferred next */
if (!modes[i]) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
- connector->base.id, connector->name,
- str_yes_no(connector->has_tile));
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_first_mode(connector);
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
}
/* last resort: use current mode */
if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly.
- */
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
- connector->base.id, connector->name);
- modes[i] = &connector->state->crtc->mode;
+ mode_type = "current";
+ mode_replace(dev, &modes[i],
+ &crtc->state->mode);
}
+
/*
* In case of tiled modes, if all tiles are not present
* then fallback to a non tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
}
- crtcs[i] = new_crtc;
+ crtcs[i] = crtc;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+ drm_dbg_kms(dev, "[CONNECTOR::%d:%s] on [CRTC:%d:%s] using %s mode: %s\n",
connector->base.id, connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ crtc->base.id, crtc->name,
+ mode_type, modes[i]->name);
fallback = false;
conn_configured |= BIT(i);
@@ -799,8 +820,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
- /* points to modes protected by mode_config.mutex */
- struct drm_display_mode **modes;
+ const struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
@@ -851,7 +871,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
modes, offsets, enabled, width, height)) {
- memset(modes, 0, connector_count * sizeof(*modes));
+ modes_destroy(dev, modes, connector_count);
memset(crtcs, 0, connector_count * sizeof(*crtcs));
memset(offsets, 0, connector_count * sizeof(*offsets));
@@ -868,10 +888,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
crtcs, modes, 0, width, height);
}
+ mutex_unlock(&dev->mode_config.mutex);
+
drm_client_modeset_release(client);
for (i = 0; i < connector_count; i++) {
- struct drm_display_mode *mode = modes[i];
+ const struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
struct drm_client_offset *offset = &offsets[i];
@@ -902,11 +924,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
- mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
kfree(crtcs);
+ modes_destroy(dev, modes, connector_count);
kfree(modes);
kfree(offsets);
kfree(enabled);
@@ -938,7 +960,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
struct drm_plane *plane = modeset->crtc->primary;
struct drm_cmdline_mode *cmdline;
u64 valid_mask = 0;
- unsigned int i;
+ int i;
if (!modeset->num_connectors)
return false;
@@ -1219,11 +1241,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_connector *connector;
struct drm_mode_set *modeset;
struct drm_modeset_acquire_ctx ctx;
- int j;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
+ int j;
+
if (!modeset->crtc->enabled)
continue;
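
The mode_replace() and modes_destroy() helpers used throughout the hunks above are introduced earlier in this patch and are not visible here. As a rough, hedged sketch of the contract they appear to implement -- modes[] now stores duplicated modes that remain valid after mode_config.mutex is dropped, and the duplicates are freed once the client modesets are built -- something along these lines would fit (the bodies below are an assumption, not the patch's actual code):

static void mode_replace(struct drm_device *dev,
			 const struct drm_display_mode **dst,
			 const struct drm_display_mode *mode)
{
	/* drop a previously stored duplicate, if any */
	drm_mode_destroy(dev, (struct drm_display_mode *)*dst);

	/* store a copy that outlives mode_config.mutex */
	*dst = mode ? drm_mode_duplicate(dev, mode) : NULL;
}

static void modes_destroy(struct drm_device *dev,
			  const struct drm_display_mode *modes[],
			  unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		mode_replace(dev, &modes[i], NULL);
}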
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0955f1c385dd..39497493f74c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -334,7 +334,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6b2178864c7e..3dfd8b34dceb 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -740,40 +740,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
-static int bridges_show(struct seq_file *m, void *data)
-{
- struct drm_encoder *encoder = m->private;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
- unsigned int idx = 0;
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
- drm_printf(&p, "\ttype: [%d] %s\n",
- bridge->type,
- drm_get_connector_type_name(bridge->type));
-
- if (bridge->of_node)
- drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
-
- drm_printf(&p, "\tops: [0x%x]", bridge->ops);
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- drm_puts(&p, " detect");
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- drm_puts(&p, " edid");
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- drm_puts(&p, " hpd");
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- drm_puts(&p, " modes");
- if (bridge->ops & DRM_BRIDGE_OP_HDMI)
- drm_puts(&p, " hdmi");
- drm_puts(&p, "\n");
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(bridges);
-
void drm_debugfs_encoder_add(struct drm_encoder *encoder)
{
struct drm_minor *minor = encoder->dev->primary;
@@ -789,9 +755,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder)
encoder->debugfs_entry = root;
- /* bridges list */
- debugfs_create_file("bridges", 0444, root, encoder,
- &bridges_fops);
+ drm_bridge_debugfs_encoder_params(root, encoder);
if (encoder->funcs && encoder->funcs->debugfs_init)
encoder->funcs->debugfs_init(encoder, root);
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index 385eb5e10047..9dc0408fbbea 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -13,85 +13,7 @@
#include <drm/drm_fourcc.h>
#include "drm_draw_internal.h"
-
-/*
- * Conversions from xrgb8888
- */
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
+#include "drm_format_internal.h"
/**
* drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
@@ -106,28 +28,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
{
switch (format) {
case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
+ return drm_pixel_xrgb8888_to_rgb565(color);
case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
+ return drm_pixel_xrgb8888_to_rgba5551(color);
case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
+ return drm_pixel_xrgb8888_to_xrgb1555(color);
case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
+ return drm_pixel_xrgb8888_to_argb1555(color);
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
return color;
case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
+ return drm_pixel_xrgb8888_to_argb8888(color);
case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
+ return drm_pixel_xrgb8888_to_xbgr8888(color);
case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
+ return drm_pixel_xrgb8888_to_abgr8888(color);
case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
+ return drm_pixel_xrgb8888_to_xrgb2101010(color);
case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
+ return drm_pixel_xrgb8888_to_argb2101010(color);
case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
+ return drm_pixel_xrgb8888_to_abgr2101010(color);
default:
WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
return 0;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 17fc5dc708f4..3dc7acd56b1d 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -40,6 +40,7 @@
#include <linux/xarray.h>
#include <drm/drm_accel.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_cache.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
@@ -500,6 +501,25 @@ void drm_dev_unplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unplug);
+/**
+ * drm_dev_set_dma_dev - set the DMA device for a DRM device
+ * @dev: DRM device
+ * @dma_dev: DMA device or NULL
+ *
+ * Sets the DMA device of the given DRM device. Only required if
+ * the DMA device is different from the DRM device's parent. After
+ * calling this function, the DRM device holds a reference on
+ * @dma_dev. Pass NULL to clear the DMA device.
+ */
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev)
+{
+ dma_dev = get_device(dma_dev);
+
+ put_device(dev->dma_dev);
+ dev->dma_dev = dma_dev;
+}
+EXPORT_SYMBOL(drm_dev_set_dma_dev);
+
/*
* Available recovery methods for wedged device. To be sent along with device
* wedged uevent.
@@ -654,6 +674,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
{
drm_fs_inode_free(dev->anon_inode);
+ put_device(dev->dma_dev);
+ dev->dma_dev = NULL;
put_device(dev->dev);
/* Prevent use-after-free in drm_managed_release when debugging is
* enabled. Slightly awkward, but can't really be helped. */
@@ -1188,6 +1210,7 @@ static int __init drm_core_init(void)
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ drm_bridge_debugfs_params(drm_debugfs_root);
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
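
drm_dev_set_dma_dev() above is meant for drivers whose DMA transfers are performed by a device other than the DRM device's parent. A minimal, hedged probe sketch (all foo_* names and the dma_parent pointer are hypothetical; only drm_dev_set_dma_dev() and devm_drm_dev_alloc() are existing interfaces):

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;

	foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
				 struct foo_device, drm);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	/* DMA is done by a separate device; the DRM core takes a reference. */
	drm_dev_set_dma_dev(&foo->drm, foo->dma_parent);

	return drm_dev_register(&foo->drm, 0);
}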
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 13bc4c290b17..1e69326283dc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -7099,18 +7099,12 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
+ unsigned int hdisplay, unsigned int vdisplay)
{
- int i, count, num_modes = 0;
+ int i, count = ARRAY_SIZE(drm_dmt_modes), num_modes = 0;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = ARRAY_SIZE(drm_dmt_modes);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
for (i = 0; i < count; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 01d3ab307ac3..d36e6cacc575 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -20,6 +20,8 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+#include "drm_format_internal.h"
+
/**
* drm_format_conv_state_init - Initialize format-conversion state
* @state: The state to initialize
@@ -244,6 +246,152 @@ static int drm_fb_xfrm(struct iosys_map *dst,
xfrm_line);
}
+#define ALIGN_DOWN_PIXELS(end, n, a) \
+ ((end) - ((n) & ((a) - 1)))
+
+static __always_inline void drm_fb_xfrm_line_32to8(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 8) |
+ (xfrm_pixel(pix[2]) << 16) |
+ (xfrm_pixel(pix[3]) << 24);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixels */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf8++ = xfrm_pixel(le32_to_cpup(sbuf32++));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le64 *dbuf64 = dbuf;
+ __le32 *dbuf32;
+ __le16 *dbuf16;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+#if defined(CONFIG_64BIT)
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u64 val64 = ((u64)xfrm_pixel(pix[0])) |
+ ((u64)xfrm_pixel(pix[1]) << 16) |
+ ((u64)xfrm_pixel(pix[2]) << 32) |
+ ((u64)xfrm_pixel(pix[3]) << 48);
+ *dbuf64++ = cpu_to_le64(val64);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+#endif
+
+ /* write 2 pixels at once */
+ dbuf32 = (__le32 __force *)dbuf64;
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) {
+ u32 pix[2] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 16);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixel */
+ dbuf16 = (__le16 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to24(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write pixels in chunks of 4 */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 val24[4] = {
+ xfrm_pixel(le32_to_cpup(sbuf32)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 1)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 2)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 3)),
+ };
+ u32 out32[3] = {
+ /* write output bytes in reverse order for little endianness */
+ ((val24[0] & 0x000000ff)) |
+ ((val24[0] & 0x0000ff00)) |
+ ((val24[0] & 0x00ff0000)) |
+ ((val24[1] & 0x000000ff) << 24),
+ ((val24[1] & 0x0000ff00) >> 8) |
+ ((val24[1] & 0x00ff0000) >> 8) |
+ ((val24[2] & 0x000000ff) << 16) |
+ ((val24[2] & 0x0000ff00) << 16),
+ ((val24[2] & 0x00ff0000) >> 16) |
+ ((val24[3] & 0x000000ff) << 8) |
+ ((val24[3] & 0x0000ff00) << 8) |
+ ((val24[3] & 0x00ff0000) << 8),
+ };
+
+ *dbuf32++ = cpu_to_le32(out32[0]);
+ *dbuf32++ = cpu_to_le32(out32[1]);
+ *dbuf32++ = cpu_to_le32(out32[2]);
+ sbuf32 += ARRAY_SIZE(val24);
+ }
+
+ /* write trailing pixel */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32) {
+ u32 val24 = xfrm_pixel(le32_to_cpup(sbuf32++));
+ /* write output in reverse order for little endianness */
+ *dbuf8++ = (val24 & 0x000000ff);
+ *dbuf8++ = (val24 & 0x0000ff00) >> 8;
+ *dbuf8++ = (val24 & 0x00ff0000) >> 16;
+ }
+}
+
+static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ while (sbuf32 < send32)
+ *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Array of destination buffers
@@ -368,17 +516,7 @@ EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- dbuf8[x] = ((pix & 0x00e00000) >> 16) |
- ((pix & 0x0000e000) >> 11) |
- ((pix & 0x000000c0) >> 6);
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb332);
}
/**
@@ -417,38 +555,19 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
+}
+
+static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
}
/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(swab16(val16));
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab);
}
/**
@@ -495,19 +614,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb1555);
}
/**
@@ -547,20 +654,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb1555);
}
/**
@@ -600,20 +694,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgba5551);
}
/**
@@ -653,18 +734,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write blue-green-red to output in little endianness */
- *dbuf8++ = (pix & 0x000000FF) >> 0;
- *dbuf8++ = (pix & 0x0000FF00) >> 8;
- *dbuf8++ = (pix & 0x00FF0000) >> 16;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb888);
}
/**
@@ -704,18 +774,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write red-green-blue to output in little endianness */
- *dbuf8++ = (pix & 0x00ff0000) >> 16;
- *dbuf8++ = (pix & 0x0000ff00) >> 8;
- *dbuf8++ = (pix & 0x000000ff) >> 0;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr888);
}
/**
@@ -755,16 +814,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix |= GENMASK(31, 24); /* fill alpha bits */
- dbuf32[x] = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
}
/**
@@ -804,19 +854,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -835,19 +873,7 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -866,20 +892,7 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- pix = val32 | ((val32 >> 8) & 0x00300C03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
}
/**
@@ -920,21 +933,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000ff) << 2) |
- ((pix & 0x0000ff00) << 4) |
- ((pix & 0x00ff0000) << 6);
- pix = GENMASK(31, 30) | /* set alpha bits */
- val32 | ((val32 >> 8) & 0x00300c03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
}
/**
@@ -975,19 +974,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
-
- for (x = 0; x < pixels; x++) {
- u32 pix = le32_to_cpu(sbuf32[x]);
- u8 r = (pix & 0x00ff0000) >> 16;
- u8 g = (pix & 0x0000ff00) >> 8;
- u8 b = pix & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dbuf8++ = (3 * r + 6 * g + b) / 10;
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_r8_bt601);
}
/**
@@ -1031,36 +1018,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0);
- __le32 *dbuf32 = dbuf;
- __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16);
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u16 val16;
- u32 pix[2];
-
- for (x = 0; x < pixels2; x += 2, ++dbuf32) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- pix[1] = le32_to_cpu(sbuf32[x + 1]);
- val32 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4) |
- ((pix[1] & 0xf0000000) >> 0) |
- ((pix[1] & 0x00f00000) << 4) |
- ((pix[1] & 0x0000f000) << 8) |
- ((pix[1] & 0x000000f0) << 12);
- *dbuf32 = cpu_to_le32(val32);
- }
- for (; x < pixels; x++) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- val16 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_argb8888_to_argb4444);
}
/**
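
The drm_fb_xfrm_line_32to*() helpers above batch several converted pixels into a single little-endian store. A standalone sketch (plain userspace C, not kernel code) of the packing done by the 16-bit path on a 64-bit little-endian host -- pixel 0 ends up at the lowest destination address, which is what the "reverse order" comments refer to:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint16_t px[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
	uint64_t val64 = (uint64_t)px[0] |
			 ((uint64_t)px[1] << 16) |
			 ((uint64_t)px[2] << 32) |
			 ((uint64_t)px[3] << 48);
	uint8_t dst[8];

	/* the kernel uses cpu_to_le64(); this assumes a little-endian host */
	memcpy(dst, &val64, sizeof(val64));

	assert(dst[0] == 0x11 && dst[2] == 0x22 && dst[4] == 0x33 && dst[6] == 0x44);
	return 0;
}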
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
new file mode 100644
index 000000000000..9f857bfa368d
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+#ifndef DRM_FORMAT_INTERNAL_H
+#define DRM_FORMAT_INTERNAL_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ * Each pixel-format conversion helper takes a raw pixel in a
+ * specific input format and returns a raw pixel in a specific
+ * output format. All pixels are in little-endian byte order.
+ *
+ * Function names are
+ *
+ * drm_pixel_<input>_to_<output>_<algorithm>()
+ *
+ * where <input> and <output> refer to pixel formats. The
+ * <algorithm> is optional and hints to the method used for the
+ * conversion. Helpers with no algorithm given apply pixel-bit
+ * shifting.
+ *
+ * The argument type is u32. We expect this to be wide enough to
+ * hold all conversion input from 32-bit RGB to any output format.
+ * The Linux kernel should avoid format conversion for anything
+ * but XRGB8888 input data. Converting from other format can still
+ * be acceptable in some cases.
+ *
+ * The return type is u32. It is wide enough to hold all conversion
+ * output from XRGB8888. For output formats wider than 32 bit, a
+ * return type of u64 would be acceptable.
+ */
+
+/*
+ * Conversions from XRGB8888
+ */
+
+static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
+{
+ u32 r = (pix & 0x00ff0000) >> 16;
+ u32 g = (pix & 0x0000ff00) >> 8;
+ u32 b = pix & 0x000000ff;
+
+ /* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ return (3 * r + 6 * g + b) / 10;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
+{
+ return ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000fc00) >> 5) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
+{
+ return drm_pixel_xrgb8888_to_rgbx5551(pix) |
+ BIT(0); /* set alpha bit */
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ drm_pixel_xrgb8888_to_xrgb1555(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb888(u32 pix)
+{
+ return pix & GENMASK(23, 0);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgr888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ pix;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0xff000000)) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ drm_pixel_xrgb8888_to_xbgr8888(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xrgb2101010(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00ff0000) >> 14) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x000000ff) << 22);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xbgr2101010(pix);
+}
+
+/*
+ * Conversion from ARGB8888
+ */
+
+static inline u32 drm_pixel_argb8888_to_argb4444(u32 pix)
+{
+ return ((pix & 0xf0000000) >> 16) |
+ ((pix & 0x00f00000) >> 12) |
+ ((pix & 0x0000f000) >> 8) |
+ ((pix & 0x000000f0) >> 4);
+}
+
+#endif
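
As a quick sanity check of the helpers above, here is a standalone sketch (plain userspace C, not kernel code) that replicates drm_pixel_xrgb8888_to_rgb565() and shows the expected results for two reference pixels:

#include <stdint.h>
#include <stdio.h>

static uint32_t xrgb8888_to_rgb565(uint32_t pix)
{
	return ((pix & 0x00f80000) >> 8) |
	       ((pix & 0x0000fc00) >> 5) |
	       ((pix & 0x000000f8) >> 3);
}

int main(void)
{
	/* opaque white maps to 0xffff, pure red to 0xf800 */
	printf("%04x %04x\n",
	       xrgb8888_to_rgb565(0x00ffffff),
	       xrgb8888_to_rgb565(0x00ff0000));
	return 0;
}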
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c6240bab3fa5..1e659d2660f7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1216,7 +1216,7 @@ void drm_gem_unpin(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1233,9 +1233,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vmap);
+EXPORT_SYMBOL(drm_gem_vmap_locked);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
@@ -1248,7 +1248,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
iosys_map_clear(map);
}
-EXPORT_SYMBOL(drm_gem_vunmap);
+EXPORT_SYMBOL(drm_gem_vunmap_locked);
void drm_gem_lock(struct drm_gem_object *obj)
{
@@ -1262,25 +1262,25 @@ void drm_gem_unlock(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_unlock);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_vmap(obj, map);
+ ret = drm_gem_vmap_locked(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
-EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vmap);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
dma_resv_unlock(obj->resv);
}
-EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vunmap);
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
@@ -1543,10 +1543,10 @@ tail:
EXPORT_SYMBOL(drm_gem_lru_scan);
/**
- * drm_gem_evict - helper to evict backing pages for a GEM object
+ * drm_gem_evict_locked - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
-int drm_gem_evict(struct drm_gem_object *obj)
+int drm_gem_evict_locked(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
@@ -1558,4 +1558,4 @@ int drm_gem_evict(struct drm_gem_object *obj)
return 0;
}
-EXPORT_SYMBOL(drm_gem_evict);
+EXPORT_SYMBOL(drm_gem_evict_locked);
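
After this rename, drm_gem_vmap()/drm_gem_vunmap() take the reservation lock themselves, while the _locked variants expect the caller to already hold it. A hedged calling-convention sketch (foo_* names are hypothetical):

static int foo_map(struct drm_gem_object *obj, struct iosys_map *map)
{
	/* unlocked caller: the helper locks obj->resv internally */
	return drm_gem_vmap(obj, map);
}

static int foo_map_while_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	/* caller already holds obj->resv, so use the _locked variant */
	ret = drm_gem_vmap_locked(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}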
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 0fbeb686e561..6f72e7a0f427 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -362,7 +362,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
- ret = drm_gem_vmap_unlocked(obj, &map[i]);
+ ret = drm_gem_vmap(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@@ -384,7 +384,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
return ret;
}
@@ -411,7 +411,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d99dee67353a..aa43265f4f4f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} else {
dma_resv_lock(shmem->base.resv, NULL);
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -174,9 +174,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
}
if (shmem->pages)
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
dma_resv_unlock(shmem->base.resv);
}
@@ -186,21 +187,20 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (refcount_inc_not_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -216,38 +216,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ refcount_set(&shmem->pages_use_count, 1);
+
return 0;
}
/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
-
- if (--shmem->pages_use_count > 0)
- return;
-
+ if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
- if (shmem->map_wc)
- set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ }
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
@@ -257,7 +255,12 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
- ret = drm_gem_shmem_get_pages(shmem);
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ if (!ret)
+ refcount_set(&shmem->pages_pin_count, 1);
return ret;
}
@@ -267,7 +270,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
- drm_gem_shmem_put_pages(shmem);
+ if (refcount_dec_and_test(&shmem->pages_pin_count))
+ drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
@@ -288,6 +292,9 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ret;
@@ -296,7 +303,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -311,14 +318,17 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_dec_not_one(&shmem->pages_pin_count))
+ return;
+
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
}
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
@@ -327,47 +337,43 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->dma_buf, map);
- if (!ret) {
- if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->dma_buf, map);
- return -EIO;
- }
- }
} else {
pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->vmap_use_count++ > 0) {
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -379,28 +385,26 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
err_put_pages:
if (!drm_gem_is_imported(obj))
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
+ drm_gem_shmem_unpin_locked(shmem);
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
@@ -409,19 +413,15 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
} else {
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ shmem->vaddr = NULL;
- vunmap(shmem->vaddr);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
-
- shmem->vaddr = NULL;
}
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -449,7 +449,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
dma_resv_assert_held(shmem->base.resv);
@@ -460,9 +460,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
return (madv >= 0);
}
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
@@ -476,7 +476,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
shmem->sgt = NULL;
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
shmem->madv = -1;
@@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -575,8 +575,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !refcount_inc_not_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -589,7 +589,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma);
@@ -639,7 +639,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret)
@@ -666,11 +666,12 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
if (drm_gem_is_imported(&shmem->base))
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -707,7 +708,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -729,7 +730,7 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
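
The shmem changes above convert open-coded use counters to refcount_t, following a common pattern: a lock-protected slow path performs the setup and sets the count to 1, while later callers take the fast path via refcount_inc_not_zero(). A standalone, hedged sketch of that pattern (foo_* names are hypothetical; like the shmem code, it assumes the first-use path is serialized by a lock held by the caller):

static int foo_get(struct foo_object *obj)
{
	int ret;

	if (refcount_inc_not_zero(&obj->use_count))
		return 0;			/* already set up, took a reference */

	ret = foo_setup(obj);			/* hypothetical expensive setup */
	if (ret)
		return ret;

	refcount_set(&obj->use_count, 1);	/* first reference */

	return 0;
}

static void foo_put(struct foo_object *obj)
{
	if (refcount_dec_and_test(&obj->use_count))
		foo_teardown(obj);		/* hypothetical cleanup */
}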
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b2b6a8e49dda..e44f28fd81d3 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -179,8 +179,8 @@ int drm_gem_pin_locked(struct drm_gem_object *obj);
void drm_gem_unpin_locked(struct drm_gem_object *obj);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index dfa595556320..e5184a0c2465 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -36,6 +36,8 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
+#include <linux/media-bus-format.h>
+
#include <video/mipi_display.h>
/**
@@ -871,6 +873,41 @@ ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
EXPORT_SYMBOL(mipi_dsi_generic_read);
/**
+ * drm_mipi_dsi_get_input_bus_fmt() - Get the required MEDIA_BUS_FMT_* based
+ * input pixel format for a given DSI output
+ * pixel format
+ * @dsi_format: pixel format that a DSI host needs to output
+ *
+ * Various DSI hosts can use this function during their
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation to ascertain
+ * the MEDIA_BUS_FMT_* pixel format required as input.
+ *
+ * RETURNS:
+ * a 32-bit MEDIA_BUS_FMT_* value on success or 0 in case of failure.
+ */
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format)
+{
+ switch (dsi_format) {
+ case MIPI_DSI_FMT_RGB888:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+
+ case MIPI_DSI_FMT_RGB666:
+ return MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+
+ case MIPI_DSI_FMT_RGB565:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+
+ default:
+ /* Unsupported DSI Format */
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_mipi_dsi_get_input_bus_fmt);
+
+/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
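
A hedged sketch of how a DSI host bridge might use drm_mipi_dsi_get_input_bus_fmt() from its &drm_bridge_funcs.atomic_get_input_bus_fmts hook; foo_dsi, bridge_to_foo_dsi() and the ->format member are hypothetical, the hook signature and the helper are real:

static u32 *foo_dsi_get_input_bus_fmts(struct drm_bridge *bridge,
				       struct drm_bridge_state *bridge_state,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state,
				       u32 output_fmt,
				       unsigned int *num_input_fmts)
{
	struct foo_dsi *dsi = bridge_to_foo_dsi(bridge);
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	/* map the DSI output format to the required input bus format */
	input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(dsi->format);
	if (!input_fmts[0]) {
		kfree(input_fmts);
		return NULL;
	}

	*num_input_fmts = 1;
	return input_fmts;
}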
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c627e42a7ce7..99b348782ce3 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -74,8 +74,9 @@ EXPORT_SYMBOL(drm_panel_init);
* drm_panel_add - add a panel to the global registry
* @panel: panel to add
*
- * Add a panel to the global registry so that it can be looked up by display
- * drivers.
+ * Add a panel to the global registry so that it can be looked
+ * up by display drivers. The panel to be added must have been
+ * allocated by devm_drm_panel_alloc().
*/
void drm_panel_add(struct drm_panel *panel)
{
@@ -317,6 +318,93 @@ int drm_panel_get_modes(struct drm_panel *panel,
}
EXPORT_SYMBOL(drm_panel_get_modes);
+static void __drm_panel_free(struct kref *kref)
+{
+ struct drm_panel *panel = container_of(kref, struct drm_panel, refcount);
+
+ kfree(panel->container);
+}
+
+/**
+ * drm_panel_get - Acquire a panel reference
+ * @panel: DRM panel
+ *
+ * This function increments the panel's refcount.
+ * Returns:
+ * Pointer to @panel
+ */
+struct drm_panel *drm_panel_get(struct drm_panel *panel)
+{
+ if (!panel)
+ return panel;
+
+ kref_get(&panel->refcount);
+
+ return panel;
+}
+EXPORT_SYMBOL(drm_panel_get);
+
+/**
+ * drm_panel_put - Release a panel reference
+ * @panel: DRM panel
+ *
+ * This function decrements the panel's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_panel_put(struct drm_panel *panel)
+{
+ if (panel)
+ kref_put(&panel->refcount, __drm_panel_free);
+}
+EXPORT_SYMBOL(drm_panel_put);
+
+/**
+ * drm_panel_put_void - wrapper to drm_panel_put() taking a void pointer
+ *
+ * @data: pointer to @struct drm_panel, cast to a void pointer
+ *
+ * Wrapper of drm_panel_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_panel_put_void(void *data)
+{
+ struct drm_panel *panel = (struct drm_panel *)data;
+
+ drm_panel_put(panel);
+}
+
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type)
+{
+ void *container;
+ struct drm_panel *panel;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ panel = container + offset;
+ panel->container = container;
+ panel->funcs = funcs;
+ kref_init(&panel->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_panel_put_void, panel);
+ if (err)
+ return ERR_PTR(err);
+
+ drm_panel_init(panel, dev, funcs, connector_type);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_panel_alloc);
+
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
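
A hedged usage sketch for the new refcounted allocator. It assumes a devm_drm_panel_alloc(dev, type, member, funcs, connector_type) convenience macro wrapping __devm_drm_panel_alloc(), and that struct foo_panel embeds a struct drm_panel named "panel"; foo_* names are hypothetical:

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
	struct foo_panel *ctx;

	ctx = devm_drm_panel_alloc(&dsi->dev, struct foo_panel, panel,
				   &foo_panel_funcs,
				   DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* ... hardware setup ... */

	/* the panel is refcounted and released via the devm action */
	drm_panel_add(&ctx->panel);

	return 0;
}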
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index c554ad8f246b..7ac0fd5391fe 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -517,6 +517,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* ZOTAC Gaming Zone */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index f2a99681b998..11390cd46dcf 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -5,7 +5,7 @@
//! It is called from a panic handler, so it shouldn't allocate memory and
//! does all the work on the stack or on the provided buffers. For
//! simplification, it only supports low error correction, and applies the
-//! first mask (checkerboard). It will draw the smallest QRcode that can
+//! first mask (checkerboard). It will draw the smallest QR code that can
//! contain the string passed as parameter. To get the most compact
//! QR code, the start of the URL is encoded as binary, and the
//! compressed kmsg is encoded as numeric.
@@ -415,7 +415,7 @@ impl Iterator for SegmentIterator<'_> {
self.carry_len -= out_len;
let pow = u64::pow(10, self.carry_len as u32);
let out = (self.carry / pow) as u16;
- self.carry = self.carry % pow;
+ self.carry %= pow;
Some((out, NUM_CHARS_BITS[out_len]))
}
}
@@ -876,7 +876,7 @@ impl QrImage<'_> {
/// will be encoded as binary segment, otherwise it will be encoded
/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than `data_size`.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index bdb51c8f262e..d828502268b8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -707,7 +707,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- return drm_gem_vmap(obj, map);
+ return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -723,7 +723,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
@@ -804,7 +804,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
@@ -998,7 +997,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
- return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
+ return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 7ba16323e7c2..6b3541159c0f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -958,15 +958,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
* cleaned up when the DRM device goes away.
*
* See drm_kms_helper_poll_init() for more information.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
*/
-int drmm_kms_helper_poll_init(struct drm_device *dev)
+void drmm_kms_helper_poll_init(struct drm_device *dev)
{
+ int ret;
+
drm_kms_helper_poll_init(dev);
- return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ if (ret)
+ drm_warn(dev, "Connector status will not be updated, error %d\n", ret);
}
EXPORT_SYMBOL(drmm_kms_helper_poll_init);
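Since drmm_kms_helper_poll_init() now returns void and only warns when the cleanup action cannot be registered, callers can drop their error handling. A hedged sketch of a typical call site (driver name and surrounding code are hypothetical; assumes the usual DRM helper headers):

/* Hypothetical probe tail: the managed variant no longer returns an error,
 * so the old "ret = drmm_kms_helper_poll_init(drm); if (ret) ..." pattern
 * goes away. */
static int foo_kms_init(struct drm_device *drm)
{
	drm_mode_config_reset(drm);
	drmm_kms_helper_poll_init(drm);

	return drm_dev_register(drm, 0);
}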
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4f2ab8a7b50f..636cd83ca29e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -741,7 +741,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+ int fd, int handle, u64 point)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -755,14 +755,24 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, fence);
+ if (point) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain)
+ return -ENOMEM;
+
+ drm_syncobj_add_point(syncobj, chain, fence, point);
+ } else {
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+ int handle, u64 point, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -772,7 +782,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence);
if (ret)
goto err_put_fd;
@@ -869,6 +879,9 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -876,13 +889,18 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private, args->handle,
- &args->fd);
+ point, &args->fd);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_handle_to_fd(file_private, args->handle,
&args->fd);
@@ -893,6 +911,9 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -900,14 +921,20 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
- args->handle);
+ args->handle,
+ point);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
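The new TIMELINE flags let userspace export or import a sync_file at a specific timeline point rather than the syncobj's binary payload. A sketch of the export side from userspace (assumes the matching uapi additions elsewhere in this series, including the new point field in struct drm_syncobj_handle):

/* Userspace sketch: export timeline point 42 of a syncobj as a sync_file.
 * The TIMELINE flag and the 'point' member are the uapi counterparts of the
 * kernel change above. */
struct drm_syncobj_handle args = {
	.handle = syncobj_handle,
	.flags  = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
		  DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE,
	.point  = 42,
};

if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args) == 0)
	sync_file_fd = args.fd;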
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 03b076db9381..3bbfc1b56a65 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev;
struct drm_device *drm;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *base;
struct clk *pix_clk_in;
char pix_clk_name[32];
@@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
return -ENODEV;
fsl_dev->soc = id->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
return ret;
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 4d78b33eaa82..e6753282e70e 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -730,44 +730,3 @@ out:
return ret;
}
-
-int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn)
-{
- int ret;
- struct psb_mmu_pt *pt;
- uint32_t tmp;
- spinlock_t *lock = &pd->driver->lock;
-
- down_read(&pd->driver->sem);
- pt = psb_mmu_pt_map_lock(pd, virtual);
- if (!pt) {
- uint32_t *v;
-
- spin_lock(lock);
- v = kmap_atomic(pd->p);
- tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v);
- spin_unlock(lock);
-
- if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
- !(pd->invalid_pte & PSB_PTE_VALID)) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *pfn = pd->invalid_pte >> PAGE_SHIFT;
- goto out;
- }
- tmp = pt->v[psb_mmu_pt_index(virtual)];
- if (!(tmp & PSB_PTE_VALID)) {
- ret = -EINVAL;
- } else {
- ret = 0;
- *pfn = tmp >> PAGE_SHIFT;
- }
- psb_mmu_pt_unmap_unlock(pt);
-out:
- up_read(&pd->driver->sem);
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
index d4b5720ef08e..e6d39703718c 100644
--- a/drivers/gpu/drm/gma500/mmu.h
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index de8ccfe9890f..ea9b41af0867 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -658,10 +658,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
-/* Not used yet */
-const struct gma_clock_funcs mrst_clock_funcs = {
- .clock = mrst_lvds_clock,
- .limit = mrst_limit,
- .pll_is_valid = gma_pll_is_valid,
-};
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 9dc9dcd1b09f..979ea8ecf0d5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
void gma_i2c_destroy(struct gma_i2c_chan *chan);
int psb_intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter);
-extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 8be0ec340de5..45b10f30a2a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -12,37 +12,6 @@
#include "psb_intel_drv.h"
/**
- * psb_intel_ddc_probe
- * @adapter: Associated I2C adaptor
- */
-bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
-{
- u8 out_buf[] = { 0x0, 0x0 };
- u8 buf[2];
- int ret;
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- ret = i2c_transfer(adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
-}
-
-/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: Associated I2C adaptor
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index cb405771d6e2..5385a2126e45 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -309,21 +309,6 @@ out:
return ret;
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
-{
- struct gud_device *gdrm = to_gud_device(drm);
-
- if (!gdrm->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
-}
-
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
@@ -376,7 +361,6 @@ static const struct drm_driver gud_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gud_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = "gud",
@@ -434,6 +418,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
size_t max_buffer_size = 0;
struct gud_device *gdrm;
struct drm_device *drm;
+ struct device *dma_dev;
u8 *formats_dev;
u32 *formats;
int ret, i;
@@ -609,17 +594,19 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, gdrm);
- gdrm->dmadev = usb_intf_get_dma_device(intf);
- if (!gdrm->dmadev)
- dev_warn(dev, "buffer sharing not supported");
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ dev_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
ret = drm_dev_register(drm, 0);
- if (ret) {
- put_device(gdrm->dmadev);
+ if (ret)
return ret;
- }
drm_kms_helper_poll_init(drm);
@@ -638,8 +625,6 @@ static void gud_disconnect(struct usb_interface *interface)
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
- put_device(gdrm->dmadev);
- gdrm->dmadev = NULL;
}
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 0d148a6f27aa..d6fb25388722 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -16,7 +16,6 @@
struct gud_device {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
- struct device *dmadev;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index f6355c16cc0a..a3b78b0fd53e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -188,7 +188,7 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ if (ret) {
drm_err(dp->dev, "Get lane status failed\n");
return ret;
}
@@ -236,7 +236,7 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ if (ret) {
drm_err(dp->dev, "get lane status failed\n");
break;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 2eea9fb0e76b..e80debdc4176 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = {
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
- struct resource *res;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
@@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
return PTR_ERR(ctx->pclk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap dsi io region\n");
return PTR_ERR(ctx->base);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2eb49177ac42..45c4eb008ad5 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -844,7 +844,6 @@ static struct drm_plane_funcs ade_plane_funcs = {
static void *ade_hw_ctx_alloc(struct platform_device *pdev,
struct drm_crtc *crtc)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct ade_hw_ctx *ctx = NULL;
@@ -856,8 +855,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
return ERR_PTR(-ENOMEM);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
index 6b77c9b4bde8..c7ce7daaa87a 100644
--- a/drivers/gpu/drm/imagination/pvr_debugfs.c
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -28,9 +28,8 @@ pvr_debugfs_init(struct drm_minor *minor)
struct drm_device *drm_dev = minor->dev;
struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
struct dentry *root = minor->debugfs_root;
- size_t i;
- for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+ for (size_t i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
struct dentry *dir;
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index 5e51bc980751..5228e214491c 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -237,11 +237,10 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
u64 dma_pfn = dma_addr >>
ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
- u32 dma_addr_offset;
BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
- for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+ for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
WARN_ON_ONCE(dma_pfn >> 32);
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index d09c4c684116..012596402a33 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -50,9 +50,8 @@ pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == id)
return &layout_entries[entry];
}
@@ -65,9 +64,8 @@ pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == META_PRIVATE_DATA ||
layout_entries[entry].id == MIPS_PRIVATE_DATA ||
layout_entries[entry].id == RISCV_PRIVATE_DATA)
@@ -97,7 +95,6 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
const u8 *fw = firmware->data;
u32 fw_offset = firmware->size - SZ_4K;
u32 layout_table_size;
- u32 entry;
if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
return -EINVAL;
@@ -144,7 +141,7 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
return -EINVAL;
layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
- for (entry = 0; entry < header->layout_entry_num; entry++) {
+ for (u32 entry = 0; entry < header->layout_entry_num; entry++) {
u32 start_addr = layout_entries[entry].base_addr;
u32 end_addr = start_addr + layout_entries[entry].alloc_size;
@@ -233,13 +230,12 @@ pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u32 end_addr = addr + size;
- int entry = 0;
/* Ensure requested range is not zero, and size is not causing addr to overflow. */
if (end_addr <= addr)
return -EINVAL;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (int entry = 0; entry < num_layout_entries; entry++) {
u32 entry_start_addr = layout_entries[entry].base_addr;
u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 6d13864851fc..c7cfdd60116d 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -370,13 +370,12 @@ configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u64 seg_out_addr_top;
- u32 i;
seg_out_addr_top =
ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
ROGUE_FW_SEGMMU_META_BIFDM_ID);
- for (i = 0; i < num_layout_entries; i++) {
+ for (u32 i = 0; i < num_layout_entries; i++) {
/*
* FW code is using the bootloader segment which is already
* configured on boot. FW coremem code and data don't use the
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
index 0bed0257e2ab..ee0735b745a9 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -37,10 +37,9 @@ process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code
struct elf32_hdr *header = (struct elf32_hdr *)fw;
struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 entry;
int err;
- for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
+ for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) {
void *write_addr;
/* Only consider loadable entries in the ELF segment table */
@@ -97,7 +96,6 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
const struct pvr_fw_layout_entry *stack_entry;
struct rogue_mipsfw_boot_data *boot_data;
dma_addr_t dma_addr;
- u32 page_nr;
int err;
err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
@@ -132,7 +130,7 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
boot_data->reg_base = pvr_dev->regs_resource->start;
- for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+ for (u32 page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
/* Firmware expects 4k pages, but host page size might be different. */
u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT;
u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK;
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 5dbb636d7d4f..c15202ffb2e6 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -21,7 +21,6 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
struct pvr_fw_trace *fw_trace = priv;
- u32 thread_nr;
tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
tracebuf_ctrl->tracebuf_flags = 0;
@@ -31,7 +30,7 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
else
tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -48,10 +47,9 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 thread_nr;
int err;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
trace_buffer->buf =
@@ -88,7 +86,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
ARRAY_SIZE(fw_trace->buffers));
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -99,7 +97,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
return 0;
err_free_buf:
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
if (trace_buffer->buf)
@@ -112,9 +110,8 @@ err_free_buf:
void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
@@ -184,9 +181,7 @@ struct pvr_fw_trace_seq_data {
static u32 find_sfid(u32 id)
{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+ for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
if (stid_fmts[i].id == id)
return i;
}
@@ -285,12 +280,11 @@ static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
{
struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
- u32 i;
/* Reset trace index, then advance to *pos. */
fw_trace_get_first(trace_seq_data);
- for (i = 0; i < *pos; i++) {
+ for (u32 i = 0; i < *pos; i++) {
if (!fw_trace_get_next(trace_seq_data))
return NULL;
}
@@ -455,12 +449,11 @@ void
pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
"The filename buffer is only large enough for a single-digit thread count");
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
char filename[8];
snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index 6a8c81fe8c1e..f692a4187550 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -76,8 +76,6 @@ pvr_gem_object_flags_validate(u64 flags)
DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
};
- int i;
-
/*
* Check for bits set in undefined regions. Reserved regions refer to
* options that can only be set by the kernel. These are explicitly
@@ -91,7 +89,7 @@ pvr_gem_object_flags_validate(u64 flags)
* Check for all combinations of flags marked as invalid in the array
* above.
*/
- for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+ for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
u64 combo = invalid_combinations[i];
if ((flags & combo) == combo)
@@ -203,7 +201,7 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
dma_resv_lock(obj->resv, NULL);
- err = drm_gem_shmem_vmap(shmem_obj, &map);
+ err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
if (err)
goto err_unlock;
@@ -257,7 +255,7 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
}
- drm_gem_shmem_vunmap(shmem_obj, &map);
+ drm_gem_shmem_vunmap_locked(shmem_obj, &map);
dma_resv_unlock(obj->resv);
}
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
index 54f88d6c01e5..dc0c25fa1847 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.c
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -44,13 +44,12 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
{
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
int err;
- int i;
hwrt->pvr_dev = pvr_dev;
hwrt->max_rts = args->layers;
/* Get pointers to the free lists */
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
if (!hwrt->free_lists[i]) {
err = -EINVAL;
@@ -67,7 +66,7 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
return 0;
err_put_free_lists:
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -78,9 +77,7 @@ err_put_free_lists:
static void
hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -363,13 +360,12 @@ hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct rogue_fwif_rta_ctl *rta_ctl;
- int free_list_i;
int err;
pvr_fw_object_get_fw_addr(hwrt->common_fw_obj,
&hwrt_data->data.hwrt_data_common_fw_addr);
- for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+ for (int free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
&hwrt_data->data.freelists_fw_addr[free_list_i]);
}
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
index 975336a4facf..679aa618b7a9 100644
--- a/drivers/gpu/drm/imagination/pvr_stream.c
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -67,9 +67,8 @@ pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *st
u8 *dest, u32 dest_size, u32 *stream_offset_out)
{
int err = 0;
- u32 i;
- for (i = 0; i < nr_entries; i++) {
+ for (u32 i = 0; i < nr_entries; i++) {
if (stream_def[i].offset >= dest_size) {
err = -EINVAL;
break;
@@ -131,7 +130,6 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
u32 ext_header;
int err = 0;
- u32 i;
/* Copy "must have" mask from device. We clear this as we process the stream. */
memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
@@ -159,7 +157,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
musthave_masks[type] &= ~data;
- for (i = 0; i < header->ext_streams_num; i++) {
+ for (u32 i = 0; i < header->ext_streams_num; i++) {
const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
if (!(ext_header & ext_def->header_mask))
@@ -181,7 +179,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
* Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
* for this command was not present.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (musthave_masks[i])
return -EINVAL;
}
@@ -245,13 +243,11 @@ pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs
if (err)
return err;
} else {
- u32 i;
-
/*
* If we don't have an extension stream then there must not be any "must have"
* quirks for this command.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i])
return -EINVAL;
}
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 94af854547d6..5847a1c92bea 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -100,10 +100,9 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
- int page_nr;
vunmap(mips_data->pt);
- for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
+ for (int page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 9e66eb77b1eb..6d8325c76697 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -162,11 +162,12 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
}
static int imx_pd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
- return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 20b93fff0239..f851e9ffdb28 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -791,11 +791,12 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
static int ingenic_drm_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
+ struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(encoder);
- return drm_bridge_attach(bridge->encoder, ib->next_bridge,
+ return drm_bridge_attach(encoder, ib->next_bridge,
&ib->bridge, flags);
}
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 9bb997dbb4b9..5deec673c11e 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
mapping_set_unevictable(mapping);
}
@@ -195,7 +195,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_vmap(&bo->base, map);
+ return drm_gem_shmem_vmap_locked(&bo->base, map);
}
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 825135a26aa4..7934098e651b 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 395449a72f0a..b302d8ec3ad0 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1048,6 +1048,7 @@ void mcde_dsi_disable(struct drm_bridge *bridge)
}
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
@@ -1059,7 +1060,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
+ return drm_bridge_attach(encoder, d->bridge_out, bridge, flags);
}
static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index fed3307d3374..b2408abb9d49 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2287,6 +2287,7 @@ static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
}
static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
@@ -2310,7 +2311,7 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
goto err_aux_register;
if (mtk_dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
+ ret = drm_bridge_attach(encoder, mtk_dp->next_bridge,
&mtk_dp->bridge, flags);
if (ret) {
drm_warn(mtk_dp->drm_dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0fd13e6dd3f1..0f3b1ef8e497 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -765,6 +765,7 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
}
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
@@ -783,7 +784,7 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
"Failed to get bridge\n");
}
- return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+ return drm_bridge_attach(encoder, dpi->next_bridge,
&dpi->bridge, flags);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d1f407fb7eb1..4fe1f38a3c4b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -807,12 +807,13 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge,
&dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 06e4fac152b7..c9d0c335c519 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1269,6 +1269,7 @@ static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridg
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1281,7 +1282,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
}
if (hdmi->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ ret = drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index e79f7c3ce32e..c9678dc68fa1 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -83,12 +83,13 @@ meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
}
static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
- return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
+ return drm_bridge_attach(encoder, meson_encoder_cvbs->next_bridge,
&meson_encoder_cvbs->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index fe204437bd65..3db518e5f95d 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -33,11 +33,12 @@ struct meson_encoder_dsi {
container_of(x, struct meson_encoder_dsi, bridge)
static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_dsi->next_bridge,
&encoder_dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 6d1c9262a2cf..5f02695aafd1 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -49,11 +49,12 @@ struct meson_encoder_hdmi {
container_of(x, struct meson_encoder_hdmi, bridge)
static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_hdmi->next_bridge,
&encoder_hdmi->bridge, flags);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d8633a596f8d..69a26bb5fabd 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1100,20 +1100,6 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
return ret == 1;
}
-static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl,
- u8 *link_status)
-{
- int ret = 0, len;
-
- len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
- if (len != DP_LINK_STATUS_SIZE) {
- DRM_ERROR("DP link status read failed, err: %d\n", len);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
@@ -1140,7 +1126,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
for (tries = 0; tries < maximum_retries; tries++) {
drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (ret)
return ret;
@@ -1252,7 +1238,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (ret)
return ret;
@@ -1805,7 +1791,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl)
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
@@ -1863,7 +1849,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
@@ -1888,7 +1874,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1a1fbb2d7d4f..92a9077959b3 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -714,21 +714,21 @@ end:
static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link)
{
- int len;
+ int ret;
link->prev_sink_count = link->msm_dp_link.sink_count;
- len = drm_dp_read_sink_count(link->aux);
- if (len < 0) {
+ ret = drm_dp_read_sink_count(link->aux);
+ if (ret < 0) {
DRM_ERROR("DP parse sink count failed\n");
- return len;
+ return ret;
}
- link->msm_dp_link.sink_count = len;
+ link->msm_dp_link.sink_count = ret;
- len = drm_dp_dpcd_read_link_status(link->aux,
- link->link_status);
- if (len < DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(link->aux,
+ link->link_status);
+ if (ret < 0) {
DRM_ERROR("DP link status read failed\n");
- return len;
+ return ret;
}
return msm_dp_link_parse_request(link);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4fabb01345aa..72ada9f2f043 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -434,12 +434,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dsi_mgr_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge,
+ return drm_bridge_attach(encoder, msm_dsi->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 1456354c8af4..ab6c8bc4a30b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -515,6 +515,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_EDID;
bridge->hdmi_audio_max_i2s_playback_channels = 8;
bridge->hdmi_audio_dev = &hdmi->pdev->dev;
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 8ee00f59ca82..fcb2a7517377 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -134,7 +134,6 @@ static int lcdif_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct lcdif_drm_private *lcdif;
- struct resource *res;
int ret;
lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
@@ -144,8 +143,7 @@ static int lcdif_load(struct drm_device *drm)
lcdif->drm = drm;
drm->dev_private = lcdif;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lcdif->base = devm_ioremap_resource(drm->dev, res);
+ lcdif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lcdif->base))
return PTR_ERR(lcdif->base);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 59020862cf65..c183b1112bc4 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -8,6 +8,7 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -215,7 +216,6 @@ static int mxsfb_load(struct drm_device *drm,
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct mxsfb_drm_private *mxsfb;
- struct resource *res;
int ret;
mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
@@ -226,8 +226,7 @@ static int mxsfb_load(struct drm_device *drm,
drm->dev_private = mxsfb;
mxsfb->devdata = devdata;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxsfb->base = devm_ioremap_resource(drm->dev, res);
+ mxsfb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxsfb->base))
return PTR_ERR(mxsfb->base);
@@ -361,6 +360,15 @@ static int mxsfb_probe(struct platform_device *pdev)
if (ret)
goto err_free;
+ /*
+ * Remove early framebuffers (i.e. simplefb). The framebuffer can be
+ * located anywhere in RAM.
+ */
+ ret = aperture_remove_all_conflicting_devices(mxsfb_driver.name);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't kick out existing framebuffers\n");
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_unload;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 504cb3f2054b..725331638a15 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -775,10 +775,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
union hdmi_infoframe infoframe = { 0 };
const u8 rekey = 56; /* binary driver, and tegra, constant */
u32 max_ac_packet;
- struct {
- struct nvif_outp_infoframe_v0 infoframe;
- u8 data[17];
- } args = { 0 };
+ DEFINE_RAW_FLEX(struct nvif_outp_infoframe_v0, args, data, 17);
+ const u8 data_len = __struct_size(args) - sizeof(*args);
int ret, size;
max_ac_packet = mode->htotal - mode->hdisplay;
@@ -815,29 +813,29 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
return;
/* AVI InfoFrame. */
- args.infoframe.version = 0;
- args.infoframe.head = nv_crtc->index;
+ args->version = 0;
+ args->head = nv_crtc->index;
if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
HDMI_QUANTIZATION_RANGE_FULL);
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
} else {
size = 0;
}
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, args, size);
/* Vendor InfoFrame. */
- memset(&args.data, 0, sizeof(args.data));
+ memset(args->data, 0, data_len);
if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
&nv_connector->base, mode))
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
else
size = 0;
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, args, size);
nv_encoder->hdmi.enabled = true;
}
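The open-coded struct-plus-trailing-array wrappers above are replaced with DEFINE_RAW_FLEX() (declared in <linux/overflow.h>), which reserves stack storage for a struct plus N elements of its flexible-array member and yields a pointer to it; __struct_size() then reports the full reserved size. A minimal sketch with a hypothetical struct:

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical struct for illustration; 'data' is a flexible array member. */
struct foo_args {
	u8 version;
	u8 data[];
};

static void foo_example(void)
{
	/* Stack storage for struct foo_args plus 17 bytes of 'data';
	 * 'args' is a struct foo_args * pointing at that storage. */
	DEFINE_RAW_FLEX(struct foo_args, args, data, 17);

	args->version = 0;
	memset(args->data, 0, __struct_size(args) - sizeof(*args));
}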
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 746e126c3ecf..1c12854a8550 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -31,6 +31,29 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
struct nvkm_gsp_event;
typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
+/**
+ * DOC: GSP message handling policy
+ *
+ * When sending a GSP RPC command, the resulting GSP RPC message (the reply
+ * to that command) can be handled in several ways, depending on what the
+ * caller needs and on the nature of the command.
+ *
+ * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
+ * caller after the GSP RPC command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP
+ * RPC message after the GSP RPC command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the specific reply and
+ * discard it before returning to the caller.
+ */
+enum nvkm_gsp_rpc_reply_policy {
+ NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+ NVKM_GSP_RPC_REPLY_RECV,
+ NVKM_GSP_RPC_REPLY_POLL,
+};
+
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
@@ -188,7 +211,8 @@ struct nvkm_gsp {
const struct nvkm_gsp_rm {
void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
- void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
+ void *(*rpc_push)(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc);
void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
@@ -255,9 +279,10 @@ nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
}
static inline void *
-nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc)
{
- return gsp->rm->rpc_push(gsp, argv, wait, repc);
+ return gsp->rm->rpc_push(gsp, argv, policy, repc);
}
static inline void *
@@ -268,13 +293,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
if (IS_ERR_OR_NULL(argv))
return argv;
- return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+ return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc);
}
static inline int
-nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy)
{
- void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+ void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0);
if (IS_ERR(repv))
return PTR_ERR(repv);
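With the boolean wait parameter replaced by an explicit reply policy, callers now spell out what should happen to the GSP reply. A minimal caller-side sketch using the inline helpers declared above (the RPC buffer is assumed to come from nvkm_gsp_rpc_get(), as elsewhere in this file):

/* Sketch: pick the reply policy that matches the caller's intent.
 *   NVKM_GSP_RPC_REPLY_NOWAIT - queue the command and return immediately
 *   NVKM_GSP_RPC_REPLY_RECV   - block until the full reply has been received
 *   NVKM_GSP_RPC_REPLY_POLL   - wait for the reply, then discard it
 */
static int example_gsp_cmd(struct nvkm_gsp *gsp, void *rpc)
{
	/* The command must have completed, but its payload is irrelevant: */
	return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
}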
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e154d08857c5..c69139701056 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1079,6 +1079,10 @@ nouveau_pmops_freeze(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_suspend(drm, false);
}
@@ -1087,6 +1091,10 @@ nouveau_pmops_thaw(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_resume(drm, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7cc84472cece..7622587f149e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -184,10 +184,10 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_cli *cli = chan->cli;
struct nouveau_drm *drm = cli->drm;
struct nouveau_fence_priv *priv = (void*)drm->fence;
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
int ret;
INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
@@ -207,12 +207,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
if (!priv->uevent)
return;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
nouveau_fence_wait_uevent_handler, false,
- &args.base, sizeof(args), &fctx->event);
+ args, __struct_size(args), &fctx->event);
WARN_ON(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index e12e2596ed84..6fa387da0637 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -720,10 +720,7 @@ nouveau_svm_fault(struct work_struct *work)
struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
- struct {
- struct nouveau_pfnmap_args i;
- u64 phys[1];
- } args;
+ DEFINE_RAW_FLEX(struct nouveau_pfnmap_args, args, p.phys, 1);
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
@@ -772,11 +769,11 @@ nouveau_svm_fault(struct work_struct *work)
mutex_unlock(&svm->mutex);
/* Process list of faults. */
- args.i.i.version = 0;
- args.i.i.type = NVIF_IOCTL_V0_MTHD;
- args.i.m.version = 0;
- args.i.m.method = NVIF_VMM_V0_PFNMAP;
- args.i.p.version = 0;
+ args->i.version = 0;
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.version = 0;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
struct svm_notifier notifier;
@@ -802,9 +799,9 @@ nouveau_svm_fault(struct work_struct *work)
* fault window, determining required pages and access
* permissions based on pending faults.
*/
- args.i.p.addr = start;
- args.i.p.page = PAGE_SHIFT;
- args.i.p.size = PAGE_SIZE;
+ args->p.addr = start;
+ args->p.page = PAGE_SHIFT;
+ args->p.size = PAGE_SIZE;
/*
* Determine required permissions based on GPU fault
* access flags.
@@ -832,16 +829,16 @@ nouveau_svm_fault(struct work_struct *work)
notifier.svmm = svmm;
if (atomic)
- ret = nouveau_atomic_range_fault(svmm, svm->drm,
- &args.i, sizeof(args),
+ ret = nouveau_atomic_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
&notifier);
else
- ret = nouveau_range_fault(svmm, svm->drm, &args.i,
- sizeof(args), hmm_flags,
- &notifier);
+ ret = nouveau_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
+ hmm_flags, &notifier);
mmput(mm);
- limit = args.i.p.addr + args.i.p.size;
+ limit = args->p.addr + args->p.size;
for (fn = fi; ++fn < buffer->fault_nr; ) {
/* It's okay to skip over duplicate addresses from the
* same SVMM as faults are ordered by access type such
@@ -855,14 +852,14 @@ nouveau_svm_fault(struct work_struct *work)
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
(buffer->fault[fi]->access == FAULT_ACCESS_READ &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/conn.c b/drivers/gpu/drm/nouveau/nvif/conn.c
index 9ee18cb99264..5a1a83c62a2a 100644
--- a/drivers/gpu/drm/nouveau/nvif/conn.c
+++ b/drivers/gpu/drm/nouveau/nvif/conn.c
@@ -30,17 +30,17 @@ int
nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func func, u8 types,
struct nvif_event *event)
{
- struct {
- struct nvif_event_v0 base;
- struct nvif_conn_event_v0 conn;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_conn_event_v0));
+ struct nvif_conn_event_v0 *args_conn =
+ (struct nvif_conn_event_v0 *)args->data;
int ret;
- args.conn.version = 0;
- args.conn.types = types;
+ args_conn->version = 0;
+ args_conn->types = types;
ret = nvif_event_ctor_(&conn->object, name ?: "nvifConnHpd", nvif_conn_id(conn),
- func, true, &args.base, sizeof(args), false, event);
+ func, true, args, __struct_size(args), false, event);
NVIF_DEBUG(&conn->object, "[NEW EVENT:HPD types:%02x]", types);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 6daeb7f0b09b..1ea20b2bdd29 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -195,20 +195,17 @@ nvif_outp_dp_aux_pwr(struct nvif_outp *outp, bool enable)
int
nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size)
{
- struct {
- struct nvif_outp_hda_eld_v0 mthd;
- u8 data[128];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_outp_hda_eld_v0, mthd, data, 128);
int ret;
- if (WARN_ON(size > ARRAY_SIZE(args.data)))
+ if (WARN_ON(size > (__struct_size(mthd) - sizeof(*mthd))))
return -EINVAL;
- args.mthd.version = 0;
- args.mthd.head = head;
+ mthd->version = 0;
+ mthd->head = head;
- memcpy(args.data, data, size);
- ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, &args, sizeof(args.mthd) + size);
+ memcpy(mthd->data, data, size);
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, mthd, sizeof(*mthd) + size);
NVIF_ERRON(ret, &outp->object, "[HDA_ELD head:%d size:%d]", head, size);
return ret;
}
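
The three nouveau hunks above drop the on-stack wrapper structs that tacked extra storage onto the end of a header and use DEFINE_RAW_FLEX() instead, so the payload lives in the header's flexible-array member and __struct_size() reports the true object size. A minimal sketch of the pattern, with a hypothetical demo_args/demo_send standing in for the nvif types and transport:

#include <linux/err.h>
#include <linux/overflow.h>	/* DEFINE_RAW_FLEX(), __struct_size() */
#include <linux/types.h>

struct demo_args {
	u32 version;
	u32 count;
	u64 phys[];		/* flexible-array member */
};

/* Hypothetical transport; stands in for the nvif method calls. */
static int demo_send(const void *argv, u32 argc)
{
	return argv && argc ? 0 : -EINVAL;
}

static int demo_fill_and_send(void)
{
	/* Stack allocation with room for exactly one phys[] element. */
	DEFINE_RAW_FLEX(struct demo_args, args, phys, 1);

	args->version = 0;
	args->count = 1;
	args->phys[0] = 0;

	/* __struct_size() == sizeof(struct demo_args) + 1 * sizeof(u64) */
	return demo_send(args, __struct_size(args));
}
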
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
index 3a30bea30e36..90186f98065c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
@@ -56,7 +56,7 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index db2602e88006..969f6b921fdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -585,13 +585,37 @@ r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
}
static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
+r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
+ enum nvkm_gsp_rpc_reply_policy policy,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *reply;
+ void *repv = NULL;
+
+ switch (policy) {
+ case NVKM_GSP_RPC_REPLY_NOWAIT:
+ break;
+ case NVKM_GSP_RPC_REPLY_RECV:
+ reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
+ if (!IS_ERR_OR_NULL(reply))
+ repv = reply->data;
+ else
+ repv = reply;
+ break;
+ case NVKM_GSP_RPC_REPLY_POLL:
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ break;
+ }
+
+ return repv;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
{
struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct nvfw_gsp_rpc *msg;
u32 fn = rpc->function;
- void *repv = NULL;
int ret;
if (gsp->subdev.debug >= NV_DBG_TRACE) {
@@ -605,15 +629,7 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
if (ret)
return ERR_PTR(ret);
- if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
- if (!IS_ERR_OR_NULL(msg))
- repv = msg->data;
- else
- repv = msg;
- }
-
- return repv;
+ return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
}
static void
@@ -797,7 +813,7 @@ r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
rpc->params.hRoot = client->object.handle;
rpc->params.hObjectParent = 0;
rpc->params.hObjectOld = object->handle;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
static void
@@ -815,7 +831,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
struct nvkm_gsp *gsp = object->client->gsp;
void *ret = NULL;
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc));
if (IS_ERR_OR_NULL(rpc))
return rpc;
@@ -876,7 +892,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 rep
struct nvkm_gsp *gsp = object->client->gsp;
int ret = 0;
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc);
if (IS_ERR_OR_NULL(rpc)) {
*params = NULL;
return PTR_ERR(rpc);
@@ -948,8 +964,8 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
}
static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
{
struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
@@ -967,7 +983,7 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
rpc->length = sizeof(*rpc) + max_payload_size;
msg->checksum = rpc->length;
- repv = r535_gsp_rpc_send(gsp, payload, false, 0);
+ repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
if (IS_ERR(repv))
goto done;
@@ -988,7 +1004,7 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
memcpy(next, payload, size);
- repv = r535_gsp_rpc_send(gsp, next, false, 0);
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
if (IS_ERR(repv))
goto done;
@@ -997,20 +1013,10 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
}
/* Wait for reply. */
- rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
- sizeof(*rpc));
- if (!IS_ERR_OR_NULL(rpc)) {
- if (wait) {
- repv = rpc->data;
- } else {
- nvkm_gsp_rpc_done(gsp, rpc);
- repv = NULL;
- }
- } else {
- repv = wait ? rpc : NULL;
- }
+ repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
+ sizeof(*rpc));
} else {
- repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
+ repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
}
done:
@@ -1327,7 +1333,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
}
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
enum registry_type {
@@ -1684,7 +1690,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
build_registry(gsp, rpc);
- return nvkm_gsp_rpc_wr(gsp, rpc, false);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
fail:
clean_registry(gsp);
@@ -1893,7 +1899,7 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
info->pciConfigMirrorSize = 0x001000;
r535_gsp_acpi_info(gsp, &info->acpiMethodData);
- return nvkm_gsp_rpc_wr(gsp, info, false);
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
index 5f3c9c02a4c0..35ba1798ee6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
@@ -105,7 +105,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target
rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
}
- ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+ ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
if (ret)
return ret;
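
The bool "wait" argument of the GSP RPC helpers is replaced above by a three-way reply policy, with the dispatch factored into r535_gsp_rpc_handle_reply(). A reconstruction of the enum as these hunks use it (the actual declaration sits next to the nvkm_gsp_rpc_* prototypes and is not shown in this diff); the comments paraphrase the behaviour visible in r535_gsp_rpc_handle_reply():

enum nvkm_gsp_rpc_reply_policy {
	/* Do not wait for a reply; the old "wait = false" call sites. */
	NVKM_GSP_RPC_REPLY_NOWAIT,
	/* Wait and hand the reply payload back to the caller; the old "wait = true". */
	NVKM_GSP_RPC_REPLY_RECV,
	/* Wait until the RPC has been processed, but do not consume the payload. */
	NVKM_GSP_RPC_REPLY_POLL,
};
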
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b17e77f700dd..6eff97a09160 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -420,6 +420,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
*/
static int dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
@@ -429,7 +430,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge,
dpi_init_pll(dpi);
- return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ return drm_bridge_attach(encoder, dpi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 9b9cc593790c..91ee63bfe0bc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4617,6 +4617,7 @@ static const struct component_ops dsi_component_ops = {
*/
static int dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
@@ -4624,7 +4625,7 @@ static int dsi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ return drm_bridge_attach(encoder, dsi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index e1ac447221ee..a3b22952fdc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -314,6 +314,7 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
*/
static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -321,7 +322,7 @@ static int hdmi4_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index fa9904e4c218..0c98444d39a9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -312,6 +312,7 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
*/
static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -319,7 +320,7 @@ static int hdmi5_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index f9ae358e8e52..e78826e4b560 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -128,6 +128,7 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
*/
static int sdi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
@@ -135,7 +136,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ return drm_bridge_attach(encoder, sdi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index aaeef603682c..50349518eda1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -538,6 +538,7 @@ static int venc_get_clocks(struct venc_device *venc)
*/
static int venc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
@@ -545,7 +546,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ return drm_bridge_attach(encoder, venc->output.next_bridge,
bridge, flags);
}
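
All of the omapdrm attach callbacks above gain the new struct drm_encoder *encoder parameter and forward it to drm_bridge_attach() instead of dereferencing bridge->encoder. The resulting callback shape, sketched with a hypothetical demo_output driver struct:

#include <linux/errno.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

struct demo_output {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static int demo_bridge_attach(struct drm_bridge *bridge,
			      struct drm_encoder *encoder,
			      enum drm_bridge_attach_flags flags)
{
	struct demo_output *out = container_of(bridge, struct demo_output, bridge);

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	/* Chain the next bridge to the encoder the core passed in,
	 * rather than reading bridge->encoder.
	 */
	return drm_bridge_attach(encoder, out->next_bridge, bridge, flags);
}

static const struct drm_bridge_funcs demo_bridge_funcs = {
	.attach = demo_bridge_attach,
	/* ... */
};
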
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 4692c36fe217..87fb0fd29658 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -279,9 +279,10 @@ static int y030xx067a_probe(struct spi_device *spi)
struct y030xx067a *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct y030xx067a, panel,
+ &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -306,9 +307,6 @@ static int y030xx067a_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 503ecea72c5e..ea5119018df4 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -306,9 +306,11 @@ static int versatile_panel_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
- vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL);
- if (!vpanel)
- return -ENOMEM;
+ vpanel = devm_drm_panel_alloc(dev, struct versatile_panel, panel,
+ &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(vpanel))
+ return PTR_ERR(vpanel);
ret = regmap_read(map, SYS_CLCD, &val);
if (ret) {
@@ -348,9 +350,6 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&vpanel->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index b05a663c134c..db006576d704 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -224,9 +224,11 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
struct tm5p5_nt35596 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct tm5p5_nt35596, panel,
+ &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd";
ctx->supplies[1].supply = "vddio";
@@ -253,9 +255,6 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight)) {
ret = PTR_ERR(ctx->panel.backlight);
diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
index 77604d6a4e72..6e52bf6830e1 100644
--- a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
+++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
@@ -200,9 +200,10 @@ static int a030jtn01_probe(struct spi_device *spi)
spi->mode |= SPI_MODE_3 | SPI_3WIRE;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct a030jtn01, panel,
+ &a030jtn01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -223,9 +224,6 @@ static int a030jtn01_probe(struct spi_device *spi)
if (IS_ERR(priv->reset_gpio))
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO");
- drm_panel_init(&priv->panel, dev, &a030jtn01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index 7e66db4a88bb..5eb0727438cd 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -350,9 +350,11 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
struct boe_bf060y8m_aj0 *boe;
int ret;
- boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(dev, struct boe_bf060y8m_aj0, panel,
+ &boe_bf060y8m_aj0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
ret = boe_bf060y8m_aj0_init_vregs(boe, dev);
if (ret)
@@ -374,9 +376,6 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
boe->panel.prepare_prev_first = true;
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 0b87f1e6ecae..f33d4f855929 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -349,9 +349,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct boe_th101mb31ig002, panel,
+ &boe_th101mb31ig002_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -383,9 +385,6 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, ret,
"Failed to get orientation\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
index 50e4a5341bc6..20b6e11a7d84 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
@@ -166,9 +166,11 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
struct boe_tv101wum_ll2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct boe_tv101wum_ll2, panel,
+ &boe_tv101wum_ll2_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(boe_tv101wum_ll2_supplies),
@@ -190,8 +192,6 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_HSE;
- drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index 6b3f4d664d2a..ae6e9ffc46cb 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -511,9 +511,10 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dev_dbg(dev, "probe\n");
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ ddata = devm_drm_panel_alloc(dev, struct panel_drv_data, panel,
+ &dsicm_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ddata))
+ return PTR_ERR(ddata);
mipi_dsi_set_drvdata(dsi, ddata);
ddata->dsi = dsi;
@@ -530,9 +531,6 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dsicm_hw_reset(ddata);
- drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (ddata->use_dsi_backlight) {
struct backlight_properties props = { 0 };
props.max_brightness = 255;
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index 0bfed0ec0bbc..fb9f9f42be4f 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -163,9 +163,11 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
struct ebbg_ft8719 *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ebbg_ft8719, panel,
+ &ebbg_ft8719_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
@@ -196,9 +198,6 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 52028c8f8988..e8fe0014143f 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -839,9 +839,10 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
struct device_node *ddc;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_edp, base,
+ &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->prepared_time = 0;
panel->desc = desc;
@@ -886,8 +887,6 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
dev_set_drvdata(dev, panel);
- drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
goto err_finished_ddc_init;
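
Each panel probe above swaps devm_kzalloc() plus a later drm_panel_init() for a single devm_drm_panel_alloc(), which allocates the driver structure with its embedded drm_panel already wired to the funcs and connector type. A sketch of the resulting probe skeleton, using a hypothetical demo_panel driver:

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

struct demo_panel {
	struct drm_panel panel;
	/* regulators, GPIOs, ... */
};

static const struct drm_panel_funcs demo_panel_funcs = {
	/* .prepare, .enable, .get_modes, ... */
};

static int demo_probe(struct device *dev)
{
	struct demo_panel *priv;

	/* One call: devm allocation of struct demo_panel plus
	 * initialisation of the embedded "panel" member.
	 */
	priv = devm_drm_panel_alloc(dev, struct demo_panel, panel,
				    &demo_panel_funcs,
				    DRM_MODE_CONNECTOR_DPI);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	/* ... resource lookup, drm_panel_of_backlight(), drm_panel_add() ... */
	return 0;
}
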
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index 92b03a2f65a3..ff994bf0e3cc 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -80,7 +80,7 @@ struct hx8394_panel_desc {
unsigned int lanes;
unsigned long mode_flags;
enum mipi_dsi_pixel_format format;
- int (*init_sequence)(struct hx8394 *ctx);
+ void (*init_sequence)(struct mipi_dsi_multi_context *dsi_ctx);
};
static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
@@ -88,98 +88,94 @@ static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
return container_of(panel, struct hx8394, panel);
}
-static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
+static void hsd060bhw4_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x78, 0x0c, 0x07);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x78, 0x0c, 0x07);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
- 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
- 0x7c);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
+ 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
+ 0x7c);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
- 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
- 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
- 0x00, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
+ 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
+ 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
+ 0x00, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
- 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
- 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
- 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
- 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
- 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
- 0x4a, 0x4c, 0x4b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
+ 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
+ 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
+ 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
+ 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
+ 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
+ 0x4a, 0x4c, 0x4b, 0x7f);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x7d, 0x7d);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7d, 0x7d);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0xed);
}
static const struct drm_display_mode hsd060bhw4_mode = {
@@ -205,114 +201,110 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
-static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+static void powkiddy_x55_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
- 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
- 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
- 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
- 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
- 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
- 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
- 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
- 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
- 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
- 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
- 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
- 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
- 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode powkiddy_x55_mode = {
@@ -339,131 +331,127 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
-static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+static void mchp_ac40t08a_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* DCS commands do not seem to be sent correctly without this delay */
- msleep(20);
+ mipi_dsi_msleep(dsi_ctx, 20);
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
- 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
- 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
- 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
- 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
- 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
- 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
- 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
- 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
- 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
- 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
- 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
- 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
- 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
- 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
- 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
- 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet (C0H) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
/* Set CABC control (C9h)*/
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
- 0x76, 0x00, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet (D4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
/* 5.19.11 Set register bank (D8h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet (C6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode mchp_ac40t08a_mode = {
@@ -493,35 +481,31 @@ static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
int ret;
- ret = ctx->desc->init_sequence(ctx);
- if (ret) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->init_sequence(&dsi_ctx);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
/* Panel is operational 120 msec after reset */
msleep(120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
goto sleep_in;
- }
return 0;
sleep_in:
+ ret = dsi_ctx.accum_err;
+ dsi_ctx.accum_err = 0;
+
/* This will probably fail, but let's try orderly power off anyway. */
- if (!mipi_dsi_dcs_enter_sleep_mode(dsi))
- msleep(50);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
return ret;
}
@@ -530,17 +514,12 @@ static int hx8394_disable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- msleep(50); /* about 3 frames */
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50); /* about 3 frames */
- return 0;
+ return dsi_ctx.accum_err;
}
static int hx8394_unprepare(struct drm_panel *panel)
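
The hx8394 conversion above moves every DCS write onto the mipi_dsi_*_multi() helpers: each call takes a struct mipi_dsi_multi_context, becomes a no-op once an earlier call failed, and latches the first error in accum_err, so the caller checks a single return value at the end. A condensed sketch of the pattern (register values are illustrative, not a real panel sequence):

#include <linux/delay.h>
#include <drm/drm_mipi_dsi.h>

static int demo_panel_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	/* Each helper skips its work if dsi_ctx.accum_err is already set. */
	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0xff, 0x83, 0x94);
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	/* The first failure (if any) is reported here, once. */
	return dsi_ctx.accum_err;
}
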
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 04f1d2676c78..116d67bfa114 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,10 +23,12 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) \
- do { \
- mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \
- mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \
+#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
+ do { \
+ dsi_ctx.dsi = dsi0; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
+ dsi_ctx.dsi = dsi1; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
} while (0)
struct panel_info {
@@ -67,868 +69,829 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}

static int elish_csot_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}

static int j606f_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi = pinfo->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2);
- mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 18);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34);
- mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c);
- mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xb6,
- 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
- 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x93, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f);
- mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55);
- mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3);
- mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e);
- mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- usleep_range(10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(100);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x63);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xa2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x9a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x42);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x9a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x43);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 18);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x82, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6,
+ 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x05, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xda, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x90);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_GET_SCANLINE, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x65);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0xc3);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc9, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x27);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x08);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x5d);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x5d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x8d);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x75);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x02);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x05, 0x01);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode elish_boe_modes[] = {
@@ -1063,18 +1026,18 @@ static int nt36523_prepare(struct drm_panel *panel)
static int nt36523_disable(struct drm_panel *panel)
{
struct panel_info *pinfo = to_panel_info(panel);
- int i, ret;
+ int i;
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
}
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
msleep(70);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index f23d8832a1ad..93f11e2e9398 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -34,8 +34,8 @@ struct s6d7aa0 {
struct s6d7aa0_panel_desc {
unsigned int panel_type;
- int (*init_func)(struct s6d7aa0 *ctx);
- int (*off_func)(struct s6d7aa0 *ctx);
+ void (*init_func)(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx);
+ void (*off_func)(struct mipi_dsi_multi_context *dsi_ctx);
const struct drm_display_mode *drm_mode;
unsigned long mode_flags;
u32 bus_flags;
@@ -62,93 +62,61 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx)
msleep(50);
}
-static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
+static void s6d7aa0_lock(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx, bool lock)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
if (lock) {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0x5a, 0x5a);
} else {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0xa5, 0xa5);
}
-
- return 0;
}
static int s6d7aa0_on(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->init_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
+ ctx->desc->init_func(ctx, &dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int s6d7aa0_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_off(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->off_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Panel-specific off function failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->off_func(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(64);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 64);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int s6d7aa0_prepare(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
s6d7aa0_reset(ctx);
ret = s6d7aa0_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -159,12 +127,8 @@ static int s6d7aa0_prepare(struct drm_panel *panel)
static int s6d7aa0_disable(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = s6d7aa0_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ s6d7aa0_off(ctx);
return 0;
}
@@ -185,13 +149,11 @@ static int s6d7aa0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int s6d7aa0_bl_get_brightness(struct backlight_device *bl)
@@ -228,65 +190,39 @@ s6d7aa0_create_backlight(struct mipi_dsi_device *dsi)
/* Initialization code and structures for LSL080AL02 panel */
-static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- usleep_range(20000, 25000);
+ s6d7aa0_lock(ctx, dsi_ctx, false);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10);
- usleep_range(1000, 1500);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_OTP_RELOAD, 0x00, 0x10);
+ mipi_dsi_usleep_range(dsi_ctx, 1000, 1500);
/* SEQ_B6_PARAM_8_R01 */
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0x10);
/* BL_CTL_ON */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28);
-
- usleep_range(5000, 6000);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x28);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
+ mipi_dsi_usleep_range(dsi_ctx, 5000, 6000);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
- msleep(120);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_msleep(dsi_ctx, 120);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, true);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
/* BL_CTL_OFF */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x20);
}
static const struct drm_display_mode s6d7aa0_lsl080al02_mode = {
@@ -317,79 +253,51 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
/* Initialization code and structures for LSL080AL03 panel */
-static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- usleep_range(20000, 25000);
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, false);
if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x78);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0xc7, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x78);
} else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x68);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x68);
}
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08);
-
- usleep_range(10000, 11000);
-
- mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xcd,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
- mipi_dsi_dcs_write_seq(dsi, 0xce,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x51);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xf2, 0x02, 0x08, 0x08);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
-
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_usleep_range(dsi_ctx, 10000, 11000);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x80, 0x80, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcd,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x03);
- return 0;
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
+ s6d7aa0_lock(ctx, dsi_ctx, true);
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x22, 0x00);
}
static const struct drm_display_mode s6d7aa0_lsl080al03_mode = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 729cbb0d8403..36abfa2e65e9 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -36,60 +36,49 @@ static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
/* Novatek two-lane operation */
- ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xae, 0x03);
/* Set both MCU and RGB I/F to 24bpp */
- ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
- (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx,
+ MIPI_DCS_PIXEL_FMT_24BIT |
+ (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 232b03c1a259..df718c4a86cb 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -579,9 +579,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
u32 bus_flags;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, desc->connector_type);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -694,8 +695,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
-
err = drm_panel_of_backlight(&panel->base);
if (err) {
dev_err_probe(dev, err, "Could not find backlight\n");
@@ -3796,6 +3795,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph128800t004_zza01_mode = {
+ .clock = 71150,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 9,
+ .vsync_end = 800 + 9 + 8,
+ .vtotal = 800 + 9 + 8 + 6,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc powertip_ph128800t004_zza01 = {
+ .modes = &powertip_ph128800t004_zza01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
.clock = 66500,
.hdisplay = 1280,
@@ -5154,6 +5179,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph128800t004-zza01",
+ .data = &powertip_ph128800t004_zza01,
+ }, {
.compatible = "powertip,ph128800t006-zhc01",
.data = &powertip_ph128800t006_zhc01,
}, {
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index 17349825543f..b148e6cba9bd 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -106,53 +106,34 @@ static int r63353_panel_power_off(struct r63353_panel *rpanel)
static int r63353_panel_activate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int i, ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int i;
- ret = mipi_dsi_dcs_soft_reset(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
- usleep_range(15000, 17000);
+ mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
for (i = 0; i < rpanel->pdata->init_length; i++) {
const struct r63353_instr *instr = &rpanel->pdata->init[i];
- ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len);
- if (ret < 0)
- goto fail;
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, instr->data,
+ instr->len);
}
- msleep(120);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- usleep_range(5000, 10000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display ON (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
-fail:
- gpiod_set_value(rpanel->reset_gpio, 0);
+ if (dsi_ctx.accum_err)
+ gpiod_set_value(rpanel->reset_gpio, 0);
- return ret;
+ return dsi_ctx.accum_err;
}
static int r63353_panel_prepare(struct drm_panel *panel)
@@ -178,27 +159,16 @@ static int r63353_panel_prepare(struct drm_panel *panel)
return 0;
}
-static int r63353_panel_deactivate(struct r63353_panel *rpanel)
+static void r63353_panel_deactivate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display OFF (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- usleep_range(5000, 10000);
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
-
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
static int r63353_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index cffcb0ac7c11..ad95f2ed31d9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -42,6 +42,14 @@ enum panfrost_gpu_pm {
GPU_PM_VREG_OFF,
};
+/**
+ * enum panfrost_gpu_quirks - Optional GPU quirks
+ * @GPU_QUIRK_FORCE_AARCH64_PGTABLE: Use AARCH64_4K page table format
+ */
+enum panfrost_gpu_quirks {
+ GPU_QUIRK_FORCE_AARCH64_PGTABLE,
+};
+
struct panfrost_features {
u16 id;
u16 revision;
@@ -95,6 +103,9 @@ struct panfrost_compatible {
/* Allowed PM features */
u8 pm_features;
+
+ /* GPU configuration quirks */
+ u8 gpu_quirks;
};
struct panfrost_device {
@@ -162,6 +173,11 @@ struct panfrost_mmu {
int as;
atomic_t as_count;
struct list_head list;
+ struct {
+ u64 transtab;
+ u64 memattr;
+ u64 transcfg;
+ } cfg;
};
struct panfrost_engine_usage {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 0f3935556ac7..b87f83e94eda 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -476,7 +476,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
}
}
- args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
+ args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv);
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
@@ -824,6 +824,7 @@ static const struct panfrost_compatible mediatek_mt8188_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
@@ -835,6 +836,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const struct of_device_id dt_match[] = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 47751302f1bc..4042afe2fbf4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header;
}
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
@@ -228,7 +228,7 @@ void panfrost_core_dump(struct panfrost_job *job)
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
iter.hdr->bomap.valid = 1;
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 7ed0cd3ea2d4..52f9d69f6db9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -54,6 +54,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g72 (\
@@ -64,6 +65,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g51 hw_features_g72
@@ -77,6 +79,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g76 (\
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 3d9f51bd48b6..02b60ea1433a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -51,7 +51,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
- drm_gem_shmem_purge(&bo->base);
+ drm_gem_shmem_purge_locked(&bo->base);
ret = true;
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index b91019cd5acb..f6b91c052cfb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -26,6 +26,48 @@
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
+static u64 mair_to_memattr(u64 mair, bool coherent)
+{
+ u64 memattr = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 in_attr = mair >> (8 * i), out_attr;
+ u8 outer = in_attr >> 4, inner = in_attr & 0xf;
+
+ /* For caching to be enabled, both the inner and outer caching
+ * policies have to be write-back; if either of them is write-through
+ * or non-cacheable, we just choose non-cacheable. Device
+ * memory is also translated to non-cacheable.
+ */
+ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
+ AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
+ } else {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when the device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * Spec, inner and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
+ }
+
+ memattr |= (u64)out_attr << (8 * i);
+ }
+
+ return memattr;
+}
+
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
@@ -124,9 +166,9 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev,
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as_nr = mmu->as;
- struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
- u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
- u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+ u64 transtab = mmu->cfg.transtab;
+ u64 memattr = mmu->cfg.memattr;
+ u64 transcfg = mmu->cfg.transcfg;
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
@@ -139,6 +181,9 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -152,9 +197,67 @@ static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
+static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+
+ /* TODO: The following fields are duplicated between the MMU and Page
+ * Table config structs. Ideally, they should be kept in one place.
+ */
+ mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
+ mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
+ mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
+
+ return 0;
+}
+
+static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
+ return -EINVAL;
+
+ mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+
+ mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
+ pgtbl_cfg->coherent_walk);
+
+ mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
+ AS_TRANSCFG_PTW_RA |
+ AS_TRANSCFG_ADRMODE_AARCH64_4K |
+ AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
+ if (pgtbl_cfg->coherent_walk)
+ mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;
+
+ return 0;
+}
+
+static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
+ enum io_pgtable_fmt fmt)
+{
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ switch (fmt) {
+ case ARM_64_LPAE_S1:
+ return mmu_cfg_init_aarch64_4k(mmu);
+ case ARM_MALI_LPAE:
+ return mmu_cfg_init_mali_lpae(mmu);
+ default:
+ /* This should never happen */
+ drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
+ return -EINVAL;
+ }
+}
+
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
@@ -327,7 +430,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
struct drm_gem_object *obj = &shmem->base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
- int prot = IOMMU_READ | IOMMU_WRITE;
+ int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
if (WARN_ON(mapping->active))
return 0;
@@ -489,7 +592,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_unlock;
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
@@ -528,7 +631,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
mmu_map_sg(pfdev, bomapping->mmu, addr,
- IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
bomapping->active = true;
bo->heap_rss_size += SZ_2M;
@@ -615,7 +718,22 @@ static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
+ u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
struct panfrost_mmu *mmu;
+ enum io_pgtable_fmt fmt;
+ int ret;
+
+ if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
+ if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
+ dev_err_once(pfdev->dev,
+ "AARCH64_4K page table not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+ fmt = ARM_64_LPAE_S1;
+ } else {
+ fmt = ARM_MALI_LPAE;
+ }
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
@@ -633,23 +751,33 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = SZ_4K | SZ_2M,
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .ias = va_bits,
+ .oas = pa_bits,
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
- mmu);
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
if (!mmu->pgtbl_ops) {
- kfree(mmu);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err_free_mmu;
}
+ ret = panfrost_mmu_cfg_init(mmu, fmt);
+ if (ret)
+ goto err_free_io_pgtable;
+
kref_init(&mmu->refcount);
return mmu;
+
+err_free_io_pgtable:
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+
+err_free_mmu:
+ kfree(mmu);
+ return ERR_PTR(ret);
}
static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index ba9b6e2b2636..52befead08c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
- ret = drm_gem_vmap_unlocked(&bo->base, &map);
+ ret = drm_gem_vmap(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
- drm_gem_vunmap_unlocked(&bo->base, &map);
+ drm_gem_vunmap(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
+ drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index c7bba476ab3f..2b8f1617b836 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -16,6 +16,8 @@
#define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */
#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0))
+#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0))
#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
@@ -299,6 +301,17 @@
#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
+ ((w) ? BIT(0) : 0) | \
+ ((r) ? BIT(1) : 0))
+#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4)
+#define AS_MEMATTR_AARCH64_SHARED (0 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
+#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
@@ -309,6 +322,24 @@
/* Additional Bifrost AS registers */
#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_ADRMODE_LEGACY (0 << 0)
+#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
+#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0)
+#define AS_TRANSCFG_INA_BITS(x) ((x) << 6)
+#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14)
+#define AS_TRANSCFG_SL_CONCAT BIT(22)
+#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24)
+#define AS_TRANSCFG_PTW_SH_NS (0 << 28)
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+#define AS_TRANSCFG_PTW_RA BIT(30)
+#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33)
+#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
+#define AS_TRANSCFG_WXN BIT(35)
+#define AS_TRANSCFG_XREADABLE BIT(36)
#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
@@ -324,6 +355,11 @@
#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+/*
+ * Begin AARCH64_4K MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_AARCH64_4K_ADDR_MASK 0xfffffffffffffff0
+
#define AS_STATUS_AS_ACTIVE 0x01
#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 5749ef2ebe03..1a363bb814f4 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -112,7 +112,7 @@ panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
if (bo->kmap)
return 0;
- ret = drm_gem_vmap_unlocked(bo->obj, &map);
+ ret = drm_gem_vmap(bo->obj, &map);
if (ret)
return ret;
@@ -126,7 +126,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
if (bo->kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
- drm_gem_vunmap_unlocked(bo->obj, &map);
+ drm_gem_vunmap(bo->obj, &map);
bo->kmap = NULL;
}
}
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 4d31d1967716..446ec780eb4a 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -840,7 +840,7 @@ panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
if (queue->syncwait.kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
- drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
+ drm_gem_vunmap(queue->syncwait.obj, &map);
queue->syncwait.kmap = NULL;
}
@@ -866,7 +866,7 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue
goto err_put_syncwait_obj;
queue->syncwait.obj = &bo->base.base;
- ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
+ ret = drm_gem_vmap(queue->syncwait.obj, &map);
if (drm_WARN_ON(&ptdev->base, ret))
goto err_put_syncwait_obj;
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 1e4b28d03f4d..5f460b296c0c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -501,7 +501,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
* if we find it, it will take precedence. This is on the Integrator/AP
* which only has this option for PL110 graphics.
*/
- if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
&clcd_id);
if (np)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fa78824931cc..3f3c360dce4b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -501,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -678,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -741,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index 380a855b832a..a9145253294f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -634,6 +634,7 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
}
static int rcar_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -641,7 +642,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
if (!lvds->next_bridge)
return 0;
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index d1e626068065..7ab8be46c7f6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -799,11 +799,12 @@ static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
*/
static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 7c1817240846..e57536fd6f4d 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -14,10 +14,15 @@ config DRM_RZG2L_DU
Choose this option if you have an RZ/G2L alike chipset.
If M is selected the module will be called rzg2l-du-drm.
-config DRM_RZG2L_MIPI_DSI
- tristate "RZ/G2L MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select DRM_MIPI_DSI
+config DRM_RZG2L_USE_MIPI_DSI
+ bool "RZ/G2L MIPI DSI Encoder Support"
+ depends on DRM_BRIDGE && OF
+ depends on DRM_RZG2L_DU || COMPILE_TEST
+ default DRM_RZG2L_DU
help
Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
+
+config DRM_RZG2L_MIPI_DSI
+ def_tristate DRM_RZG2L_DU
+ depends on DRM_RZG2L_USE_MIPI_DSI
+ select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index cbd9b9841267..5e40f0c1e7b0 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -79,7 +79,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
static const struct drm_driver rzg2l_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .dumb_create = rzg2l_du_dumb_create,
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(rzg2l_du_dumb_create),
DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 90c6269ccd29..55a97691e9b2 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -36,23 +36,129 @@
static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
{
- .fourcc = DRM_FORMAT_XRGB8888,
- .v4l2 = V4L2_PIX_FMT_XBGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.planes = 1,
.hsub = 1,
}, {
- .fourcc = DRM_FORMAT_ARGB8888,
- .v4l2 = V4L2_PIX_FMT_ABGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
- .bpp = 24,
.planes = 1,
.hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
+ .planes = 3,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
+ .planes = 3,
+ .hsub = 1,
}
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
index 876e97cfbf45..e2c599f115c6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
@@ -23,7 +23,6 @@ struct sg_table;
struct rzg2l_du_format_info {
u32 fourcc;
u32 v4l2;
- unsigned int bpp;
unsigned int planes;
unsigned int hsub;
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 4550c6d84796..96c014449547 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -523,11 +523,12 @@ err:
*/
static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig
new file mode 100644
index 000000000000..cece53609fcf
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/.kunitconfig
@@ -0,0 +1,12 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_SCHED_KUNIT_TEST=y
+CONFIG_EXPERT=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_LIST=y
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index 53863621829f..6e13e4c63e9d 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -23,3 +23,5 @@
gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bfea608a7106..829579c41c6b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -828,11 +828,15 @@ EXPORT_SYMBOL(drm_sched_job_init);
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
- * or other places that need to track the completion of this job.
+ * or other places that need to track the completion of this job. It also
+ * initializes sequence numbers, which are fundamental for fence ordering.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
+ * Once this function has been called, you *must* submit @job with
+ * drm_sched_entity_push_job().
+ *
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
@@ -1015,13 +1019,14 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
- * before it was submitted to an entity with drm_sched_entity_push_job().
+ * before drm_sched_job_arm() is called.
*
- * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
- * it is up to the driver to ensure that fences that were exposed to external
- * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ * drm_sched_job_arm() is a point of no return, since it initializes the fences
+ * and their sequence numbers. Once that function has been called, you *must*
+ * submit the job with drm_sched_entity_push_job() and cannot simply abort it
+ * by calling drm_sched_job_cleanup().
*
- * This function must also be called in &struct drm_sched_backend_ops.free_job
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1029,10 +1034,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
- /* drm_sched_job_arm() has been called */
+ /* The job has been processed by the scheduler, i.e.,
+ * drm_sched_job_arm() and drm_sched_entity_push_job() have
+ * been called.
+ */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before arming */
+ /* The job was aborted before it was committed to be run;
+ * notably, drm_sched_job_arm() has not been called.
+ */
drm_sched_fence_free(job->s_fence);
}
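For reference, a minimal sketch of the driver-side lifecycle that the two comments above describe; struct my_job, my_prepare_job() and the submit helper are hypothetical driver code, not part of the scheduler API:

	static int my_submit(struct my_job *job, struct drm_sched_entity *entity)
	{
		int ret;

		ret = drm_sched_job_init(&job->base, entity, 1, NULL);
		if (ret)
			return ret;

		ret = my_prepare_job(job);	/* hypothetical driver-specific setup */
		if (ret)
			goto err_cleanup;	/* not armed yet, so cleanup is allowed */

		drm_sched_job_arm(&job->base);		/* point of no return */
		drm_sched_entity_push_job(&job->base);	/* must follow arming */
		return 0;

	err_cleanup:
		drm_sched_job_cleanup(&job->base);	/* undoes drm_sched_job_init() */
		return ret;
	}

Once pushed, the job's resources are released through the free_job callback, which is also where drm_sched_job_cleanup() is expected to run.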
@@ -1220,20 +1230,23 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_job_begin(sched_job);
trace_drm_run_job(sched_job, entity);
+ /*
+ * The run_job() callback must by definition return a fence whose
+ * refcount has been incremented for the scheduler already.
+ */
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
-
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(sched_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+ dma_fence_put(fence);
} else {
drm_sched_job_done(sched_job, IS_ERR(fence) ?
PTR_ERR(fence) : 0);
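A hedged sketch of the refcounting convention the comment above requires from a driver's run_job() implementation; my_job, my_hw_queue_job() and hw_fence are hypothetical:

	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = container_of(sched_job, struct my_job, base);

		my_hw_queue_job(job);	/* hypothetical hardware submission */

		/*
		 * Return a reference owned by the scheduler; after installing
		 * (or failing to install) its completion callback the scheduler
		 * drops it with the dma_fence_put() seen above.
		 */
		return dma_fence_get(&job->hw_fence);
	}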
diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile
new file mode 100644
index 000000000000..5bf707bad373
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+drm-sched-tests-y := \
+ mock_scheduler.o \
+ tests_basic.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
new file mode 100644
index 000000000000..f999c8859cf7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include "sched_tests.h"
+
+/*
+ * Here we implement the mock "GPU" (or the scheduler backend) which is used by
+ * the DRM scheduler unit tests in order to exercise the core functionality.
+ *
+ * Test cases are implemented in a separate file.
+ */
+
+/**
+ * drm_mock_sched_entity_new - Create a new mock scheduler entity
+ *
+ * @test: KUnit test owning the entity
+ * @priority: Scheduling priority
+ * @sched: Mock scheduler on which the entity can be scheduled
+ *
+ * Returns: New mock scheduler entity with allocation managed by the test
+ */
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_gpu_scheduler *drm_sched;
+ int ret;
+
+ entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ drm_sched = &sched->base;
+ ret = drm_sched_entity_init(&entity->base,
+ priority,
+ &drm_sched, 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ entity->test = test;
+
+ return entity;
+}
+
+/**
+ * drm_mock_sched_entity_free - Destroys a mock scheduler entity
+ *
+ * @entity: Entity to destroy
+ *
+ * To be used from the test cases once done with the entity.
+ */
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
+static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+
+ lockdep_assert_held(&sched->lock);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_DONE;
+ list_move_tail(&job->link, &sched->done_list);
+ dma_fence_signal(&job->hw_fence);
+ complete(&job->done);
+}
+
+static enum hrtimer_restart
+drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer)
+{
+ struct drm_mock_sched_job *job =
+ container_of(hrtimer, typeof(*job), timer);
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+ struct drm_mock_sched_job *next;
+ ktime_t now = ktime_get();
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (!job->duration_us)
+ break;
+
+ if (ktime_before(now, job->finish_at))
+ break;
+
+ sched->hw_timeline.cur_seqno = job->hw_fence.seqno;
+ drm_mock_sched_job_complete(job);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * drm_mock_sched_job_new - Create a new mock scheduler job
+ *
+ * @test: KUnit test owning the job
+ * @entity: Scheduler entity of the job
+ *
+ * Returns: New mock scheduler job with allocation managed by the test
+ */
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity)
+{
+ struct drm_mock_sched_job *job;
+ int ret;
+
+ job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, job);
+
+ ret = drm_sched_job_init(&job->base,
+ &entity->base,
+ 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ job->test = test;
+
+ init_completion(&job->done);
+ spin_lock_init(&job->lock);
+ INIT_LIST_HEAD(&job->link);
+ hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ return job;
+}
+
+static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence)
+{
+ return "drm_mock_sched";
+}
+
+static const char *
+drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ return (const char *)job->base.sched->name;
+}
+
+static void drm_mock_sched_hw_fence_release(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ hrtimer_cancel(&job->timer);
+
+ /* Containing job is freed by the kunit framework */
+}
+
+static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = {
+ .get_driver_name = drm_mock_sched_hw_fence_driver_name,
+ .get_timeline_name = drm_mock_sched_hw_fence_timeline_name,
+ .release = drm_mock_sched_hw_fence_release,
+};
+
+static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ dma_fence_init(&job->hw_fence,
+ &drm_mock_sched_hw_fence_ops,
+ &job->lock,
+ sched->hw_timeline.context,
+ atomic_inc_return(&sched->hw_timeline.next_seqno));
+
+ dma_fence_get(&job->hw_fence); /* Reference for the job_list */
+
+ spin_lock_irq(&sched->lock);
+ if (job->duration_us) {
+ ktime_t prev_finish_at = 0;
+
+ if (!list_empty(&sched->job_list)) {
+ struct drm_mock_sched_job *prev =
+ list_last_entry(&sched->job_list, typeof(*prev),
+ link);
+
+ prev_finish_at = prev->finish_at;
+ }
+
+ if (!prev_finish_at)
+ prev_finish_at = ktime_get();
+
+ job->finish_at = ktime_add_us(prev_finish_at, job->duration_us);
+ }
+ list_add_tail(&job->link, &sched->job_list);
+ if (job->finish_at)
+ hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS);
+ spin_unlock_irq(&sched->lock);
+
+ return &job->hw_fence;
+}
+
+static enum drm_gpu_sched_stat
+mock_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void mock_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ /* Remove from the scheduler done list. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_del(&job->link);
+ spin_unlock_irqrestore(&sched->lock, flags);
+ dma_fence_put(&job->hw_fence);
+
+ drm_sched_job_cleanup(sched_job);
+
+ /* Mock job itself is freed by the kunit framework. */
+}
+
+static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
+ .run_job = mock_sched_run_job,
+ .timedout_job = mock_sched_timedout_job,
+ .free_job = mock_sched_free_job
+};
+
+/**
+ * drm_mock_sched_new - Create a new mock scheduler
+ *
+ * @test: KUnit test owning the job
+ * @timeout: Job timeout to set
+ *
+ * Returns: New mock scheduler with allocation managed by the test
+ */
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
+{
+ struct drm_sched_init_args args = {
+ .ops = &drm_mock_scheduler_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = U32_MAX,
+ .hang_limit = 1,
+ .timeout = timeout,
+ .name = "drm-mock-scheduler",
+ };
+ struct drm_mock_scheduler *sched;
+ int ret;
+
+ sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sched);
+
+ ret = drm_sched_init(&sched->base, &args);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sched->test = test;
+ sched->hw_timeline.context = dma_fence_context_alloc(1);
+ atomic_set(&sched->hw_timeline.next_seqno, 0);
+ INIT_LIST_HEAD(&sched->job_list);
+ INIT_LIST_HEAD(&sched->done_list);
+ spin_lock_init(&sched->lock);
+
+ return sched;
+}
+
+/**
+ * drm_mock_sched_fini - Destroys a mock scheduler
+ *
+ * @sched: Scheduler to destroy
+ *
+ * To be used from the test cases once done with the scheduler.
+ */
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ drm_sched_wqueue_stop(&sched->base);
+
+ /* Force complete all unfinished jobs. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry(job, &list, link)
+ hrtimer_cancel(&job->timer);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &list, link)
+ drm_mock_sched_job_complete(job);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ /*
+ * Free completed jobs and jobs not yet processed by the DRM scheduler
+ * free worker.
+ */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->done_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry_safe(job, next, &list, link)
+ mock_sched_free_job(&job->base);
+
+ drm_sched_fini(&sched->base);
+}
+
+/**
+ * drm_mock_sched_advance - Advances the mock scheduler timeline
+ *
+ * @sched: Scheduler timeline to advance
+ * @num: By how many jobs to advance
+ *
+ * Advancing the scheduler timeline by a number of seqnos will trigger
+ * signalling of the hardware fences and unlinking the jobs from the internal
+ * scheduler tracking.
+ *
+ * This can be used from test cases which want complete control over the
+ * simulated job execution timing. For example, a single job submitted with no
+ * set duration will never complete until the test case advances the timeline
+ * by one.
+ */
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned int found = 0;
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
+ sched->hw_timeline.cur_seqno))
+ goto unlock;
+ sched->hw_timeline.cur_seqno += num;
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
+ break;
+
+ drm_mock_sched_job_complete(job);
+ found++;
+ }
+unlock:
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return found;
+}
+
+MODULE_DESCRIPTION("DRM mock scheduler and tests");
+MODULE_LICENSE("GPL");
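As a usage sketch (mirroring the test cases added below), a job submitted without a set duration stays pending on the mock GPU until the timeline is advanced manually; test, entity and sched are assumed to come from the test fixture:

	job = drm_mock_sched_job_new(test, entity);
	drm_mock_sched_job_submit(job);		/* arms and pushes the job */

	/* Wait until run_job() has handed the job to the mock GPU. */
	KUNIT_ASSERT_TRUE(test, drm_mock_sched_job_wait_scheduled(job, HZ));

	/* No duration set, so nothing completes the job automatically. */
	KUNIT_ASSERT_FALSE(test, drm_mock_sched_job_is_finished(job));

	/* Advancing the timeline by one seqno signals its hardware fence. */
	drm_mock_sched_advance(sched, 1);
	KUNIT_ASSERT_TRUE(test, drm_mock_sched_job_wait_finished(job, HZ));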
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
new file mode 100644
index 000000000000..27caf8285fb7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _SCHED_TESTS_H_
+#define _SCHED_TESTS_H_
+
+#include <kunit/test.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include <drm/gpu_scheduler.h>
+
+/*
+ * DOC: Mock DRM scheduler data structures
+ *
+ * drm_mock_* data structures are used to implement a mock "GPU".
+ *
+ * They subclass the core DRM scheduler objects and add their data on top, which
+ * enables tracking the submitted jobs and simulating their execution with the
+ * attributes specified by the test case.
+ */
+
+/**
+ * struct drm_mock_scheduler - implements a trivial mock GPU execution engine
+ *
+ * @base: DRM scheduler base class
+ * @test: Backpointer to the owning kunit test case
+ * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @job_list: List of jobs submitted to the mock GPU
+ * @done_list: List of jobs completed by the mock GPU
+ * @hw_timeline: Simulated hardware timeline with a @context, @next_seqno and
+ * @cur_seqno, used to implement a struct dma_fence that signals
+ * simulated job completion
+ *
+ * Trivial mock GPU execution engine tracks submitted jobs and enables
+ * completing them strictly in submission order.
+ */
+struct drm_mock_scheduler {
+ struct drm_gpu_scheduler base;
+
+ struct kunit *test;
+
+ spinlock_t lock;
+ struct list_head job_list;
+ struct list_head done_list;
+
+ struct {
+ u64 context;
+ atomic_t next_seqno;
+ unsigned int cur_seqno;
+ } hw_timeline;
+};
+
+/**
+ * struct drm_mock_sched_entity - implements a mock GPU sched entity
+ *
+ * @base: DRM scheduler entity base class
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched entity is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_entity {
+ struct drm_sched_entity base;
+
+ struct kunit *test;
+};
+
+/**
+ * struct drm_mock_sched_job - implements a mock GPU job
+ *
+ * @base: DRM sched job base class
+ * @done: Completion that signals when the job has finished
+ * @flags: Flags designating job state
+ * @link: List head used by the drm_mock_scheduler to track the job
+ * @timer: Timer used for simulating job execution duration
+ * @duration_us: Simulated job duration in microseconds, or zero if in manual
+ * timeline advance mode
+ * @finish_at: Absolute time at which a job with a set duration will complete
+ * @lock: Lock used for @hw_fence
+ * @hw_fence: Fence returned to the DRM scheduler as the hardware fence
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched job is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_job {
+ struct drm_sched_job base;
+
+ struct completion done;
+
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+ unsigned long flags;
+
+ struct list_head link;
+ struct hrtimer timer;
+
+ unsigned int duration_us;
+ ktime_t finish_at;
+
+ spinlock_t lock;
+ struct dma_fence hw_fence;
+
+ struct kunit *test;
+};
+
+static inline struct drm_mock_scheduler *
+drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct drm_mock_scheduler, base);
+};
+
+static inline struct drm_mock_sched_entity *
+drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity)
+{
+ return container_of(sched_entity, struct drm_mock_sched_entity, base);
+};
+
+static inline struct drm_mock_sched_job *
+drm_sched_job_to_mock_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct drm_mock_sched_job, base);
+};
+
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test,
+ long timeout);
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num);
+
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched);
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity);
+
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity);
+
+/**
+ * drm_mock_sched_job_submit - Arm and submit a job in one go
+ *
+ * @job: Job to arm and submit
+ */
+static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job)
+{
+ drm_sched_job_arm(&job->base);
+ drm_sched_entity_push_job(&job->base);
+}
+
+/**
+ * drm_mock_sched_job_set_duration_us - Set a job duration
+ *
+ * @job: Job to set the duration for
+ * @duration_us: Duration in microseconds
+ *
+ * Jobs with a set duration will be completed automatically by the mock
+ * scheduler as the timeline progresses, unless a job without a set duration is
+ * encountered in the timeline, in which case drm_mock_sched_advance() must be
+ * called to bump the timeline.
+ */
+static inline void
+drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job,
+ unsigned int duration_us)
+{
+ job->duration_us = duration_us;
+}
+
+/**
+ * drm_mock_sched_job_is_finished - Check if a job is finished
+ *
+ * @job: Job to check
+ *
+ * Returns: true if finished
+ */
+static inline bool
+drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job)
+{
+ return job->flags & DRM_MOCK_SCHED_JOB_DONE;
+}
+
+/**
+ * drm_mock_sched_job_wait_finished - Wait until a job is finished
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if finished within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout)
+{
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONE)
+ return true;
+
+ return wait_for_completion_timeout(&job->done, timeout) != 0;
+}
+
+/**
+ * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if scheduled within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout)
+{
+ KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0);
+
+ return dma_fence_wait_timeout(&job->base.s_fence->scheduled,
+ false,
+ timeout) != 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
new file mode 100644
index 000000000000..7230057e0594
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include <linux/delay.h>
+
+#include "sched_tests.h"
+
+/*
+ * DRM scheduler basic tests should check the basic functional correctness of
+ * the scheduler, including some very light smoke testing. More targeted tests,
+ * for example focusing on testing specific bugs and other more complicated test
+ * scenarios, should be implemented in separate source units.
+ */
+
+static int drm_sched_basic_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+}
+
+static void drm_sched_basic_exit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+
+ drm_mock_sched_fini(sched);
+}
+
+static int drm_sched_timeout_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, HZ);
+
+ return 0;
+}
+
+static void drm_sched_basic_submit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit one job to the scheduler and verify that it gets scheduled
+ * and completed only when the mock hw backend processes it.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+struct drm_sched_basic_params {
+ const char *description;
+ unsigned int queue_depth;
+ unsigned int num_entities;
+ unsigned int job_us;
+ bool dep_chain;
+};
+
+static const struct drm_sched_basic_params drm_sched_basic_cases[] = {
+ {
+ .description = "A queue of jobs in a single entity",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ },
+ {
+ .description = "A chain of dependent jobs across multiple entities",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ .dep_chain = true,
+ },
+ {
+ .description = "Multiple independent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ },
+ {
+ .description = "Multiple inter-dependent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ .dep_chain = true,
+ },
+};
+
+static void
+drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc)
+{
+ strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc);
+
+static void drm_sched_basic_test(struct kunit *test)
+{
+ const struct drm_sched_basic_params *params = test->param_value;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job, *prev = NULL;
+ struct drm_mock_sched_entity **entity;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ for (i = 0; i < params->num_entities; i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < params->queue_depth; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= params->num_entities;
+ drm_mock_sched_job_set_duration_us(job, params->job_us);
+ if (params->dep_chain && prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < params->num_entities; i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_basic_entity_cleanup(struct kunit *test)
+{
+ struct drm_mock_sched_job *job, *mid, *prev = NULL;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity[4];
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ /*
+ * Submit a queue of jobs across different entities with an explicit
+ * chain of dependencies between them and trigger entity cleanup while
+ * the queue is still being processed.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ if (prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ if (i == qd / 2)
+ mid = job;
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(mid, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ /* Exit with half of the queue still pending to be executed. */
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_basic_tests[] = {
+ KUNIT_CASE(drm_sched_basic_submit),
+ KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params),
+ KUNIT_CASE(drm_sched_basic_entity_cleanup),
+ {}
+};
+
+static struct kunit_suite drm_sched_basic = {
+ .name = "drm_sched_basic_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_basic_tests,
+};
+
+static void drm_sched_basic_timeout(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that the timeout handling will run if the backend fails
+ * to complete it in time.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ 0);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ DRM_MOCK_SCHED_JOB_TIMEDOUT);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+static struct kunit_case drm_sched_timeout_tests[] = {
+ KUNIT_CASE(drm_sched_basic_timeout),
+ {}
+};
+
+static struct kunit_suite drm_sched_timeout = {
+ .name = "drm_sched_basic_timeout_tests",
+ .init = drm_sched_timeout_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_timeout_tests,
+};
+
+static void drm_sched_priorities(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+ bool done;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities.
+ */
+
+ BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW);
+ BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT);
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_change_priority(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities and while waiting for them to complete, periodically keep
+ * changing their priorities.
+ *
+ * We set up the queue-depth (qd) and job duration so the priority
+ * changing loop has some time to interact with submissions to the
+ * backend and job completions as they progress.
+ */
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ drm_sched_entity_set_priority(&entity[cur_ent]->base,
+ (entity[cur_ent]->base.priority + 1) %
+ DRM_SCHED_PRIORITY_COUNT);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ usleep_range(200, 500);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_priority_tests[] = {
+ KUNIT_CASE(drm_sched_priorities),
+ KUNIT_CASE(drm_sched_change_priority),
+ {}
+};
+
+static struct kunit_suite drm_sched_priority = {
+ .name = "drm_sched_basic_priority_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_priority_tests,
+};
+
+static void drm_sched_test_modify_sched(struct kunit *test)
+{
+ unsigned int i, cur_ent = 0, cur_sched = 0;
+ struct drm_mock_sched_entity *entity[13];
+ struct drm_mock_scheduler *sched[3];
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * schedulers and while waiting for them to complete, periodically keep
+ * changing schedulers associated with each entity.
+ *
+ * We set up the queue-depth (qd) and job duration so the sched modify
+ * loop has some time to interact with submissions to the backend and
+ * job completions as they progress.
+ *
+ * For the number of schedulers and entities we use primes so that the
+ * entity->sched assignments follow a less regular pattern.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched[i % ARRAY_SIZE(sched)]);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ struct drm_gpu_scheduler *modify;
+
+ usleep_range(200, 500);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ cur_sched++;
+ cur_sched %= ARRAY_SIZE(sched);
+ modify = &sched[cur_sched]->base;
+ drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify,
+ 1);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ drm_mock_sched_fini(sched[i]);
+}
+
+static struct kunit_case drm_sched_modify_sched_tests[] = {
+ KUNIT_CASE(drm_sched_test_modify_sched),
+ {}
+};
+
+static struct kunit_suite drm_sched_modify_sched = {
+ .name = "drm_sched_basic_modify_sched_tests",
+ .test_cases = drm_sched_modify_sched_tests,
+};
+
+static void drm_sched_test_credits(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job[2];
+ bool done;
+ int i;
+
+ /*
+ * Check that the configured credit limit is respected.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ sched->base.credit_limit = 1;
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job[0] = drm_mock_sched_job_new(test, entity);
+ job[1] = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job[0]);
+ drm_mock_sched_job_submit(job[1]);
+
+ done = drm_mock_sched_job_wait_scheduled(job[0], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+}
+
+static struct kunit_case drm_sched_credits_tests[] = {
+ KUNIT_CASE(drm_sched_test_credits),
+ {}
+};
+
+static struct kunit_suite drm_sched_credits = {
+ .name = "drm_sched_basic_credits_tests",
+ .test_cases = drm_sched_credits_tests,
+};
+
+kunit_test_suites(&drm_sched_basic,
+ &drm_sched_timeout,
+ &drm_sched_priority,
+ &drm_sched_modify_sched,
+ &drm_sched_credits);
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index cb2816985305..a3447622a33c 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -784,19 +784,12 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
{
struct platform_device *pdev = to_platform_device(dev);
struct dpu_context *ctx = &dpu->ctx;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
dev_err(dev, "failed to map dpu registers\n");
- return -EFAULT;
+ return PTR_ERR(ctx->base);
}
ctx->irq = platform_get_irq(pdev, 0);
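This and the following sprd/sti patches all apply the same conversion; a condensed before/after sketch with illustrative names (the helper is generally expected to print its own diagnostic on failure, so the remaining error messages become largely redundant):

	/* before: two calls, two ad hoc error codes */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -EFAULT;

	/* after: one helper, errors propagated as ERR_PTR() values */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);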
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 8fc26479bb6b..23b0e1dc547a 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -901,18 +901,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi,
{
struct platform_device *pdev = to_platform_device(dev);
struct dsi_context *ctx = &dsi->ctx;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
drm_err(dsi->drm, "failed to map dsi host registers\n");
- return -ENXIO;
+ return PTR_ERR(ctx->base);
}
ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 063f82d23d80..8c529b0cca8b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -177,7 +177,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *vtg_np;
struct sti_compositor *compo;
- struct resource *res;
unsigned int i;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
@@ -194,17 +193,10 @@ static int sti_compositor_probe(struct platform_device *pdev)
memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
sizeof(struct sti_compositor_data));
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- compo->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (compo->regs == NULL) {
+ compo->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(compo->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(compo->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 4dcddd02629b..74a1eef4674e 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -511,7 +511,6 @@ static int sti_dvo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_dvo *dvo;
- struct resource *res;
struct device_node *np = dev->of_node;
DRM_INFO("%s\n", __func__);
@@ -523,16 +522,9 @@ static int sti_dvo_probe(struct platform_device *pdev)
}
dvo->dev = pdev->dev;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
- if (!res) {
- DRM_ERROR("Invalid dvo resource\n");
- return -ENOMEM;
- }
- dvo->regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!dvo->regs)
- return -ENOMEM;
+ dvo->regs = devm_platform_ioremap_resource_byname(pdev, "dvo-reg");
+ if (IS_ERR(dvo->regs))
+ return PTR_ERR(dvo->regs);
dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
if (IS_ERR(dvo->clk_pix)) {
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 14fdc00d2ba0..d202b6c1eb8f 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -693,7 +693,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
connector->hda = hda;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
@@ -750,16 +750,9 @@ static int sti_hda_probe(struct platform_device *pdev)
return -ENOMEM;
hda->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
- if (!res) {
- DRM_ERROR("Invalid hda resource\n");
- return -ENOMEM;
- }
- hda->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hda->regs)
- return -ENOMEM;
+ hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
+ if (IS_ERR(hda->regs))
+ return PTR_ERR(hda->regs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 164a34d793d8..37b8d619066e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1380,7 +1380,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
- struct resource *res;
struct device_node *ddc;
int ret;
@@ -1399,17 +1398,9 @@ static int sti_hdmi_probe(struct platform_device *pdev)
}
hdmi->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
- if (!res) {
- DRM_ERROR("Invalid hdmi resource\n");
- ret = -ENOMEM;
- goto release_adapter;
- }
- hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hdmi->regs) {
- ret = -ENOMEM;
+ hdmi->regs = devm_platform_ioremap_resource_byname(pdev, "hdmi-reg");
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
goto release_adapter;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0f658709c9d0..03684062309b 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1356,7 +1356,6 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *vtg_np;
struct sti_hqvdp *hqvdp;
- struct resource *res;
DRM_DEBUG_DRIVER("\n");
@@ -1367,17 +1366,10 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
}
hqvdp->dev = dev;
-
- /* Get Memory resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hqvdp->regs) {
+ hqvdp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hqvdp->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(hqvdp->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index af6c06f448c4..6a464b035de8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -838,7 +838,6 @@ static int sti_tvout_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
- struct resource *res;
DRM_INFO("%s\n", __func__);
@@ -850,16 +849,9 @@ static int sti_tvout_probe(struct platform_device *pdev)
return -ENOMEM;
tvout->dev = dev;
-
- /* get memory resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
- if (!res) {
- DRM_ERROR("Invalid glue resource\n");
- return -ENOMEM;
- }
- tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!tvout->regs)
- return -ENOMEM;
+ tvout->regs = devm_platform_ioremap_resource_byname(pdev, "tvout-reg");
+ if (IS_ERR(tvout->regs))
+ return PTR_ERR(tvout->regs);
/* get reset resources */
tvout->reset = devm_reset_control_get(dev, "tvout");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5ba469b711b5..ee81691b3203 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -380,23 +380,15 @@ static int vtg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_vtg *vtg;
- struct resource *res;
int ret;
vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
if (!vtg)
return -ENOMEM;
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENOMEM;
- }
- vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vtg->regs) {
+ vtg->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vtg->regs)) {
DRM_ERROR("failed to remap I/O memory\n");
- return -ENOMEM;
+ return PTR_ERR(vtg->regs);
}
vtg->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 4613e8e3b8fd..a3ae9a93ce66 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -934,28 +934,27 @@ static const struct drm_connector_funcs lvds_conn_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int lvds_attach(struct drm_bridge *bridge,
+static int lvds_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
- struct drm_encoder *encoder = bridge->encoder;
int ret;
- if (!bridge->encoder) {
+ if (!encoder) {
drm_err(bridge->dev, "Parent encoder object not found\n");
return -ENODEV;
}
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
/* No cloning support */
- bridge->encoder->possible_clones = 0;
+ encoder->possible_clones = 0;
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge,
bridge, flags);
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
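
Note: the stm/lvds change above adapts the driver to the updated struct drm_bridge_funcs .attach callback, which now receives the encoder as an explicit argument instead of the callback reading bridge->encoder. A minimal sketch of the new shape for a hypothetical bridge driver (my_bridge and its next_bridge field are assumptions for illustration):

#include <linux/container_of.h>
#include <linux/errno.h>

#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

struct my_bridge {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static int my_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *mb = container_of(bridge, struct my_bridge, bridge);

	if (!encoder)
		return -ENODEV;

	/* Chain the downstream bridge to the same encoder. */
	return drm_bridge_attach(encoder, mb->next_bridge, bridge, flags);
}
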
diff --git a/drivers/gpu/drm/sysfb/Kconfig b/drivers/gpu/drm/sysfb/Kconfig
new file mode 100644
index 000000000000..9c9884c7efc6
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Kconfig
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Drivers for system framebuffers"
+ depends on DRM
+
+config DRM_SYSFB_HELPER
+ tristate
+ depends on DRM
+
+config DRM_EFIDRM
+ tristate "EFI framebuffer driver"
+ depends on DRM && MMU && EFI && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for EFI framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via EFI interfaces.
+
+config DRM_OFDRM
+ tristate "Open Firmware display driver"
+ depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for Open Firmware framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the Open Firmware before the kernel boots. Scanout buffer, size,
+ and display format must be provided via device tree.
+
+config DRM_SIMPLEDRM
+ tristate "Simple framebuffer driver"
+ depends on DRM && MMU
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for simple platform-provided framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via device tree,
+ UEFI, VESA, etc.
+
+ On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
+ to use UEFI and VESA framebuffers.
+
+config DRM_VESADRM
+ tristate "VESA framebuffer driver"
+ depends on DRM && MMU && X86 && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for VESA framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via VBE interfaces.
+
+endmenu
diff --git a/drivers/gpu/drm/sysfb/Makefile b/drivers/gpu/drm/sysfb/Makefile
new file mode 100644
index 000000000000..0d2518c97163
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_SYSFB_HELPER) += drm_sysfb_helper.o
+
+obj-$(CONFIG_DRM_EFIDRM) += efidrm.o
+obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
+obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
+obj-$(CONFIG_DRM_VESADRM) += vesadrm.o
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.c b/drivers/gpu/drm/sysfb/drm_sysfb_helper.c
new file mode 100644
index 000000000000..262490a71792
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "drm_sysfb_helper.h"
+
+MODULE_DESCRIPTION("Helpers for DRM sysfb drivers");
+MODULE_LICENSE("GPL");
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm)
+{
+ /*
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
+ */
+ if (!width_mm)
+ width_mm = DRM_MODE_RES_MM(width, 96ul);
+ if (!height_mm)
+ height_mm = DRM_MODE_RES_MM(height, 96ul);
+
+ {
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height, width_mm, height_mm)
+ };
+
+ return mode;
+ }
+}
+EXPORT_SYMBOL(drm_sysfb_mode);
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (new_fb->format != sysfb->fb_format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ sysfb->fb_pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = new_fb->format;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
+
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_rect dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
+ drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
+ &damage, &shadow_plane_state->fmtcnv_state);
+ }
+
+ drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_update);
+
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_rect dst_clip;
+ unsigned long lines, linepixels, i;
+ int idx;
+
+ drm_rect_init(&dst_clip,
+ plane_state->src_x >> 16, plane_state->src_y >> 16,
+ plane_state->src_w >> 16, plane_state->src_h >> 16);
+
+ lines = drm_rect_height(&dst_clip);
+ linepixels = drm_rect_width(&dst_clip);
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ /* Clear buffer to black if disabled */
+ dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
+ for (i = 0; i < lines; ++i) {
+ memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
+ dst_vmap += dst_pitch;
+ }
+
+ drm_dev_exit(idx);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_disable);
+
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+
+ sb->width = sysfb->fb_mode.hdisplay;
+ sb->height = sysfb->fb_mode.vdisplay;
+ sb->format = sysfb->fb_format;
+ sb->pitch[0] = sysfb->fb_pitch;
+ sb->map[0] = sysfb->fb_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer);
+
+/*
+ * CRTC
+ */
+
+static void drm_sysfb_crtc_state_destroy(struct drm_sysfb_crtc_state *sysfb_crtc_state)
+{
+ __drm_atomic_helper_crtc_destroy_state(&sysfb_crtc_state->base);
+
+ kfree(sysfb_crtc_state);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_mode_valid);
+
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+ if (ret)
+ return ret;
+
+ if (new_crtc_state->color_mgmt_changed) {
+ const size_t gamma_lut_length =
+ sysfb->fb_gamma_lut_size * sizeof(struct drm_color_lut);
+ const struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
+
+ if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
+ drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (crtc->state)
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
+
+ sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
+ if (sysfb_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_reset);
+
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (drm_WARN_ON(dev, !crtc_state))
+ return NULL;
+
+ new_sysfb_crtc_state = kzalloc(sizeof(*new_sysfb_crtc_state), GFP_KERNEL);
+ if (!new_sysfb_crtc_state)
+ return NULL;
+
+ sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_sysfb_crtc_state->base);
+ new_sysfb_crtc_state->format = sysfb_crtc_state->format;
+
+ return &new_sysfb_crtc_state->base;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_duplicate_state);
+
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc_state));
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_destroy_state);
+
+/*
+ * Connector
+ */
+
+static int drm_sysfb_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct drm_sysfb_device *sysfb = data;
+ const u8 *edid = sysfb->edid;
+ size_t off = block * EDID_LENGTH;
+ size_t end = off + len;
+
+ if (!edid)
+ return -EINVAL;
+ if (end > EDID_LENGTH)
+ return -EINVAL;
+ memcpy(buf, &edid[off], len);
+
+ /*
+ * We don't have EDID extensions available and reporting them
+ * will upset DRM helpers. Thus clear the extension field and
+ * update the checksum. Adding the extension flag to the checksum
+ * does this.
+ */
+ buf[127] += buf[126];
+ buf[126] = 0;
+
+ return 0;
+}
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ if (sysfb->edid) {
+ drm_edid = drm_edid_read_custom(connector, drm_sysfb_get_edid_block, sysfb);
+ drm_edid_connector_update(connector, drm_edid);
+ drm_edid_free(drm_edid);
+ }
+
+ /* Return the fixed mode even with EDID */
+ return drm_connector_helper_get_modes_fixed(connector, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_connector_helper_get_modes);
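
Note: drm_sysfb_mode() above falls back to a 96 dpi assumption when the firmware does not report physical dimensions. As a worked example (assuming the usual DRM_MODE_RES_MM() definition of res * 254 / (dpi * 10)), a 1024x768 framebuffer with no reported size ends up as a roughly 270 mm x 203 mm display mode:

#include <drm/drm_modes.h>

#include "drm_sysfb_helper.h"

static struct drm_display_mode example_fixed_mode(void)
{
	/*
	 * 1024x768 with no physical size reported: the helper assumes
	 * 96 dpi, giving width_mm == DRM_MODE_RES_MM(1024, 96) == 270
	 * and height_mm == DRM_MODE_RES_MM(768, 96) == 203 (integer math).
	 */
	return drm_sysfb_mode(1024, 768, 0, 0);
}
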
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
new file mode 100644
index 000000000000..3684bd0ef085
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef DRM_SYSFB_HELPER_H
+#define DRM_SYSFB_HELPER_H
+
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+
+struct drm_format_info;
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm);
+
+/*
+ * Device
+ */
+
+struct drm_sysfb_device {
+ struct drm_device dev;
+
+ const u8 *edid; /* can be NULL */
+
+ /* hardware settings */
+ struct drm_display_mode fb_mode;
+ const struct drm_format_info *fb_format;
+ unsigned int fb_pitch;
+ unsigned int fb_gamma_lut_size;
+
+ /* hardware-framebuffer kernel address */
+ struct iosys_map fb_addr;
+};
+
+static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_sysfb_device, dev);
+}
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state);
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+#define DRM_SYSFB_PLANE_NFORMATS(_num_native) \
+ ((_num_native) + 1)
+
+#define DRM_SYSFB_PLANE_FORMAT_MODIFIERS \
+ DRM_FORMAT_MOD_LINEAR, \
+ DRM_FORMAT_MOD_INVALID
+
+#define DRM_SYSFB_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = drm_sysfb_plane_helper_atomic_check, \
+ .atomic_update = drm_sysfb_plane_helper_atomic_update, \
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+
+#define DRM_SYSFB_PLANE_FUNCS \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
+
+/*
+ * CRTC
+ */
+
+struct drm_sysfb_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Primary-plane format; required for color mgmt. */
+ const struct drm_format_info *format;
+};
+
+static inline struct drm_sysfb_crtc_state *
+to_drm_sysfb_crtc_state(struct drm_crtc_state *base)
+{
+ return container_of(base, struct drm_sysfb_crtc_state, base);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+
+#define DRM_SYSFB_CRTC_HELPER_FUNCS \
+ .mode_valid = drm_sysfb_crtc_helper_mode_valid, \
+ .atomic_check = drm_sysfb_crtc_helper_atomic_check
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+
+#define DRM_SYSFB_CRTC_FUNCS \
+ .reset = drm_sysfb_crtc_reset, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = drm_sysfb_crtc_atomic_duplicate_state, \
+ .atomic_destroy_state = drm_sysfb_crtc_atomic_destroy_state
+
+/*
+ * Connector
+ */
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector);
+
+#define DRM_SYSFB_CONNECTOR_HELPER_FUNCS \
+ .get_modes = drm_sysfb_connector_helper_get_modes
+
+#define DRM_SYSFB_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+/*
+ * Mode config
+ */
+
+#define DRM_SYSFB_MODE_CONFIG_FUNCS \
+ .fb_create = drm_gem_fb_create_with_dirty, \
+ .atomic_check = drm_atomic_helper_check, \
+ .atomic_commit = drm_atomic_helper_commit
+
+#endif
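
Note: drivers built on these helpers embed struct drm_sysfb_device in their own device structure and recover it through container_of(), as the efidrm driver and the ofdrm conversion below do. A hedged sketch with a hypothetical mydrm driver:

#include <linux/container_of.h>

#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>

#include "drm_sysfb_helper.h"

struct mydrm_device {
	struct drm_sysfb_device sysfb;

	/* modesetting */
	u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
	struct drm_plane primary_plane;
	struct drm_crtc crtc;
};

static inline struct mydrm_device *to_mydrm_device(struct drm_device *dev)
{
	return container_of(to_drm_sysfb_device(dev), struct mydrm_device, sysfb);
}

Allocation then goes through devm_drm_dev_alloc(parent, drv, struct mydrm_device, sysfb.dev), exactly as the efidrm and ofdrm probes below do.
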
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
new file mode 100644
index 000000000000..af90064a4c04
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/efi.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "efidrm"
+#define DRIVER_DESC "DRM driver for EFI platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static int efidrm_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (max > INT_MAX)
+ max = INT_MAX;
+ if (value > max) {
+ drm_err(dev, "%s of %llu exceeds maximum of %u\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+
+static int efidrm_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (!value) {
+ drm_err(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ }
+ return efidrm_get_validated_int(dev, name, value, max);
+}
+
+static s64 efidrm_get_validated_size0(struct drm_device *dev, const char *name,
+ u64 value, u64 max)
+{
+ if (!value) {
+ drm_err(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ } else if (value > max) {
+ drm_err(dev, "%s of %llu exceeds maximum of %llu\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+
+static int efidrm_get_width_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return efidrm_get_validated_int0(dev, "width", si->lfb_width, U16_MAX);
+}
+
+static int efidrm_get_height_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return efidrm_get_validated_int0(dev, "height", si->lfb_height, U16_MAX);
+}
+
+static struct resource *efidrm_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res)
+{
+ ssize_t num;
+
+ num = screen_info_resources(si, res, 1);
+ if (!num) {
+ drm_err(dev, "memory resource not found\n");
+ return NULL;
+ }
+
+ return res;
+}
+
+static int efidrm_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size)
+{
+ u64 lfb_linelength = si->lfb_linelength;
+
+ if (!lfb_linelength)
+ lfb_linelength = drm_format_info_min_pitch(format, 0, width);
+
+ return efidrm_get_validated_int0(dev, "stride", lfb_linelength, div64_u64(size, height));
+}
+
+static u64 efidrm_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size)
+{
+ u64 vsize = PAGE_ALIGN(height * stride);
+
+ return efidrm_get_validated_size0(dev, "visible size", vsize, size);
+}
+
+static const struct drm_format_info *efidrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct {
+ struct pixel_format pixel;
+ u32 fourcc;
+ } efi_formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_XRGB2101010, DRM_FORMAT_XRGB2101010, },
+ };
+ const struct drm_format_info *format = NULL;
+ u32 bits_per_pixel;
+ size_t i;
+
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+
+ for (i = 0; i < ARRAY_SIZE(efi_formats); ++i) {
+ const struct pixel_format *f = &efi_formats[i].pixel;
+
+ if (bits_per_pixel == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset) {
+ format = drm_format_info(efi_formats[i].fourcc);
+ break;
+ }
+ }
+
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ if (format->is_color_indexed)
+ return ERR_PTR(-EINVAL);
+
+ return format;
+}
+
+static u64 efidrm_get_mem_flags(struct drm_device *dev, resource_size_t start,
+ resource_size_t len)
+{
+ u64 attribute = EFI_MEMORY_UC | EFI_MEMORY_WC |
+ EFI_MEMORY_WT | EFI_MEMORY_WB;
+ u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
+ resource_size_t end = start + len;
+ efi_memory_desc_t md;
+ u64 md_end;
+
+ if (!efi_enabled(EFI_MEMMAP) || efi_mem_desc_lookup(start, &md))
+ goto out;
+
+ md_end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT);
+ if (end > md_end)
+ goto out;
+
+ attribute &= md.attribute;
+ if (attribute) {
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= attribute;
+ }
+
+out:
+ return mem_flags;
+}
+
+/*
+ * EFI device
+ */
+
+struct efidrm_device {
+ struct drm_sysfb_device sysfb;
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+/*
+ * Modesetting
+ */
+
+static const u64 efidrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs efidrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs efidrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs efidrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs efidrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs efidrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs efidrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs efidrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs efidrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize, mem_flags;
+ struct resource resbuf;
+ struct resource *res;
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_EFI)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * EFI DRM driver
+ */
+
+ efi = devm_drm_dev_alloc(&pdev->dev, drv, struct efidrm_device, sysfb.dev);
+ if (IS_ERR(efi))
+ return ERR_CAST(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = efidrm_get_format_si(dev, si);
+ if (IS_ERR(format))
+ return ERR_CAST(format);
+ width = efidrm_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = efidrm_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = efidrm_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = efidrm_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = efidrm_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+#ifdef CONFIG_X86
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ mem_flags = efidrm_get_mem_flags(dev, res->start, vsize);
+
+ if (mem_flags & EFI_MEMORY_WC)
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_UC)
+ screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_WT)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WB);
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &efidrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
+
+ primary_plane = &efi->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
+ efi->formats, nformats,
+ efidrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &efidrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &efi->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &efidrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &efidrm_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &efi->encoder;
+ ret = drm_encoder_init(dev, encoder, &efidrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &efi->connector;
+ ret = drm_connector_init(dev, connector, &efidrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &efidrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return efi;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(efidrm_fops);
+
+static struct drm_driver efidrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &efidrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int efidrm_probe(struct platform_device *pdev)
+{
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ efi = efidrm_device_create(&efidrm_driver, pdev);
+ if (IS_ERR(efi))
+ return PTR_ERR(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void efidrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver efidrm_platform_driver = {
+ .driver = {
+ .name = "efi-framebuffer",
+ },
+ .probe = efidrm_probe,
+ .remove = efidrm_remove,
+};
+
+module_platform_driver(efidrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
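
Note: like the shared plane helpers above, efidrm pairs drm_dev_unplug() in its remove() path with drm_dev_enter()/drm_dev_exit() around every hardware access, so a commit racing with device removal fails gracefully instead of touching vanished I/O memory. The pattern, sketched for a hypothetical helper:

#include <drm/drm_drv.h>

static void mydrm_touch_hw(struct drm_device *dev)
{
	int idx;

	/* Fails once drm_dev_unplug() has run; skip the access then. */
	if (!drm_dev_enter(dev, &idx))
		return;

	/* ... framebuffer or register access goes here ... */

	drm_dev_exit(idx);
}
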
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index 13491c0e704a..86c1a0c80ceb 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -12,6 +12,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
@@ -21,7 +22,8 @@
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+
+#include "drm_sysfb_helper.h"
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
@@ -226,6 +228,16 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of
return address;
}
+static const u8 *display_get_edid_of(struct drm_device *dev, struct device_node *of_node,
+ u8 buf[EDID_LENGTH])
+{
+ int ret = of_property_read_u8_array(of_node, "EDID", buf, EDID_LENGTH);
+
+ if (ret)
+ return NULL;
+ return buf;
+}
+
static bool is_avivo(u32 vendor, u32 device)
{
/* This will match most R5xx */
@@ -290,22 +302,17 @@ struct ofdrm_device_funcs {
};
struct ofdrm_device {
- struct drm_device dev;
- struct platform_device *pdev;
+ struct drm_sysfb_device sysfb;
const struct ofdrm_device_funcs *funcs;
- /* firmware-buffer settings */
- struct iosys_map screen_base;
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
/* colormap */
void __iomem *cmap_base;
+ u8 edid[EDID_LENGTH];
+
/* modesetting */
- uint32_t formats[8];
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -314,7 +321,7 @@ struct ofdrm_device {
static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
{
- return container_of(dev, struct ofdrm_device, dev);
+ return container_of(to_drm_sysfb_device(dev), struct ofdrm_device, sysfb);
}
/*
@@ -354,7 +361,7 @@ static void ofdrm_pci_release(void *data)
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct pci_dev *pcidev;
@@ -397,7 +404,7 @@ static int ofdrm_device_init_pci(struct ofdrm_device *odev)
static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
struct resource *fb_res)
{
- struct platform_device *pdev = to_platform_device(odev->dev.dev);
+ struct platform_device *pdev = to_platform_device(odev->sysfb.dev.dev);
struct resource *res, *max_res = NULL;
u32 i;
@@ -423,7 +430,7 @@ static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
int bar_no, unsigned long offset, unsigned long size)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
const __be32 *addr_p;
u64 max_size, address;
unsigned int flags;
@@ -456,7 +463,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -618,7 +625,7 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
cpu_to_be32(0x00),
};
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -648,7 +655,7 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
const struct drm_format_info *format)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -687,7 +694,7 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -731,205 +738,27 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
* Modesetting
*/
-struct ofdrm_crtc_state {
- struct drm_crtc_state base;
-
- /* Primary-plane format; required for color mgmt. */
- const struct drm_format_info *format;
-};
-
-static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
-{
- return container_of(base, struct ofdrm_crtc_state, base);
-}
-
-static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
-{
- __drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
- kfree(ofdrm_crtc_state);
-}
-
-static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const u64 ofdrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *new_state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != odev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- odev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
- new_ofdrm_crtc_state->format = new_fb->format;
-
- return 0;
-}
-
-static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct iosys_map dst = odev->screen_base;
- struct drm_rect dst_clip = plane_state->dst;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
- drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct iosys_map dst = odev->screen_base;
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_rect dst_clip;
- unsigned long lines, linepixels, i;
- int idx;
-
- drm_rect_init(&dst_clip,
- plane_state->src_x >> 16, plane_state->src_y >> 16,
- plane_state->src_w >> 16, plane_state->src_h >> 16);
-
- lines = drm_rect_height(&dst_clip);
- linepixels = drm_rect_width(&dst_clip);
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear buffer to black if disabled */
- dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
- for (i = 0; i < lines; ++i) {
- memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
- dst_vmap += dst_pitch;
- }
-
- drm_dev_exit(idx);
-}
-
static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ofdrm_primary_plane_helper_atomic_check,
- .atomic_update = ofdrm_primary_plane_helper_atomic_update,
- .atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
-}
-
-static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *new_state)
-{
- static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);
-
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
- int ret;
-
- if (!new_crtc_state->enable)
- return 0;
-
- ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
- if (ret)
- return ret;
-
- if (new_crtc_state->color_mgmt_changed) {
- struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
-
- if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
- drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- const struct drm_format_info *format = ofdrm_crtc_state->format;
+ const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
@@ -938,91 +767,31 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato
}
}
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
- .mode_valid = ofdrm_crtc_helper_mode_valid,
- .atomic_check = ofdrm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
.atomic_flush = ofdrm_crtc_helper_atomic_flush,
};
-static void ofdrm_crtc_reset(struct drm_crtc *crtc)
-{
- struct ofdrm_crtc_state *ofdrm_crtc_state =
- kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);
-
- if (crtc->state)
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));
-
- if (ofdrm_crtc_state)
- __drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
- else
- __drm_atomic_helper_crtc_reset(crtc, NULL);
-}
-
-static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- struct ofdrm_crtc_state *ofdrm_crtc_state;
-
- if (drm_WARN_ON(dev, !crtc_state))
- return NULL;
-
- new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
- if (!new_ofdrm_crtc_state)
- return NULL;
-
- ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
-
- __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
- new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;
-
- return &new_ofdrm_crtc_state->base;
-}
-
-static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
-{
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
-}
-
static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
- .reset = ofdrm_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
- .atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
};
-static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
-}
+static const struct drm_encoder_funcs ofdrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
- .get_modes = ofdrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs ofdrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
@@ -1072,32 +841,19 @@ static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
.cmap_write = ofdrm_qemu_cmap_write,
};
-static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
-{
- /*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height,
- DRM_MODE_RES_MM(width, 96ul),
- DRM_MODE_RES_MM(height, 96ul))
- };
-
- return mode;
-}
-
static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
enum ofdrm_model model;
bool big_endian;
int width, height, depth, linebytes;
const struct drm_format_info *format;
u64 address;
+ const u8 *edid;
resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
struct resource *res, *mem;
void __iomem *screen_base;
@@ -1109,10 +865,11 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
+ odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, sysfb.dev);
if (IS_ERR(odev))
return ERR_CAST(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, dev);
ret = ofdrm_device_init_pci(odev);
@@ -1246,16 +1003,22 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
}
}
+ /* EDID is optional */
+ edid = display_get_edid_of(dev, of_node, odev->edid);
+
/*
* Firmware framebuffer
*/
- iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
- odev->mode = ofdrm_mode(width, height);
- odev->format = format;
- odev->pitch = linebytes;
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = linebytes;
+ if (odev->cmap_base)
+ sysfb->fb_gamma_lut_size = OFDRM_GAMMA_LUT_SIZE;
+ sysfb->edid = edid;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
&format->format, width, height, linebytes);
@@ -1302,15 +1065,16 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);
- if (odev->cmap_base) {
- drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
- drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
}
/* Encoder */
encoder = &odev->encoder;
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
+ ret = drm_encoder_init(dev, encoder, &ofdrm_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
encoder->possible_crtcs = drm_crtc_mask(crtc);
@@ -1326,6 +1090,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
width, height);
+ if (edid)
+ drm_connector_attach_edid_property(connector);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
@@ -1360,19 +1126,21 @@ static struct drm_driver ofdrm_driver = {
static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
if (IS_ERR(odev))
return PTR_ERR(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, odev->format);
+ drm_client_setup(dev, sysfb->fb_format);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 5d9ab8adf800..f37b1994de71 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -14,7 +14,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -26,9 +25,10 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
+#include "drm_sysfb_helper.h"
+
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
#define DRIVER_MAJOR 1
@@ -217,7 +217,7 @@ simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
*/
struct simpledrm_device {
- struct drm_device dev;
+ struct drm_sysfb_device sysfb;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
@@ -236,28 +236,14 @@ struct simpledrm_device {
struct device_link **pwr_dom_links;
#endif
- /* simplefb settings */
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
- /* memory management */
- struct iosys_map screen_base;
-
/* modesetting */
- uint32_t formats[8];
- size_t nformats;
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
};
-static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
-{
- return container_of(dev, struct simpledrm_device, dev);
-}
-
/*
* Hardware
*/
@@ -284,7 +270,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
static void simpledrm_device_release_clocks(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->clk_count; ++i) {
@@ -297,7 +283,7 @@ static void simpledrm_device_release_clocks(void *res)
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
@@ -382,7 +368,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
static void simpledrm_device_release_regulators(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->regulator_count; ++i) {
@@ -395,7 +381,7 @@ static void simpledrm_device_release_regulators(void *res)
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
@@ -516,7 +502,7 @@ static void simpledrm_device_detach_genpd(void *res)
static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
{
- struct device *dev = sdev->dev.dev;
+ struct device *dev = sdev->sysfb.dev.dev;
int i;
sdev->pwr_dom_count = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -548,7 +534,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
simpledrm_device_detach_genpd(sdev);
return ret;
}
- drm_warn(&sdev->dev,
+ drm_warn(&sdev->sysfb.dev,
"pm_domain_attach_by_id(%u) failed: %d\n", i, ret);
continue;
}
@@ -559,7 +545,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!sdev->pwr_dom_links[i])
- drm_warn(&sdev->dev, "failed to link power-domain %d\n", i);
+ drm_warn(&sdev->sysfb.dev, "failed to link power-domain %d\n", i);
}
return devm_add_action_or_reset(dev, simpledrm_device_detach_genpd, sdev);
@@ -575,210 +561,56 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
* Modesetting
*/
-static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const u64 simpledrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != sdev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- sdev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct drm_rect dst_clip = plane_state->dst;
- struct iosys_map dst = sdev->screen_base;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
- drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
- fb, &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int idx;
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear screen to black if disabled */
- iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay);
-
- drm_dev_exit(idx);
-}
-
-static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
- struct drm_scanout_buffer *sb)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev);
-
- sb->width = sdev->mode.hdisplay;
- sb->height = sdev->mode.vdisplay;
- sb->format = sdev->format;
- sb->pitch[0] = sdev->pitch;
- sb->map[0] = sdev->screen_base;
-
- return 0;
-}
-
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = simpledrm_primary_plane_helper_atomic_check,
- .atomic_update = simpledrm_primary_plane_helper_atomic_update,
- .atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
- .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
-}
-
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
- .mode_valid = simpledrm_crtc_helper_mode_valid,
- .atomic_check = drm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
-}
-
static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
- .get_modes = simpledrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs simpledrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
* Init / Cleanup
*/
-static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height,
- unsigned int width_mm,
- unsigned int height_mm)
-{
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height, width_mm, height_mm)
- };
-
- return mode;
-}
-
static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int width, height, stride;
int width_mm = 0, height_mm = 0;
@@ -793,10 +625,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, sysfb.dev);
if (IS_ERR(sdev))
return ERR_CAST(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, sdev);
/*
@@ -858,20 +691,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
- /*
- * Assume a monitor resolution of 96 dpi if physical dimensions
- * are not specified to get a somewhat reasonable screen size.
- */
- if (!width_mm)
- width_mm = DRM_MODE_RES_MM(width, 96ul);
- if (!height_mm)
- height_mm = DRM_MODE_RES_MM(height, 96ul);
-
- sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
- sdev->format = format;
- sdev->pitch = stride;
+ sysfb->fb_mode = drm_sysfb_mode(width, height, width_mm, height_mm);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
&format->format, width, height, stride);
@@ -895,7 +719,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (IS_ERR(screen_base))
return screen_base;
- iosys_map_set_vaddr(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr(&sysfb->fb_addr, screen_base);
} else {
void __iomem *screen_base;
@@ -928,7 +752,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (!screen_base)
return ERR_PTR(-ENOMEM);
- iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
}
/*
@@ -1027,19 +851,21 @@ static struct drm_driver simpledrm_driver = {
static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
if (IS_ERR(sdev))
return PTR_ERR(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, sdev->format);
+ drm_client_setup(dev, sdev->sysfb.fb_format);
return 0;
}
@@ -1047,7 +873,7 @@ static int simpledrm_probe(struct platform_device *pdev)
static void simpledrm_remove(struct platform_device *pdev)
{
struct simpledrm_device *sdev = platform_get_drvdata(pdev);
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
drm_dev_unplug(dev);
}
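
The callbacks deleted above now live behind shared initializer macros pulled in from drm_sysfb_helper.h. As a rough sketch of what the consolidation amounts to, the plane-helper macro presumably expands to the same set of hooks simpledrm used to spell out, now backed by common drm_sysfb_* implementations (the drm_sysfb_plane_helper_* names below are assumptions; the authoritative definitions are in drm_sysfb_helper.h):

	#define DRM_SYSFB_PLANE_HELPER_FUNCS \
		DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
		.atomic_check = drm_sysfb_plane_helper_atomic_check, \
		.atomic_update = drm_sysfb_plane_helper_atomic_update, \
		.atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
		.get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer

The CRTC, connector, mode-config and format-modifier macros follow the same pattern, which is why the driver-side tables above shrink to little more than their .destroy hooks.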
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
new file mode 100644
index 000000000000..9cc50e3072ea
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/ioport.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+#include <video/vga.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "vesadrm"
+#define DRIVER_DESC "DRM driver for VESA platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define VESADRM_GAMMA_LUT_SIZE 256
+
+static int vesadrm_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (max > INT_MAX)
+ max = INT_MAX;
+ if (value > max) {
+ drm_err(dev, "%s of %llu exceeds maximum of %u\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+
+static int vesadrm_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (!value) {
+ drm_err(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ }
+ return vesadrm_get_validated_int(dev, name, value, max);
+}
+
+static s64 vesadrm_get_validated_size0(struct drm_device *dev, const char *name,
+ u64 value, u64 max)
+{
+ if (!value) {
+ drm_err(dev, "vesadrm: %s of 0 not allowed\n", name);
+ return -EINVAL;
+ } else if (value > max) {
+ drm_err(dev, "vesadrm: %s of %llu exceeds maximum of %llu\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+
+static int vesadrm_get_width_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return vesadrm_get_validated_int0(dev, "width", si->lfb_width, U16_MAX);
+}
+
+static int vesadrm_get_height_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return vesadrm_get_validated_int0(dev, "height", si->lfb_height, U16_MAX);
+}
+
+static struct resource *vesadrm_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res)
+{
+ ssize_t num;
+
+ num = screen_info_resources(si, res, 1);
+ if (!num) {
+ drm_err(dev, "vesadrm: memory resource not found\n");
+ return NULL;
+ }
+
+ return res;
+}
+
+static int vesadrm_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size)
+{
+ u64 lfb_linelength = si->lfb_linelength;
+
+ if (!lfb_linelength)
+ lfb_linelength = drm_format_info_min_pitch(format, 0, width);
+
+ return vesadrm_get_validated_int0(dev, "stride", lfb_linelength, div64_u64(size, height));
+}
+
+static u64 vesadrm_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size)
+{
+ u64 vsize = PAGE_ALIGN(height * stride);
+
+ return vesadrm_get_validated_size0(dev, "visible size", vsize, size);
+}
+
+static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct {
+ struct pixel_format pixel;
+ u32 fourcc;
+ } vesa_formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ };
+ const struct drm_format_info *format = NULL;
+ u32 bits_per_pixel;
+ size_t i;
+
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+
+ for (i = 0; i < ARRAY_SIZE(vesa_formats); ++i) {
+ const struct pixel_format *f = &vesa_formats[i].pixel;
+
+ if (bits_per_pixel == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset) {
+ format = drm_format_info(vesa_formats[i].fourcc);
+ break;
+ }
+ }
+
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ if (format->is_color_indexed)
+ return ERR_PTR(-EINVAL);
+
+ return format;
+}
+
+/*
+ * VESA device
+ */
+
+struct vesadrm_device {
+ struct drm_sysfb_device sysfb;
+
+#if defined(CONFIG_X86_32)
+ /* VESA Protected Mode interface */
+ struct {
+ const u8 *PrimaryPalette;
+ } pmi;
+#endif
+
+ void (*cmap_write)(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue);
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
+{
+ return container_of(to_drm_sysfb_device(dev), struct vesadrm_device, sysfb);
+}
+
+/*
+ * Palette
+ */
+
+static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u8 i8, r8, g8, b8;
+
+ if (index > 255)
+ return;
+
+ i8 = index;
+ r8 = red >> 8;
+ g8 = green >> 8;
+ b8 = blue >> 8;
+
+ outb_p(i8, VGA_PEL_IW);
+ outb_p(r8, VGA_PEL_D);
+ outb_p(g8, VGA_PEL_D);
+ outb_p(b8, VGA_PEL_D);
+}
+
+#if defined(CONFIG_X86_32)
+static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u32 i32 = index;
+ struct {
+ u8 b8;
+ u8 g8;
+ u8 r8;
+ u8 x8;
+ } PaletteEntry = {
+ blue >> 8,
+ green >> 8,
+ red >> 8,
+ 0x00,
+ };
+
+ if (index > 255)
+ return;
+
+ __asm__ __volatile__ (
+ "call *(%%esi)"
+ : /* no return value */
+ : "a" (0x4f09),
+ "b" (0),
+ "c" (1),
+ "d" (i32),
+ "D" (&PaletteEntry),
+ "S" (&vesa->pmi.PrimaryPalette));
+}
+#endif
+
+static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ b16 = r16;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i) {
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ vesa->cmap_write(vesa, i, 0, g16, 0);
+ }
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i) {
+ r16 = (i << 8) | i;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 8 + i / 4].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 4 + i / 16].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i)
+ vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0);
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i)
+ vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+/*
+ * Modesetting
+ */
+
+static const u64 vesadrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_set_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ } else {
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ }
+ }
+}
+
+static const struct drm_crtc_helper_funcs vesadrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+ .atomic_flush = vesadrm_crtc_helper_atomic_flush,
+};
+
+static const struct drm_crtc_funcs vesadrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs vesadrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs vesadrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs vesadrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs vesadrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize;
+ struct resource resbuf;
+ struct resource *res;
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_VLFB)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * VESA DRM driver
+ */
+
+ vesa = devm_drm_dev_alloc(&pdev->dev, drv, struct vesadrm_device, sysfb.dev);
+ if (IS_ERR(vesa))
+ return ERR_CAST(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = vesadrm_get_format_si(dev, si);
+ if (IS_ERR(format))
+ return ERR_CAST(format);
+ width = vesadrm_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = vesadrm_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = vesadrm_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = vesadrm_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = vesadrm_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+ if (!__screen_info_vbe_mode_nonvga(si)) {
+ vesa->cmap_write = vesadrm_vga_cmap_write;
+#if defined(CONFIG_X86_32)
+ } else {
+ phys_addr_t pmi_base = __screen_info_vesapm_info_base(si);
+ const u16 *pmi_addr = phys_to_virt(pmi_base);
+
+ vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2];
+ vesa->cmap_write = vesadrm_pmi_cmap_write;
+#endif
+ }
+
+#ifdef CONFIG_X86
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+ if (vesa->cmap_write)
+ sysfb->fb_gamma_lut_size = VESADRM_GAMMA_LUT_SIZE;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &vesadrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
+
+ primary_plane = &vesa->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
+ vesa->formats, nformats,
+ vesadrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &vesadrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &vesa->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &vesadrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &vesadrm_crtc_helper_funcs);
+
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
+ }
+
+ /* Encoder */
+
+ encoder = &vesa->encoder;
+ ret = drm_encoder_init(dev, encoder, &vesadrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &vesa->connector;
+ ret = drm_connector_init(dev, connector, &vesadrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &vesadrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return vesa;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(vesadrm_fops);
+
+static struct drm_driver vesadrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &vesadrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int vesadrm_probe(struct platform_device *pdev)
+{
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ vesa = vesadrm_device_create(&vesadrm_driver, pdev);
+ if (IS_ERR(vesa))
+ return PTR_ERR(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void vesadrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver vesadrm_platform_driver = {
+ .driver = {
+ .name = "vesa-framebuffer",
+ },
+ .probe = vesadrm_probe,
+ .remove = vesadrm_remove,
+};
+
+module_platform_driver(vesadrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
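
A note on the linear gamma ramps in vesadrm_set_gamma_linear() above: the 5- and 6-bit color indices are expanded to the 8-bit DAC range with i * 8 + i / 4 (0..31 -> 0..255) and i * 4 + i / 16 (0..63 -> 0..255), and the resulting byte is then replicated into the high byte so cmap_write() can simply take the top 8 bits. For example, i = 31 gives 31 * 8 + 31 / 4 = 248 + 7 = 255. A compact way to express the same mapping (hypothetical helpers, shown only for illustration):

	static inline u8 expand5(u8 i) { return i * 8 + i / 4;  }	/* ~ i * 255 / 31 */
	static inline u8 expand6(u8 i) { return i * 4 + i / 16; }	/* ~ i * 255 / 63 */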
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
index 08fbd8f151a1..990e744b0923 100644
--- a/drivers/gpu/drm/tegra/dp.c
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -256,73 +256,6 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
}
/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
index cb12ed0c54e7..695060cafac0 100644
--- a/drivers/gpu/drm/tegra/dp.h
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -164,8 +164,6 @@ int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
void drm_dp_link_update_rates(struct drm_dp_link *link);
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_choose(struct drm_dp_link *link,
const struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 9bb077558167..b5089b772267 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1564,7 +1564,6 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
- struct resource *regs;
int err;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
@@ -1636,8 +1635,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto remove;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs)) {
err = PTR_ERR(dsi->regs);
goto remove;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f98f70eda906..21f3dfdcc5c9 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2666,7 +2666,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
* the AUX transactions would just be timing out.
*/
if (output->connector.status != connector_status_disconnected) {
- err = drm_dp_link_power_down(sor->aux, &sor->link);
+ err = drm_dp_link_power_down(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power down link: %d\n",
err);
@@ -2882,7 +2882,7 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
else
dev_dbg(sor->dev, "link training succeeded\n");
- err = drm_dp_link_power_up(sor->aux, &sor->link);
+ err = drm_dp_link_power_up(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power up DP link: %d\n", err);
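
With the local copies removed from dp.c, sor.c now calls the DP power helpers provided by the DRM core, passing the DPCD revision directly instead of a struct drm_dp_link. Based on the tegra implementation deleted above, the power-up path boils down to roughly the following; dp_link_power_up_sketch() is a made-up name and the exact core implementation may differ:

	static int dp_link_power_up_sketch(struct drm_dp_aux *aux, unsigned char revision)
	{
		u8 value;
		int err;

		/* DP_SET_POWER only exists on DPCD 1.1 and later */
		if (revision < 0x11)
			return 0;

		err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
		if (err < 0)
			return err;

		value &= ~DP_SET_POWER_MASK;
		value |= DP_SET_POWER_D0;

		err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
		if (err < 0)
			return err;

		/* the sink must leave the power-saving state within 1 ms */
		usleep_range(1000, 2000);

		return 0;
	}

The power-down variant is identical except that it writes DP_SET_POWER_D3 and skips the sleep.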
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 0109bcf7faa5..3afd6587df08 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -4,7 +4,9 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
drm_kunit_helpers.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_atomic_test.o \
drm_atomic_state_test.o \
+ drm_bridge_test.o \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_connector_test.o \
diff --git a/drivers/gpu/drm/tests/drm_atomic_test.c b/drivers/gpu/drm/tests/drm_atomic_test.c
new file mode 100644
index 000000000000..ea91bec6569e
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_atomic_test.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_atomic functions
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+struct drm_atomic_test_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static const struct drm_connector_helper_funcs drm_atomic_init_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs drm_atomic_init_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_atomic_test_priv *create_device(struct kunit *test)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_connector *connector;
+ struct drm_encoder *enc;
+ struct drm_device *drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_atomic_test_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(plane))
+ return ERR_CAST(plane);
+ priv->plane = plane;
+
+ crtc = drm_kunit_helper_create_crtc(test, drm,
+ plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(crtc))
+ return ERR_CAST(crtc);
+ priv->crtc = crtc;
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(crtc);
+
+ connector = &priv->connector;
+ ret = drmm_connector_init(drm, connector,
+ &drm_atomic_init_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(connector, &drm_atomic_init_connector_helper_funcs);
+
+ drm_connector_attach_encoder(connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+static void drm_test_drm_atomic_get_connector_for_encoder(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_test_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_connector *curr_connector;
+ int ret;
+
+ priv = create_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm,
+ priv->crtc, &priv->connector,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn:
+ curr_connector = drm_atomic_get_connector_for_encoder(&priv->encoder,
+ &ctx);
+ if (PTR_ERR(curr_connector) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_conn;
+ }
+ KUNIT_EXPECT_PTR_EQ(test, curr_connector, &priv->connector);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static struct kunit_case drm_atomic_get_connector_for_encoder_tests[] = {
+ KUNIT_CASE(drm_test_drm_atomic_get_connector_for_encoder),
+ { }
+};
+
+
+static struct kunit_suite drm_atomic_get_connector_for_encoder_test_suite = {
+ .name = "drm_test_atomic_get_connector_for_encoder",
+ .test_cases = drm_atomic_get_connector_for_encoder_tests,
+};
+
+kunit_test_suite(drm_atomic_get_connector_for_encoder_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_atomic functions");
+MODULE_LICENSE("GPL");
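
The test above, like the bridge and HDMI tests that follow, uses the standard modeset-locking retry pattern: run the locked operation inside an acquire context and restart it whenever a ww-mutex deadlock is signalled with -EDEADLK. A minimal skeleton, with some_locked_operation() standing in for any helper that takes modeset locks:

	static int run_locked_example(void)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = some_locked_operation(&ctx);	/* placeholder for any lock-taking call */
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			goto retry;
		}
		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);

		return ret;
	}

When the operation built an atomic state, the state additionally has to be cleared before backing off, as the retry_commit label in the bridge test below shows.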
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
new file mode 100644
index 000000000000..ff88ec2e911c
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_bridge functions
+ */
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+struct drm_bridge_init_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
+ unsigned int enable_count;
+ unsigned int disable_count;
+};
+
+static void drm_test_bridge_enable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_disable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .enable = drm_test_bridge_enable,
+ .disable = drm_test_bridge_disable,
+};
+
+static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .atomic_enable = drm_test_bridge_atomic_enable,
+ .atomic_disable = drm_test_bridge_atomic_disable,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+KUNIT_DEFINE_ACTION_WRAPPER(drm_bridge_remove_wrapper,
+ drm_bridge_remove,
+ struct drm_bridge *);
+
+static int drm_kunit_bridge_add(struct kunit *test,
+ struct drm_bridge *bridge)
+{
+ drm_bridge_add(bridge);
+
+ return kunit_add_action_or_reset(test,
+ drm_bridge_remove_wrapper,
+ bridge);
+}
+
+static struct drm_bridge_init_priv *
+drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_encoder *enc;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_bridge_init_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(priv->plane))
+ return ERR_CAST(priv->plane);
+
+ priv->crtc = drm_kunit_helper_create_crtc(test, drm,
+ priv->plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(priv->crtc))
+ return ERR_CAST(priv->crtc);
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(priv->crtc);
+
+ bridge = &priv->bridge;
+ bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
+ bridge->funcs = funcs;
+
+ ret = drm_kunit_bridge_add(test, bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_bridge_attach(enc, bridge, NULL, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv->connector = drm_bridge_connector_init(drm, enc);
+ if (IS_ERR(priv->connector))
+ return ERR_CAST(priv->connector);
+
+ drm_connector_attach_encoder(priv->connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns the last committed
+ * state for an atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge_state *curr_bridge_state;
+ struct drm_bridge_state *bridge_state;
+ struct drm_atomic_state *state;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &priv->drm;
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_commit:
+ bridge = &priv->bridge;
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
+
+ ret = drm_atomic_commit(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_state:
+ ret = drm_modeset_lock(&bridge->base.lock, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_state;
+ }
+
+ curr_bridge_state = drm_bridge_get_current_state(bridge);
+ KUNIT_EXPECT_PTR_EQ(test, curr_bridge_state, bridge_state);
+
+ drm_modeset_unlock(&bridge->base.lock);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns NULL for a
+ * non-atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge *bridge;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ /*
+ * NOTE: Strictly speaking, we should take the bridge->base.lock
+ * before calling that function. However, bridge->base is only
+ * initialized if the bridge is atomic, while we explicitly
+ * initialize one that isn't there.
+ *
+ * In order to avoid unnecessary warnings, let's skip the
+ * locking. The function would return NULL in all cases anyway,
+ * so we don't really have any concurrency to worry about.
+ */
+ bridge = &priv->bridge;
+ KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
+}
+
+static struct kunit_case drm_bridge_get_current_state_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_legacy),
+ { }
+};
+
+
+static struct kunit_suite drm_bridge_get_current_state_test_suite = {
+ .name = "drm_test_bridge_get_current_state",
+ .test_cases = drm_bridge_get_current_state_tests,
+};
+
+/*
+ * Test that an atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+/*
+ * Test that calling drm_bridge_helper_reset_crtc() on a disabled atomic
+ * bridge will fail and not call the enable / disable callbacks
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 0);
+}
+
+/*
+ * Test that a non-atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic_disabled),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_legacy),
+ { }
+};
+
+static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
+ .name = "drm_test_bridge_helper_reset_crtc",
+ .test_cases = drm_bridge_helper_reset_crtc_tests,
+};
+
+kunit_test_suites(
+ &drm_bridge_get_current_state_test_suite,
+ &drm_bridge_helper_reset_crtc_test_suite,
+);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
+MODULE_LICENSE("GPL");
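
drm_kunit_bridge_add() above illustrates the kunit deferred-cleanup idiom: wrap the teardown function with KUNIT_DEFINE_ACTION_WRAPPER() and register it with kunit_add_action_or_reset(), so drm_bridge_remove() runs automatically when the test exits, whether it passes or fails. The same pattern works for any setup/teardown pair; a sketch with hypothetical names:

	KUNIT_DEFINE_ACTION_WRAPPER(my_release_wrapper, my_release, struct my_object *);

	static int my_kunit_acquire(struct kunit *test, struct my_object *obj)
	{
		my_acquire(obj);	/* hypothetical setup call */

		return kunit_add_action_or_reset(test, my_release_wrapper, obj);
	}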
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index b2fdb1a774fe..3f44fe5e92e4 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -88,7 +88,8 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- struct drm_display_mode *expected_mode, *mode;
+ struct drm_display_mode *expected_mode;
+ const struct drm_display_mode *mode;
const char *cmdline = "1920x1080@60";
int ret;
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index fd4215e2f982..81cadaecdd4f 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -134,7 +134,7 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -142,14 +142,14 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
ret = drm_gem_shmem_pin(shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
drm_gem_shmem_unpin(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
}
/*
@@ -168,24 +168,24 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_vmap(shmem, &map);
+ ret = drm_gem_shmem_vmap_locked(shmem, &map);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
- drm_gem_shmem_vunmap(shmem, &map);
+ drm_gem_shmem_vunmap_locked(shmem, &map);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}
/*
@@ -251,7 +251,7 @@ static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
sgt = drm_gem_shmem_get_pages_sgt(shmem);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
for_each_sgtable_sg(sgt, sg, si) {
@@ -281,17 +281,17 @@ static void drm_gem_shmem_test_madvise(struct kunit *test)
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, 1);
/* Set madv to a negative value */
- ret = drm_gem_shmem_madvise(shmem, -1);
+ ret = drm_gem_shmem_madvise_locked(shmem, -1);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
/* Check that madv cannot be set back to a positive value */
- ret = drm_gem_shmem_madvise(shmem, 0);
+ ret = drm_gem_shmem_madvise_locked(shmem, 0);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}
@@ -319,7 +319,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_FALSE(test, ret);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
/* The scatter/gather table will be freed by drm_gem_shmem_free */
@@ -329,7 +329,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_TRUE(test, ret);
- drm_gem_shmem_purge(shmem);
+ drm_gem_shmem_purge_locked(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
KUNIT_EXPECT_NULL(test, shmem->sgt);
KUNIT_EXPECT_EQ(test, shmem->madv, -1);
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index e97efd3af9ed..7ffd666753b1 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -55,49 +55,6 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
return preferred;
}
-static int light_up_connector(struct kunit *test,
- struct drm_device *drm,
- struct drm_crtc *crtc,
- struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
- int ret;
-
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
-
-retry:
- conn_state = drm_atomic_get_connector_state(state, connector);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
-
- ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- ret = drm_modeset_backoff(ctx);
- if (!ret)
- goto retry;
- }
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
-
- ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state->enable = true;
- crtc_state->active = true;
-
- ret = drm_atomic_commit(state);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- return 0;
-}
-
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
const char *edid, size_t edid_len)
{
@@ -298,7 +255,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -364,7 +324,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -432,7 +395,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -489,7 +455,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -547,7 +516,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -606,7 +578,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -666,7 +641,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -725,7 +703,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -789,7 +770,10 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -865,7 +849,10 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -941,7 +928,10 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -988,7 +978,10 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1037,7 +1030,10 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1086,7 +1082,10 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1134,7 +1133,10 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
@@ -1208,7 +1210,10 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1282,7 +1287,10 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1347,7 +1355,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1414,7 +1425,10 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1483,7 +1497,10 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1543,7 +1560,10 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1605,7 +1625,10 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1645,7 +1668,10 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 6f6616cf4966..5f7257840d8e 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -2,6 +2,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@@ -271,6 +272,66 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+/**
+ * drm_kunit_helper_enable_crtc_connector - Enables a CRTC -> Connector output
+ * @test: The test context object
+ * @drm: The DRM device the CRTC and connector belong to
+ * @crtc: The CRTC to enable
+ * @connector: The Connector to enable
+ * @mode: The display mode to configure the CRTC with
+ * @ctx: Locking context
+ *
+ * This function creates an atomic update to enable the route from @crtc
+ * to @connector, with the given @mode.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure. If the error
+ * returned is EDEADLK, the entire atomic sequence must be restarted.
+ */
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
+ if (ret)
+ return ret;
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_enable_crtc_connector);
+
static void kunit_action_drm_mode_destroy(void *ptr)
{
struct drm_display_mode *mode = ptr;
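For reference, a minimal sketch of how a test is expected to call the new helper; the example_priv fixture, its field names and the find_preferred_mode() call are placeholders and not part of this series (the hdmi-state-helper hunks above show the real call sites):

static void example_enable_output_test(struct kunit *test)
{
	struct example_priv *priv = test->priv;			/* hypothetical fixture */
	struct drm_modeset_acquire_ctx ctx;
	struct drm_display_mode *preferred;
	int ret;

	preferred = find_preferred_mode(priv->connector);	/* assumed helper */
	KUNIT_ASSERT_NOT_NULL(test, preferred);

	drm_modeset_acquire_init(&ctx, 0);

	/* One call now replaces the per-test light_up_connector() boilerplate. */
	ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm,
						     priv->crtc, priv->connector,
						     preferred, &ctx);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* ... exercise the enabled CRTC -> connector pipeline ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}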
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 17a86bed8054..95b4aeff2775 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -34,11 +34,12 @@ static inline struct tidss_encoder
}
static int tidss_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
- return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
+ return drm_bridge_attach(encoder, t_enc->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 54c84c9801c1..95c1457d7730 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -65,20 +65,6 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
-config DRM_OFDRM
- tristate "Open Firmware display driver"
- depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for Open Firmware framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the Open Firmware before the kernel boots. Scanout buffer, size,
- and display format must be provided via device tree.
-
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
@@ -95,24 +81,6 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
-config DRM_SIMPLEDRM
- tristate "Simple framebuffer driver"
- depends on DRM && MMU
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for simple platform-provided framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the firmware or bootloader before the kernel boots. Scanout
- buffer, size, and display format must be provided via device tree,
- UEFI, VESA, etc.
-
- On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
- to use UEFI and VESA framebuffers.
-
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 0a3a7837a58b..ba4a60bb72bd 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -5,9 +5,7 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
-obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
-obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
index 4370ba22dd88..703b9a41a086 100644
--- a/drivers/gpu/drm/tiny/appletbdrm.c
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -45,7 +45,7 @@
#define APPLETBDRM_BULK_MSG_TIMEOUT 1000
#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm)
-#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev))
+#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface((adev)->drm.dev))
struct appletbdrm_msg_request_header {
__le16 unk_00;
@@ -123,8 +123,6 @@ struct appletbdrm_fb_request_response {
} __packed;
struct appletbdrm_device {
- struct device *dmadev;
-
unsigned int in_ep;
unsigned int out_ep;
@@ -612,22 +610,10 @@ static const struct drm_encoder_funcs appletbdrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct appletbdrm_device *adev = drm_to_adev(dev);
-
- if (!adev->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops);
static const struct drm_driver appletbdrm_drm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = appletbdrm_driver_gem_prime_import,
.name = "appletbdrm",
.desc = "Apple Touch Bar DRM Driver",
.major = 1,
@@ -747,6 +733,7 @@ static int appletbdrm_probe(struct usb_interface *intf,
struct device *dev = &intf->dev;
struct appletbdrm_device *adev;
struct drm_device *drm = NULL;
+ struct device *dma_dev;
int ret;
ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL);
@@ -761,12 +748,19 @@ static int appletbdrm_probe(struct usb_interface *intf,
adev->in_ep = bulk_in->bEndpointAddress;
adev->out_ep = bulk_out->bEndpointAddress;
- adev->dmadev = dev;
drm = &adev->drm;
usb_set_intfdata(intf, adev);
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ drm_warn(drm, "buffer sharing not supported"); /* not an error */
+ }
+
ret = appletbdrm_get_information(adev);
if (ret) {
drm_err(drm, "Failed to get display information\n");
diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 52ec1e4ea9e5..ccf3f6551344 100644
--- a/drivers/gpu/drm/tiny/cirrus-qemu.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -70,20 +70,6 @@ struct cirrus_device {
#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
-struct cirrus_primary_plane_state {
- struct drm_shadow_plane_state base;
-
- /* HW scanout buffer */
- const struct drm_format_info *format;
- unsigned int pitch;
-};
-
-static inline struct cirrus_primary_plane_state *
-to_cirrus_primary_plane_state(struct drm_plane_state *plane_state)
-{
- return container_of(plane_state, struct cirrus_primary_plane_state, base.base);
-};
-
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -144,37 +130,6 @@ static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
}
-static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb)
-{
- if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
- if (fb->width * 3 <= CIRRUS_MAX_PITCH)
- /* convert from XR24 to RG24 */
- return drm_format_info(DRM_FORMAT_RGB888);
- else
- /* convert from XR24 to RG16 */
- return drm_format_info(DRM_FORMAT_RGB565);
- }
- return NULL;
-}
-
-static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return format;
- return fb->format;
-}
-
-static int cirrus_pitch(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return drm_format_info_min_pitch(format, 0, fb->width);
- return fb->pitches[0];
-}
-
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
u32 addr;
@@ -318,7 +273,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
/* Enable extended blanking and pitch bits, and enable full memory */
cr1b = 0x22;
cr1b |= (pitch >> 7) & 0x10;
- cr1b |= (pitch >> 6) & 0x40;
wreg_crt(cirrus, 0x1b, cr1b);
cirrus_set_start_address(cirrus, 0);
@@ -342,13 +296,10 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *new_primary_plane_state =
- to_cirrus_primary_plane_state(new_plane_state);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
- unsigned int pitch;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
@@ -362,17 +313,12 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- pitch = cirrus_pitch(fb);
-
/* validate size constraints */
- if (pitch > CIRRUS_MAX_PITCH)
+ if (fb->pitches[0] > CIRRUS_MAX_PITCH)
return -EINVAL;
- else if (pitch * fb->height > CIRRUS_VRAM_SIZE)
+ else if (fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height)
return -EINVAL;
- new_primary_plane_state->format = cirrus_format(fb);
- new_primary_plane_state->pitch = pitch;
-
return 0;
}
@@ -381,15 +327,10 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
{
struct cirrus_device *cirrus = to_cirrus(plane->dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- const struct drm_format_info *format = primary_plane_state->format;
- unsigned int pitch = primary_plane_state->pitch;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct cirrus_primary_plane_state *old_primary_plane_state =
- to_cirrus_primary_plane_state(old_plane_state);
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
@@ -401,18 +342,17 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(&cirrus->dev, &idx))
return;
- if (old_primary_plane_state->format != format)
- cirrus_format_set(cirrus, format);
- if (old_primary_plane_state->pitch != pitch)
- cirrus_pitch_set(cirrus, pitch);
+ if (!old_fb || old_fb->format != fb->format)
+ cirrus_format_set(cirrus, fb->format);
+ if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
+ cirrus_pitch_set(cirrus, fb->pitches[0]);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
- unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
+ unsigned int offset = drm_fb_clip_offset(fb->pitches[0], fb->format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
- drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
+ drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage);
}
drm_dev_exit(idx);
@@ -424,62 +364,11 @@ static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = {
.atomic_update = cirrus_primary_plane_helper_atomic_update,
};
-static struct drm_plane_state *
-cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane)
-{
- struct drm_plane_state *plane_state = plane->state;
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
- struct cirrus_primary_plane_state *new_primary_plane_state;
- struct drm_shadow_plane_state *new_shadow_plane_state;
-
- if (!plane_state)
- return NULL;
-
- new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL);
- if (!new_primary_plane_state)
- return NULL;
- new_shadow_plane_state = &new_primary_plane_state->base;
-
- __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
- new_primary_plane_state->format = primary_plane_state->format;
- new_primary_plane_state->pitch = primary_plane_state->pitch;
-
- return &new_shadow_plane_state->base;
-}
-
-static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
-{
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
-
- __drm_gem_destroy_shadow_plane_state(&primary_plane_state->base);
- kfree(primary_plane_state);
-}
-
-static void cirrus_reset_primary_plane(struct drm_plane *plane)
-{
- struct cirrus_primary_plane_state *primary_plane_state;
-
- if (plane->state) {
- cirrus_primary_plane_atomic_destroy_state(plane, plane->state);
- plane->state = NULL; /* must be set to NULL here */
- }
-
- primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL);
- if (!primary_plane_state)
- return;
- __drm_gem_reset_shadow_plane(plane, &primary_plane_state->base);
-}
-
static const struct drm_plane_funcs cirrus_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = cirrus_reset_primary_plane,
- .atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state,
- .atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -614,9 +503,17 @@ static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev
const struct drm_display_mode *mode)
{
const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
- uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ u64 pitch;
- if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE)
+ if (drm_WARN_ON_ONCE(dev, !format))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > CIRRUS_MAX_PITCH)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > CIRRUS_VRAM_SIZE / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
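A quick worked check of the reworked bounds tests, assuming (for illustration only) 4 MiB of VRAM: a 1024x768 XRGB8888 mode needs a minimum pitch of 1024 * 4 = 4096 bytes, and CIRRUS_VRAM_SIZE / vdisplay = 4194304 / 768 = 5461, so 4096 <= 5461 and the mode is accepted. For any non-zero height, "pitch > VRAM / height" rejects exactly the same cases as the old "pitch * height > VRAM", but never forms the pitch * height product, so the comparison stays within the range of the individual operands.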
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 41e9bfb2e2ff..fb0004166f4a 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -86,7 +86,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
struct gm12u320_device {
struct drm_device dev;
- struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
unsigned char *cmd_buf;
@@ -602,22 +601,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
-
- if (!gm12u320->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static const struct drm_driver gm12u320_drm_driver = {
@@ -630,7 +613,6 @@ static const struct drm_driver gm12u320_drm_driver = {
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gm12u320_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
};
@@ -645,6 +627,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
{
struct gm12u320_device *gm12u320;
struct drm_device *dev;
+ struct device *dma_dev;
int ret;
/*
@@ -660,16 +643,20 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
return PTR_ERR(gm12u320);
dev = &gm12u320->dev;
- gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!gm12u320->dmadev)
+ dma_dev = usb_intf_get_dma_device(interface);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
ret = drmm_mode_config_init(dev);
if (ret)
- goto err_put_device;
+ return ret;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -679,15 +666,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -697,31 +684,24 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put_device;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put_device;
+ return ret;
drm_client_setup(dev, NULL);
return 0;
-
-err_put_device:
- put_device(gm12u320->dmadev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
- put_device(gm12u320->dmadev);
- gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
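appletbdrm above and udl below make the same conversion; the shared shape of the probe-time change, sketched outside any particular driver (example_alloc_drm() is a placeholder for the driver's usual devm_drm_dev_alloc() path):

static int example_usb_drm_probe(struct usb_interface *intf)
{
	struct drm_device *drm = example_alloc_drm(intf);	/* placeholder */
	struct device *dma_dev;

	/* Ask the USB core for a DMA-capable device to import dma-bufs against. */
	dma_dev = usb_intf_get_dma_device(intf);
	if (dma_dev) {
		drm_dev_set_dma_dev(drm, dma_dev);
		/* The local reference is dropped right away, as in the hunks above. */
		put_device(dma_dev);
	} else {
		drm_warn(drm, "buffer sharing not supported"); /* not an error */
	}

	return 0;
}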
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index f8f20d2f6174..e08e5a138420 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -340,7 +340,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 95b86003c50d..e218a7ce490e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -235,7 +235,7 @@ static void ttm_bo_delayed_delete(struct work_struct *work)
bo = container_of(work, typeof(*bo), delayed_delete);
- dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_cleanup_memtype_use(bo);
@@ -270,7 +270,7 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- if (!dma_resv_test_signaled(bo->base.resv,
+ if (!dma_resv_test_signaled(&bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP) ||
(want_init_on_free() && (bo->ttm != NULL)) ||
bo->type == ttm_bo_type_sg ||
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 05b3a152cc33..d1bc3f165b27 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -49,22 +49,6 @@ static int udl_usb_reset_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct udl_device *udl = to_udl(dev);
-
- if (!udl->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static const struct drm_driver driver = {
@@ -73,7 +57,6 @@ static const struct drm_driver driver = {
/* GEM hooks */
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = udl_driver_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
@@ -127,9 +110,8 @@ static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_fini(dev);
- udl_drop_usb(dev);
drm_dev_unplug(dev);
+ udl_drop_usb(dev);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index be00dc1d87a1..e67e7e2e6f1f 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -51,7 +51,6 @@ struct urb_list {
struct udl_device {
struct drm_device drm;
struct device *dev;
- struct device *dmadev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 3ebe2ce55dfd..48260a821b8d 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -308,12 +308,17 @@ int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
+ struct device *dma_dev;
DRM_DEBUG("\n");
- udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!udl->dmadev)
+ dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
mutex_init(&udl->gem_lock);
@@ -336,25 +341,18 @@ int udl_init(struct udl_device *udl)
if (ret)
goto err;
- drm_kms_helper_poll_init(dev);
-
return 0;
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
- put_device(udl->dmadev);
DRM_ERROR("%d\n", ret);
return ret;
}
int udl_drop_usb(struct drm_device *dev)
{
- struct udl_device *udl = to_udl(dev);
-
udl_free_urb_list(dev);
- put_device(udl->dmadev);
- udl->dmadev = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index bbb04f98886a..3b65e93ea0ae 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -535,6 +535,7 @@ int udl_modeset_init(struct drm_device *dev)
return ret;
drm_mode_config_reset(dev);
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 76816f2551c1..7e789e181af0 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -21,74 +21,74 @@ struct v3d_reg_def {
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
- REGDEF(33, 42, V3D_HUB_AXICFG),
- REGDEF(33, 71, V3D_HUB_UIFCFG),
- REGDEF(33, 71, V3D_HUB_IDENT0),
- REGDEF(33, 71, V3D_HUB_IDENT1),
- REGDEF(33, 71, V3D_HUB_IDENT2),
- REGDEF(33, 71, V3D_HUB_IDENT3),
- REGDEF(33, 71, V3D_HUB_INT_STS),
- REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
-
- REGDEF(33, 71, V3D_MMU_CTL),
- REGDEF(33, 71, V3D_MMU_VIO_ADDR),
- REGDEF(33, 71, V3D_MMU_VIO_ID),
- REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
-
- REGDEF(71, 71, V3D_GMP_STATUS(71)),
- REGDEF(71, 71, V3D_GMP_CFG(71)),
- REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO),
+
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
- REGDEF(33, 71, V3D_CTL_IDENT0),
- REGDEF(33, 71, V3D_CTL_IDENT1),
- REGDEF(33, 71, V3D_CTL_IDENT2),
- REGDEF(33, 71, V3D_CTL_MISCCFG),
- REGDEF(33, 71, V3D_CTL_INT_STS),
- REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
- REGDEF(33, 71, V3D_CLE_CT0CS),
- REGDEF(33, 71, V3D_CLE_CT0CA),
- REGDEF(33, 71, V3D_CLE_CT0EA),
- REGDEF(33, 71, V3D_CLE_CT1CS),
- REGDEF(33, 71, V3D_CLE_CT1CA),
- REGDEF(33, 71, V3D_CLE_CT1EA),
-
- REGDEF(33, 71, V3D_PTB_BPCA),
- REGDEF(33, 71, V3D_PTB_BPCS),
-
- REGDEF(33, 42, V3D_GMP_STATUS(33)),
- REGDEF(33, 42, V3D_GMP_CFG(33)),
- REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
-
- REGDEF(33, 71, V3D_ERR_FDBGO),
- REGDEF(33, 71, V3D_ERR_FDBGB),
- REGDEF(33, 71, V3D_ERR_FDBGS),
- REGDEF(33, 71, V3D_ERR_STAT),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
- REGDEF(41, 71, V3D_CSD_STATUS),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
- REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
+ REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, "TSY: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
}
@@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, " BCG int: %d\n",
(ident2 & V3D_IDENT2_BCG_INT) != 0);
}
- if (v3d->ver < 40) {
+ if (v3d->ver < V3D_GEN_41) {
seq_printf(m, " Override TMU: %d\n",
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
@@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int core = 0;
int measure_ms = 1000;
- if (v3d->ver >= 40) {
+ if (v3d->ver >= V3D_GEN_41) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
V3D_SET_FIELD_VER(cycle_count_reg,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 852015214e97..5e997ae8bc9c 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched/clock.h>
@@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
- args->value = (v3d->ver >= 40);
+ args->value = (v3d->ver >= V3D_GEN_41);
return 0;
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
@@ -254,14 +255,44 @@ static const struct drm_driver v3d_drm_driver = {
};
static const struct of_device_id v3d_of_match[] = {
- { .compatible = "brcm,2711-v3d" },
- { .compatible = "brcm,2712-v3d" },
- { .compatible = "brcm,7268-v3d" },
- { .compatible = "brcm,7278-v3d" },
+ { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
+ { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
+ { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
+ { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
{},
};
MODULE_DEVICE_TABLE(of, v3d_of_match);
+static void
+v3d_idle_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) {
+ DRM_ERROR("Failed to power up SMS\n");
+ }
+
+ v3d_reset_sms(v3d);
+}
+
+static void
+v3d_power_off_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) {
+ DRM_ERROR("Failed to power off SMS\n");
+ }
+}
+
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
@@ -274,6 +305,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct v3d_dev *v3d;
+ enum v3d_gen gen;
int ret;
u32 mmu_debug;
u32 ident1, ident3;
@@ -287,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm);
+ gen = (uintptr_t)of_device_get_match_data(dev);
+ v3d->ver = gen;
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
return ret;
@@ -295,6 +330,12 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (v3d->ver >= V3D_GEN_71) {
+ ret = map_regs(v3d, &v3d->sms_regs, "sms");
+ if (ret)
+ return ret;
+ }
+
v3d->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(v3d->clk))
return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
@@ -305,6 +346,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
}
+ v3d_idle_sms(v3d);
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
@@ -316,6 +359,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident1 = V3D_READ(V3D_HUB_IDENT1);
v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
+ /* Make sure that the V3D tech version retrieved from the HW is equal
+ * to the one advertised by the device tree.
+ */
+ WARN_ON(v3d->ver != gen);
+
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
@@ -340,7 +388,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
}
}
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
goto clk_disable;
@@ -400,6 +448,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+ v3d_power_off_sms(v3d);
+
clk_disable_unprepare(v3d->clk);
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9deaefa0f95b..b51f0b648a08 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -94,11 +94,18 @@ struct v3d_perfmon {
u64 values[] __counted_by(ncounters);
};
+enum v3d_gen {
+ V3D_GEN_33 = 33,
+ V3D_GEN_41 = 41,
+ V3D_GEN_42 = 42,
+ V3D_GEN_71 = 71,
+};
+
struct v3d_dev {
struct drm_device drm;
/* Short representation (e.g. 33, 41) of the V3D tech version */
- int ver;
+ enum v3d_gen ver;
/* Short representation (e.g. 5, 6) of the V3D tech revision */
int rev;
@@ -111,6 +118,7 @@ struct v3d_dev {
void __iomem *core_regs[3];
void __iomem *bridge_regs;
void __iomem *gca_regs;
+ void __iomem *sms_regs;
struct clk *clk;
struct reset_control *reset;
@@ -199,7 +207,7 @@ to_v3d_dev(struct drm_device *dev)
static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
- return v3d->ver >= 41;
+ return v3d->ver >= V3D_GEN_41;
}
#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
@@ -261,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence)
#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)
+#define V3D_SMS_IDLE 0x0
+#define V3D_SMS_ISOLATING_FOR_RESET 0xa
+#define V3D_SMS_RESETTING 0xb
+#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc
+#define V3D_SMS_POWER_OFF_STATE 0xd
+
+#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset))
+#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset))
+
#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
@@ -539,6 +556,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset_sms(struct v3d_dev *v3d);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b1e681630ded..d7d16da78db3 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- if (v3d->ver < 40)
+ if (v3d->ver < V3D_GEN_41)
V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
@@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core)
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
- if (v3d->ver >= 41)
+ if (v3d->ver >= V3D_GEN_41)
return;
V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
@@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d)
}
void
+v3d_reset_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE));
+
+ if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) &&
+ !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) {
+ DRM_ERROR("Failed to wait for SMS reset\n");
+ }
+}
+
+void
v3d_reset(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
@@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d)
v3d_idle_axi(v3d, 0);
v3d_idle_gca(v3d);
+ v3d_reset_sms(v3d);
v3d_reset_v3d(v3d);
v3d_mmu_set_page_table(v3d);
@@ -132,13 +149,13 @@ v3d_reset(struct v3d_dev *v3d)
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
- if (v3d->ver < 33) {
+ if (v3d->ver < V3D_GEN_33) {
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
}
@@ -151,7 +168,7 @@ v3d_flush_l3(struct v3d_dev *v3d)
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
- if (v3d->ver > 32)
+ if (v3d->ver >= V3D_GEN_33)
return;
V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 72b6a119412f..29f63f572d35 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg)
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
- if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
+ if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV))
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -200,7 +200,7 @@ v3d_hub_irq(int irq, void *arg)
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
- if (v3d->ver >= 41) {
+ if (v3d->ver >= V3D_GEN_41) {
axi_id = axi_id >> 5;
if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
client = v3d41_axi_ids[axi_id];
@@ -217,7 +217,7 @@ v3d_hub_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
- if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+ if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
dev_err(v3d->drm.dev, "GMP Violation\n");
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 3ebda2fa46fc..9a3fe5255874 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d)
const struct v3d_perf_counter_desc *counters = NULL;
unsigned int max = 0;
- if (v3d->ver >= 71) {
+ if (v3d->ver >= V3D_GEN_71) {
counters = v3d_v71_performance_counters;
max = ARRAY_SIZE(v3d_v71_performance_counters);
- } else if (v3d->ver >= 42) {
+ } else if (v3d->ver >= V3D_GEN_42) {
counters = v3d_v42_performance_counters;
max = ARRAY_SIZE(v3d_v42_performance_counters);
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6da3c69082bd..c1870265eaee 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -515,4 +515,30 @@
# define V3D_ERR_VPAERGS BIT(1)
# define V3D_ERR_VPAEABB BIT(0)
+#define V3D_SMS_REE_CS 0x00000
+#define V3D_SMS_TEE_CS 0x00400
+# define V3D_SMS_INTERRUPT BIT(31)
+# define V3D_SMS_POWER_OFF BIT(30)
+# define V3D_SMS_CLEAR_POWER_OFF BIT(29)
+# define V3D_SMS_LOCK BIT(28)
+# define V3D_SMS_CLEAR_LOCK BIT(27)
+# define V3D_SMS_SVP_MODE_EXIT BIT(26)
+# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25)
+# define V3D_SMS_SVP_MODE_ENTER BIT(24)
+# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23)
+# define V3D_SMS_THEIR_MODE_EXIT BIT(22)
+# define V3D_SMS_THEIR_MODE_ENTER BIT(21)
+# define V3D_SMS_OUR_MODE_EXIT BIT(20)
+# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19)
+# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10)
+# define V3D_SMS_SEQ_PC_SHIFT 10
+# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8)
+# define V3D_SMS_HUBCORE_STATUS_SHIFT 8
+# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6)
+# define V3D_SMS_NEW_MODE_SHIFT 6
+# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4)
+# define V3D_SMS_OLD_MODE_SHIFT 4
+# define V3D_SMS_STATE_MASK V3D_MASK(3, 0)
+# define V3D_SMS_STATE_SHIFT 0
+
#endif /* V3D_REGS_H */
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 34c42d6e12cd..b3be08b0ca91 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -357,11 +357,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
- if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+ if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
@@ -412,7 +412,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
*
* XXX: Set the CFG7 register
*/
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
/* CFG0 write kicks off the job. */
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
index e70d7c3076ac..577d9a956369 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
@@ -61,6 +61,19 @@ static const struct drm_display_mode default_mode = {
DRM_SIMPLE_MODE(640, 480, 64, 48)
};
+/**
+ * vc4_mock_atomic_add_output() - Enables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to enable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds the CRTC and connector of an output to an atomic state, and enables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_add_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -75,30 +88,49 @@ int vc4_mock_atomic_add_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state->active = true;
return 0;
}
+/**
+ * vc4_mock_atomic_del_output() - Disables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to disable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds the CRTC and connector of an output to an atomic state, and disables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_del_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -113,26 +145,32 @@ int vc4_mock_atomic_del_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
crtc_state->active = false;
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
return 0;
}
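Since the mock helpers now return errors instead of asserting, callers have to handle -EDEADLK themselves; the vc4_test_pv_muxing.c hunks below adopt the usual backoff-and-retry shape, sketched here for a single output:

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
	if (ret == -EDEADLK) {
		/* A lock we need is contended: drop all locks and start over. */
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* ... drm_atomic_check_only(state), further assertions ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);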
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 992e8f5c5c6e..d1f694029169 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -20,7 +20,6 @@
struct pv_muxing_priv {
struct vc4_dev *vc4;
- struct drm_atomic_state *state;
};
static bool check_fifo_conflict(struct kunit *test,
@@ -677,18 +676,41 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test,
@@ -700,33 +722,61 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type,
params->check_fn));
}
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_modeset_acquire_ctx ctx;
struct pv_muxing_priv *priv;
- struct drm_device *drm;
struct vc4_dev *vc4;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
@@ -737,15 +787,6 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- drm_modeset_acquire_init(&ctx, 0);
-
- drm = &vc4->base;
- priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
return 0;
}
@@ -800,13 +841,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -823,13 +877,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -874,16 +941,35 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -908,13 +994,26 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -968,25 +1067,50 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
-
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
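All of the retries added above follow the same drm_modeset_lock backoff idiom. A minimal standalone sketch of that pattern, where do_atomic_step() is a hypothetical stand-in for vc4_mock_atomic_add_output() or drm_atomic_check_only():

static int retry_atomic_step(struct drm_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

retry:
	/* do_atomic_step() is a hypothetical placeholder for the call that
	 * may return -EDEADLK, e.g. drm_atomic_check_only(state).
	 */
	ret = do_atomic_step(state);
	if (ret == -EDEADLK) {
		/* Drop the contended state and back off the held modeset
		 * locks before trying again.
		 */
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(ctx);
		if (!ret)
			goto retry;
	}

	return ret;
}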
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 779b22efe27b..efc6f6078b02 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1160,12 +1160,13 @@ static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
}
static int vc4_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge,
&dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 37238a12baa5..a29a6ef266f9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -51,6 +51,7 @@
#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/hdmi-codec.h>
+#include <sound/jack.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -372,13 +373,13 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
* the lock for now.
*/
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
if (status == connector_status_disconnected) {
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return;
}
- drm_atomic_helper_connector_hdmi_hotplug(connector, status);
-
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
@@ -2175,6 +2176,22 @@ static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
.shutdown = vc4_hdmi_audio_shutdown,
};
+static int vc4_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct vc4_hdmi *vc4_hdmi = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT,
+ &vc4_hdmi->hdmi_jack);
+ if (ret) {
+ dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_component_set_jack(component, &vc4_hdmi->hdmi_jack, NULL);
+}
+
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2288,6 +2305,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->cpus->dai_name = dev_name(dev);
dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
+ dai_link->init = vc4_hdmi_codec_init;
card->dai_link = dai_link;
card->num_links = 1;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index e3d989ca302b..a31157c99bee 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -4,6 +4,7 @@
#include <drm/drm_connector.h>
#include <media/cec.h>
#include <sound/dmaengine_pcm.h>
+#include <sound/hdmi-codec.h>
#include <sound/soc.h>
#include "vc4_drv.h"
@@ -211,6 +212,12 @@ struct vc4_hdmi {
* KMS hooks. Protected by @mutex.
*/
enum hdmi_colorspace output_format;
+
+ /**
+ * @hdmi_jack: Represents the connection state of the HDMI plug, for
+ * ALSA jack detection.
+ */
+ struct snd_soc_jack hdmi_jack;
};
#define connector_to_vc4_hdmi(_connector) \
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c5e84d3494d2..056d344c5411 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2080,7 +2080,7 @@ static int vc6_plane_mode_set(struct drm_plane *plane,
/* HPPF plane 1 */
vc4_dlist_write(vc4_state, kernel);
/* VPPF plane 1 */
- vc4_dlist_write(vc4_state, kernel);
+ vc4_dlist_write(vc4_state, kernel);
}
}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 37bb1fb58cf9..b611c7c8ca2d 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -53,25 +53,10 @@ static void vgem_fence_release(struct dma_fence *base)
dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
-{
- snprintf(str, size, "%llu", fence->seqno);
-}
-
-static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%llu",
- dma_fence_is_signaled(fence) ? fence->seqno : 0);
-}
-
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.release = vgem_fence_release,
-
- .fence_value_str = vgem_fence_value_str,
- .timeline_value_str = vgem_fence_timeline_value_str,
};
static void vgem_fence_timeout(struct timer_list *t)
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
return false;
}
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
- snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
- int size)
-{
- struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
- snprintf(str, size, "%llu",
- (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name = virtio_gpu_get_timeline_name,
.signaled = virtio_gpu_fence_signaled,
- .fence_value_str = virtio_gpu_fence_value_str,
- .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 4de2a63ccd18..9b4bde3fda18 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -75,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
- .cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = virtgpu_gem_map_dma_buf,
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
index 9def079f685b..3c02f928ffe6 100644
--- a/drivers/gpu/drm/vkms/Kconfig
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -14,3 +14,18 @@ config DRM_VKMS
a VKMS.
If M is selected the module will be called vkms.
+
+config DRM_VKMS_KUNIT_TEST
+ tristate "KUnit tests for VKMS" if !KUNIT_ALL_TESTS
+ depends on DRM_VKMS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for VKMS. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on VKMS.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 1b28a6a32948..d657865e573f 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -6,6 +6,9 @@ vkms-y := \
vkms_formats.o \
vkms_crtc.o \
vkms_composer.o \
- vkms_writeback.o
+ vkms_writeback.o \
+ vkms_connector.o \
+ vkms_config.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/vkms/tests/.kunitconfig b/drivers/gpu/drm/vkms/tests/.kunitconfig
new file mode 100644
index 000000000000..6a2d87068edc
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_VKMS_KUNIT_TEST=y
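With the .kunitconfig above in place, the new suite can typically be exercised from the top of the kernel tree with the KUnit wrapper, e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/vkms/tests/.kunitconfig (shown only as a usage sketch; flags as usual for kunit.py).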
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
new file mode 100644
index 000000000000..9ded37b67a46
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
new file mode 100644
index 000000000000..ff4566cf9925
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -0,0 +1,951 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include "../vkms_config.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static size_t vkms_config_get_num_planes(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_encoders(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_connectors(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ count++;
+
+ return count;
+}
+
+static struct vkms_config_plane *get_first_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ return plane_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ return crtc_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ return encoder_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_connector *get_first_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ return connector_cfg;
+
+ return NULL;
+}
+
+struct default_config_case {
+ bool enable_cursor;
+ bool enable_writeback;
+ bool enable_overlay;
+};
+
+static void vkms_config_test_empty_config(struct kunit *test)
+{
+ struct vkms_config *config;
+ const char *dev_name = "test";
+
+ config = vkms_config_create(dev_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* The dev_name string and the config have different lifetimes */
+ dev_name = NULL;
+ KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test");
+
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static struct default_config_case default_config_cases[] = {
+ { false, false, false },
+ { true, false, false },
+ { true, true, false },
+ { true, false, true },
+ { false, true, false },
+ { false, true, true },
+ { false, false, true },
+ { true, true, true },
+};
+
+KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL);
+
+static void vkms_config_test_default_config(struct kunit *test)
+{
+ const struct default_config_case *params = test->param_value;
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int n_primaries = 0;
+ int n_cursors = 0;
+ int n_overlays = 0;
+
+ config = vkms_config_default_create(params->enable_cursor,
+ params->enable_writeback,
+ params->enable_overlay);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Planes */
+ vkms_config_for_each_plane(config, plane_cfg) {
+ switch (vkms_config_plane_get_type(plane_cfg)) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ n_primaries++;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ n_cursors++;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ n_overlays++;
+ break;
+ default:
+ KUNIT_FAIL_AND_ABORT(test, "Unknown plane type");
+ }
+ }
+ KUNIT_EXPECT_EQ(test, n_primaries, 1);
+ KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0);
+ KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 8 : 0);
+
+ /* CRTCs */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+
+ crtc_cfg = get_first_crtc(config);
+ KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg),
+ params->enable_writeback);
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ int n_possible_crtcs = 0;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ KUNIT_EXPECT_PTR_EQ(test, crtc_cfg, possible_crtc);
+ n_possible_crtcs++;
+ }
+ KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1);
+ }
+
+ /* Encoders */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1);
+
+ /* Connectors */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 1);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_planes(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ int n_planes = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ n_planes++;
+ KUNIT_ASSERT_EQ(test, n_planes, 0);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+ n_planes = 0;
+
+ plane_cfg2 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 2);
+ n_planes = 0;
+
+ vkms_config_destroy_plane(plane_cfg1);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ int n_encoders = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ n_encoders++;
+ KUNIT_ASSERT_EQ(test, n_encoders, 0);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_destroy_encoder(config, encoder_cfg2);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_connectors(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int n_connectors = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ n_connectors++;
+ KUNIT_ASSERT_EQ(test, n_connectors, 0);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ connector_cfg2 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1 &&
+ connector_cfg != connector_cfg2)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 2);
+ n_connectors = 0;
+
+ vkms_config_destroy_connector(connector_cfg2);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_plane_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No planes */
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many planes */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_plane(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_type(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: No primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple primary planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple cursor planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary and one cursor plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without primary plane */
+ crtc_cfg = vkms_config_create_crtc(config);
+ encoder_cfg = vkms_config_create_encoder(config);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with a primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: Primary plane without a possible CRTC */
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_crtc_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No CRTCs */
+ crtc_cfg = get_first_crtc(config);
+ vkms_config_destroy_crtc(config, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many CRTCs */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_crtc(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_encoder_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No encoders */
+ encoder_cfg = get_first_encoder(config);
+ vkms_config_destroy_encoder(config, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many encoders */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_encoder(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ crtc_cfg1 = get_first_crtc(config);
+
+ /* Invalid: Encoder without a possible CRTC */
+ encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with shared encoder */
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without encoders */
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: First CRTC with 2 possible encoders */
+ vkms_config_destroy_plane(plane_cfg);
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_connector_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No connectors */
+ connector_cfg = get_first_connector(config);
+ vkms_config_destroy_connector(connector_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many connectors */
+ for (n = 0; n <= 32; n++)
+ connector_cfg = vkms_config_create_connector(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_connector_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg = get_first_encoder(config);
+ connector_cfg = get_first_connector(config);
+
+ /* Invalid: Connector without a possible encoder */
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_attach_different_configs(struct kunit *test)
+{
+ struct vkms_config *config1, *config2;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int err;
+
+ config1 = vkms_config_create("test1");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config1);
+
+ config2 = vkms_config_create("test2");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config2);
+
+ plane_cfg1 = vkms_config_create_plane(config1);
+ crtc_cfg1 = vkms_config_create_crtc(config1);
+ encoder_cfg1 = vkms_config_create_encoder(config1);
+ connector_cfg1 = vkms_config_create_connector(config1);
+
+ plane_cfg2 = vkms_config_create_plane(config2);
+ crtc_cfg2 = vkms_config_create_crtc(config2);
+ encoder_cfg2 = vkms_config_create_encoder(config2);
+ connector_cfg2 = vkms_config_create_connector(config2);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ vkms_config_destroy(config1);
+ vkms_config_destroy(config2);
+}
+
+static void vkms_config_test_plane_attach_crtc(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *overlay_cfg;
+ struct vkms_config_plane *primary_cfg;
+ struct vkms_config_plane *cursor_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ overlay_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+ primary_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+ cursor_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+
+ /* No primary or cursor planes */
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Overlay plane, but no primary or cursor planes */
+ err = vkms_config_plane_attach_crtc(overlay_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary plane, attaching it twice must fail */
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_NE(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary and cursor planes */
+ err = vkms_config_plane_attach_crtc(cursor_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_cursor_plane(config, crtc_cfg),
+ cursor_cfg);
+
+ /* Detach primary and destroy cursor plane */
+ vkms_config_plane_detach_crtc(overlay_cfg, crtc_cfg);
+ vkms_config_plane_detach_crtc(primary_cfg, crtc_cfg);
+ vkms_config_destroy_plane(cursor_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ plane_cfg2 = vkms_config_create_plane(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and 2 */
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and plane 2 to CRTC 2 */
+ vkms_config_plane_detach_crtc(plane_cfg1, crtc_cfg2);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and 2 */
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and encoder 2 to CRTC 2 */
+ vkms_config_encoder_detach_crtc(encoder_cfg1, crtc_cfg2);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ int n_encoders = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ connector_cfg2 = vkms_config_create_connector(config);
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+
+ /* No possible encoders */
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoders 1 and 2 */
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1 &&
+ possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoder 1 and connector 2 to encoder 2 */
+ vkms_config_connector_detach_encoder(connector_cfg1, encoder_cfg2);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+
+ vkms_config_destroy(config);
+}
+
+static struct kunit_case vkms_config_test_cases[] = {
+ KUNIT_CASE(vkms_config_test_empty_config),
+ KUNIT_CASE_PARAM(vkms_config_test_default_config,
+ default_config_gen_params),
+ KUNIT_CASE(vkms_config_test_get_planes),
+ KUNIT_CASE(vkms_config_test_get_crtcs),
+ KUNIT_CASE(vkms_config_test_get_encoders),
+ KUNIT_CASE(vkms_config_test_get_connectors),
+ KUNIT_CASE(vkms_config_test_invalid_plane_number),
+ KUNIT_CASE(vkms_config_test_valid_plane_type),
+ KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_crtc_number),
+ KUNIT_CASE(vkms_config_test_invalid_encoder_number),
+ KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_connector_number),
+ KUNIT_CASE(vkms_config_test_valid_connector_possible_encoders),
+ KUNIT_CASE(vkms_config_test_attach_different_configs),
+ KUNIT_CASE(vkms_config_test_plane_attach_crtc),
+ KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_connector_get_possible_encoders),
+ {}
+};
+
+static struct kunit_suite vkms_config_test_suite = {
+ .name = "vkms-config",
+ .test_cases = vkms_config_test_cases,
+};
+
+kunit_test_suite(vkms_config_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for vkms config utility");
diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c
new file mode 100644
index 000000000000..a1df5659b0fb
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/slab.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
+#include <kunit/visibility.h>
+
+#include "vkms_config.h"
+
+struct vkms_config *vkms_config_create(const char *dev_name)
+{
+ struct vkms_config *config;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return ERR_PTR(-ENOMEM);
+
+ config->dev_name = kstrdup_const(dev_name, GFP_KERNEL);
+ if (!config->dev_name) {
+ kfree(config);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&config->planes);
+ INIT_LIST_HEAD(&config->crtcs);
+ INIT_LIST_HEAD(&config->encoders);
+ INIT_LIST_HEAD(&config->connectors);
+
+ return config;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create);
+
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_create(DEFAULT_DEVICE_NAME);
+ if (IS_ERR(config))
+ return config;
+
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+ if (IS_ERR(crtc_cfg))
+ goto err_alloc;
+ vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+
+ if (enable_overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg,
+ DRM_PLANE_TYPE_OVERLAY);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+ }
+
+ if (enable_cursor) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+
+ encoder_cfg = vkms_config_create_encoder(config);
+ if (IS_ERR(encoder_cfg))
+ goto err_alloc;
+
+ if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg))
+ goto err_alloc;
+
+ connector_cfg = vkms_config_create_connector(config);
+ if (IS_ERR(connector_cfg))
+ goto err_alloc;
+
+ if (vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg))
+ goto err_alloc;
+
+ return config;
+
+err_alloc:
+ vkms_config_destroy(config);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create);
+
+void vkms_config_destroy(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg, *plane_tmp;
+ struct vkms_config_crtc *crtc_cfg, *crtc_tmp;
+ struct vkms_config_encoder *encoder_cfg, *encoder_tmp;
+ struct vkms_config_connector *connector_cfg, *connector_tmp;
+
+ list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link)
+ vkms_config_destroy_plane(plane_cfg);
+
+ list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link)
+ vkms_config_destroy_crtc(config, crtc_cfg);
+
+ list_for_each_entry_safe(encoder_cfg, encoder_tmp, &config->encoders, link)
+ vkms_config_destroy_encoder(config, encoder_cfg);
+
+ list_for_each_entry_safe(connector_cfg, connector_tmp, &config->connectors, link)
+ vkms_config_destroy_connector(connector_cfg);
+
+ kfree_const(config->dev_name);
+ kfree(config);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy);
+
+static bool valid_plane_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_planes;
+
+ n_planes = list_count_nodes((struct list_head *)&config->planes);
+ if (n_planes <= 0 || n_planes >= 32) {
+ drm_info(dev, "The number of planes must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_planes_for_crtc(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+ bool has_primary_plane = false;
+ bool has_cursor_plane = false;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc != crtc_cfg)
+ continue;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ if (has_primary_plane) {
+ drm_info(dev, "Multiple primary planes\n");
+ return false;
+ }
+
+ has_primary_plane = true;
+ } else if (type == DRM_PLANE_TYPE_CURSOR) {
+ if (has_cursor_plane) {
+ drm_info(dev, "Multiple cursor planes\n");
+ return false;
+ }
+
+ has_cursor_plane = true;
+ }
+ }
+ }
+
+ if (!has_primary_plane) {
+ drm_info(dev, "Primary plane not found\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_plane_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ if (xa_empty(&plane_cfg->possible_crtcs)) {
+ drm_info(dev, "All planes must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_crtc_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_crtcs;
+
+ n_crtcs = list_count_nodes((struct list_head *)&config->crtcs);
+ if (n_crtcs <= 0 || n_crtcs >= 32) {
+ drm_info(dev, "The number of CRTCs must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_encoders;
+
+ n_encoders = list_count_nodes((struct list_head *)&config->encoders);
+ if (n_encoders <= 0 || n_encoders >= 32) {
+ drm_info(dev, "The number of encoders must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ if (xa_empty(&encoder_cfg->possible_crtcs)) {
+ drm_info(dev, "All encoders must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ bool crtc_has_encoder = false;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg,
+ idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ crtc_has_encoder = true;
+ }
+ }
+
+ if (!crtc_has_encoder) {
+ drm_info(dev, "All CRTCs must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_connector_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_connectors;
+
+ n_connectors = list_count_nodes((struct list_head *)&config->connectors);
+ if (n_connectors <= 0 || n_connectors >= 32) {
+ drm_info(dev, "The number of connectors must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_connector_possible_encoders(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg) {
+ if (xa_empty(&connector_cfg->possible_encoders)) {
+ drm_info(dev,
+ "All connectors must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool vkms_config_is_valid(const struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ if (!valid_plane_number(config))
+ return false;
+
+ if (!valid_crtc_number(config))
+ return false;
+
+ if (!valid_encoder_number(config))
+ return false;
+
+ if (!valid_connector_number(config))
+ return false;
+
+ if (!valid_plane_possible_crtcs(config))
+ return false;
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (!valid_planes_for_crtc(config, crtc_cfg))
+ return false;
+ }
+
+ if (!valid_encoder_possible_crtcs(config))
+ return false;
+
+ if (!valid_connector_possible_encoders(config))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid);
+
+static int vkms_config_show(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ const char *dev_name;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config);
+ seq_printf(m, "dev_name=%s\n", dev_name);
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ seq_puts(m, "plane:\n");
+ seq_printf(m, "\ttype=%d\n",
+ vkms_config_plane_get_type(plane_cfg));
+ }
+
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ seq_puts(m, "crtc:\n");
+ seq_printf(m, "\twriteback=%d\n",
+ vkms_config_crtc_get_writeback(crtc_cfg));
+ }
+
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg)
+ seq_puts(m, "encoder\n");
+
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg)
+ seq_puts(m, "connector\n");
+
+ return 0;
+}
+
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
+ { "vkms_config", vkms_config_show, 0 },
+};
+
+void vkms_config_register_debugfs(struct vkms_device *vkms_device)
+{
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+}
+
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ plane_cfg = kzalloc(sizeof(*plane_cfg), GFP_KERNEL);
+ if (!plane_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ plane_cfg->config = config;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&plane_cfg->link, &config->planes);
+
+ return plane_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane);
+
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg)
+{
+ xa_destroy(&plane_cfg->possible_crtcs);
+ list_del(&plane_cfg->link);
+ kfree(plane_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane);
+
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (plane_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_attach_crtc);
+
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&plane_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_detach_crtc);
+
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ crtc_cfg = kzalloc(sizeof(*crtc_cfg), GFP_KERNEL);
+ if (!crtc_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_cfg->config = config;
+ vkms_config_crtc_set_writeback(crtc_cfg, false);
+
+ list_add_tail(&crtc_cfg->link, &config->crtcs);
+
+ return crtc_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc);
+
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg);
+
+ list_del(&crtc_cfg->link);
+ kfree(crtc_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc);
+
+/**
+ * vkms_config_crtc_get_plane() - Return the first plane attached to a CRTC with
+ * a specific type
+ * @config: Configuration containing the CRTC and the plane
+ * @crtc_cfg: Only find planes attached to this CRTC
+ * @type: Plane type to search
+ *
+ * Returns:
+ * The first plane found attached to @crtc_cfg with the type @type.
+ */
+static struct vkms_config_plane *vkms_config_crtc_get_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg,
+ enum drm_plane_type type)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *possible_crtc;
+ enum drm_plane_type current_type;
+ unsigned long idx = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ current_type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg && current_type == type)
+ return plane_cfg;
+ }
+ }
+
+ return NULL;
+}
+
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_PRIMARY);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_primary_plane);
+
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane);
+
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ encoder_cfg = kzalloc(sizeof(*encoder_cfg), GFP_KERNEL);
+ if (!encoder_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ encoder_cfg->config = config;
+ xa_init_flags(&encoder_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&encoder_cfg->link, &config->encoders);
+
+ return encoder_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder);
+
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+
+ xa_destroy(&encoder_cfg->possible_crtcs);
+ list_del(&encoder_cfg->link);
+ kfree(encoder_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder);
+
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (encoder_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_attach_crtc);
+
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&encoder_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc);
+
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ connector_cfg = kzalloc(sizeof(*connector_cfg), GFP_KERNEL);
+ if (!connector_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ connector_cfg->config = config;
+ xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC);
+
+ list_add_tail(&connector_cfg->link, &config->connectors);
+
+ return connector_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector);
+
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg)
+{
+ xa_destroy(&connector_cfg->possible_encoders);
+ list_del(&connector_cfg->link);
+ kfree(connector_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector);
+
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ u32 encoder_idx = 0;
+
+ if (connector_cfg->config != encoder_cfg->config)
+ return -EINVAL;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx,
+ encoder_cfg, xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_attach_encoder);
+
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ xa_erase(&connector_cfg->possible_encoders, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_detach_encoder);
diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h
new file mode 100644
index 000000000000..0118e3f99706
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONFIG_H_
+#define _VKMS_CONFIG_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_config - General configuration for VKMS driver
+ *
+ * @dev_name: Name of the device
+ * @planes: List of planes configured for the device
+ * @crtcs: List of CRTCs configured for the device
+ * @encoders: List of encoders configured for the device
+ * @connectors: List of connectors configured for the device
+ * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
+ */
+struct vkms_config {
+ const char *dev_name;
+ struct list_head planes;
+ struct list_head crtcs;
+ struct list_head encoders;
+ struct list_head connectors;
+ struct vkms_device *dev;
+};
+
+/**
+ * struct vkms_config_plane
+ *
+ * @link: Link to the other planes in vkms_config
+ * @config: The vkms_config this plane belongs to
+ * @type: Type of the plane. The creator of the configuration needs to ensure that
+ * at least one primary plane is present.
+ * @possible_crtcs: Array of CRTCs that can be used with this plane
+ * @plane: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS plane during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_plane {
+ struct list_head link;
+ struct vkms_config *config;
+
+ enum drm_plane_type type;
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct vkms_plane *plane;
+};
+
+/**
+ * struct vkms_config_crtc
+ *
+ * @link: Link to the other CRTCs in vkms_config
+ * @config: The vkms_config this CRTC belongs to
+ * @writeback: If true, a writeback buffer can be attached to the CRTC
+ * @crtc: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS CRTC during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_crtc {
+ struct list_head link;
+ struct vkms_config *config;
+
+ bool writeback;
+
+ /* Internal usage */
+ struct vkms_output *crtc;
+};
+
+/**
+ * struct vkms_config_encoder
+ *
+ * @link: Link to the other encoders in vkms_config
+ * @config: The vkms_config this encoder belongs to
+ * @possible_crtcs: Array of CRTCs that can be used with this encoder
+ * @encoder: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS encoder
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_encoder {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct drm_encoder *encoder;
+};
+
+/**
+ * struct vkms_config_connector
+ *
+ * @link: Link to the other connectors in vkms_config
+ * @config: The vkms_config this connector belongs to
+ * @possible_encoders: Array of encoders that can be used with this connector
+ * @connector: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS connector
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_connector {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_encoders;
+
+ /* Internal usage */
+ struct vkms_connector *connector;
+};
+
+/**
+ * vkms_config_for_each_plane - Iterate over the vkms_config planes
+ * @config: &struct vkms_config pointer
+ * @plane_cfg: &struct vkms_config_plane pointer used as cursor
+ */
+#define vkms_config_for_each_plane(config, plane_cfg) \
+ list_for_each_entry((plane_cfg), &(config)->planes, link)
+
+/**
+ * vkms_config_for_each_crtc - Iterate over the vkms_config CRTCs
+ * @config: &struct vkms_config pointer
+ * @crtc_cfg: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_for_each_crtc(config, crtc_cfg) \
+ list_for_each_entry((crtc_cfg), &(config)->crtcs, link)
+
+/**
+ * vkms_config_for_each_encoder - Iterate over the vkms_config encoders
+ * @config: &struct vkms_config pointer
+ * @encoder_cfg: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_for_each_encoder(config, encoder_cfg) \
+ list_for_each_entry((encoder_cfg), &(config)->encoders, link)
+
+/**
+ * vkms_config_for_each_connector - Iterate over the vkms_config connectors
+ * @config: &struct vkms_config pointer
+ * @connector_cfg: &struct vkms_config_connector pointer used as cursor
+ */
+#define vkms_config_for_each_connector(config, connector_cfg) \
+ list_for_each_entry((connector_cfg), &(config)->connectors, link)
+
+/**
+ * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane
+ * possible CRTCs
+ * @plane_cfg: &struct vkms_config_plane pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \
+ xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_encoder_for_each_possible_crtc - Iterate over the
+ * vkms_config_encoder possible CRTCs
+ * @encoder_cfg: &struct vkms_config_encoder pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \
+ xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_connector_for_each_possible_encoder - Iterate over the
+ * vkms_config_connector possible encoders
+ * @connector_cfg: &struct vkms_config_connector pointer
+ * @idx: Index of the cursor
+ * @possible_encoder: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, possible_encoder) \
+ xa_for_each(&(connector_cfg)->possible_encoders, idx, (possible_encoder))
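+
+/*
+ * Illustrative sketch of the iterators above (caller-provided variables,
+ * do_something() stands in for caller code): walking every plane of a
+ * configuration and the CRTCs each plane may be attached to.
+ *
+ *	struct vkms_config_plane *plane_cfg;
+ *	struct vkms_config_crtc *possible_crtc;
+ *	unsigned long idx = 0;
+ *
+ *	vkms_config_for_each_plane(config, plane_cfg) {
+ *		vkms_config_plane_for_each_possible_crtc(plane_cfg, idx,
+ *							 possible_crtc) {
+ *			do_something(plane_cfg, possible_crtc);
+ *		}
+ *	}
+ */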
+
+/**
+ * vkms_config_create() - Create a new VKMS configuration
+ * @dev_name: Name of the device
+ *
+ * Returns:
+ * The new vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_create(const char *dev_name);
+
+/**
+ * vkms_config_default_create() - Create the configuration for the default device
+ * @enable_cursor: Whether to create a cursor plane
+ * @enable_writeback: Whether to create a writeback connector
+ * @enable_overlay: Whether to create overlay planes
+ *
+ * Returns:
+ * The default vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay);
+
+/**
+ * vkms_config_destroy() - Free a VKMS configuration
+ * @config: vkms_config to free
+ */
+void vkms_config_destroy(struct vkms_config *config);
+
+/**
+ * vkms_config_get_device_name() - Return the name of the device
+ * @config: Configuration to get the device name from
+ *
+ * Returns:
+ * The device name. Only valid while @config is valid.
+ */
+static inline const char *
+vkms_config_get_device_name(struct vkms_config *config)
+{
+ return config->dev_name;
+}
+
+/**
+ * vkms_config_get_num_crtcs() - Return the number of CRTCs in the configuration
+ * @config: Configuration to get the number of CRTCs from
+ */
+static inline size_t vkms_config_get_num_crtcs(struct vkms_config *config)
+{
+ return list_count_nodes(&config->crtcs);
+}
+
+/**
+ * vkms_config_is_valid() - Validate a configuration
+ * @config: Configuration to validate
+ *
+ * Returns:
+ * Whether the configuration is valid or not.
+ * For example, a configuration without primary planes is not valid.
+ */
+bool vkms_config_is_valid(const struct vkms_config *config);
+
+/**
+ * vkms_config_register_debugfs() - Register a debugfs file to show the device's
+ * configuration
+ * @vkms_device: Device to register
+ */
+void vkms_config_register_debugfs(struct vkms_device *vkms_device);
+
+/**
+ * vkms_config_create_plane() - Add a new plane configuration
+ * @config: Configuration to add the plane to
+ *
+ * Returns:
+ * The new plane configuration or an error. Call vkms_config_destroy_plane() to
+ * free the returned plane configuration.
+ */
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_plane() - Remove and free a plane configuration
+ * @plane_cfg: Plane configuration to destroy
+ */
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg);
+
+/**
+ * vkms_config_plane_get_type() - Return the plane type
+ * @plane_cfg: Plane to get the type from
+ */
+static inline enum drm_plane_type
+vkms_config_plane_get_type(struct vkms_config_plane *plane_cfg)
+{
+ return plane_cfg->type;
+}
+
+/**
+ * vkms_config_plane_set_type() - Set the plane type
+ * @plane_cfg: Plane to set the type to
+ * @type: New plane type
+ */
+static inline void
+vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg,
+ enum drm_plane_type type)
+{
+ plane_cfg->type = type;
+}
+
+/**
+ * vkms_config_plane_attach_crtc - Attach a plane to a CRTC
+ * @plane_cfg: Plane to attach
+ * @crtc_cfg: CRTC to attach @plane_cfg to
+ */
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_plane_detach_crtc - Detach a plane from a CRTC
+ * @plane_cfg: Plane to detach
+ * @crtc_cfg: CRTC to detach @plane_cfg from
+ */
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_crtc() - Add a new CRTC configuration
+ * @config: Configuration to add the CRTC to
+ *
+ * Returns:
+ * The new CRTC configuration or an error. Call vkms_config_destroy_crtc() to
+ * free the returned CRTC configuration.
+ */
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_crtc() - Remove and free a CRTC configuration
+ * @config: Configuration to remove the CRTC from
+ * @crtc_cfg: CRTC configuration to destroy
+ */
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_get_writeback() - Return whether a writeback connector will be created
+ * @crtc_cfg: CRTC with or without a writeback connector
+ */
+static inline bool
+vkms_config_crtc_get_writeback(struct vkms_config_crtc *crtc_cfg)
+{
+ return crtc_cfg->writeback;
+}
+
+/**
+ * vkms_config_crtc_set_writeback() - Set whether a writeback connector will be created
+ * @crtc_cfg: Target CRTC
+ * @writeback: Enable or disable the writeback connector
+ */
+static inline void
+vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg,
+ bool writeback)
+{
+ crtc_cfg->writeback = writeback;
+}
+
+/**
+ * vkms_config_crtc_primary_plane() - Return the primary plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple primary planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The primary plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_cursor_plane() - Return the cursor plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple cursor planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The cursor plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_encoder() - Add a new encoder configuration
+ * @config: Configuration to add the encoder to
+ *
+ * Returns:
+ * The new encoder configuration or an error. Call vkms_config_destroy_encoder()
+ * to free the returned encoder configuration.
+ */
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_encoder() - Remove and free an encoder configuration
+ * @config: Configuration to remove the encoder from
+ * @encoder_cfg: Encoder configuration to destroy
+ */
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_encoder_attach_crtc - Attach an encoder to a CRTC
+ * @encoder_cfg: Encoder to attach
+ * @crtc_cfg: CRTC to attach @encoder_cfg to
+ */
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_encoder_detach_crtc - Detach an encoder from a CRTC
+ * @encoder_cfg: Encoder to detach
+ * @crtc_cfg: CRTC to detach @encoder_cfg from
+ */
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_connector() - Add a new connector configuration
+ * @config: Configuration to add the connector to
+ *
+ * Returns:
+ * The new connector configuration or an error. Call
+ * vkms_config_destroy_connector() to free the returned connector configuration.
+ */
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_connector() - Remove and free a connector configuration
+ * @connector_cfg: Connector configuration to destroy
+ */
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg);
+
+/**
+ * vkms_config_connector_attach_encoder - Attach a connector to an encoder
+ * @connector_cfg: Connector to attach
+ * @encoder_cfg: Encoder to attach @connector_cfg to
+ */
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_connector_detach_encoder - Detach a connector from an encoder
+ * @connector_cfg: Connector to detach
+ * @encoder_cfg: Encoder to detach @connector_cfg from
+ */
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
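+
+/*
+ * Illustrative sketch (placeholder names, error handling omitted): building
+ * a minimal valid configuration with one primary plane, one CRTC, one
+ * encoder and one connector, then validating it. On any failure the whole
+ * configuration can be released with vkms_config_destroy().
+ *
+ *	config = vkms_config_create("vkms");
+ *
+ *	plane_cfg = vkms_config_create_plane(config);
+ *	vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ *
+ *	crtc_cfg = vkms_config_create_crtc(config);
+ *	encoder_cfg = vkms_config_create_encoder(config);
+ *	connector_cfg = vkms_config_create_connector(config);
+ *
+ *	ret = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ *	ret = ret ?: vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
+ *	ret = ret ?: vkms_config_connector_attach_encoder(connector_cfg,
+ *							  encoder_cfg);
+ *
+ *	if (ret || !vkms_config_is_valid(config))
+ *		vkms_config_destroy(config);
+ */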
+
+#endif /* _VKMS_CONFIG_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c
new file mode 100644
index 000000000000..48b10cba322a
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+
+#include "vkms_connector.h"
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ /* Use the default modes list from DRM */
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static struct drm_encoder *vkms_conn_best_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+ .best_encoder = vkms_conn_best_encoder,
+};
+
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct vkms_connector *connector;
+ int ret;
+
+ connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drmm_connector_init(dev, &connector->base, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(&connector->base, &vkms_conn_helper_funcs);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h
new file mode 100644
index 000000000000..c9149c1b7af0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONNECTOR_H_
+#define _VKMS_CONNECTOR_H_
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_connector - VKMS custom type wrapping around the DRM connector
+ *
+ * @base: Base DRM connector
+ */
+struct vkms_connector {
+ struct drm_connector base;
+};
+
+/**
+ * vkms_connector_init() - Initialize a connector
+ * @vkmsdev: VKMS device containing the connector
+ *
+ * Returns:
+ * The connector or an error on failure.
+ */
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev);
+
+#endif /* _VKMS_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index b6de91134a22..a24d1655f7b8 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -27,11 +27,9 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
+#include "vkms_config.h"
#include "vkms_drv.h"
-#include <drm/drm_print.h>
-#include <drm/drm_debugfs.h>
-
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
#define DRIVER_MAJOR 1
@@ -81,23 +79,6 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
}
-static int vkms_config_show(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
-
- seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
- seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor);
- seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay);
-
- return 0;
-}
-
-static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
- { "vkms_config", vkms_config_show, 0 },
-};
-
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.fops = &vkms_driver_fops,
@@ -170,8 +151,10 @@ static int vkms_create(struct vkms_config *config)
int ret;
struct platform_device *pdev;
struct vkms_device *vkms_device;
+ const char *dev_name;
- pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ dev_name = vkms_config_get_device_name(config);
+ pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -198,7 +181,8 @@ static int vkms_create(struct vkms_config *config)
goto out_devres;
}
- ret = drm_vblank_init(&vkms_device->drm, 1);
+ ret = drm_vblank_init(&vkms_device->drm,
+ vkms_config_get_num_crtcs(config));
if (ret) {
DRM_ERROR("Failed to vblank\n");
goto out_devres;
@@ -208,8 +192,7 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
- drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
- ARRAY_SIZE(vkms_config_debugfs_list));
+ vkms_config_register_debugfs(vkms_device);
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
@@ -231,17 +214,13 @@ static int __init vkms_init(void)
int ret;
struct vkms_config *config;
- config = kmalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- config->cursor = enable_cursor;
- config->writeback = enable_writeback;
- config->overlay = enable_overlay;
+ config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay);
+ if (IS_ERR(config))
+ return PTR_ERR(config);
ret = vkms_create(config);
if (ret) {
- kfree(config);
+ vkms_config_destroy(config);
return ret;
}
@@ -275,7 +254,7 @@ static void __exit vkms_exit(void)
return;
vkms_destroy(default_config);
- kfree(default_config);
+ vkms_config_destroy(default_config);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index abbb652be2b5..a74a7fc3a056 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -12,6 +12,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>
+#define DEFAULT_DEVICE_NAME "vkms"
+
#define XRES_MIN 10
#define YRES_MIN 10
@@ -189,20 +191,7 @@ struct vkms_output {
spinlock_t composer_lock;
};
-/**
- * struct vkms_config - General configuration for VKMS driver
- *
- * @writeback: If true, a writeback buffer can be attached to the CRTC
- * @cursor: If true, a cursor plane is created in the VKMS device
- * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device
- * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
- */
-struct vkms_config {
- bool writeback;
- bool cursor;
- bool overlay;
- struct vkms_device *dev;
-};
+struct vkms_config;
/**
* struct vkms_device - Description of a VKMS device
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 22f0d678af3a..8d7ca0cdd79f 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,121 +1,111 @@
// SPDX-License-Identifier: GPL-2.0+
+#include "vkms_config.h"
+#include "vkms_connector.h"
#include "vkms_drv.h"
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
-#include <drm/drm_probe_helper.h>
-
-static const struct drm_connector_funcs vkms_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int vkms_conn_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Use the default modes list from DRM */
- count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
- drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
-
- return count;
-}
-
-static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
- .get_modes = vkms_conn_get_modes,
-};
int vkms_output_init(struct vkms_device *vkmsdev)
{
struct drm_device *dev = &vkmsdev->drm;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct vkms_output *output;
- struct vkms_plane *primary, *overlay, *cursor = NULL;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
int ret;
int writeback;
- unsigned int n;
-
- /*
- * Initialize used plane. One primary plane is required to perform the composition.
- *
- * The overlay and cursor planes are not mandatory, but can be used to perform complex
- * composition.
- */
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(primary))
- return PTR_ERR(primary);
-
- if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
- if (IS_ERR(cursor))
- return PTR_ERR(cursor);
- }
- output = vkms_crtc_init(dev, &primary->base,
- cursor ? &cursor->base : NULL);
- if (IS_ERR(output)) {
- DRM_ERROR("Failed to allocate CRTC\n");
- return PTR_ERR(output);
- }
+ if (!vkms_config_is_valid(vkmsdev->config))
+ return -EINVAL;
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(overlay)) {
- DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
- return PTR_ERR(overlay);
- }
- overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc);
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ plane_cfg->plane = vkms_plane_init(vkmsdev, type);
+ if (IS_ERR(plane_cfg->plane)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(plane_cfg->plane);
}
}
- connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
- if (!connector) {
- DRM_ERROR("Failed to allocate connector\n");
- return -ENOMEM;
- }
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ struct vkms_config_plane *primary, *cursor;
- ret = drmm_connector_init(dev, connector, &vkms_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init connector\n");
- return ret;
- }
+ primary = vkms_config_crtc_primary_plane(vkmsdev->config, crtc_cfg);
+ cursor = vkms_config_crtc_cursor_plane(vkmsdev->config, crtc_cfg);
- drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+ crtc_cfg->crtc = vkms_crtc_init(dev, &primary->plane->base,
+ cursor ? &cursor->plane->base : NULL);
+ if (IS_ERR(crtc_cfg->crtc)) {
+ DRM_ERROR("Failed to allocate CRTC\n");
+ return PTR_ERR(crtc_cfg->crtc);
+ }
- encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder) {
- DRM_ERROR("Failed to allocate encoder\n");
- return -ENOMEM;
+ /* Initialize the writeback component */
+ if (vkms_config_crtc_get_writeback(crtc_cfg)) {
+ writeback = vkms_enable_writeback_connector(vkmsdev, crtc_cfg->crtc);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
}
- ret = drmm_encoder_init(dev, encoder, NULL,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init encoder\n");
- return ret;
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ plane_cfg->plane->base.possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- encoder->possible_crtcs = drm_crtc_mask(&output->crtc);
- /* Attach the encoder and the connector */
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("Failed to attach connector to encoder\n");
- return ret;
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ encoder_cfg->encoder = drmm_kzalloc(dev, sizeof(*encoder_cfg->encoder), GFP_KERNEL);
+ if (!encoder_cfg->encoder) {
+ DRM_ERROR("Failed to allocate encoder\n");
+ return -ENOMEM;
+ }
+ ret = drmm_encoder_init(dev, encoder_cfg->encoder, NULL,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ return ret;
+ }
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ encoder_cfg->encoder->possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- /* Initialize the writeback component */
- if (vkmsdev->config->writeback) {
- writeback = vkms_enable_writeback_connector(vkmsdev, output);
- if (writeback)
- DRM_ERROR("Failed to init writeback connector\n");
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg) {
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ connector_cfg->connector = vkms_connector_init(vkmsdev);
+ if (IS_ERR(connector_cfg->connector)) {
+ DRM_ERROR("Failed to init connector\n");
+ return PTR_ERR(connector_cfg->connector);
+ }
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg,
+ idx,
+ possible_encoder) {
+ ret = drm_connector_attach_encoder(&connector_cfg->connector->base,
+ possible_encoder->encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ return ret;
+ }
+ }
}
drm_mode_config_reset(dev);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 46a4ab688a7f..b168fd7fe9b3 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o vmwgfx_vkms.o
+ vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9b5b8c1f063b..f031a312c783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo)
{
struct vmw_resource *res;
- WARN_ON(vbo->tbo.base.funcs &&
- kref_read(&vbo->tbo.base.refcount) != 0);
+ WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
xa_destroy(&vbo->detached_resources);
@@ -51,11 +50,13 @@ static void vmw_bo_release(struct vmw_bo *vbo)
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void)vmw_resource_reserve(res, false, true);
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL,
+ vmw_resource_unreserve(res, true, false, false, NULL,
0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -73,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
- WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
+ WARN_ON(vbo->dirty);
kfree(vbo);
}
@@ -467,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw,
if (unlikely(ret != 0))
goto out_error;
+ (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
return ret;
out_error:
*p_bo = NULL;
@@ -848,9 +850,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
- xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+ return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
@@ -887,3 +889,9 @@ out:
surf = vmw_res_to_srf(res);
return surf;
}
+
+s32 vmw_bo_mobid(struct vmw_bo *vbo)
+{
+ WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
+ return (s32)vbo->tbo.resource->start;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 11e330c7c7f5..cf84a163bfcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
@@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL;
if (tmp_buf)
- ttm_bo_put(&tmp_buf->tbo);
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
- ttm_bo_get(&buf->tbo);
+ drm_gem_object_get(&buf->tbo.base);
return buf;
}
@@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
return container_of((gobj), struct vmw_bo, tbo.base);
}
+s32 vmw_bo_mobid(struct vmw_bo *vbo);
+
#endif // VMWGFX_BO_H
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index a7c07692262b..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
new file mode 100644
index 000000000000..718832b08d96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of mouse hotspot
+ * @hotspotY: the vertical position of mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+ /*
+ * Try to reserve fifocmd space and swallow any failures;
+ * such reservations cannot be left unconsumed for long
+ * at the risk of clogging other fifocmd users, so
+ * we treat reservations separately from the way we treat
+ * other fallible KMS-atomic resources at prepare_fb
+ */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
+ alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+ alpha_header->width = vps->base.crtc_w;
+ alpha_header->height = vps->base.crtc_h;
+
+ memcpy(header + 1, image, image_size);
+ vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));
+
+ vmw_bo_unmap(bo);
+ vmw_bo_unmap(vps->cursor.mob);
+}
+
+static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
+ u32 w, u32 h)
+{
+ switch (update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_NONE:
+ return 0;
+ case VMW_CURSOR_UPDATE_MOB:
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
+ }
+ return 0;
+}
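+
+/*
+ * For scale (illustrative arithmetic): a 64x64 cursor in MOB mode needs
+ * 64 * 64 * sizeof(u32) = 16 KiB of pixel data plus sizeof(SVGAGBCursorHeader),
+ * while the legacy and "none" update types allocate no MOB at all.
+ */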
+
+static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
+{
+ if (!(*vbo))
+ return;
+
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
+}
+
+/**
+ * vmw_cursor_mob_unmap - Unmaps the cursor mobs.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo || !vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
+ if (likely(ret == 0)) {
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
+ }
+
+ return ret;
+}
+
+static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
+
+ if (!vps->cursor.mob)
+ return;
+
+ vmw_cursor_mob_unmap(vps);
+
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.mob->tbo.base.size) {
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Destroy it if it's not worth caching. */
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+}
+
+static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
+
+ if (vps->cursor.mob) {
+ if (vps->cursor.mob->tbo.base.size >= size)
+ return 0;
+ vmw_cursor_mob_put(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
+ vps->cursor.mob = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
+ }
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+ /* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+ /* TODO handle non-page-aligned offsets */
+ /* TODO handle more dst & src != 0 */
+ /* TODO handle more than one copy */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor mobs.
+ *
+ * @vps: plane_state
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+ /*
+ * Currently unreachable because the if (new_bo != old_bo) check
+ * at the top returns early. In most cases a different buffer
+ * object means different contents. For the few percent of cases
+ * where that is not true, the cost of doing the memcmp on all
+ * the others seems to outweigh the benefit. Keep the conditional
+ * so it can be trivially validated by removing the initial check
+ * at the top.
+ */
+ void *old_image;
+ void *new_image;
+ bool changed = false;
+ struct ww_acquire_ctx ctx;
+ const u32 size = new_vps->base.crtc_w *
+ new_vps->base.crtc_h * sizeof(u32);
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ old_image = vmw_bo_map_and_cache(old_bo);
+ new_image = vmw_bo_map_and_cache(new_bo);
+
+ if (old_image && new_image && old_image != new_image)
+ changed = memcmp(old_image, new_image, size) != 0;
+
+ ttm_bo_unreserve(&new_bo->tbo);
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
+
+ return changed;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
+
+ if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
+ old_vps->base.hotspot_y != new_vps->base.hotspot_y)
+ return true;
+
+ if (old_vps->cursor.legacy.hotspot_x !=
+ new_vps->cursor.legacy.hotspot_x ||
+ old_vps->cursor.legacy.hotspot_y !=
+ new_vps->cursor.legacy.hotspot_y)
+ return true;
+
+ if (old_vps->base.fb != new_vps->base.fb)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane: display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ struct vmw_bo *bo = NULL;
+ struct vmw_surface *surface;
+ int ret = 0;
+
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
+ } else {
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
+ vmw_user_object_ref(&vps->uo);
+ }
+
+ vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || vps->cursor.legacy.id == surface->snooper.id)
+ vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
+ break;
+ case VMW_CURSOR_UPDATE_MOB: {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
+ return -ENOMEM;
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0)
+ return -ENOMEM;
+
+ /*
+ * vmw_bo_pin_reserved also validates, so to skip
+ * the extra validation use ttm_bo_pin directly
+ */
+ if (!bo->tbo.pin_count)
+ ttm_bo_pin(&bo->tbo);
+
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w *
+ new_state->crtc_h *
+ sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ if (!vmw_cursor_plane_changed(vps, old_vps) &&
+ !vmw_cursor_buffer_changed(vps, old_vps)) {
+ vps->cursor.update_type =
+ VMW_CURSOR_UPDATE_NONE;
+ } else {
+ vmw_cursor_mob_get(vcp, vps);
+ vmw_cursor_mob_map(vps);
+ }
+ }
+ }
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ enum vmw_cursor_update_type update_type;
+ struct drm_framebuffer *fb = new_state->fb;
+
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, true,
+ true);
+ if (ret)
+ return ret;
+
+ /* Turning off */
+ if (!fb)
+ return 0;
+
+ update_type = vmw_cursor_update_type(vmw, vps);
+ if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
+ if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
+ new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
+ drm_warn(&vmw->drm,
+ "Invalid cursor dimensions (%d, %d)\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || !surface->snooper.image) {
+ drm_warn(&vmw->drm,
+ "surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void
+vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ struct vmw_private *dev_priv = vmw_priv(plane->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ s32 hotspot_x, hotspot_y, cursor_x, cursor_y;
+
+ /*
+ * Hide the cursor if the new bo is null
+ */
+ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ vmw_cursor_plane_update_legacy(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_MOB:
+ vmw_cursor_update_mob(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ /*
+ * For all update types update the cursor position
+ */
+ cursor_x = new_state->crtc_x + du->set_gui_x;
+ cursor_y = new_state->crtc_y + du->set_gui_y;
+
+ hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
+ hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;
+
+ vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
+ cursor_y + hotspot_y);
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct vmw_plane_state *vps;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata)
+{
+ if (!file_priv->atomic && metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);
+
+ if (!image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return image;
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
new file mode 100644
index 000000000000..40694925a70e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - Derived class for cursor plane object
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: Cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
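
These declarations replace the old vmw_du_cursor_plane_* helpers; each display unit wires them into its cursor plane's drm_plane_helper_funcs, as the ldu/sou/stdu hunks below do. A condensed sketch (the struct name here is illustrative):

        static const struct drm_plane_helper_funcs cursor_plane_helper_funcs = {
                .atomic_check  = vmw_cursor_plane_atomic_check,
                .atomic_update = vmw_cursor_plane_atomic_update,
                .prepare_fb    = vmw_cursor_plane_prepare_fb,
                .cleanup_fb    = vmw_cursor_plane_cleanup_fb,
        };
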
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0f32471c8533..0695a342b1ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,31 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
@@ -1324,9 +1304,6 @@ static void vmw_master_set(struct drm_device *dev,
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5275ef632d4b..594af8eb04c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,29 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
@@ -58,7 +38,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 20
+#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -100,10 +80,6 @@
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12
-#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
-#define VMW_CURSOR_SNOOP_WIDTH 64
-#define VMW_CURSOR_SNOOP_HEIGHT 64
-
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
- size_t age;
+ size_t id;
uint32_t *image;
};
@@ -846,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo);
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -1050,7 +1024,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -1067,7 +1040,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
@@ -1393,8 +1365,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e52d73eba48..e831e324e737 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
@@ -4086,6 +4068,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
+/*
+ * DMA fence callback to remove a seqno_waiter
+ */
+struct seqno_waiter_rm_context {
+ struct dma_fence_cb base;
+ struct vmw_private *dev_priv;
+};
+
+static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct seqno_waiter_rm_context *ctx =
+ container_of(cb, struct seqno_waiter_rm_context, base);
+
+ vmw_seqno_waiter_remove(ctx->dev_priv);
+ kfree(ctx);
+}
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4266,6 +4265,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
+ struct seqno_waiter_rm_context *ctx =
+ kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx) {
+ ctx->dev_priv = dev_priv;
+ vmw_seqno_waiter_add(dev_priv);
+ if (dma_fence_add_callback(&fence->base, &ctx->base,
+ seqno_waiter_rm_cb) < 0) {
+ vmw_seqno_waiter_remove(dev_priv);
+ kfree(ctx);
+ }
+ }
}
}
@@ -4512,8 +4520,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out;
- vmw_kms_cursor_post_execbuf(dev_priv);
-
out:
if (in_fence)
dma_fence_put(in_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ed5015ced392..026c9b699604 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -140,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = {
.close = ttm_bo_vm_close,
};
-static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
@@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vm_ops = &vmw_vm_ops,
};
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo)
-{
- int ret = vmw_bo_create(vmw, params, p_vbo);
-
- if (ret != 0)
- goto out_no_bo;
-
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-out_no_bo:
- return ret;
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
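
With the vmw_gem_object_create() wrapper gone, call sites create buffers through vmw_bo_create() directly; presumably vmw_bo_create() now installs vmw_gem_object_funcs itself, although that assignment is not visible in this hunk. The updated pattern, as used at the call sites in this patch:

        struct vmw_bo *vbo;
        int ret = vmw_bo_create(dev_priv, &params, &vbo);

        if (ret != 0)
                return ret;
        /* Assumption: vbo->tbo.base.funcs already points at
         * vmw_gem_object_funcs after creation. */
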
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1912ac1cde6d..05b1c54a070c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,33 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
+
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_resource_priv.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
@@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
drm_connector_cleanup(&du->connector);
}
-/*
- * Display Unit Cursor functions
- */
-
-static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
-
-struct vmw_svga_fifo_cmd_define_cursor {
- u32 cmd;
- SVGAFifoCmdDefineAlphaCursor cursor;
-};
-
-/**
- * vmw_send_define_cursor_cmd - queue a define cursor command
- * @dev_priv: the private driver struct
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct vmw_svga_fifo_cmd_define_cursor *cmd;
- const u32 image_size = width * height * sizeof(*image);
- const u32 cmd_size = sizeof(*cmd) + image_size;
-
- /* Try to reserve fifocmd space and swallow any failures;
- such reservations cannot be left unconsumed for long
- under the risk of clogging other fifocmd users, so
- we treat reservations separtely from the way we treat
- other fallible KMS-atomic resources at prepare_fb */
- cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
-
- if (unlikely(!cmd))
- return;
-
- memset(cmd, 0, sizeof(*cmd));
-
- memcpy(&cmd[1], image, image_size);
-
- cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
- cmd->cursor.id = 0;
- cmd->cursor.width = width;
- cmd->cursor.height = height;
- cmd->cursor.hotspotX = hotspotX;
- cmd->cursor.hotspotY = hotspotY;
-
- vmw_cmd_commit_flush(dev_priv, cmd_size);
-}
-
-/**
- * vmw_cursor_update_image - update the cursor image on the provided plane
- * @dev_priv: the private driver struct
- * @vps: the plane state of the cursor plane
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- if (vps->cursor.bo)
- vmw_cursor_update_mob(dev_priv, vps, image,
- vps->base.crtc_w, vps->base.crtc_h,
- hotspotX, hotspotY);
-
- else
- vmw_send_define_cursor_cmd(dev_priv, image, width, height,
- hotspotX, hotspotY);
-}
-
-
-/**
- * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
- *
- * Called from inside vmw_du_cursor_plane_atomic_update to actually
- * make the cursor-image live.
- *
- * @dev_priv: device to work with
- * @vps: the plane state of the cursor plane
- * @image: cursor source data to fill the MOB with
- * @width: source data width
- * @height: source data height
- * @hotspotX: cursor hotspot x
- * @hotspotY: cursor hotspot Y
- */
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- SVGAGBCursorHeader *header;
- SVGAGBAlphaCursorHeader *alpha_header;
- const u32 image_size = width * height * sizeof(*image);
-
- header = vmw_bo_map_and_cache(vps->cursor.bo);
- alpha_header = &header->header.alphaHeader;
-
- memset(header, 0, sizeof(*header));
-
- header->type = SVGA_ALPHA_CURSOR;
- header->sizeInBytes = image_size;
-
- alpha_header->hotspotX = hotspotX;
- alpha_header->hotspotY = hotspotY;
- alpha_header->width = width;
- alpha_header->height = height;
-
- memcpy(header + 1, image, image_size);
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->tbo.resource->start);
-}
-
-
-static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
-{
- return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
-}
-
-/**
- * vmw_du_cursor_plane_acquire_image -- Acquire the image data
- * @vps: cursor plane state
- */
-static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
-{
- struct vmw_surface *surf;
-
- if (vmw_user_object_is_null(&vps->uo))
- return NULL;
-
- surf = vmw_user_object_surface(&vps->uo);
- if (surf && !vmw_user_object_is_mapped(&vps->uo))
- return surf->snooper.image;
-
- return vmw_user_object_map(&vps->uo);
-}
-
-static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
- struct vmw_plane_state *new_vps)
-{
- void *old_image;
- void *new_image;
- u32 size;
- bool changed;
-
- if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
- old_vps->base.crtc_h != new_vps->base.crtc_h)
- return true;
-
- if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
- old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
- return true;
-
- size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
-
- old_image = vmw_du_cursor_plane_acquire_image(old_vps);
- new_image = vmw_du_cursor_plane_acquire_image(new_vps);
-
- changed = false;
- if (old_image && new_image && old_image != new_image)
- changed = memcmp(old_image, new_image, size) != 0;
-
- return changed;
-}
-
-static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
-{
- if (!(*vbo))
- return;
-
- ttm_bo_unpin(&(*vbo)->tbo);
- vmw_bo_unreference(vbo);
-}
-
-static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- u32 i;
-
- if (!vps->cursor.bo)
- return;
-
- vmw_du_cursor_plane_unmap_cm(vps);
-
- /* Look for a free slot to return this mob to the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (!vcp->cursor_mobs[i]) {
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Cache is full: See if this mob is bigger than an existing mob. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->tbo.base.size <
- vps->cursor.bo->tbo.base.size) {
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Destroy it if it's not worth caching. */
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
-}
-
-static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- u32 i;
- u32 cursor_max_dim, mob_max_size;
- struct vmw_fence_obj *fence = NULL;
- int ret;
-
- if (!dev_priv->has_mob ||
- (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -EINVAL;
-
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
-
- if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
- vps->base.crtc_h > cursor_max_dim)
- return -EINVAL;
-
- if (vps->cursor.bo) {
- if (vps->cursor.bo->tbo.base.size >= size)
- return 0;
- vmw_du_put_cursor_mob(vcp, vps);
- }
-
- /* Look for an unused mob in the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->tbo.base.size >= size) {
- vps->cursor.bo = vcp->cursor_mobs[i];
- vcp->cursor_mobs[i] = NULL;
- return 0;
- }
- }
- /* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_and_populate(dev_priv, size,
- VMW_BO_DOMAIN_MOB,
- &vps->cursor.bo);
-
- if (ret != 0)
- return ret;
-
- /* Fence the mob creation so we are guarateed to have the mob */
- ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
- if (ret != 0)
- goto teardown;
-
- ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret != 0) {
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- goto teardown;
- }
-
- dma_fence_wait(&fence->base, false);
- dma_fence_put(&fence->base);
-
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
- return ret;
-}
-
-
-static void vmw_cursor_update_position(struct vmw_private *dev_priv,
- bool show, int x, int y)
-{
- const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
- : SVGA_CURSOR_ON_HIDE;
- uint32_t count;
-
- spin_lock(&dev_priv->cursor_lock);
- if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
- vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
- } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
- } else {
- vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
- }
- spin_unlock(&dev_priv->cursor_lock);
-}
-
-void vmw_kms_cursor_snoop(struct vmw_surface *srf,
- struct ttm_object_file *tfile,
- struct ttm_buffer_object *bo,
- SVGA3dCmdHeader *header)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- SVGA3dCopyBox *box;
- unsigned box_count;
- void *virtual;
- bool is_iomem;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int i, ret;
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
-
- cmd = container_of(header, struct vmw_dma_cmd, header);
-
- /* No snooper installed, nothing to copy */
- if (!srf->snooper.image)
- return;
-
- if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
- DRM_ERROR("face and mipmap for cursors should never != 0\n");
- return;
- }
-
- if (cmd->header.size < 64) {
- DRM_ERROR("at least one full copy box must be given\n");
- return;
- }
-
- box = (SVGA3dCopyBox *)&cmd[1];
- box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
- sizeof(SVGA3dCopyBox);
-
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
- box->x != 0 || box->y != 0 || box->z != 0 ||
- box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1 ||
- box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
- /* TODO handle none page aligned offsets */
- /* TODO handle more dst & src != 0 */
- /* TODO handle more then one copy */
- DRM_ERROR("Can't snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
- return;
- }
-
- kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(bo, true, false, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
-
- if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
- memcpy(srf->snooper.image, virtual,
- VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
- } else {
- /* Image is unsigned pointer. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * image_pitch,
- virtual + i * cmd->dma.guest.pitch,
- box->w * desc->pitchBytesPerBlock);
- }
-
- srf->snooper.age++;
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(bo);
-}
-
-/**
- * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
- *
- * @dev_priv: Pointer to the device private struct.
- *
- * Clears all legacy hotspots.
- */
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- drm_modeset_lock_all(dev);
- drm_for_each_crtc(crtc, dev) {
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = 0;
- du->hotspot_y = 0;
- }
- drm_modeset_unlock_all(dev);
-}
-
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev->mode_config.mutex);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age ||
- !du->cursor_surface->snooper.image)
- continue;
-
- du->cursor_age = du->cursor_surface->snooper.age;
- vmw_send_define_cursor_cmd(dev_priv,
- du->cursor_surface->snooper.image,
- VMW_CURSOR_SNOOP_WIDTH,
- VMW_CURSOR_SNOOP_HEIGHT,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- u32 i;
-
- vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
-
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
-
- drm_plane_cleanup(plane);
-}
-
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
@@ -575,262 +89,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
/**
- * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
- *
- * @vps: plane_state
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
-{
- int ret;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo;
-
- if (!vps->cursor.bo)
- return -EINVAL;
-
- bo = &vps->cursor.bo->tbo;
-
- if (bo->base.size < size)
- return -EINVAL;
-
- if (vps->cursor.bo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- vmw_bo_map_and_cache(vps->cursor.bo);
-
- ttm_bo_unreserve(bo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- return 0;
-}
-
-
-/**
- * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
- *
- * @vps: state of the cursor plane
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
-{
- int ret = 0;
- struct vmw_bo *vbo = vps->cursor.bo;
-
- if (!vbo || !vbo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
- if (likely(ret == 0)) {
- vmw_bo_unmap(vbo);
- ttm_bo_unreserve(&vbo->tbo);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
- *
- * @plane: cursor plane
- * @old_state: contains the state to clean up
- *
- * Unmaps all cursor bo mappings and unpins the cursor surface
- *
- * Returns 0 on success
- */
-void
-vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
-
- if (!vmw_user_object_is_null(&vps->uo))
- vmw_user_object_unmap(&vps->uo);
-
- vmw_du_cursor_plane_unmap_cm(vps);
- vmw_du_put_cursor_mob(vcp, vps);
-
- vmw_du_plane_unpin_surf(vps);
- vmw_user_object_unref(&vps->uo);
-}
-
-
-/**
- * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
- *
- * @plane: display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-int
-vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_framebuffer *fb = new_state->fb;
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_bo *bo = NULL;
- int ret = 0;
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_user_object_unmap(&vps->uo);
- vmw_user_object_unref(&vps->uo);
- }
-
- if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
- vps->uo.surface = NULL;
- } else {
- memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
- }
- vmw_user_object_ref(&vps->uo);
- }
-
- bo = vmw_user_object_buffer(&vps->uo);
- if (bo) {
- struct ttm_operation_ctx ctx = {false, false};
-
- ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
- if (ret != 0)
- return -ENOMEM;
-
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret != 0)
- return -ENOMEM;
-
- vmw_bo_pin_reserved(bo, true);
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- (void)vmw_bo_map_and_cache_size(bo, size);
- } else {
- vmw_bo_map_and_cache(bo);
- }
- ttm_bo_unreserve(&bo->tbo);
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_du_get_cursor_mob(vcp, vps);
- vmw_du_cursor_plane_map_cm(vps);
- }
-
- return 0;
-}
-
-
-void
-vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
- struct vmw_bo *old_bo = NULL;
- struct vmw_bo *new_bo = NULL;
- struct ww_acquire_ctx ctx;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + new_state->hotspot_x;
- hotspot_y = du->hotspot_y + new_state->hotspot_y;
-
- du->cursor_surface = vmw_user_object_surface(&vps->uo);
-
- if (vmw_user_object_is_null(&vps->uo)) {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
- }
-
- vps->cursor.hotspot_x = hotspot_x;
- vps->cursor.hotspot_y = hotspot_y;
-
- if (du->cursor_surface)
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ww_acquire_init(&ctx, &reservation_ww_class);
-
- if (!vmw_user_object_is_null(&old_vps->uo)) {
- old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
- if (ret != 0)
- return;
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- new_bo = vmw_user_object_buffer(&vps->uo);
- if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
- if (ret != 0) {
- if (old_bo) {
- ttm_bo_unreserve(&old_bo->tbo);
- ww_acquire_fini(&ctx);
- }
- return;
- }
- } else {
- new_bo = NULL;
- }
- }
- if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
- /*
- * If it hasn't changed, avoid making the device do extra
- * work by keeping the old cursor active.
- */
- struct vmw_cursor_plane_state tmp = old_vps->cursor;
- old_vps->cursor = vps->cursor;
- vps->cursor = tmp;
- } else {
- void *image = vmw_du_cursor_plane_acquire_image(vps);
- if (image)
- vmw_cursor_update_image(dev_priv, vps, image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- }
-
- if (new_bo)
- ttm_bo_unreserve(&new_bo->tbo);
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
-
- ww_acquire_fini(&ctx);
-
- du->cursor_x = new_state->crtc_x + du->set_gui_x;
- du->cursor_y = new_state->crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
-
- du->core_hotspot_x = hotspot_x - du->hotspot_x;
- du->core_hotspot_y = hotspot_y - du->hotspot_y;
-}
-
-
-/**
* vmw_du_primary_plane_atomic_check - check if the new state is okay
*
* @plane: display plane
@@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
return ret;
}
-
-/**
- * vmw_du_cursor_plane_atomic_check - check if the new state is okay
- *
- * @plane: cursor plane
- * @state: info on the new plane state
- *
- * This is a chance to fail if the new cursor state does not fit
- * our requirements.
- *
- * Returns 0 on success
- */
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0;
- struct drm_crtc_state *crtc_state = NULL;
- struct vmw_surface *surface = NULL;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (new_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* Turning off */
- if (!fb)
- return 0;
-
- /* A lot of the code assumes this */
- if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
- DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
- new_state->crtc_w, new_state->crtc_h);
- return -EINVAL;
- }
-
- if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
-
- WARN_ON(!surface);
-
- if (!surface ||
- (!surface->snooper.image && !surface->res.guest_memory_bo)) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
- memset(&vps->cursor, 0, sizeof(vps->cursor));
+ vps->cursor.mob = NULL;
/* Each ref counted resource needs to be acquired again */
vmw_user_object_ref(&vps->uo);
@@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+ if (bo) {
+ vmw_bo_dirty_release(bo);
+ /*
+ * bo->dirty is reference counted, so it being NULL here
+ * means that the surface wasn't coherent to begin with
+ * and we have to free the dirty tracker in the
+ * vmw_resource.
+ */
+ if (!bo->dirty && surf && surf->res.dirty)
+ surf->res.func->dirty_free(&surf->res);
+ }
drm_framebuffer_cleanup(framebuffer);
vmw_user_object_unref(&vfbs->uo);
@@ -1375,6 +586,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
+ vmw_bo_dirty_release(vfbd->buffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
@@ -1505,6 +717,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_user_object uo = {0};
+ struct vmw_bo *bo;
+ struct vmw_surface *surface;
int ret;
/* returns either a bo or surface */
@@ -1534,6 +748,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
err_out:
+ bo = vmw_user_object_buffer(&uo);
+ surface = vmw_user_object_surface(&uo);
/* vmw_user_object_lookup takes one ref so does new_fb */
vmw_user_object_unref(&uo);
@@ -1542,6 +758,14 @@ err_out:
return ERR_PTR(ret);
}
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+
return &vfb->base;
}
@@ -1974,44 +1198,6 @@ int vmw_kms_close(struct vmw_private *dev_priv)
return ret;
}
-int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_cursor_bypass_arg *arg = data;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
- }
-
- crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4eab581883e2..511e29cdb987 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,40 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
+#include "vmwgfx_cursor_plane.h"
+#include "vmwgfx_drv.h"
+
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
-#include "vmwgfx_drv.h"
-
/**
* struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
* @plane: Plane which is being updated.
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
-#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
/**
* Derived class for crtc state object
@@ -255,11 +231,6 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
-struct vmw_cursor_plane_state {
- struct vmw_bo *bo;
- s32 hotspot_x;
- s32 hotspot_y;
-};
/**
* Derived class for plane state object
@@ -283,7 +254,6 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- bool surf_mapped;
struct vmw_cursor_plane_state cursor;
};
@@ -317,17 +287,6 @@ struct vmw_connector_state {
int gui_y;
};
-/**
- * Derived class for cursor plane object
- *
- * @base DRM plane object
- * @cursor.cursor_mobs Cursor mobs available for re-use
- */
-struct vmw_cursor_plane {
- struct drm_plane base;
-
- struct vmw_bo *cursor_mobs[3];
-};
/**
* Base class display unit.
@@ -343,17 +302,6 @@ struct vmw_display_unit {
struct drm_plane primary;
struct vmw_cursor_plane cursor;
- struct vmw_surface *cursor_surface;
- size_t cursor_age;
-
- int cursor_x;
- int cursor_y;
-
- int hotspot_x;
- int hotspot_y;
- s32 core_hotspot_x;
- s32 core_hotspot_y;
-
unsigned unit;
/*
@@ -403,8 +351,6 @@ struct vmw_display_unit {
*/
void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -460,19 +406,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state);
-void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state);
-int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f0b429525467..c23c9195f0dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7055cbefc768..d8204d4265d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -282,8 +282,7 @@ out_no_setup:
}
vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
- ttm_bo_put(&batch->otable_bo->tbo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 74ff2812d66a..7de20e56082c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,27 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
@@ -71,6 +52,11 @@ struct vmw_bo_dirty {
unsigned long bitmap[];
};
+bool vmw_bo_is_dirty(struct vmw_bo *vbo)
+{
+ return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
+}
+
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
@@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
dirty->end = res_start;
}
+void vmw_bo_dirty_clear(struct vmw_bo *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = 0;
+ unsigned long res_end = vbo->tbo.base.size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
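
vmw_bo_is_dirty() and vmw_bo_dirty_clear() are exported through vmwgfx_drv.h; their user is not part of this hunk, so the pairing below is only a hedged sketch with a hypothetical helper name:

        /* Hypothetical helper, illustrating the intended pairing only. */
        static void vmw_bo_dirty_reset(struct vmw_bo *vbo)
        {
                if (vmw_bo_is_dirty(vbo))
                        vmw_bo_dirty_clear(vbo);
        }
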
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a73af8a355fb..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_user_bo_unref(&res->guest_memory_bo);
@@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 32029d80b72b..5f5f5a94301f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = {
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f5d2ed1b0a72..20aab725e53a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&cursor->base);
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
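
The drm_plane_enable_fb_damage_clips() call added above lets userspace pass damage rectangles for the cursor plane. The cursor path in this patch still re-uploads the whole image, but for reference, a plane that does consume clips would iterate them in atomic_update roughly like this (illustrative sketch using the generic DRM damage helpers):

        #include <drm/drm_damage_helper.h>

        static void example_plane_atomic_update(struct drm_plane *plane,
                                                struct drm_atomic_state *state)
        {
                struct drm_plane_state *old_state =
                        drm_atomic_get_old_plane_state(state, plane);
                struct drm_plane_state *new_state =
                        drm_atomic_get_new_plane_state(state, plane);
                struct drm_atomic_helper_damage_iter iter;
                struct drm_rect clip;

                drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
                drm_atomic_for_each_plane_damage(&iter, &clip) {
                        /* copy only clip.x1..x2 x clip.y1..y2 to the device */
                }
        }
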
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5721c74da3e0..7e281c3c6bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,32 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#include "vmwgfx_bo.h"
+#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -658,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- WARN_ON_ONCE(res->dirty);
+ WARN_ON(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -689,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
* Dumb buffers own the resource and they'll unref the
* resource themselves
*/
- if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
- return;
+ WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
vmw_resource_unreference(&res);
}
@@ -818,25 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (!file_priv->atomic &&
- metadata->scanout &&
- metadata->num_sizes == 1 &&
- metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
- metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
- metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
- VMW_CURSOR_SNOOP_HEIGHT *
- desc->pitchBytesPerBlock;
- srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
+
+ srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
+ if (IS_ERR(srf->snooper.image)) {
+ ret = PTR_ERR(srf->snooper.image);
+ goto out_no_copy;
}
if (drm_is_primary_client(file_priv))
@@ -864,14 +830,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv,
- &params,
- &res->guest_memory_bo);
+ ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1670,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
+ if (res->guest_memory_bo) {
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
@@ -1684,7 +1661,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2358,12 +2334,19 @@ int vmw_dumb_create(struct drm_file *file_priv,
vbo = res->guest_memory_bo;
vbo->is_dumb = true;
vbo->dumb_surface = vmw_res_to_srf(res);
-
+ drm_gem_object_put(&vbo->tbo.base);
+ /*
+	 * Unset the user surface dtor since this is not actually exposed
+	 * to userspace. The surface is owned via the dumb buffer's GEM handle.
+ */
+ struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
+ struct vmw_user_surface, srf);
+ usurf->prime.base.refcount_release = NULL;
err:
if (res)
vmw_resource_unreference(&res);
- if (ret)
- ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7625b3f71e0..7ee93e7191c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
- if (!val_buf->bo)
- return -ESRCH;
+ vmw_bo_reference(vbo);
+ val_buf->bo = &vbo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
}
@@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- ttm_bo_put(entry->base.bo);
+ drm_gem_object_put(&entry->base.bo->base);
entry->base.bo = NULL;
}
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index dbecca9bdd54..cfabf5e2a0bb 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -22,6 +22,7 @@ config DRM_ZYNQMP_DPSUB_AUDIO
bool "ZynqMP DisplayPort Audio Support"
depends on DRM_ZYNQMP_DPSUB
depends on SND && SND_SOC
+ depends on SND_SOC=y || DRM_ZYNQMP_DPSUB=m
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Choose this option to enable DisplayPort audio support in the ZynqMP
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index a6a4a871f197..11d2415fb5a1 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1481,6 +1481,7 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
*/
static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -1494,7 +1495,7 @@ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
}
if (dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, dp->next_bridge,
+ ret = drm_bridge_attach(encoder, dp->next_bridge,
bridge, flags);
if (ret < 0)
goto error;
diff --git a/drivers/platform/arm64/acer-aspire1-ec.c b/drivers/platform/arm64/acer-aspire1-ec.c
index 2df42406430d..958fe1bf5f85 100644
--- a/drivers/platform/arm64/acer-aspire1-ec.c
+++ b/drivers/platform/arm64/acer-aspire1-ec.c
@@ -366,7 +366,8 @@ static const struct power_supply_desc aspire_ec_adp_psy_desc = {
* USB-C DP Alt mode HPD.
*/
-static int aspire_ec_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int aspire_ec_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}
diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c
index 64117c6367ab..900e9386eceb 100644
--- a/drivers/video/screen_info_generic.c
+++ b/drivers/video/screen_info_generic.c
@@ -144,3 +144,39 @@ ssize_t screen_info_resources(const struct screen_info *si, struct resource *r,
return pos - r;
}
EXPORT_SYMBOL(screen_info_resources);
+
+/*
+ * The meaning of depth and bpp for direct-color formats is
+ * inconsistent:
+ *
+ * - DRM format info specifies depth as the number of color
+ *   bits, including alpha, but not including filler bits.
+ * - Linux' EFI platform code computes lfb_depth from the
+ * individual color channels, including the reserved bits.
+ * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
+ * versions use 15.
+ * - On the kernel command line, 'bpp' of 32 is usually
+ * XRGB8888 including the filler bits, but 15 is XRGB1555
+ * not including the filler bit.
+ *
+ * It is not easily possible to fix this in struct screen_info,
+ * as this could break UAPI. The best solution is to compute
+ * bits_per_pixel from the color bits, reserved bits and
+ * reported lfb_depth, whichever is highest.
+ */
+
+u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si)
+{
+ u32 bits_per_pixel = si->lfb_depth;
+
+ if (bits_per_pixel > 8) {
+ bits_per_pixel = max(max3(si->red_size + si->red_pos,
+ si->green_size + si->green_pos,
+ si->blue_size + si->blue_pos),
+ si->rsvd_size + si->rsvd_pos);
+ bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
+ }
+
+ return bits_per_pixel;
+}
+EXPORT_SYMBOL(__screen_info_lfb_bits_per_pixel);
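A worked example may help here. The following sketch (not part of the patch) shows the helper recovering 32 bpp for a hypothetical XRGB8888 framebuffer whose firmware reported an lfb_depth of only 24:

	/*
	 * Hedged sketch, not part of this patch: hypothetical XRGB8888
	 * layout where the EFI stub reported only the 24 color bits.
	 */
	struct screen_info si = {
		.lfb_depth  = 24,
		.red_size   = 8, .red_pos   = 16,
		.green_size = 8, .green_pos = 8,
		.blue_size  = 8, .blue_pos  = 0,
		.rsvd_size  = 8, .rsvd_pos  = 24,
	};
	u32 bpp = __screen_info_lfb_bits_per_pixel(&si); /* returns 32 */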
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 5ae4241959f2..d9614e2c8939 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -528,13 +528,72 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
/**
+ * drm_dp_dpcd_read_data() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
+ * drm_dp_dpcd_write_data() - write a series of bytes to the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
* drm_dp_dpcd_readb() - read a single byte from the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to read
* @valuep: location where the value of the register will be stored
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most cases you should be using
+ * drm_dp_dpcd_read_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
unsigned int offset, u8 *valuep)
@@ -549,7 +608,8 @@ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
* @value: value to write to the register
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most cases you should be using
+ * drm_dp_dpcd_write_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
unsigned int offset, u8 value)
@@ -557,6 +617,34 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
return drm_dp_dpcd_write(aux, offset, &value, 1);
}
+/**
+ * drm_dp_dpcd_read_byte() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_read_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read_data(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_write_byte() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_write_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write_data(aux, offset, &value, 1);
+}
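A short usage sketch (not part of the patch) shows how callers of the new byte helpers check for zero rather than comparing against a transfer size; the wrapper name example_wake_sink is hypothetical, the DPCD register definitions are existing ones:

	static int example_wake_sink(struct drm_dp_aux *aux)
	{
		u8 power;
		int ret;

		ret = drm_dp_dpcd_read_byte(aux, DP_SET_POWER, &power);
		if (ret < 0)
			return ret;

		power &= ~DP_SET_POWER_MASK;
		power |= DP_SET_POWER_D0;

		/* 0 on success, negative error code otherwise */
		return drm_dp_dpcd_write_byte(aux, DP_SET_POWER, power);
	}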
+
int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE]);
@@ -566,6 +654,8 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
enum drm_dp_phy dp_phy,
u8 link_status[DP_LINK_STATUS_SIZE]);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision);
int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int vcpid, u8 start_time_slot, u8 time_slot_count);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 4c673f0698fe..38636a593c9d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -625,6 +625,9 @@ drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_connector *
drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc *
drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index d4c75d59fa12..4e418a29a9ff 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -73,7 +73,7 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge,
+ int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
/**
@@ -681,8 +681,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_startup:
*
- * Called when ASoC starts an audio stream setup. The
- * @hdmi_audio_startup() is optional.
+ * Called when ASoC starts an audio stream setup.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -693,8 +695,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_prepare:
* Configures HDMI-encoder for audio stream. Can be called multiple
- * times for each setup. Mandatory if HDMI audio is enabled in the
- * bridge's configuration.
+ * times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -707,8 +711,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_shutdown:
*
- * Shut down the audio stream. Mandatory if HDMI audio is enabled in
- * the bridge's configuration.
+ * Shut down the audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -719,8 +725,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_mute_stream:
*
- * Mute/unmute HDMI audio stream. The @hdmi_audio_mute_stream callback
- * is optional.
+ * Mute/unmute HDMI audio stream.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -730,6 +738,65 @@ struct drm_bridge_funcs {
bool enable, int direction);
/**
+ * @dp_audio_startup:
+ *
+ * Called when ASoC starts a DisplayPort audio stream setup.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_startup)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @dp_audio_prepare:
+ * Configures DisplayPort audio stream. Can be called multiple
+ * times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_prepare)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @dp_audio_shutdown:
+ *
+ * Shut down the DisplayPort audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ void (*dp_audio_shutdown)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @dp_audio_mute_stream:
+ *
+ * Mute/unmute DisplayPort audio stream.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_mute_stream)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ bool enable, int direction);
+
+ /**
* @debugfs_init:
*
* Allows bridges to create bridge-specific debugfs files.
@@ -814,6 +881,32 @@ enum drm_bridge_ops {
* drivers.
*/
DRM_BRIDGE_OP_HDMI = BIT(4),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_AUDIO: The bridge provides HDMI audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->hdmi_audio_prepare and
+ * &drm_bridge_funcs->hdmi_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_DP_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_HDMI_AUDIO = BIT(5),
+ /**
+ * @DRM_BRIDGE_OP_DP_AUDIO: The bridge provides DisplayPort audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->dp_audio_prepare and
+ * &drm_bridge_funcs->dp_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_HDMI_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_DP_AUDIO = BIT(6),
};
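As a hedged sketch (not part of this patch), a bridge advertising DisplayPort audio support would set the new op bit next to its callbacks; every foo_* identifier below is hypothetical driver code:

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		.attach            = foo_bridge_attach,
		/* mandatory when DRM_BRIDGE_OP_DP_AUDIO is set */
		.dp_audio_prepare  = foo_bridge_dp_audio_prepare,
		.dp_audio_shutdown = foo_bridge_dp_audio_shutdown,
	};

	/* in probe */
	bridge->funcs = &foo_bridge_funcs;
	bridge->ops |= DRM_BRIDGE_OP_DP_AUDIO;
	bridge->hdmi_audio_dev = dev;
	bridge->hdmi_audio_dai_port = -1;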
/**
@@ -840,6 +933,18 @@ struct drm_bridge {
const struct drm_bridge_timings *timings;
/** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+ * @struct drm_bridge.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this bridge.
+ */
+ struct kref refcount;
+
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
/** @ops: bitmask of operations supported by the bridge */
@@ -914,23 +1019,28 @@ struct drm_bridge {
unsigned int max_bpc;
/**
- * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec
+ * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if
+ * either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set.
*/
struct device *hdmi_audio_dev;
/**
* @hdmi_audio_max_i2s_playback_channels: maximum number of playback
- * I2S channels for the HDMI codec
+ * I2S channels for the @DRM_BRIDGE_OP_HDMI_AUDIO or
+ * @DRM_BRIDGE_OP_DP_AUDIO.
*/
int hdmi_audio_max_i2s_playback_channels;
/**
- * @hdmi_audio_spdif_playback: set if HDMI codec has S/PDIF playback port
+ * @hdmi_audio_spdif_playback: set if this bridge has S/PDIF playback
+ * port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
*/
unsigned int hdmi_audio_spdif_playback : 1;
/**
- * @hdmi_audio_dai_port: sound DAI port, -1 if it is not enabled
+ * @hdmi_audio_dai_port: sound DAI port for either of
+ * @DRM_BRIDGE_OP_HDMI_AUDIO and @DRM_BRIDGE_OP_DP_AUDIO, -1 if it is
+ * not used.
*/
int hdmi_audio_dai_port;
};
@@ -941,6 +1051,30 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
return container_of(priv, struct drm_bridge, base);
}
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
+void drm_bridge_put(struct drm_bridge *bridge);
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs);
+
+/**
+ * devm_drm_bridge_alloc - Allocate and initialize a bridge
+ * @dev: struct device of the bridge device
+ * @type: the type of the struct which contains struct &drm_bridge
+ * @member: the name of the &drm_bridge within @type
+ * @funcs: callbacks for this bridge
+ *
+ * The reference count of the returned bridge is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_bridge_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to new bridge, or ERR_PTR on failure.
+ */
+#define devm_drm_bridge_alloc(dev, type, member, funcs) \
+ ((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs))
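A minimal usage sketch (not part of the patch), assuming a hypothetical foo_bridge driver that embeds struct drm_bridge and already defines foo_bridge_funcs:

	struct foo_bridge {
		struct drm_bridge bridge;
		/* driver-private state */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_bridge *foo;

		/* refcount starts at 1 and is dropped automatically via devm */
		foo = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge, bridge,
					    &foo_bridge_funcs);
		if (IS_ERR(foo))
			return PTR_ERR(foo);

		drm_bridge_add(&foo->bridge);
		return 0;
	}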
+
void drm_bridge_add(struct drm_bridge *bridge);
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
@@ -958,6 +1092,38 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
#endif
/**
+ * drm_bridge_get_current_state() - Get the current bridge state
+ * @bridge: bridge object
+ *
+ * This function must be called with the modeset lock held.
+ *
+ * RETURNS:
+ *
+ * The current bridge state, or NULL if there is none.
+ */
+static inline struct drm_bridge_state *
+drm_bridge_get_current_state(struct drm_bridge *bridge)
+{
+ if (!bridge)
+ return NULL;
+
+ /*
+ * Only atomic bridges will have bridge->base initialized by
+ * drm_atomic_private_obj_init(), so we need to make sure we're
+ * working with one before we try to use the lock.
+ */
+ if (!bridge->funcs || !bridge->funcs->atomic_reset)
+ return NULL;
+
+ drm_modeset_lock_assert_held(&bridge->base.lock);
+
+ if (!bridge->base.state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(bridge->base.state);
+}
+
+/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
*
@@ -1108,4 +1274,7 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void drm_bridge_debugfs_params(struct dentry *root);
+void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder);
+
#endif
diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h
new file mode 100644
index 000000000000..6c35b479ec2a
--- /dev/null
+++ b/include/drm/drm_bridge_helper.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __DRM_BRIDGE_HELPER_H_
+#define __DRM_BRIDGE_HELPER_H_
+
+struct drm_bridge;
+struct drm_modeset_acquire_ctx;
+
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif // __DRM_BRIDGE_HELPER_H_
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 6ea54a578cda..e2f894f1b90a 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -65,6 +65,28 @@ struct drm_device {
struct device *dev;
/**
+ * @dma_dev:
+ *
+ * Device for DMA operations. Only required if the device @dev
+ * cannot perform DMA by itself. Should be NULL otherwise. Call
+ * drm_dev_dma_dev() to get the DMA device instead of using this
+ * field directly. Call drm_dev_set_dma_dev() to set this field.
+ *
+ * DRM devices are sometimes bound to virtual devices that cannot
+ * perform DMA by themselves. Drivers should set this field to the
+ * respective DMA controller.
+ *
+ * Devices on USB and other peripheral busses also cannot perform
+	 * DMA by themselves. The @dma_dev field should point to the bus
+	 * controller that does DMA on behalf of such a device. Required
+ * for importing buffers via dma-buf.
+ *
+ * If set, the DRM core automatically releases the reference on the
+ * device.
+ */
+ struct device *dma_dev;
+
+ /**
* @managed:
*
* Managed resources linked to the lifetime of this &drm_device as
@@ -327,4 +349,23 @@ struct drm_device {
struct dentry *debugfs_root;
};
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);
+
+/**
+ * drm_dev_dma_dev - returns the DMA device for a DRM device
+ * @dev: DRM device
+ *
+ * Returns the DMA device of the given DRM device. By default, this is
+ * the DRM device's parent. See drm_dev_set_dma_dev().
+ *
+ * Returns:
+ * A DMA-capable device for the DRM device.
+ */
+static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
+{
+ if (dev->dma_dev)
+ return dev->dma_dev;
+ return dev->dev;
+}
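A hedged usage sketch (not part of the patch): a driver bound to a virtual device would register its real DMA controller once and use drm_dev_dma_dev() when attaching dma-bufs; dma_controller_dev is a hypothetical placeholder:

	/* in probe, after allocating the drm_device */
	drm_dev_set_dma_dev(drm, dma_controller_dev);

	/* later, when importing a buffer */
	struct dma_buf_attachment *attach =
		dma_buf_attach(dma_buf, drm_dev_dma_dev(drm));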
+
#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index eaac5e665892..b38409670868 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -437,7 +437,7 @@ bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay);
+ unsigned int hdisplay, unsigned int vdisplay);
int drm_edid_header_is_valid(const void *edid);
bool drm_edid_is_valid(struct edid *edid);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 2bf893eabb4b..9b71f7a9f3f8 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -159,7 +159,8 @@ struct drm_gem_object_funcs {
* @vmap:
*
* Returns a virtual address for the buffer. Used by the
- * drm_gem_dmabuf_vmap() helper.
+ * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -169,7 +170,8 @@ struct drm_gem_object_funcs {
* @vunmap:
*
* Releases the address previously returned by @vmap. Used by the
- * drm_gem_dmabuf_vunmap() helper.
+ * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -192,7 +194,8 @@ struct drm_gem_object_funcs {
* @evict:
*
* Evicts gem object out from memory. Used by the drm_gem_object_evict()
- * helper. Returns 0 on success, -errno otherwise.
+ * helper. Returns 0 on success, -errno otherwise. Called with a held
+ * GEM reservation lock.
*
* This callback is optional.
*/
@@ -537,8 +540,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
@@ -561,7 +564,7 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj));
-int drm_gem_evict(struct drm_gem_object *obj);
+int drm_gem_evict_locked(struct drm_gem_object *obj);
/**
* drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index cef5a6b5a4d6..b4f993da3cae 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -37,7 +37,18 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ refcount_t pages_use_count;
+
+ /**
+ * @pages_pin_count:
+ *
+ * Reference count on the pinned pages table.
+ *
+	 * Pages are hard-pinned and reside in memory if the count is
+	 * greater than zero. Otherwise, when the count is zero, the pages are
+	 * allowed to be evicted and purged by the memory shrinker.
+ */
+ refcount_t pages_pin_count;
/**
* @madv: State for madvise
@@ -71,7 +82,7 @@ struct drm_gem_shmem_object {
* Reference count on the virtual address.
 	 * The address is unmapped when the count reaches zero.
*/
- unsigned int vmap_use_count;
+ refcount_t vmap_use_count;
/**
* @pages_mark_dirty_on_put:
@@ -102,28 +113,28 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
struct vfsmount *gemfs);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
return (shmem->madv > 0) &&
- !shmem->vmap_use_count && shmem->sgt &&
+ !refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
!shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -214,12 +225,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
}
/*
- * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
* @obj: GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
*
- * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vmap handler.
+ * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -229,7 +240,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- return drm_gem_shmem_vmap(shmem, map);
+ return drm_gem_shmem_vmap_locked(shmem, map);
}
/*
@@ -237,15 +248,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
* @obj: GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
- * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vunmap handler.
+ * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
*/
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_vunmap(shmem, map);
+ drm_gem_shmem_vunmap_locked(shmem, map);
}
/**
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index 1c62d1d4458c..4948379237e9 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -9,6 +9,7 @@
#include <kunit/test.h>
+struct drm_connector;
struct drm_crtc_funcs;
struct drm_crtc_helper_funcs;
struct drm_device;
@@ -118,6 +119,13 @@ drm_kunit_helper_create_crtc(struct kunit *test,
const struct drm_crtc_funcs *funcs,
const struct drm_crtc_helper_funcs *helper_funcs);
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx);
+
int drm_kunit_add_mode_destroy_action(struct kunit *test,
struct drm_display_mode *mode);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index bd40a443385c..d6796ce9f5ff 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -293,6 +293,7 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size);
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size);
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format);
#define mipi_dsi_msleep(ctx, delay) \
do { \
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 271765e2e9f2..4b8f0370b79b 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -532,8 +532,8 @@ struct drm_mode_config {
*/
struct list_head privobj_list;
- int min_width, min_height;
- int max_width, max_height;
+ unsigned int min_width, min_height;
+ unsigned int max_width, max_height;
const struct drm_mode_config_funcs *funcs;
/* output poll support */
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index a9c042c8dea1..31d84f901c51 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/kref.h>
struct backlight_device;
struct dentry;
@@ -266,12 +267,52 @@ struct drm_panel {
* If true then the panel has been enabled.
*/
bool enabled;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+ * @struct drm_panel.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this panel.
+ */
+ struct kref refcount;
};
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
+
+/**
+ * devm_drm_panel_alloc - Allocate and initialize a refcounted panel.
+ *
+ * @dev: struct device of the panel device
+ * @type: the type of the struct which contains struct &drm_panel
+ * @member: the name of the &drm_panel within @type
+ * @funcs: callbacks for this panel
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
+ *
+ * The reference count of the returned panel is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_panel_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to container structure embedding the panel, ERR_PTR on failure.
+ */
+#define devm_drm_panel_alloc(dev, type, member, funcs, connector_type) \
+ ((type *)__devm_drm_panel_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ connector_type))
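A minimal usage sketch (not part of the patch) for a hypothetical DSI panel driver named foo; foo_panel_funcs is assumed to exist:

	struct foo_panel {
		struct drm_panel panel;
		struct mipi_dsi_device *dsi;
	};

	static int foo_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct foo_panel *ctx;

		ctx = devm_drm_panel_alloc(&dsi->dev, struct foo_panel, panel,
					   &foo_panel_funcs,
					   DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		ctx->dsi = dsi;
		drm_panel_add(&ctx->panel);
		return 0;
	}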
+
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs,
int connector_type);
+struct drm_panel *drm_panel_get(struct drm_panel *panel);
+void drm_panel_put(struct drm_panel *panel);
+
void drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index d6ce7b218b77..840ae5f798c2 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -17,7 +17,7 @@ int drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force);
-int drmm_kms_helper_poll_init(struct drm_device *dev);
+void drmm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 50928a7ae98e..1a7e377d4cbb 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -383,8 +383,15 @@ struct drm_sched_job {
struct xarray dependencies;
};
+/**
+ * enum drm_gpu_sched_stat - the scheduler's status
+ *
+ * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
+ * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ */
enum drm_gpu_sched_stat {
- DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NONE,
DRM_GPU_SCHED_STAT_NOMINAL,
DRM_GPU_SCHED_STAT_ENODEV,
};
@@ -410,10 +417,36 @@ struct drm_sched_backend_ops {
struct drm_sched_entity *s_entity);
/**
- * @run_job: Called to execute the job once all of the dependencies
- * have been resolved. This may be called multiple times, if
- * timedout_job() has happened and drm_sched_job_recovery()
- * decides to try it again.
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved.
+ *
+ * @sched_job: the job to run
+ *
+ * The deprecated drm_sched_resubmit_jobs() (called by &struct
+ * drm_sched_backend_ops.timedout_job) can invoke this again with the
+ * same parameters. Using this is discouraged because it violates
+ * dma_fence rules, notably dma_fence_init() has to be called on
+ * already initialized fences for a second time. Moreover, this is
+ * dangerous because attempts to allocate memory might deadlock with
+ * memory management code waiting for the reset to complete.
+ *
+ * TODO: Document what drivers should do / use instead.
+ *
+ * This method is called in a workqueue context - either from the
+ * submit_wq the driver passed through drm_sched_init(), or, if the
+ * driver passed NULL, a separate, ordered workqueue the scheduler
+ * allocated.
+ *
+ * Note that the scheduler expects to 'inherit' its own reference to
+ * this fence from the callback. It does not invoke an extra
+ * dma_fence_get() on it. Consequently, this callback must take a
+ * reference for the scheduler, and additional ones for the driver's
+ * respective needs.
+ *
+ * Return:
+ * * On success: dma_fence the driver must signal once the hardware has
+ * completed the job ("hardware fence").
+ * * On failure: NULL or an ERR_PTR.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
@@ -421,43 +454,52 @@ struct drm_sched_backend_ops {
* @timedout_job: Called when a job has taken too long to execute,
* to trigger GPU recovery.
*
- * This method is called in a workqueue context.
+ * @sched_job: The job that has timed out
+ *
+ * Drivers typically issue a reset to recover from GPU hangs.
+ * This procedure looks very different depending on whether a firmware
+ * or a hardware scheduler is being used.
+ *
+ * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+ * scheduler has one entity. Hence, the steps taken typically look as
+ * follows:
*
- * Drivers typically issue a reset to recover from GPU hangs, and this
- * procedure usually follows the following workflow:
+ * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+ * scheduler workqueues and cancel the timeout work, guaranteeing
+ * that nothing is queued while the ring is being removed.
+ * 2. Remove the ring. The firmware will make sure that the
+	 *    corresponding parts of the hardware are reset, and that other
+ * rings are not impacted.
+ * 3. Kill the entity and the associated scheduler.
*
- * 1. Stop the scheduler using drm_sched_stop(). This will park the
- * scheduler thread and cancel the timeout work, guaranteeing that
- * nothing is queued while we reset the hardware queue
- * 2. Try to gracefully stop non-faulty jobs (optional)
- * 3. Issue a GPU reset (driver-specific)
- * 4. Re-submit jobs using drm_sched_resubmit_jobs()
- * 5. Restart the scheduler using drm_sched_start(). At that point, new
- * jobs can be queued, and the scheduler thread is unblocked
+ *
+ * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+ * one or more entities to one ring. This implies that all entities
+ * associated with the affected scheduler cannot be torn down, because
+ * this would effectively also affect innocent userspace processes which
+ * did not submit faulty jobs (for example).
+ *
+ * Consequently, the procedure to recover with a hardware scheduler
+ * should look like this:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+ * 2. Kill the entity the faulty job stems from.
+ * 3. Issue a GPU reset on all faulty rings (driver-specific).
+	 * 4. Re-submit jobs on all schedulers impacted by the reset by
+	 *    re-submitting them to the entities which are still alive.
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start().
*
* Note that some GPUs have distinct hardware queues but need to reset
* the GPU globally, which requires extra synchronization between the
- * timeout handler of the different &drm_gpu_scheduler. One way to
- * achieve this synchronization is to create an ordered workqueue
- * (using alloc_ordered_workqueue()) at the driver level, and pass this
- * queue to drm_sched_init(), to guarantee that timeout handlers are
- * executed sequentially. The above workflow needs to be slightly
- * adjusted in that case:
- *
- * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
- * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
- * the reset (optional)
- * 3. Issue a GPU reset on all faulty queues (driver-specific)
- * 4. Re-submit jobs on all schedulers impacted by the reset using
- * drm_sched_resubmit_jobs()
- * 5. Restart all schedulers that were stopped in step #1 using
- * drm_sched_start()
+ * timeout handlers of different schedulers. One way to achieve this
+ * synchronization is to create an ordered workqueue (using
+ * alloc_ordered_workqueue()) at the driver level, and pass this queue
+ * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+ * that timeout handlers are executed sequentially.
*
- * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
- * and the underlying driver has started or completed recovery.
+ * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
*
- * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
- * available, i.e. has been unplugged.
*/
enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
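A hedged skeleton (not part of the patch) of a firmware-scheduler style handler following the steps documented above; the foo_* helpers stand in for driver-specific code:

	static enum drm_gpu_sched_stat
	foo_timedout_job(struct drm_sched_job *sched_job)
	{
		struct drm_gpu_scheduler *sched = sched_job->sched;

		drm_sched_stop(sched, sched_job);  /* step 1: pause the workqueues */
		foo_fw_remove_ring(sched_job);     /* step 2: firmware specific */
		foo_kill_entity_and_sched(sched);  /* step 3: driver specific */

		return DRM_GPU_SCHED_STAT_NOMINAL;
	}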
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 36216d28d8bd..544f8f8c3f44 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -35,15 +35,6 @@ struct dma_buf_attachment;
*/
struct dma_buf_ops {
/**
- * @cache_sgt_mapping:
- *
- * If true the framework will cache the first mapping made for each
- * attachment. This avoids creating mappings for attachments multiple
- * times.
- */
- bool cache_sgt_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -493,8 +484,6 @@ struct dma_buf_attach_ops {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
* @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
* @importer_ops: importer operations for this attachment, if provided
@@ -514,8 +503,6 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
- struct sg_table *sgt;
- enum dma_data_direction dir;
bool peer2peer;
const struct dma_buf_attach_ops *importer_ops;
void *importer_priv;
@@ -583,20 +570,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
return !!dmabuf->ops->pin;
}
-/**
- * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappings
- * @attach: the DMA-buf attachment to check
- *
- * Returns true if a DMA-buf importer wants to call the map/unmap functions with
- * the dma_resv lock held.
- */
-static inline bool
-dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
-{
- return !!attach->importer_ops;
-}
-
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e7ad819962e3..b12776883d14 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -169,8 +169,8 @@ struct dma_fence_ops {
* implementation know that there is another driver waiting on the
* signal (ie. hw->sw case).
*
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
+	 * This is called with IRQs disabled, so only spinlocks which disable
+	 * IRQs can be used in the code outside of this callback.
*
* A return value of false indicates the fence already passed,
* or some failure occurred that made it impossible to enable
@@ -239,27 +239,6 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fence_value_str:
- *
- * Callback to fill in free-form debug info specific to this fence, like
- * the sequence number.
- *
- * This callback is optional.
- */
- void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
-
- /**
- * @timeline_value_str:
- *
- * Fills in the current value of the timeline as a string, like the
- * sequence number. Note that the specific fence passed to this function
- * should not matter, drivers should only use it to look up the
- * corresponding timeline structures.
- */
- void (*timeline_value_str)(struct dma_fence *fence,
- char *str, int size);
-
- /**
* @set_deadline:
*
* Callback to allow a fence waiter to inform the fence signaler of
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 6a4a3cec4638..923d68e07679 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -126,8 +126,17 @@ static inline unsigned int screen_info_video_type(const struct screen_info *si)
return VIDEO_TYPE_CGA;
}
+static inline u32 __screen_info_vesapm_info_base(const struct screen_info *si)
+{
+ if (si->vesapm_seg < 0xc000)
+ return 0;
+ return (si->vesapm_seg << 4) + si->vesapm_off;
+}
+
ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
+u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si);
+
#if defined(CONFIG_PCI)
void screen_info_apply_fixups(void);
struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
diff --git a/include/uapi/drm/asahi_drm.h b/include/uapi/drm/asahi_drm.h
new file mode 100644
index 000000000000..de67f1c603af
--- /dev/null
+++ b/include/uapi/drm/asahi_drm.h
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) The Asahi Linux Contributors
+ * Copyright (C) 2018-2023 Collabora Ltd.
+ * Copyright (C) 2014-2018 Broadcom
+ */
+#ifndef _ASAHI_DRM_H_
+#define _ASAHI_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: Introduction to the Asahi UAPI
+ *
+ * This documentation describes the Asahi IOCTLs.
+ *
+ * Just a few generic rules about the data passed to the Asahi IOCTLs (cribbed
+ * from Panthor):
+ *
+ * - Structures must be aligned on 64-bit/8-byte. If the object is not
+ * naturally aligned, a padding field must be added.
+ * - Fields must be explicitly aligned to their natural type alignment with
+ * pad[0..N] fields.
+ * - All padding fields will be checked by the driver to make sure they are
+ * zeroed.
+ * - Flags can be added, but not removed/replaced.
+ * - New fields can be added to the main structures (the structures
+ * directly passed to the ioctl). Those fields can be added at the end of
+ * the structure, or replace existing padding fields. Any new field being
+ * added must preserve the behavior that existed before those fields were
+ * added when a value of zero is passed.
+ * - New fields can be added to indirect objects (objects pointed by the
+ * main structure), iff those objects are passed a size to reflect the
+ * size known by the userspace driver (see
+ * drm_asahi_cmd_header::size).
+ * - If the kernel driver is too old to know some fields, those will be
+ * ignored if zero, and otherwise rejected (and so will be zero on output).
+ * - If userspace is too old to know some fields, those will be zeroed
+ * (input) before the structure is parsed by the kernel driver.
+ * - Each new flag/field addition must come with a driver version update so
+ * the userspace driver doesn't have to guess which flags are supported.
+ * - Structures should not contain unions, as this would defeat the
+ * extensibility of such structures.
+ * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
+ * at the end of the drm_asahi_ioctl_id enum.
+ */
+
+/**
+ * enum drm_asahi_ioctl_id - IOCTL IDs
+ *
+ * Place new ioctls at the end, don't re-order, don't replace or remove entries.
+ *
+ * These IDs are not meant to be used directly. Use the DRM_IOCTL_ASAHI_xxx
+ * definitions instead.
+ */
+enum drm_asahi_ioctl_id {
+ /** @DRM_ASAHI_GET_PARAMS: Query device properties. */
+ DRM_ASAHI_GET_PARAMS = 0,
+
+ /** @DRM_ASAHI_GET_TIME: Query device time. */
+ DRM_ASAHI_GET_TIME,
+
+ /** @DRM_ASAHI_VM_CREATE: Create a GPU VM address space. */
+ DRM_ASAHI_VM_CREATE,
+
+ /** @DRM_ASAHI_VM_DESTROY: Destroy a VM. */
+ DRM_ASAHI_VM_DESTROY,
+
+ /** @DRM_ASAHI_VM_BIND: Bind/unbind memory to a VM. */
+ DRM_ASAHI_VM_BIND,
+
+ /** @DRM_ASAHI_GEM_CREATE: Create a buffer object. */
+ DRM_ASAHI_GEM_CREATE,
+
+ /**
+ * @DRM_ASAHI_GEM_MMAP_OFFSET: Get offset to pass to mmap() to map a
+ * given GEM handle.
+ */
+ DRM_ASAHI_GEM_MMAP_OFFSET,
+
+ /** @DRM_ASAHI_GEM_BIND_OBJECT: Bind memory as a special object */
+ DRM_ASAHI_GEM_BIND_OBJECT,
+
+ /** @DRM_ASAHI_QUEUE_CREATE: Create a scheduling queue. */
+ DRM_ASAHI_QUEUE_CREATE,
+
+ /** @DRM_ASAHI_QUEUE_DESTROY: Destroy a scheduling queue. */
+ DRM_ASAHI_QUEUE_DESTROY,
+
+ /** @DRM_ASAHI_SUBMIT: Submit commands to a queue. */
+ DRM_ASAHI_SUBMIT,
+};
+
+#define DRM_ASAHI_MAX_CLUSTERS 64
+
+/**
+ * struct drm_asahi_params_global - Global parameters.
+ *
+ * This struct may be queried by drm_asahi_get_params.
+ */
+struct drm_asahi_params_global {
+ /** @features: Feature bits from drm_asahi_feature */
+ __u64 features;
+
+ /** @gpu_generation: GPU generation, e.g. 13 for G13G */
+ __u32 gpu_generation;
+
+ /** @gpu_variant: GPU variant as a character, e.g. 'C' for G13C */
+ __u32 gpu_variant;
+
+ /**
+ * @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0' or
+ * 0x21 for 'C1'
+ */
+ __u32 gpu_revision;
+
+ /** @chip_id: Chip ID in BCD, e.g. 0x8103 for T8103 */
+ __u32 chip_id;
+
+ /** @num_dies: Number of dies in the SoC */
+ __u32 num_dies;
+
+ /** @num_clusters_total: Number of GPU clusters (across all dies) */
+ __u32 num_clusters_total;
+
+ /**
+ * @num_cores_per_cluster: Number of logical cores per cluster
+ * (including inactive/nonexistent)
+ */
+ __u32 num_cores_per_cluster;
+
+ /** @max_frequency_khz: Maximum GPU core clock frequency */
+ __u32 max_frequency_khz;
+
+ /** @core_masks: Bitmask of present/enabled cores per cluster */
+ __u64 core_masks[DRM_ASAHI_MAX_CLUSTERS];
+
+ /**
+ * @vm_start: VM range start VMA. Together with @vm_end, this defines
+ * the window of valid GPU VAs. Userspace is expected to subdivide VAs
+ * out of this window.
+ *
+ * This window contains all virtual addresses that userspace needs to
+ * know about. There may be kernel-internal GPU VAs outside this range,
+ * but that detail is not relevant here.
+ */
+ __u64 vm_start;
+
+ /** @vm_end: VM range end VMA */
+ __u64 vm_end;
+
+ /**
+ * @vm_kernel_min_size: Minimum kernel VMA window size.
+ *
+ * When creating a VM, userspace is required to carve out a section of
+ * virtual addresses (within the range given by @vm_start and
+ * @vm_end). The kernel will allocate various internal structures
+ * within the specified VA range.
+ *
+ * Allowing userspace to choose the VA range for the kernel, rather than
+ * the kernel reserving VAs and requiring userspace to cope, can assist
+ * in implementing SVM.
+ */
+ __u64 vm_kernel_min_size;
+
+ /**
+ * @max_commands_per_submission: Maximum number of supported commands
+ * per submission. This mirrors firmware limits. Userspace must split up
+ * larger command buffers, which may require inserting additional
+ * synchronization.
+ */
+ __u32 max_commands_per_submission;
+
+ /**
+ * @max_attachments: Maximum number of drm_asahi_attachment's per
+ * command
+ */
+ __u32 max_attachments;
+
+ /**
+ * @command_timestamp_frequency_hz: Timebase frequency for timestamps
+ * written during command execution, specified via drm_asahi_timestamp
+ * structures. As this rate is controlled by the firmware, it is a
+ * queryable parameter.
+ *
+ * Userspace must divide by this frequency to convert timestamps to
+ * seconds, rather than hardcoding a particular firmware's rate.
+ */
+ __u64 command_timestamp_frequency_hz;
+};
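A small userspace sketch (not part of the patch) converting raw command timestamps with the queried frequency, ignoring overflow for brevity:

	static inline __u64
	asahi_ticks_to_ns(const struct drm_asahi_params_global *params, __u64 ticks)
	{
		return ticks * 1000000000ull / params->command_timestamp_frequency_hz;
	}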
+
+/**
+ * enum drm_asahi_feature - Feature bits
+ *
+ * This covers only features that userspace cannot infer from the architecture
+ * version. Most features don't need to be here.
+ */
+enum drm_asahi_feature {
+ /**
+ * @DRM_ASAHI_FEATURE_SOFT_FAULTS: GPU has "soft fault" enabled. Shader
+ * loads of unmapped memory will return zero. Shader stores to unmapped
+ * memory will be silently discarded. Note that only shader load/store
+ * is affected. Other hardware units are not affected, notably including
+ * texture sampling.
+ *
+ * Soft fault is set when initializing the GPU and cannot be runtime
+ * toggled. Therefore, it is exposed as a feature bit and not a
+ * userspace-settable flag on the VM. When soft fault is enabled,
+ * userspace can speculate memory accesses more aggressively.
+ */
+ DRM_ASAHI_FEATURE_SOFT_FAULTS = (1UL) << 0,
+};
+
+/**
+ * struct drm_asahi_get_params - Arguments passed to DRM_IOCTL_ASAHI_GET_PARAMS
+ */
+struct drm_asahi_get_params {
+ /** @param_group: Parameter group to fetch (MBZ) */
+ __u32 param_group;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @pointer: User pointer to write parameter struct */
+ __u64 pointer;
+
+ /**
+ * @size: Size of the user buffer. In case of older userspace, this may
+ * be less than sizeof(struct drm_asahi_params_global). The kernel will
+ * not write past the length specified here, allowing extensibility.
+ */
+ __u64 size;
+};
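A hedged userspace sketch (not part of the patch) of fetching the parameters through libdrm's drmIoctl(); fd is assumed to be an open DRM device, and the size field lets older userspace receive a truncated struct safely:

	struct drm_asahi_params_global params = {0};
	struct drm_asahi_get_params get = {
		.param_group = 0,
		.pointer = (__u64)(uintptr_t)&params,
		.size = sizeof(params),	/* kernel never writes past this */
	};

	if (drmIoctl(fd, DRM_IOCTL_ASAHI_GET_PARAMS, &get))
		return -errno;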
+
+/**
+ * struct drm_asahi_vm_create - Arguments passed to DRM_IOCTL_ASAHI_VM_CREATE
+ */
+struct drm_asahi_vm_create {
+ /**
+ * @kernel_start: Start of the kernel-reserved address range. See
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Both @kernel_start and @kernel_end must be within the range of
+ * valid VAs given by drm_asahi_params_global::vm_start and
+ * drm_asahi_params_global::vm_end. The size of the kernel range
+ * (@kernel_end - @kernel_start) must be at least
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Userspace must not bind any memory on this VM into this reserved
+ * range, it is for kernel use only.
+ */
+ __u64 kernel_start;
+
+ /**
+ * @kernel_end: End of the kernel-reserved address range. See
+ * @kernel_start.
+ */
+ __u64 kernel_end;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_vm_destroy - Arguments passed to DRM_IOCTL_ASAHI_VM_DESTROY
+ */
+struct drm_asahi_vm_destroy {
+ /** @vm_id: VM ID to be destroyed */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_gem_flags - Flags for GEM creation
+ */
+enum drm_asahi_gem_flags {
+ /**
+ * @DRM_ASAHI_GEM_WRITEBACK: BO should be CPU-mapped as writeback.
+ *
+ * Map as writeback instead of write-combine. This optimizes for CPU
+ * reads.
+ */
+ DRM_ASAHI_GEM_WRITEBACK = (1L << 0),
+
+ /**
+ * @DRM_ASAHI_GEM_VM_PRIVATE: BO is private to this GPU VM (no exports).
+ */
+ DRM_ASAHI_GEM_VM_PRIVATE = (1L << 1),
+};
+
+/**
+ * struct drm_asahi_gem_create - Arguments passed to DRM_IOCTL_ASAHI_GEM_CREATE
+ */
+struct drm_asahi_gem_create {
+ /** @size: Size of the BO */
+ __u64 size;
+
+ /** @flags: Combination of drm_asahi_gem_flags flags. */
+ __u32 flags;
+
+ /**
+ * @vm_id: VM ID to assign to the BO, if DRM_ASAHI_GEM_VM_PRIVATE is set
+ */
+ __u32 vm_id;
+
+ /** @handle: Returned GEM handle for the BO */
+ __u32 handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_gem_mmap_offset - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET
+ */
+struct drm_asahi_gem_mmap_offset {
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+};
+
+/**
+ * enum drm_asahi_bind_flags - Flags for GEM binding
+ */
+enum drm_asahi_bind_flags {
+ /**
+ * @DRM_ASAHI_BIND_UNBIND: Instead of binding a GEM object to the range,
+ * simply unbind the GPU VMA range.
+ */
+ DRM_ASAHI_BIND_UNBIND = (1L << 0),
+
+ /** @DRM_ASAHI_BIND_READ: Map BO with GPU read permission */
+ DRM_ASAHI_BIND_READ = (1L << 1),
+
+ /** @DRM_ASAHI_BIND_WRITE: Map BO with GPU write permission */
+ DRM_ASAHI_BIND_WRITE = (1L << 2),
+
+ /**
+ * @DRM_ASAHI_BIND_SINGLE_PAGE: Map a single page of the BO repeatedly
+ * across the VA range.
+ *
+ * This is useful to fill a VA range with scratch pages or zero pages.
+ * It is intended as a mechanism to accelerate sparse.
+ */
+ DRM_ASAHI_BIND_SINGLE_PAGE = (1L << 3),
+};
+
+/**
+ * struct drm_asahi_gem_bind_op - Description of a single GEM bind operation.
+ */
+struct drm_asahi_gem_bind_op {
+ /** @flags: Combination of drm_asahi_bind_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind (except for UNBIND) */
+ __u32 handle;
+
+ /**
+ * @offset: Offset into the object (except for UNBIND).
+ *
+ * For a regular bind, this is the beginning of the region of the GEM
+ * object to bind.
+ *
+ * For a single-page bind, this is the offset to the single page that
+ * will be repeatedly bound.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 offset;
+
+ /**
+ * @range: Number of bytes to bind/unbind to @addr.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 range;
+
+ /**
+ * @addr: Address to bind to.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 addr;
+};
+
+/**
+ * struct drm_asahi_vm_bind - Arguments passed to
+ * DRM_IOCTL_ASAHI_VM_BIND
+ */
+struct drm_asahi_vm_bind {
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /** @num_binds: number of binds in this IOCTL. */
+ __u32 num_binds;
+
+ /**
+ * @stride: Stride in bytes between consecutive binds. This allows
+ * extensibility of drm_asahi_gem_bind_op.
+ */
+ __u32 stride;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /**
+ * @userptr: User pointer to an array of @num_binds structures of type
+ * @drm_asahi_gem_bind_op and size @stride bytes.
+ */
+ __u64 userptr;
+};
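A minimal sketch of a single-bind VM_BIND call. The GPU VA and BO size are assumed to already satisfy the page-alignment rules above; the function name is illustrative.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/asahi_drm.h>

/* Sketch: map one whole BO read/write at a fixed GPU VA via VM_BIND. */
static int bind_bo(int fd, __u32 vm_id, __u32 handle, __u64 size, __u64 gpu_va)
{
	struct drm_asahi_gem_bind_op op = {
		.flags  = DRM_ASAHI_BIND_READ | DRM_ASAHI_BIND_WRITE,
		.handle = handle,
		.offset = 0,
		.range  = size,		/* page aligned */
		.addr   = gpu_va,	/* page aligned */
	};
	struct drm_asahi_vm_bind bind = {
		.vm_id     = vm_id,
		.num_binds = 1,
		.stride    = sizeof(op),
		.userptr   = (uintptr_t)&op,
	};

	return ioctl(fd, DRM_IOCTL_ASAHI_VM_BIND, &bind);
}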
+
+/**
+ * enum drm_asahi_bind_object_op - Special object bind operation
+ */
+enum drm_asahi_bind_object_op {
+ /** @DRM_ASAHI_BIND_OBJECT_OP_BIND: Bind a BO as a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_BIND = 0,
+
+ /** @DRM_ASAHI_BIND_OBJECT_OP_UNBIND: Unbind a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_UNBIND = 1,
+};
+
+/**
+ * enum drm_asahi_bind_object_flags - Special object bind flags
+ */
+enum drm_asahi_bind_object_flags {
+ /**
+ * @DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS: Map a BO as a timestamp
+ * buffer.
+ */
+ DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS = (1L << 0),
+};
+
+/**
+ * struct drm_asahi_gem_bind_object - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT
+ */
+struct drm_asahi_gem_bind_object {
+ /** @op: Bind operation (enum drm_asahi_bind_object_op) */
+ __u32 op;
+
+ /** @flags: Combination of drm_asahi_bind_object_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind (BIND only) */
+ __u32 handle;
+
+ /** @vm_id: The ID of the VM to operate on (MBZ currently) */
+ __u32 vm_id;
+
+ /** @offset: Offset into the object (BIND only) */
+ __u64 offset;
+
+ /** @range: Number of bytes to bind/unbind (BIND only) */
+ __u64 range;
+
+ /** @object_handle: Object handle (out for BIND, in for UNBIND) */
+ __u32 object_handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
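A sketch of registering a timestamp buffer; the returned object_handle is the value later placed in drm_asahi_timestamp::handle. The helper name and the zero offset are assumptions for illustration.

#include <sys/ioctl.h>
#include <drm/asahi_drm.h>

/* Sketch: register a BO as a timestamp buffer and return its object handle. */
static int bind_timestamp_buffer(int fd, __u32 gem_handle, __u64 size,
				 __u32 *object_handle)
{
	struct drm_asahi_gem_bind_object args = {
		.op     = DRM_ASAHI_BIND_OBJECT_OP_BIND,
		.flags  = DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS,
		.handle = gem_handle,
		.offset = 0,
		.range  = size,
	};
	int ret = ioctl(fd, DRM_IOCTL_ASAHI_GEM_BIND_OBJECT, &args);

	if (ret == 0)
		*object_handle = args.object_handle;
	return ret;
}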
+
+/**
+ * enum drm_asahi_cmd_type - Command type
+ */
+enum drm_asahi_cmd_type {
+ /**
+ * @DRM_ASAHI_CMD_RENDER: Render command, executing on the render
+ * subqueue. Combined vertex and fragment operation.
+ *
+ * Followed by a @drm_asahi_cmd_render payload.
+ */
+ DRM_ASAHI_CMD_RENDER = 0,
+
+ /**
+ * @DRM_ASAHI_CMD_COMPUTE: Compute command on the compute subqueue.
+ *
+ * Followed by a @drm_asahi_cmd_compute payload.
+ */
+ DRM_ASAHI_CMD_COMPUTE = 1,
+
+ /**
+ * @DRM_ASAHI_SET_VERTEX_ATTACHMENTS: Software command to set
+ * attachments for subsequent vertex shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_VERTEX_ATTACHMENTS = 2,
+
+ /**
+ * @DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS: Software command to set
+ * attachments for subsequent fragment shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS = 3,
+
+ /**
+ * @DRM_ASAHI_SET_COMPUTE_ATTACHMENTS: Software command to set
+ * attachments for subsequent compute shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_COMPUTE_ATTACHMENTS = 4,
+};
+
+/**
+ * enum drm_asahi_priority - Scheduling queue priority.
+ *
+ * These priorities are forwarded to the firmware to influence firmware
+ * scheduling. The exact policy is ultimately decided by firmware, but
+ * these enums allow userspace to communicate its intentions.
+ */
+enum drm_asahi_priority {
+ /** @DRM_ASAHI_PRIORITY_LOW: Low priority queue. */
+ DRM_ASAHI_PRIORITY_LOW = 0,
+
+ /** @DRM_ASAHI_PRIORITY_MEDIUM: Medium priority queue. */
+ DRM_ASAHI_PRIORITY_MEDIUM = 1,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_HIGH: High priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_HIGH = 2,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_REALTIME: Real-time priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_REALTIME = 3,
+};
+
+/**
+ * struct drm_asahi_queue_create - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_CREATE
+ */
+struct drm_asahi_queue_create {
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @vm_id: The ID of the VM this queue is bound to */
+ __u32 vm_id;
+
+ /** @priority: One of drm_asahi_priority */
+ __u32 priority;
+
+ /** @queue_id: The returned queue ID */
+ __u32 queue_id;
+
+ /**
+ * @usc_exec_base: GPU base address for all USC binaries (shaders) on
+ * this queue. USC addresses are 32-bit relative to this 64-bit base.
+ *
+ * This sets the following registers on all queue commands:
+ *
+ * USC_EXEC_BASE_TA (vertex)
+ * USC_EXEC_BASE_ISP (fragment)
+ * USC_EXEC_BASE_CP (compute)
+ *
+ * While the hardware lets us configure these independently per command,
+ * we do not have a use case for this. Instead, we expect userspace to
+ * reserve a fixed 4GiB VA carveout for USC memory and pass its base
+ * address here.
+ */
+ __u64 usc_exec_base;
+};
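A sketch of queue creation, assuming userspace has already reserved a 4GiB USC carveout whose base address is `usc_base`; the file descriptor and helper name are illustrative.

#include <sys/ioctl.h>
#include <drm/asahi_drm.h>

/* Sketch: create a medium-priority queue bound to an existing VM. */
static int create_queue(int fd, __u32 vm_id, __u64 usc_base, __u32 *queue_id)
{
	struct drm_asahi_queue_create args = {
		.vm_id         = vm_id,
		.priority      = DRM_ASAHI_PRIORITY_MEDIUM,
		.usc_exec_base = usc_base,	/* base of the 4GiB USC carveout */
	};
	int ret = ioctl(fd, DRM_IOCTL_ASAHI_QUEUE_CREATE, &args);

	if (ret == 0)
		*queue_id = args.queue_id;
	return ret;
}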
+
+/**
+ * struct drm_asahi_queue_destroy - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_DESTROY
+ */
+struct drm_asahi_queue_destroy {
+ /** @queue_id: The queue ID to be destroyed */
+ __u32 queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_sync_type - Sync item type
+ */
+enum drm_asahi_sync_type {
+ /** @DRM_ASAHI_SYNC_SYNCOBJ: Binary sync object */
+ DRM_ASAHI_SYNC_SYNCOBJ = 0,
+
+ /** @DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ: Timeline sync object */
+ DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1,
+};
+
+/**
+ * struct drm_asahi_sync - Sync item
+ */
+struct drm_asahi_sync {
+ /** @sync_type: One of drm_asahi_sync_type */
+ __u32 sync_type;
+
+ /** @handle: The sync object handle */
+ __u32 handle;
+
+ /** @timeline_value: Timeline value for timeline sync objects */
+ __u64 timeline_value;
+};
+
+/**
+ * define DRM_ASAHI_BARRIER_NONE - Command index for no barrier
+ *
+ * This special value may be passed in to drm_asahi_cmd_header::vdm_barrier or
+ * drm_asahi_cmd_header::cdm_barrier to indicate that the respective subqueue
+ * should not wait on any previous work.
+ */
+#define DRM_ASAHI_BARRIER_NONE (0xFFFFu)
+
+/**
+ * struct drm_asahi_cmd_header - Top level command structure
+ *
+ * This struct is core to the command buffer definition and therefore is not
+ * extensible.
+ */
+struct drm_asahi_cmd_header {
+ /** @cmd_type: One of drm_asahi_cmd_type */
+ __u16 cmd_type;
+
+ /**
+ * @size: Size of this command, not including this header.
+ *
+ * For hardware commands, this enables extensibility of commands without
+ * requiring extra command types. Passing a command that is shorter
+ * than expected is explicitly allowed for backwards-compatibility.
+ * Truncated fields will be zeroed.
+ *
+ * For the synthetic attachment setting commands, this implicitly
+ * encodes the number of attachments. These commands take multiple
+ * fixed-size @drm_asahi_attachment structures as their payload, so size
+ * equals number of attachments * sizeof(struct drm_asahi_attachment).
+ */
+ __u16 size;
+
+ /**
+ * @vdm_barrier: VDM (render) command index to wait on.
+ *
+ * Barriers are indices relative to the beginning of a given submit. A
+ * barrier of 0 waits on commands submitted to the respective subqueue
+ * in previous submit ioctls. A barrier of N waits on N previous
+ * commands on the subqueue within the current submit ioctl. As a
+ * special case, passing @DRM_ASAHI_BARRIER_NONE avoids waiting on any
+ * commands in the subqueue.
+ *
+ * Examples:
+ *
+ * 0: This waits on all previous work.
+ *
+ * NONE: This does not wait for anything on this subqueue.
+ *
+ * 1: This waits on the first render command in the submit.
+ * This is valid only if there are multiple render commands in the
+ * same submit.
+ *
+ * Barriers are valid only for hardware commands. Synthetic software
+ * commands to set attachments must pass NONE here.
+ */
+ __u16 vdm_barrier;
+
+ /**
+ * @cdm_barrier: CDM (compute) command index to wait on.
+ *
+ * See @vdm_barrier, and replace VDM/render with CDM/compute.
+ */
+ __u16 cdm_barrier;
+};
+
+/**
+ * struct drm_asahi_submit - Arguments passed to DRM_IOCTL_ASAHI_SUBMIT
+ */
+struct drm_asahi_submit {
+ /**
+ * @syncs: An optional pointer to an array of drm_asahi_sync. The first
+ * @in_sync_count elements are in-syncs, then the remaining
+ * @out_sync_count elements are out-syncs. Using a single array with
+ * explicit partitioning simplifies handling.
+ */
+ __u64 syncs;
+
+ /**
+ * @cmdbuf: Pointer to the command buffer to submit.
+ *
+ * This is a flat command buffer. By design, it contains no CPU
+ * pointers, which makes it suitable for a virtgpu wire protocol without
+ * requiring any serializing/deserializing step.
+ *
+ * It consists of a series of commands. Each command begins with a
+ * fixed-size @drm_asahi_cmd_header header and is followed by a
+ * variable-length payload according to the type and size in the header.
+ *
+ * The combined count of "real" hardware commands must be nonzero and at
+ * most drm_asahi_params_global::max_commands_per_submission.
+ */
+ __u64 cmdbuf;
+
+ /** @flags: Flags for command submission (MBZ) */
+ __u32 flags;
+
+ /** @queue_id: The queue ID to be submitted to */
+ __u32 queue_id;
+
+ /**
+ * @in_sync_count: Number of sync objects to wait on before starting
+ * this job.
+ */
+ __u32 in_sync_count;
+
+ /**
+ * @out_sync_count: Number of sync objects to signal upon completion of
+ * this job.
+ */
+ __u32 out_sync_count;
+
+ /** @cmdbuf_size: Command buffer size in bytes */
+ __u32 cmdbuf_size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
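A sketch of a single-command submission, assuming a pre-filled struct drm_asahi_cmd_compute plus one binary syncobj to wait on and one to signal. It illustrates the flat layout (a drm_asahi_cmd_header immediately followed by its payload), with both barriers set to DRM_ASAHI_BARRIER_NONE so the command does not wait on prior work in either subqueue. Names and error handling are illustrative.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/asahi_drm.h>

/* Sketch: submit one compute command with one in-sync and one out-sync. */
static int submit_compute(int fd, __u32 queue_id,
			  const struct drm_asahi_cmd_compute *cmd,
			  __u32 wait_syncobj, __u32 signal_syncobj)
{
	struct {
		struct drm_asahi_cmd_header hdr;
		struct drm_asahi_cmd_compute payload;
	} buf = {
		.hdr = {
			.cmd_type    = DRM_ASAHI_CMD_COMPUTE,
			.size        = sizeof(struct drm_asahi_cmd_compute),
			.vdm_barrier = DRM_ASAHI_BARRIER_NONE,
			.cdm_barrier = DRM_ASAHI_BARRIER_NONE,
		},
		.payload = *cmd,
	};
	/* In-syncs first, then out-syncs, as documented for @syncs. */
	struct drm_asahi_sync syncs[2] = {
		{ .sync_type = DRM_ASAHI_SYNC_SYNCOBJ, .handle = wait_syncobj },
		{ .sync_type = DRM_ASAHI_SYNC_SYNCOBJ, .handle = signal_syncobj },
	};
	struct drm_asahi_submit submit = {
		.syncs          = (uintptr_t)syncs,
		.cmdbuf         = (uintptr_t)&buf,
		.queue_id       = queue_id,
		.in_sync_count  = 1,
		.out_sync_count = 1,
		.cmdbuf_size    = sizeof(buf),
	};

	return ioctl(fd, DRM_IOCTL_ASAHI_SUBMIT, &submit);
}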
+
+/**
+ * struct drm_asahi_attachment - Describe an "attachment".
+ *
+ * Attachments are any memory written by shaders, notably including render
+ * target attachments written by the end-of-tile program. This is purely a hint
+ * about the accessed memory regions. It is optional to specify, which is
+ * fortunate as it cannot be specified precisely with bindless access anyway.
+ * Where possible, however, userspace should include these hints, which are
+ * forwarded to the firmware.
+ *
+ * This struct is implicitly sized and therefore is not extensible.
+ */
+struct drm_asahi_attachment {
+ /** @pointer: Base address of the attachment */
+ __u64 pointer;
+
+ /** @size: Size of the attachment in bytes */
+ __u64 size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @flags: MBZ */
+ __u32 flags;
+};
+
+/**
+ * enum drm_asahi_render_flags - Flags for render commands
+ */
+enum drm_asahi_render_flags {
+ /**
+ * @DRM_ASAHI_RENDER_VERTEX_SCRATCH: A vertex stage shader uses scratch
+ * memory.
+ */
+ DRM_ASAHI_RENDER_VERTEX_SCRATCH = (1U << 0),
+
+ /**
+ * @DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES: Process even empty tiles.
+ * This must be set when clearing render targets.
+ */
+ DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES = (1U << 1),
+
+ /**
+ * @DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING: Run vertex stage on a single
+ * cluster (on multi-cluster GPUs)
+ *
+ * This harms performance but can work around certain sync/coherency
+ * bugs, and therefore is useful for debugging.
+ */
+ DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING = (1U << 2),
+
+ /**
+ * @DRM_ASAHI_RENDER_DBIAS_IS_INT: Use integer depth bias formula.
+ *
+ * Graphics specifications contain two alternate formulas for depth
+ * bias, a float formula used with floating-point depth buffers and an
+ * integer formula used with unorm depth buffers. This flag specifies
+ * that the integer formula should be used. If omitted, the float
+ * formula is used instead.
+ *
+ * This corresponds to bit 18 of the relevant hardware control register,
+ * so we match that here for efficiency.
+ */
+ DRM_ASAHI_RENDER_DBIAS_IS_INT = (1U << 18),
+};
+
+/**
+ * struct drm_asahi_zls_buffer - Describe a depth or stencil buffer.
+ *
+ * These fields correspond to hardware registers in the ZLS (Z Load/Store) unit.
+ * There are three hardware registers for each field, one each for loads,
+ * stores, and partial renders. In practice, it makes sense to set all three to
+ * the same values, except in exceptional cases not yet implemented in
+ * userspace, so for simplicity/efficiency we do not duplicate the fields here.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_zls_buffer {
+ /** @base: Base address of the buffer */
+ __u64 base;
+
+ /**
+ * @comp_base: If the load buffer is compressed, address of the
+ * compression metadata section.
+ */
+ __u64 comp_base;
+
+ /**
+ * @stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the buffer.
+ */
+ __u32 stride;
+
+ /**
+ * @comp_stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the compression metadata.
+ */
+ __u32 comp_stride;
+};
+
+/**
+ * struct drm_asahi_timestamp - Describe a timestamp write.
+ *
+ * The firmware can optionally write the GPU timestamp at render pass
+ * granularities, but it needs to be mapped specially via
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT. This structure therefore describes where to
+ * write as a handle-offset pair, rather than a GPU address like normal.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamp {
+ /**
+ * @handle: Handle of the timestamp buffer, or 0 to skip this
+ * timestamp. If nonzero, this must equal the value returned in
+ * drm_asahi_gem_bind_object::object_handle.
+ */
+ __u32 handle;
+
+ /** @offset: Offset to write into the timestamp buffer */
+ __u32 offset;
+};
+
+/**
+ * struct drm_asahi_timestamps - Describe timestamp writes.
+ *
+ * Each operation that can be timestamped can be timestamped at both the start
+ * and the end. Therefore, drm_asahi_timestamp structs always come in pairs,
+ * bundled together into drm_asahi_timestamps.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamps {
+ /** @start: Timestamp recorded at the start of the operation */
+ struct drm_asahi_timestamp start;
+
+ /** @end: Timestamp recorded at the end of the operation */
+ struct drm_asahi_timestamp end;
+};
+
+/**
+ * struct drm_asahi_helper_program - Describe helper program configuration.
+ *
+ * The helper program is a compute-like kernel required for various hardware
+ * functionality. Its most important role is dynamically allocating
+ * scratch/stack memory for individual subgroups, by partitioning a static
+ * allocation shared for the whole device. It is supplied by userspace via
+ * drm_asahi_helper_program and internally dispatched by the hardware as needed.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_helper_program {
+ /**
+ * @binary: USC address to the helper program binary. This is a tagged
+ * pointer with configuration in the bottom bits.
+ */
+ __u32 binary;
+
+ /** @cfg: Additional configuration bits for the helper program. */
+ __u32 cfg;
+
+ /**
+ * @data: Data passed to the helper program. This value is not
+ * interpreted by the kernel, firmware, or hardware in any way. It is
+ * simply a sideband for userspace, set with the submit ioctl and read
+ * via special registers inside the helper program.
+ *
+ * In practice, userspace will pass a 64-bit GPU VA here pointing to the
+ * actual arguments, which presumably don't fit in 64 bits.
+ */
+ __u64 data;
+};
+
+/**
+ * struct drm_asahi_bg_eot - Describe a background or end-of-tile program.
+ *
+ * The background and end-of-tile programs are dispatched by the hardware at the
+ * beginning and end of rendering. As the hardware "tilebuffer" is simply local
+ * memory, these programs are necessary to implement API-level render targets.
+ * The fragment-like background program is responsible for loading either the
+ * clear colour or the existing render target contents, while the compute-like
+ * end-of-tile program stores the tilebuffer contents to memory.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_bg_eot {
+ /**
+ * @usc: USC address of the hardware USC words binding resources
+ * (including images and uniforms) and the program itself. Note this is
+ * an additional layer of indirection compared to the helper program,
+ * avoiding the need for a sideband for data. This is a tagged pointer
+ * with additional configuration in the bottom bits.
+ */
+ __u32 usc;
+
+ /**
+ * @rsrc_spec: Resource specifier for the program. This is a packed
+ * hardware data structure describing the required number of registers,
+ * uniforms, bound textures, and bound samplers.
+ */
+ __u32 rsrc_spec;
+};
+
+/**
+ * struct drm_asahi_cmd_render - Command to submit 3D
+ *
+ * This command submits a single render pass. The hardware control stream may
+ * include many draws and subpasses, but within the command, the framebuffer
+ * dimensions and attachments are fixed.
+ *
+ * The hardware requires the firmware to program a large number of control
+ * registers, setting up state at render pass granularity, before each command
+ * rendering 3D. The firmware bundles this state into data structures.
+ * Unfortunately, we cannot expose any of that directly to userspace, because
+ * the kernel-firmware ABI is not stable. Although we can guarantee that the
+ * firmware updates in tandem with the kernel, we cannot break old userspace
+ * when upgrading the firmware and kernel. Therefore, we need to abstract the
+ * data structures well to avoid tying our hands with future firmware versions.
+ *
+ * The bulk of drm_asahi_cmd_render therefore consists of values of hardware
+ * control registers, marshalled via the firmware interface.
+ *
+ * The framebuffer/tilebuffer dimensions are also specified here. In addition to
+ * being passed to the firmware/hardware, the kernel requires these dimensions
+ * to calculate various essential tiling-related data structures. It is
+ * unfortunate that our submits are heavier than on vendors with saner
+ * hardware-software interfaces. The upshot is that all of this information is
+ * readily available to userspace with all current APIs.
+ *
+ * It looks odd, but it is not overly burdensome and it ensures we can remain
+ * compatible with old userspace.
+ */
+struct drm_asahi_cmd_render {
+ /** @flags: Combination of drm_asahi_render_flags flags. */
+ __u32 flags;
+
+ /**
+ * @isp_zls_pixels: ISP_ZLS_PIXELS register value. This contains the
+ * depth/stencil width/height, which may differ from the framebuffer
+ * width/height.
+ */
+ __u32 isp_zls_pixels;
+
+ /**
+ * @vdm_ctrl_stream_base: VDM_CTRL_STREAM_BASE register value. GPU
+ * address to the beginning of the VDM control stream.
+ */
+ __u64 vdm_ctrl_stream_base;
+
+ /** @vertex_helper: Helper program used for the vertex shader */
+ struct drm_asahi_helper_program vertex_helper;
+
+ /** @fragment_helper: Helper program used for the fragment shader */
+ struct drm_asahi_helper_program fragment_helper;
+
+ /**
+ * @isp_scissor_base: ISP_SCISSOR_BASE register value. GPU address of an
+ * array of scissor descriptors indexed in the render pass.
+ */
+ __u64 isp_scissor_base;
+
+ /**
+ * @isp_dbias_base: ISP_DBIAS_BASE register value. GPU address of an
+ * array of depth bias values indexed in the render pass.
+ */
+ __u64 isp_dbias_base;
+
+ /**
+ * @isp_oclqry_base: ISP_OCLQRY_BASE register value. GPU address of an
+ * array of occlusion query results written by the render pass.
+ */
+ __u64 isp_oclqry_base;
+
+ /** @depth: Depth buffer */
+ struct drm_asahi_zls_buffer depth;
+
+ /** @stencil: Stencil buffer */
+ struct drm_asahi_zls_buffer stencil;
+
+ /** @zls_ctrl: ZLS_CTRL register value */
+ __u64 zls_ctrl;
+
+ /** @ppp_multisamplectl: PPP_MULTISAMPLECTL register value */
+ __u64 ppp_multisamplectl;
+
+ /**
+ * @sampler_heap: Base address of the sampler heap. This heap is used
+ * for both vertex shaders and fragment shaders. The registers are
+ * per-stage, but there is no known use case for separate heaps.
+ */
+ __u64 sampler_heap;
+
+ /** @ppp_ctrl: PPP_CTRL register value */
+ __u32 ppp_ctrl;
+
+ /** @width_px: Framebuffer width in pixels */
+ __u16 width_px;
+
+ /** @height_px: Framebuffer height in pixels */
+ __u16 height_px;
+
+ /** @layers: Number of layers in the framebuffer */
+ __u16 layers;
+
+ /** @sampler_count: Number of samplers in the sampler heap. */
+ __u16 sampler_count;
+
+ /** @utile_width_px: Width of a logical tilebuffer tile in pixels */
+ __u8 utile_width_px;
+
+ /** @utile_height_px: Height of a logical tilebuffer tile in pixels */
+ __u8 utile_height_px;
+
+ /** @samples: # of samples in the framebuffer. Must be 1, 2, or 4. */
+ __u8 samples;
+
+ /** @sample_size_B: # of bytes in the tilebuffer required per sample. */
+ __u8 sample_size_B;
+
+ /**
+ * @isp_merge_upper_x: 32-bit float used in the hardware triangle
+ * merging. Calculate as: tan(60 deg) * width.
+ *
+ * Making these values UAPI avoids requiring floating-point calculations
+ * in the kernel in the hot path.
+ */
+ __u32 isp_merge_upper_x;
+
+ /**
+ * @isp_merge_upper_y: 32-bit float. Calculate as: tan(60 deg) * height.
+ * See @isp_merge_upper_x.
+ */
+ __u32 isp_merge_upper_y;
+
+ /** @bg: Background program run for each tile at the start */
+ struct drm_asahi_bg_eot bg;
+
+ /** @eot: End-of-tile program run for each tile at the end */
+ struct drm_asahi_bg_eot eot;
+
+ /**
+ * @partial_bg: Background program run at the start of each tile when
+ * resuming the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_bg;
+
+ /**
+ * @partial_eot: End-of-tile program run at the end of each tile when
+ * pausing the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_eot;
+
+ /**
+ * @isp_bgobjdepth: ISP_BGOBJDEPTH register value. This is the depth
+ * buffer clear value, encoded in the depth buffer's format: either a
+ * 32-bit float or a 16-bit unorm (with upper bits zeroed).
+ */
+ __u32 isp_bgobjdepth;
+
+ /**
+ * @isp_bgobjvals: ISP_BGOBJVALS register value. The bottom 8-bits
+ * contain the stencil buffer clear value.
+ */
+ __u32 isp_bgobjvals;
+
+ /** @ts_vtx: Timestamps for the vertex portion of the render */
+ struct drm_asahi_timestamps ts_vtx;
+
+ /** @ts_frag: Timestamps for the fragment portion of the render */
+ struct drm_asahi_timestamps ts_frag;
+};
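Since @isp_merge_upper_x and @isp_merge_upper_y are documented as 32-bit floats carried in __u32 fields, userspace presumably computes tan(60 deg) times the framebuffer dimension and stores the raw float bit pattern. The sketch below shows that computation; the bit-copy convention and helper names are assumptions drawn from the field descriptions.

#include <math.h>
#include <string.h>
#include <drm/asahi_drm.h>

/* Sketch: return the raw bit pattern of a 32-bit float. */
static __u32 f32_bits(float f)
{
	__u32 bits;

	memcpy(&bits, &f, sizeof(bits));
	return bits;
}

/* Sketch: fill the framebuffer dimensions and ISP merge values. */
static void set_fb_dims(struct drm_asahi_cmd_render *cmd,
			__u16 width_px, __u16 height_px)
{
	const float tan_60 = tanf(60.0f * (float)M_PI / 180.0f);

	cmd->width_px  = width_px;
	cmd->height_px = height_px;
	cmd->isp_merge_upper_x = f32_bits(tan_60 * (float)width_px);
	cmd->isp_merge_upper_y = f32_bits(tan_60 * (float)height_px);
}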
+
+/**
+ * struct drm_asahi_cmd_compute - Command to submit compute
+ *
+ * This command submits a control stream consisting of compute dispatches. There
+ * is essentially no limit on how many compute dispatches may be included in a
+ * single compute command, although timestamps are at command granularity.
+ */
+struct drm_asahi_cmd_compute {
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @sampler_count: Number of samplers in the sampler heap. */
+ __u32 sampler_count;
+
+ /**
+ * @cdm_ctrl_stream_base: CDM_CTRL_STREAM_BASE register value. GPU
+ * address to the beginning of the CDM control stream.
+ */
+ __u64 cdm_ctrl_stream_base;
+
+ /**
+ * @cdm_ctrl_stream_end: GPU address of the end of the hardware
+ * control stream. Note this only considers the first contiguous segment
+ * of the control stream, as the stream might jump elsewhere.
+ */
+ __u64 cdm_ctrl_stream_end;
+
+ /** @sampler_heap: Base address of the sampler heap. */
+ __u64 sampler_heap;
+
+ /** @helper: Helper program used for this compute command */
+ struct drm_asahi_helper_program helper;
+
+ /** @ts: Timestamps for the compute command */
+ struct drm_asahi_timestamps ts;
+};
+
+/**
+ * struct drm_asahi_get_time - Arguments passed to DRM_IOCTL_ASAHI_GET_TIME
+ */
+struct drm_asahi_get_time {
+ /** @flags: MBZ. */
+ __u64 flags;
+
+ /** @gpu_timestamp: On return, the GPU timestamp in nanoseconds. */
+ __u64 gpu_timestamp;
+};
+
+/**
+ * DRM_IOCTL_ASAHI() - Build an Asahi IOCTL number
+ * @__access: Access type. Must be R, W or RW.
+ * @__id: One of the DRM_ASAHI_xxx id.
+ * @__type: Suffix of the type being passed to the IOCTL.
+ *
+ * Don't use this macro directly, use the DRM_IOCTL_ASAHI_xxx
+ * values instead.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define DRM_IOCTL_ASAHI(__access, __id, __type) \
+ DRM_IO ## __access(DRM_COMMAND_BASE + DRM_ASAHI_ ## __id, \
+ struct drm_asahi_ ## __type)
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen. */
+enum {
+ DRM_IOCTL_ASAHI_GET_PARAMS = DRM_IOCTL_ASAHI(W, GET_PARAMS, get_params),
+ DRM_IOCTL_ASAHI_GET_TIME = DRM_IOCTL_ASAHI(WR, GET_TIME, get_time),
+ DRM_IOCTL_ASAHI_VM_CREATE = DRM_IOCTL_ASAHI(WR, VM_CREATE, vm_create),
+ DRM_IOCTL_ASAHI_VM_DESTROY = DRM_IOCTL_ASAHI(W, VM_DESTROY, vm_destroy),
+ DRM_IOCTL_ASAHI_VM_BIND = DRM_IOCTL_ASAHI(W, VM_BIND, vm_bind),
+ DRM_IOCTL_ASAHI_GEM_CREATE = DRM_IOCTL_ASAHI(WR, GEM_CREATE, gem_create),
+ DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET = DRM_IOCTL_ASAHI(WR, GEM_MMAP_OFFSET, gem_mmap_offset),
+ DRM_IOCTL_ASAHI_GEM_BIND_OBJECT = DRM_IOCTL_ASAHI(WR, GEM_BIND_OBJECT, gem_bind_object),
+ DRM_IOCTL_ASAHI_QUEUE_CREATE = DRM_IOCTL_ASAHI(WR, QUEUE_CREATE, queue_create),
+ DRM_IOCTL_ASAHI_QUEUE_DESTROY = DRM_IOCTL_ASAHI(W, QUEUE_DESTROY, queue_destroy),
+ DRM_IOCTL_ASAHI_SUBMIT = DRM_IOCTL_ASAHI(W, SUBMIT, submit),
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _ASAHI_DRM_H_ */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 7fba37b94401..e63a71d3c607 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -905,13 +905,17 @@ struct drm_syncobj_destroy {
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
+
+ __u64 point;
};
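Assuming the new TIMELINE flag selects the fence at @point when combined with EXPORT_SYNC_FILE (and, symmetrically, the target point for IMPORT_SYNC_FILE), a userspace export of a single timeline point might look like the sketch below; the semantics stated here are an assumption, not confirmed by this hunk.

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Sketch: export the fence at one timeline point as a sync file fd. */
static int export_timeline_point(int fd, __u32 syncobj, __u64 point)
{
	struct drm_syncobj_handle args = {
		.handle = syncobj,
		.flags  = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
			  DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE,
		.fd     = -1,
		.point  = point,
	};

	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
		return -1;
	return args.fd;
}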
struct drm_syncobj_transfer {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e41a3cec6a9e..81202a50dc9e 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -422,6 +422,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b
+#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c
/* add more to the end as needed */
@@ -1495,6 +1496,50 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S)
/*
+ * Apple GPU-tiled layouts.
+ *
+ * Apple GPUs support nonlinear tilings with optional lossless compression.
+ *
+ * GPU-tiled images are divided into 16KiB tiles:
+ *
+ * Bytes per pixel Tile size
+ * --------------- ---------
+ * 1 128x128
+ * 2 128x64
+ * 4 64x64
+ * 8 64x32
+ * 16 32x32
+ *
+ * Tiles are stored in raster order. Pixels within a tile are interleaved
+ * (Morton order).
+ *
+ * Compressed images pad the body to 128 bytes and are immediately followed by a
+ * metadata section. The metadata section rounds the image dimensions up to
+ * powers of two and contains 8 bytes for each 16x16 compression subtile.
+ * Subtiles are interleaved (Morton order).
+ *
+ * All images are 128-byte aligned.
+ *
+ * These layouts fundamentally do not have meaningful strides. No matter how we
+ * specify strides for these layouts, userspace unaware of Apple image layouts
+ * will be unable to use the specified stride correctly for any purpose.
+ * Userspace aware of the image layouts does not use strides. The most "correct"
+ * convention would be setting the image stride to 0. Unfortunately, some
+ * software assumes the stride is at least (width * bytes per pixel). We
+ * therefore require that stride equals (width * bytes per pixel). Since the
+ * stride is arbitrary here, we pick the simplest convention.
+ *
+ * Although containing two sections, compressed image layouts are treated in
+ * software as a single plane. This is modelled after AFBC, a similar
+ * scheme. Attempting to separate the sections to be "explicit" in DRM would
+ * only generate more confusion, as software does not treat the image this way.
+ *
+ * For detailed information on the hardware image layouts, see
+ * https://docs.mesa3d.org/drivers/asahi.html#image-layouts
+ */
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1)
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2)
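A sketch of the tile-size table above as code; the dimensions follow directly from the fixed 16KiB tile size. The helper name is illustrative, not part of the UAPI.

/* Sketch: tile dimensions for DRM_FORMAT_MOD_APPLE_GPU_TILED images. */
static void apple_gpu_tile_dims(unsigned int bytes_per_pixel,
				unsigned int *w, unsigned int *h)
{
	switch (bytes_per_pixel) {
	case 1:  *w = 128; *h = 128; break;	/* 128 * 128 * 1  = 16KiB */
	case 2:  *w = 128; *h = 64;  break;	/* 128 * 64  * 2  = 16KiB */
	case 4:  *w = 64;  *h = 64;  break;	/* 64  * 64  * 4  = 16KiB */
	case 8:  *w = 64;  *h = 32;  break;	/* 64  * 32  * 8  = 16KiB */
	case 16: *w = 32;  *h = 32;  break;	/* 32  * 32  * 16 = 16KiB */
	default: *w = 0;   *h = 0;   break;	/* unsupported bpp */
	}
}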
+
+/*
* AMD modifiers
*
* Memory layout:
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index c2ce71987e9b..9debb320c34b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -163,6 +163,12 @@ struct drm_virtgpu_3d_wait {
__u32 flags;
};
+#define VIRTGPU_DRM_CAPSET_VIRGL 1
+#define VIRTGPU_DRM_CAPSET_VIRGL2 2
+#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
+#define VIRTGPU_DRM_CAPSET_VENUS 4
+#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
+#define VIRTGPU_DRM_CAPSET_DRM 6
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index bf2c9cabd207..be109777d10d 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -309,8 +309,9 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
-/* 3 is reserved for gfxstream */
+#define VIRTIO_GPU_CAPSET_GFXSTREAM_VULKAN 3
#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5
#define VIRTIO_GPU_CAPSET_DRM 6
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
diff --git a/include/video/pixel_format.h b/include/video/pixel_format.h
new file mode 100644
index 000000000000..b5104b2a3a13
--- /dev/null
+++ b/include/video/pixel_format.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef VIDEO_PIXEL_FORMAT_H
+#define VIDEO_PIXEL_FORMAT_H
+
+#include <linux/types.h>
+
+struct pixel_format {
+ unsigned char bits_per_pixel;
+ bool indexed;
+ union {
+ struct {
+ struct {
+ unsigned char offset;
+ unsigned char length;
+ } alpha, red, green, blue;
+ };
+ struct {
+ unsigned char offset;
+ unsigned char length;
+ } index;
+ };
+};
+
+#define PIXEL_FORMAT_XRGB1555 \
+ { 16, false, { .alpha = {0, 0}, .red = {10, 5}, .green = {5, 5}, .blue = {0, 5} } }
+
+#define PIXEL_FORMAT_RGB565 \
+ { 16, false, { .alpha = {0, 0}, .red = {11, 5}, .green = {5, 6}, .blue = {0, 5} } }
+
+#define PIXEL_FORMAT_RGB888 \
+ { 24, false, { .alpha = {0, 0}, .red = {16, 8}, .green = {8, 8}, .blue = {0, 8} } }
+
+#define PIXEL_FORMAT_XRGB8888 \
+ { 32, false, { .alpha = {0, 0}, .red = {16, 8}, .green = {8, 8}, .blue = {0, 8} } }
+
+#define PIXEL_FORMAT_XBGR8888 \
+ { 32, false, { .alpha = {0, 0}, .red = {0, 8}, .green = {8, 8}, .blue = {16, 8} } }
+
+#define PIXEL_FORMAT_XRGB2101010 \
+ { 32, false, { .alpha = {0, 0}, .red = {20, 10}, .green = {10, 10}, .blue = {0, 10} } }
+
+#endif
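A sketch of how an in-kernel consumer of struct pixel_format might derive a channel mask from the offset/length pairs; the helper and variable names are illustrative, not part of this header.

#include <video/pixel_format.h>

/* Sketch: build the red channel mask from a pixel_format description.
 * For PIXEL_FORMAT_XRGB8888 this yields 0x00ff0000. */
static unsigned int pixel_format_red_mask(const struct pixel_format *fmt)
{
	return ((1u << fmt->red.length) - 1) << fmt->red.offset;
}

static const struct pixel_format example_xrgb8888 = PIXEL_FORMAT_XRGB8888;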