-rw-r--r--  .mailmap | 3
-rw-r--r--  Documentation/ABI/testing/sysfs-class-power | 1
-rw-r--r--  Documentation/core-api/dma-attributes.rst | 8
-rw-r--r--  Documentation/cpu-freq/cpu-drivers.rst | 3
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml | 17
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml | 82
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/lvds.yaml (renamed from Documentation/devicetree/bindings/display/panel/lvds.yaml) | 35
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml | 77
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml | 86
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 219
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml | 71
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,ethdr.yaml | 147
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml | 77
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml | 110
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,mutex.yaml | 83
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,od.yaml | 53
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,ovl-2l.yaml | 88
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml | 103
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml | 69
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml | 117
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml | 58
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,ufoe.yaml | 61
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,wdma.yaml | 86
-rw-r--r--  Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml | 19
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml | 23
-rw-r--r--  Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml | 19
-rw-r--r--  Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml | 19
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-lvds.yaml | 57
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml | 19
-rw-r--r--  Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/sifive,gpio.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-sifive.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml | 2
-rw-r--r--  Documentation/gpu/i915.rst | 1
-rw-r--r--  Documentation/tools/rtla/common_hist_options.rst | 2
-rw-r--r--  Documentation/tools/rtla/common_osnoise_description.rst | 2
-rw-r--r--  Documentation/tools/rtla/rtla-osnoise-hist.rst | 2
-rw-r--r--  Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst | 2
-rw-r--r--  Documentation/virt/kvm/api.rst | 16
-rw-r--r--  MAINTAINERS | 75
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio.c | 2
-rw-r--r--  arch/parisc/include/asm/bitops.h | 8
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 29
-rw-r--r--  arch/parisc/kernel/unaligned.c | 14
-rw-r--r--  arch/parisc/lib/iomap.c | 18
-rw-r--r--  arch/parisc/mm/init.c | 9
-rw-r--r--  arch/powerpc/kernel/head_book3s_32.S | 4
-rw-r--r--  arch/powerpc/lib/sstep.c | 2
-rw-r--r--  arch/riscv/configs/nommu_k210_sdcard_defconfig | 2
-rw-r--r--  arch/riscv/kernel/Makefile | 2
-rw-r--r--  arch/riscv/kernel/entry.S | 10
-rw-r--r--  arch/riscv/kernel/sbi.c | 72
-rw-r--r--  arch/riscv/kernel/trace_irq.c | 27
-rw-r--r--  arch/riscv/kernel/trace_irq.h | 11
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 1
-rw-r--r--  arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  arch/x86/include/asm/svm.h | 36
-rw-r--r--  arch/x86/kernel/cpu/sgx/main.c | 10
-rw-r--r--  arch/x86/kernel/fpu/regset.c | 9
-rw-r--r--  arch/x86/kernel/fpu/xstate.c | 5
-rw-r--r--  arch/x86/kernel/kvm.c | 9
-rw-r--r--  arch/x86/kernel/ptrace.c | 4
-rw-r--r--  arch/x86/kvm/cpuid.c | 5
-rw-r--r--  arch/x86/kvm/lapic.c | 7
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 13
-rw-r--r--  arch/x86/kvm/pmu.c | 7
-rw-r--r--  arch/x86/kvm/svm/avic.c | 93
-rw-r--r--  arch/x86/kvm/svm/nested.c | 26
-rw-r--r--  arch/x86/kvm/svm/svm.c | 104
-rw-r--r--  arch/x86/kvm/svm/svm.h | 15
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 1
-rw-r--r--  arch/x86/kvm/x86.c | 34
-rw-r--r--  arch/x86/kvm/xen.c | 97
-rw-r--r--  block/bfq-iosched.c | 2
-rw-r--r--  block/blk-core.c | 10
-rw-r--r--  block/blk-map.c | 2
-rw-r--r--  block/blk-mq.c | 4
-rw-r--r--  block/elevator.c | 2
-rw-r--r--  block/fops.c | 2
-rw-r--r--  block/genhd.c | 14
-rw-r--r--  crypto/af_alg.c | 3
-rw-r--r--  drivers/acpi/processor_idle.c | 5
-rw-r--r--  drivers/acpi/tables.c | 2
-rw-r--r--  drivers/ata/pata_hpt37x.c | 18
-rw-r--r--  drivers/base/dd.c | 5
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 20
-rw-r--r--  drivers/block/loop.c | 8
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 2
-rw-r--r--  drivers/block/rbd.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 2
-rw-r--r--  drivers/clk/ingenic/jz4725b-cgu.c | 3
-rw-r--r--  drivers/clk/qcom/gcc-msm8994.c | 106
-rw-r--r--  drivers/cpufreq/cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 11
-rw-r--r--  drivers/dma-buf/dma-fence-array.c | 14
-rw-r--r--  drivers/dma-buf/dma-fence-chain.c | 15
-rw-r--r--  drivers/dma-buf/dma-resv.c | 34
-rw-r--r--  drivers/dma/at_xdmac.c | 4
-rw-r--r--  drivers/dma/ptdma/ptdma-dev.c | 17
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 9
-rw-r--r--  drivers/dma/sh/shdma-base.c | 4
-rw-r--r--  drivers/dma/stm32-dmamux.c | 4
-rw-r--r--  drivers/edac/edac_mc.c | 2
-rw-r--r--  drivers/gpio/gpio-rockchip.c | 56
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 14
-rw-r--r--  drivers/gpio/gpiolib.c | 10
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 276
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 14
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 3
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_dp501.c | 58
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 37
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 413
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/analogix/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 159
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.h | 3
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c | 3352
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/nwl-dsi.c | 18
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 72
-rw-r--r--  drivers/gpu/drm/dp/drm_dp.c | 86
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 14
-rw-r--r--  drivers/gpu/drm/drm_bridge_connector.c | 15
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 420
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 11
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 3
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 80
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c | 141
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 23
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 2
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 2
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 23
-rw-r--r--  drivers/gpu/drm/drm_privacy_screen.c | 1
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 61
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 3
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 271
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi_regs.h | 342
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 217
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 167
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 119
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 979
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 93
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 307
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 78
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 119
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 212
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h | 109
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_regs.h | 480
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 240
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 39
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 51
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 192
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_sseu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_regs.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 133
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 2630
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.h | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 80
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h | 82
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 126
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 204
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 143
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 38
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 37
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 148
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 328
-rw-r--r--  drivers/gpu/drm/i915/i915_file_private.h | 108
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 101
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_mitigations.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_module.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1203
-rw-r--r--  drivers/gpu/drm/i915/i915_reg_defs.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 76
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 569
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_resource.c | 418
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_resource.h | 234
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_snapshot.c | 134
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_snapshot.h | 112
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_types.h | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_mchbar_regs.h | 228
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 477
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_step.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.c | 42
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 29
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 210
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 120
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 31
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 13
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.h | 3
-rw-r--r--  drivers/gpu/drm/imx/dcss/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 3
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 15
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.c | 3
-rw-r--r--  drivers/gpu/drm/lib/drm_random.c | 3
-rw-r--r--  drivers/gpu/drm/lib/drm_random.h | 2
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 1
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.c | 5
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 200
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 20
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 3
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 81
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt35560.c | 561
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-acx424akp.c | 490
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_features.h | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_regs.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 31
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 13
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 8
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 9
-rw-r--r--  drivers/gpu/drm/selftests/Makefile | 3
-rw-r--r--  drivers/gpu/drm/selftests/drm_buddy_selftests.h | 15
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_buddy.c | 992
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_framebuffer.c | 1
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/solomon/Kconfig | 21
-rw-r--r--  drivers/gpu/drm/solomon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x-i2c.c | 116
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 843
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.h | 76
-rw-r--r--  drivers/gpu/drm/sprd/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/sprd/sprd_dpu.c | 5
-rw-r--r--  drivers/gpu/drm/sprd/sprd_drm.c | 6
-rw-r--r--  drivers/gpu/drm/sprd/sprd_dsi.c | 5
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 3
-rw-r--r--  drivers/gpu/drm/stm/drv.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 7
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/falcon.c | 2
-rw-r--r--  drivers/gpu/drm/tidss/tidss_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tiny/arcpgu.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 36
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drv.c | 3
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 6
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 10
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 20
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 34
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 46
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 3
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c | 3
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dpsub.c | 3
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 35
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 76
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.h | 4
-rw-r--r--  drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c | 4
-rw-r--r--  drivers/hid/hid-apple.c | 16
-rw-r--r--  drivers/hid/hid-elo.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-of-goodix.c | 28
-rw-r--r--  drivers/hv/hv_utils_transport.c | 2
-rw-r--r--  drivers/hv/vmbus_drv.c | 9
-rw-r--r--  drivers/hwmon/hwmon.c | 14
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 2
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 5
-rw-r--r--  drivers/i2c/busses/Kconfig | 6
-rw-r--r--  drivers/i2c/busses/i2c-bcm2835.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-brcmstb.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qcom-cci.c | 16
-rw-r--r--  drivers/iio/accel/bmc150-accel-core.c | 5
-rw-r--r--  drivers/iio/accel/fxls8962af-core.c | 12
-rw-r--r--  drivers/iio/accel/fxls8962af-i2c.c | 2
-rw-r--r--  drivers/iio/accel/fxls8962af-spi.c | 2
-rw-r--r--  drivers/iio/accel/fxls8962af.h | 3
-rw-r--r--  drivers/iio/accel/kxcjk-1013.c | 5
-rw-r--r--  drivers/iio/accel/mma9551.c | 5
-rw-r--r--  drivers/iio/accel/mma9553.c | 5
-rw-r--r--  drivers/iio/adc/ad7124.c | 2
-rw-r--r--  drivers/iio/adc/men_z188_adc.c | 9
-rw-r--r--  drivers/iio/adc/ti-tsc2046.c | 4
-rw-r--r--  drivers/iio/addac/ad74413r.c | 17
-rw-r--r--  drivers/iio/frequency/admv1013.c | 2
-rw-r--r--  drivers/iio/gyro/bmg160_core.c | 5
-rw-r--r--  drivers/iio/imu/adis16480.c | 7
-rw-r--r--  drivers/iio/imu/kmx61.c | 5
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 6
-rw-r--r--  drivers/iio/magnetometer/bmc150_magn.c | 5
-rw-r--r--  drivers/infiniband/core/cma.c | 40
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 2
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c | 39
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 6
-rw-r--r--  drivers/input/input.c | 6
-rw-r--r--  drivers/input/mouse/psmouse-smbus.c | 10
-rw-r--r--  drivers/input/touchscreen/zinitix.c | 12
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/mmc/core/block.c | 28
-rw-r--r--  drivers/mtd/devices/phram.c | 12
-rw-r--r--  drivers/mtd/mtdcore.c | 2
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 3
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c | 2
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 3
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 7
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 14
-rw-r--r--  drivers/mtd/parsers/qcomsmempart.c | 36
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 30
-rw-r--r--  drivers/net/bonding/bond_main.c | 5
-rw-r--r--  drivers/net/dsa/Kconfig | 1
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 13
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 26
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 7
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 47
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 39
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 243
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 42
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 5
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 4
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r--  drivers/net/hamradio/6pack.c | 4
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 4
-rw-r--r--  drivers/net/mctp/mctp-serial.c | 9
-rw-r--r--  drivers/net/mdio/mdio-ipq4019.c | 6
-rw-r--r--  drivers/net/netdevsim/fib.c | 4
-rw-r--r--  drivers/net/phy/mediatek-ge.c | 3
-rw-r--r--  drivers/net/usb/cdc_ether.c | 12
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 5
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 8
-rw-r--r--  drivers/net/usb/sr9700.c | 2
-rw-r--r--  drivers/net/usb/zaurus.c | 12
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Kconfig | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/commands.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/filter.h | 88
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/rs.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mei/main.c | 45
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mei/net.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 203
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 240
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 3
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 13
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 14
-rw-r--r--  drivers/nvme/host/core.c | 21
-rw-r--r--  drivers/nvme/host/multipath.c | 2
-rw-r--r--  drivers/nvme/host/tcp.c | 63
-rw-r--r--  drivers/nvmem/core.c | 2
-rw-r--r--  drivers/of/fdt.c | 2
-rw-r--r--  drivers/of/unittest.c | 16
-rw-r--r--  drivers/parisc/ccio-dma.c | 3
-rw-r--r--  drivers/parisc/sba_iommu.c | 3
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 13
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 3
-rw-r--r--  drivers/pci/controller/vmd.c | 14
-rw-r--r--  drivers/pci/quirks.c | 14
-rw-r--r--  drivers/pinctrl/intel/pinctrl-tigerlake.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-k210.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-starfive.c | 2
-rw-r--r--  drivers/platform/surface/surface3_power.c | 13
-rw-r--r--  drivers/platform/x86/amd-pmc.c | 42
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 2
-rw-r--r--  drivers/platform/x86/intel/int3472/tps68470_board_data.c | 3
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 1
-rw-r--r--  drivers/power/supply/bq256xx_charger.c | 3
-rw-r--r--  drivers/power/supply/cw2015_battery.c | 2
-rw-r--r--  drivers/regulator/core.c | 13
-rw-r--r--  drivers/regulator/da9121-regulator.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 5
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 6
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 2
-rw-r--r--  drivers/soc/mediatek/mtk-scpsys.c | 15
-rw-r--r--  drivers/spi/spi-rockchip.c | 13
-rw-r--r--  drivers/spi/spi-zynq-qspi.c | 3
-rw-r--r--  drivers/staging/fbtft/fb_st7789v.c | 2
-rw-r--r--  drivers/staging/fbtft/fbtft-core.c | 1
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 4
-rw-r--r--  drivers/tty/n_gsm.c | 61
-rw-r--r--  drivers/tty/n_tty.c | 6
-rw-r--r--  drivers/tty/serial/8250/8250_gsc.c | 2
-rw-r--r--  drivers/tty/serial/sc16is7xx.c | 3
-rw-r--r--  drivers/usb/dwc2/core.h | 2
-rw-r--r--  drivers/usb/dwc2/drd.c | 6
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 21
-rw-r--r--  drivers/usb/dwc3/gadget.c | 2
-rw-r--r--  drivers/usb/gadget/function/rndis.c | 8
-rw-r--r--  drivers/usb/gadget/function/rndis.h | 1
-rw-r--r--  drivers/usb/gadget/udc/udc-xilinx.c | 6
-rw-r--r--  drivers/usb/host/xhci.c | 28
-rw-r--r--  drivers/usb/serial/ch341.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 12
-rw-r--r--  drivers/usb/typec/tipd/core.c | 7
-rw-r--r--  drivers/vhost/vsock.c | 21
-rw-r--r--  drivers/video/fbdev/broadsheetfb.c | 1
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 48
-rw-r--r--  drivers/video/fbdev/metronomefb.c | 1
-rw-r--r--  drivers/video/fbdev/udlfb.c | 1
-rw-r--r--  fs/btrfs/ctree.h | 2
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/btrfs/extent_map.h | 8
-rw-r--r--  fs/btrfs/file.c | 97
-rw-r--r--  fs/btrfs/inode.c | 4
-rw-r--r--  fs/btrfs/ioctl.c | 261
-rw-r--r--  fs/btrfs/lzo.c | 11
-rw-r--r--  fs/btrfs/send.c | 4
-rw-r--r--  fs/btrfs/transaction.c | 12
-rw-r--r--  fs/cifs/cifsacl.c | 9
-rw-r--r--  fs/cifs/cifsfs.c | 1
-rw-r--r--  fs/cifs/fs_context.c | 4
-rw-r--r--  fs/cifs/sess.c | 11
-rw-r--r--  fs/cifs/xattr.c | 2
-rw-r--r--  fs/configfs/dir.c | 14
-rw-r--r--  fs/file_table.c | 8
-rw-r--r--  fs/io_uring.c | 24
-rw-r--r--  fs/namespace.c | 30
-rw-r--r--  fs/nfs/dir.c | 4
-rw-r--r--  fs/nfs/inode.c | 9
-rw-r--r--  fs/nfs/nfs4proc.c | 3
-rw-r--r--  fs/tracefs/inode.c | 5
-rw-r--r--  fs/xfs/xfs_super.c | 7
-rw-r--r--  include/drm/dp/drm_dp_helper.h | 24
-rw-r--r--  include/drm/drm_bridge.h | 7
-rw-r--r--  include/drm/drm_buddy.h | 17
-rw-r--r--  include/drm/drm_connector.h | 11
-rw-r--r--  include/drm/drm_format_helper.h | 4
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 2
-rw-r--r--  include/drm/drm_mode_config.h | 18
-rw-r--r--  include/drm/drm_panel.h | 8
-rw-r--r--  include/drm/drm_plane.h | 3
-rw-r--r--  include/drm/gpu_scheduler.h | 3
-rw-r--r--  include/drm/ttm/ttm_resource.h | 12
-rw-r--r--  include/linux/blkdev.h | 3
-rw-r--r--  include/linux/bpf.h | 9
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  include/linux/dma-fence-array.h | 15
-rw-r--r--  include/linux/dma-fence-chain.h | 18
-rw-r--r--  include/linux/dma-fence.h | 38
-rw-r--r--  include/linux/dma-mapping.h | 8
-rw-r--r--  include/linux/dma-resv.h | 13
-rw-r--r--  include/linux/fb.h | 1
-rw-r--r--  include/linux/hyperv.h | 1
-rw-r--r--  include/linux/iosys-map.h | 218
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/nvme-tcp.h | 1
-rw-r--r--  include/linux/nvmem-provider.h | 4
-rw-r--r--  include/linux/sched/task.h | 4
-rw-r--r--  include/linux/slab.h | 3
-rw-r--r--  include/linux/string_helpers.h | 20
-rw-r--r--  include/linux/trace_events.h | 22
-rw-r--r--  include/net/addrconf.h | 2
-rw-r--r--  include/net/bond_3ad.h | 2
-rw-r--r--  include/net/checksum.h | 52
-rw-r--r--  include/net/dsa.h | 1
-rw-r--r--  include/net/ip6_fib.h | 10
-rw-r--r--  include/net/ipv6.h | 5
-rw-r--r--  include/net/netfilter/nf_tables.h | 2
-rw-r--r--  include/net/netfilter/nf_tables_offload.h | 2
-rw-r--r--  include/net/netns/ipv6.h | 3
-rw-r--r--  include/net/sock.h | 4
-rw-r--r--  include/uapi/drm/drm_mode.h | 88
-rw-r--r--  include/uapi/linux/kvm.h | 1
-rw-r--r--  kernel/bpf/btf.c | 5
-rw-r--r--  kernel/bpf/helpers.c | 2
-rw-r--r--  kernel/bpf/syscall.c | 3
-rw-r--r--  kernel/cgroup/cgroup-v1.c | 6
-rw-r--r--  kernel/cgroup/cgroup.c | 14
-rw-r--r--  kernel/cgroup/cpuset.c | 12
-rw-r--r--  kernel/cred.c | 9
-rw-r--r--  kernel/dma/swiotlb.c | 3
-rw-r--r--  kernel/fork.c | 30
-rw-r--r--  kernel/locking/lockdep.c | 4
-rw-r--r--  kernel/module_decompress.c | 2
-rw-r--r--  kernel/sched/core.c | 34
-rw-r--r--  kernel/sys.c | 20
-rw-r--r--  kernel/trace/ftrace.c | 1
-rw-r--r--  kernel/trace/trace.c | 10
-rw-r--r--  kernel/trace/trace.h | 1
-rw-r--r--  kernel/trace/trace_eprobe.c | 16
-rw-r--r--  kernel/trace/trace_events_trigger.c | 73
-rw-r--r--  kernel/trace/trace_osnoise.c | 53
-rw-r--r--  kernel/trace/trace_probe.c | 10
-rw-r--r--  kernel/trace/trace_probe.h | 1
-rw-r--r--  kernel/trace/trace_selftest.c | 6
-rw-r--r--  kernel/ucount.c | 3
-rw-r--r--  lib/iov_iter.c | 2
-rw-r--r--  lib/test_kasan.c | 5
-rw-r--r--  mm/hugetlb.c | 11
-rw-r--r--  mm/memblock.c | 10
-rw-r--r--  mm/mmap.c | 1
-rw-r--r--  mm/mprotect.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/can/j1939/transport.c | 2
-rw-r--r--  net/core/drop_monitor.c | 11
-rw-r--r--  net/core/filter.c | 3
-rw-r--r--  net/core/net-sysfs.c | 2
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/core/skbuff.c | 6
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/dsa/dsa.c | 1
-rw-r--r--  net/dsa/dsa_priv.h | 1
-rw-r--r--  net/dsa/master.c | 7
-rw-r--r--  net/dsa/port.c | 29
-rw-r--r--  net/dsa/tag_lan9303.c | 21
-rw-r--r--  net/ipv4/af_inet.c | 5
-rw-r--r--  net/ipv4/fib_frontend.c | 3
-rw-r--r--  net/ipv4/fib_lookup.h | 7
-rw-r--r--  net/ipv4/fib_semantics.c | 6
-rw-r--r--  net/ipv4/fib_trie.c | 22
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ping.c | 10
-rw-r--r--  net/ipv4/route.c | 4
-rw-r--r--  net/ipv4/udp_tunnel_nic.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 6
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 4
-rw-r--r--  net/ipv6/ip6_offload.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/ipv6/route.c | 19
-rw-r--r--  net/mac80211/mlme.c | 29
-rw-r--r--  net/mctp/route.c | 11
-rw-r--r--  net/mptcp/mib.c | 2
-rw-r--r--  net/mptcp/mib.h | 2
-rw-r--r--  net/mptcp/pm.c | 8
-rw-r--r--  net/mptcp/pm_netlink.c | 29
-rw-r--r--  net/netfilter/nf_tables_api.c | 16
-rw-r--r--  net/netfilter/nf_tables_offload.c | 3
-rw-r--r--  net/netfilter/nft_dup_netdev.c | 6
-rw-r--r--  net/netfilter/nft_fwd_netdev.c | 6
-rw-r--r--  net/netfilter/nft_immediate.c | 12
-rw-r--r--  net/netfilter/nft_limit.c | 18
-rw-r--r--  net/netfilter/nft_synproxy.c | 4
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/openvswitch/actions.c | 46
-rw-r--r--  net/sched/act_api.c | 15
-rw-r--r--  net/sched/act_ct.c | 5
-rw-r--r--  net/sched/cls_api.c | 6
-rw-r--r--  net/sched/sch_api.c | 22
-rw-r--r--  net/sched/sch_generic.c | 29
-rw-r--r--  net/smc/af_smc.c | 10
-rw-r--r--  net/smc/smc_pnet.c | 42
-rw-r--r--  net/smc/smc_pnet.h | 2
-rw-r--r--  net/socket.c | 4
-rw-r--r--  net/tipc/crypto.c | 2
-rw-r--r--  net/tipc/name_table.c | 2
-rw-r--r--  net/tipc/node.c | 13
-rw-r--r--  net/tipc/socket.c | 2
-rw-r--r--  net/vmw_vsock/af_vsock.c | 1
-rw-r--r--  net/wireless/core.c | 17
-rw-r--r--  security/selinux/ima.c | 4
-rw-r--r--  security/tomoyo/audit.c | 2
-rw-r--r--  security/tomoyo/common.c | 19
-rw-r--r--  security/tomoyo/common.h | 1
-rw-r--r--  sound/core/memalloc.c | 15
-rw-r--r--  sound/pci/hda/hda_intel.c | 6
-rw-r--r--  sound/pci/hda/patch_realtek.c | 40
-rw-r--r--  sound/soc/amd/acp/acp-mach.h | 1
-rw-r--r--  sound/soc/amd/acp/acp-sof-mach.c | 4
-rw-r--r--  sound/soc/codecs/rt5668.c | 12
-rw-r--r--  sound/soc/codecs/rt5682.c | 12
-rw-r--r--  sound/soc/codecs/rt5682s.c | 12
-rw-r--r--  sound/soc/codecs/tas2770.c | 7
-rw-r--r--  sound/soc/codecs/wm_adsp.c | 3
-rw-r--r--  sound/soc/intel/skylake/skl.c | 1
-rw-r--r--  sound/soc/qcom/lpass-platform.c | 8
-rw-r--r--  sound/soc/soc-ops.c | 41
-rw-r--r--  sound/soc/sof/intel/hda.c | 1
-rw-r--r--  sound/usb/implicit.c | 4
-rw-r--r--  sound/usb/mixer.c | 9
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  tools/cgroup/memcg_slabinfo.py | 30
-rw-r--r--  tools/include/uapi/linux/kvm.h | 1
-rw-r--r--  tools/include/uapi/linux/perf_event.h | 2
-rw-r--r--  tools/lib/perf/include/internal/cpumap.h | 6
-rw-r--r--  tools/lib/perf/include/perf/cpumap.h | 5
-rw-r--r--  tools/lib/perf/libperf.map | 1
-rw-r--r--  tools/lib/perf/tests/test-cpumap.c | 11
-rw-r--r--  tools/lib/perf/tests/test-evlist.c | 5
-rw-r--r--  tools/lib/subcmd/subcmd-util.h | 11
-rw-r--r--  tools/perf/builtin-script.c | 2
-rw-r--r--  tools/perf/builtin-trace.c | 23
-rw-r--r--  tools/perf/tests/attr/README | 2
-rw-r--r--  tools/perf/tests/attr/test-record-graph-default | 2
-rw-r--r--  tools/perf/tests/attr/test-record-graph-default-aarch64 | 9
-rw-r--r--  tools/perf/tests/attr/test-record-graph-fp | 2
-rw-r--r--  tools/perf/tests/attr/test-record-graph-fp-aarch64 | 9
-rw-r--r--  tools/perf/tests/sigtrap.c | 15
-rw-r--r--  tools/perf/util/bpf-loader.c | 3
-rw-r--r--  tools/perf/util/cs-etm.c | 16
-rw-r--r--  tools/perf/util/data.c | 7
-rw-r--r--  tools/perf/util/evlist-hybrid.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/timer_crash.c | 32
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_kern.h | 26
-rw-r--r--  tools/testing/selftests/bpf/progs/timer_crash.c | 54
-rw-r--r--  tools/testing/selftests/exec/Makefile | 4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc | 2
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 1
-rw-r--r--  tools/testing/selftests/memfd/memfd_test.c | 1
-rw-r--r--  tools/testing/selftests/mount_setattr/mount_setattr_test.c | 4
-rwxr-xr-x  tools/testing/selftests/net/mptcp/diag.sh | 44
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 32
-rw-r--r--  tools/testing/selftests/netfilter/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_concat_range.sh | 2
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_fib.sh | 1
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_synproxy.sh | 117
-rw-r--r--  tools/testing/selftests/seccomp/Makefile | 2
-rw-r--r--  tools/testing/selftests/vm/map_fixed_noreplace.c | 49
-rw-r--r--  tools/tracing/rtla/src/osnoise.c | 2
-rw-r--r--  tools/tracing/rtla/src/osnoise_hist.c | 10
-rw-r--r--  tools/tracing/rtla/src/osnoise_top.c | 1
-rw-r--r--  tools/tracing/rtla/src/timerlat_hist.c | 10
-rw-r--r--  virt/kvm/kvm_main.c | 4
911 files changed, 22568 insertions, 10280 deletions
diff --git a/.mailmap b/.mailmap
index 8cd44b0c6579..10ee1103c823 100644
--- a/.mailmap
+++ b/.mailmap
@@ -333,6 +333,9 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
+Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
+Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index fde21d900420..859501366777 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -468,6 +468,7 @@ Description:
auto: Charge normally, respect thresholds
inhibit-charge: Do not charge while AC is attached
force-discharge: Force discharge while AC is attached
+ ================ ====================================
What: /sys/class/power_supply/<supply_name>/technology
Date: May 2007
diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst
index 1887d92e8e92..17706dc91ec9 100644
--- a/Documentation/core-api/dma-attributes.rst
+++ b/Documentation/core-api/dma-attributes.rst
@@ -130,3 +130,11 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
+
+DMA_ATTR_OVERWRITE
+------------------
+
+This is a hint to the DMA-mapping subsystem that the device is expected to
+overwrite the entire mapped size, thus the caller does not require any of the
+previous buffer contents to be preserved. This allows bounce-buffering
+implementations to optimise DMA_FROM_DEVICE transfers.
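
For illustration, a minimal sketch of how a driver might pass this attribute when mapping a receive buffer the hardware is known to fill completely; the device, buffer, and helper names here are hypothetical, not taken from the patch:

	#include <linux/dma-mapping.h>

	/* Hypothetical helper: map a buffer the device will overwrite in
	 * full, so a bounce-buffer implementation (e.g. swiotlb) need not
	 * copy the stale contents into the bounce slot first. */
	static int foo_map_rx_buffer(struct device *dev, void *buf, size_t len,
				     dma_addr_t *dma)
	{
		*dma = dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
					    DMA_ATTR_OVERWRITE);
		return dma_mapping_error(dev, *dma) ? -ENOMEM : 0;
	}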
diff --git a/Documentation/cpu-freq/cpu-drivers.rst b/Documentation/cpu-freq/cpu-drivers.rst
index 3b32336a7803..d84ededb66f9 100644
--- a/Documentation/cpu-freq/cpu-drivers.rst
+++ b/Documentation/cpu-freq/cpu-drivers.rst
@@ -75,6 +75,9 @@ And optionally
.resume - A pointer to a per-policy resume function which is called
with interrupts disabled and _before_ the governor is started again.
+ .ready - A pointer to a per-policy ready function which is called after
+ the policy is fully initialized.
+
.attr - A pointer to a NULL-terminated list of "struct freq_attr" which
allow to export values to sysfs.
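
A minimal sketch of where the new callback sits in a driver, assuming a hypothetical "foo" cpufreq driver (the other mandatory callbacks are elided from the sketch):

	#include <linux/cpufreq.h>

	/* Called once the policy is fully initialized; a typical use is
	 * registering a cooling device that needs the complete policy. */
	static void foo_cpufreq_ready(struct cpufreq_policy *policy)
	{
	}

	static struct cpufreq_driver foo_cpufreq_driver = {
		.name	= "foo-cpufreq",
		/* .init, .verify, .target_index, etc. omitted here */
		.ready	= foo_cpufreq_ready,
	};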
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
index 1d3e88daca04..0d38d6fe3983 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
@@ -83,6 +83,9 @@ properties:
type: boolean
description: let the driver enable audio HDMI codec function or not.
+ aux-bus:
+ $ref: /schemas/display/dp-aux-bus.yaml#
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -167,5 +170,19 @@ examples:
};
};
};
+
+ aux-bus {
+ panel {
+ compatible = "innolux,n125hce-gn1";
+ power-supply = <&pp3300_disp_x>;
+ backlight = <&backlight_lcd0>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&anx7625_out>;
+ };
+ };
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml b/Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml
new file mode 100644
index 000000000000..b8219eab4475
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ingenic,jz4780-hdmi.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ingenic,jz4780-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for Ingenic JZ4780 HDMI Transmitter
+
+maintainers:
+ - H. Nikolaus Schaller <hns@goldelico.com>
+
+description: |
+ The HDMI Transmitter in the Ingenic JZ4780 is a Synopsys DesignWare HDMI 1.4
+ TX controller IP with accompanying PHY IP.
+
+allOf:
+ - $ref: synopsys,dw-hdmi.yaml#
+
+properties:
+ compatible:
+ const: ingenic,jz4780-dw-hdmi
+
+ reg-io-width:
+ const: 4
+
+ clocks:
+ maxItems: 2
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Input from LCD controller output.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Link to the HDMI connector.
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - ports
+ - reg-io-width
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/ingenic,jz4780-cgu.h>
+
+ hdmi: hdmi@10180000 {
+ compatible = "ingenic,jz4780-dw-hdmi";
+ reg = <0x10180000 0x8000>;
+ reg-io-width = <4>;
+ ddc-i2c-bus = <&i2c4>;
+ interrupt-parent = <&intc>;
+ interrupts = <3>;
+ clocks = <&cgu JZ4780_CLK_AHB0>, <&cgu JZ4780_CLK_HDMI>;
+ clock-names = "iahb", "isfr";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ hdmi_in: port@0 {
+ reg = <0>;
+ dw_hdmi_in: endpoint {
+ remote-endpoint = <&jz4780_lcd_out>;
+ };
+ };
+ hdmi_out: port@1 {
+ reg = <1>;
+ dw_hdmi_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 56b4c55f6f1b..3a8614e0f627 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -68,7 +68,7 @@ properties:
- vesa-24
description: |
The color signals mapping order. See details in
- Documentation/devicetree/bindings/display/panel/lvds.yaml
+ Documentation/devicetree/bindings/display/lvds.yaml
port@1:
$ref: /schemas/graph.yaml#/properties/port
diff --git a/Documentation/devicetree/bindings/display/panel/lvds.yaml b/Documentation/devicetree/bindings/display/lvds.yaml
index 49460c9dceea..7cd2ce7e9c33 100644
--- a/Documentation/devicetree/bindings/display/panel/lvds.yaml
+++ b/Documentation/devicetree/bindings/display/lvds.yaml
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/display/panel/lvds.yaml#
+$id: http://devicetree.org/schemas/display/lvds.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: LVDS Display Panel
+title: LVDS Display Common Properties
maintainers:
- Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
@@ -13,8 +13,8 @@ maintainers:
description: |+
LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple
incompatible data link layers have been used over time to transmit image data
- to LVDS panels. This bindings supports display panels compatible with the
- following specifications.
+ to LVDS devices. This bindings supports devices compatible with the following
+ specifications.
[JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February
1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA)
@@ -26,18 +26,7 @@ description: |+
Device compatible with those specifications have been marketed under the
FPD-Link and FlatLink brands.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
- compatible:
- contains:
- const: panel-lvds
- description:
- Shall contain "panel-lvds" in addition to a mandatory panel-specific
- compatible string defined in individual panel bindings. The "panel-lvds"
- value shall never be used on its own.
-
data-mapping:
enum:
- jeida-18
@@ -96,22 +85,6 @@ properties:
If set, reverse the bit order described in the data mappings below on all
data lanes, transmitting bits for slots 6 to 0 instead of 0 to 6.
- port: true
- ports: true
-
-required:
- - compatible
- - data-mapping
- - width-mm
- - height-mm
- - panel-timing
-
-oneOf:
- - required:
- - port
- - required:
- - ports
-
additionalProperties: true
...
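
To illustrate how these common LVDS properties are consumed, here is a hedged device tree sketch of a panel node; the panel compatible comes from the bindings touched above, but the timing numbers are generic 1024x768@60 VESA values used for illustration, not datasheet figures:

	panel {
		compatible = "mitsubishi,aa104xd12", "panel-lvds";
		data-mapping = "jeida-24";
		width-mm = <211>;
		height-mm = <158>;

		panel-timing {
			/* illustrative XGA timings, not from the datasheet */
			clock-frequency = <65000000>;
			hactive = <1024>;
			vactive = <768>;
			hfront-porch = <24>;
			hsync-len = <136>;
			hback-porch = <160>;
			vfront-porch = <3>;
			vsync-len = <6>;
			vback-porch = <29>;
		};

		port {
			panel_in: endpoint {
				remote-endpoint = <&lvds_encoder_out>;
			};
		};
	};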
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
new file mode 100644
index 000000000000..225f9dd726d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,aal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display adaptive ambient light processor
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display adaptive ambient light processor, namely AAL,
+ is responsible for backlight power saving and sunlight visibility improving.
+ AAL device node must be siblings to the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml
+ for details.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8173-disp-aal
+ - items:
+ - enum:
+ - mediatek,mt2712-disp-aal
+ - mediatek,mt8183-disp-aal
+ - mediatek,mt8192-disp-aal
+ - mediatek,mt8195-disp-aal
+ - enum:
+ - mediatek,mt8173-disp-aal
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: AAL Clock
+
+ mediatek,gce-client-reg:
+ description: The register of client driver can be configured by gce with
+ 4 arguments defined in this property, such as phandle of gce, subsys id,
+ register offset and size. Each GCE subsys id is mapping to a client
+ defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ aal@14015000 {
+ compatible = "mediatek,mt8173-disp-aal";
+ reg = <0 0x14015000 0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_AAL>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x5000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
new file mode 100644
index 000000000000..6894b6999412
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,ccorr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display color correction
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display color correction, namely CCORR, reproduces correct color
+ on panels with different color gamut.
+ CCORR device node must be siblings to the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml
+ for details.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8183-disp-ccorr
+ - items:
+ - const: mediatek,mt8192-disp-ccorr
+ - items:
+ - enum:
+ - mediatek,mt8195-disp-ccorr
+ - enum:
+ - mediatek,mt8192-disp-ccorr
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: CCORR Clock
+
+ mediatek,gce-client-reg:
+ description: The register of client driver can be configured by gce with
+ 4 arguments defined in this property, such as phandle of gce, subsys id,
+ register offset and size. Each GCE subsys id is mapping to a client
+ defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ ccorr0: ccorr@1400f000 {
+ compatible = "mediatek,mt8183-disp-ccorr";
+ reg = <0 0x1400f000 0 0x1000>;
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_DISP_CCORR0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml
new file mode 100644
index 000000000000..bc83155b3b4c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,color.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,color.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display color processor
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display color processor, namely COLOR, provides hue, luma and
+ saturation adjustments to improve picture quality and to make one panel
+ resemble another in its output characteristics.
+ The COLOR device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt2701-disp-color
+ - items:
+ - const: mediatek,mt8167-disp-color
+ - items:
+ - const: mediatek,mt8173-disp-color
+ - items:
+ - enum:
+ - mediatek,mt7623-disp-color
+ - mediatek,mt2712-disp-color
+ - enum:
+ - mediatek,mt2701-disp-color
+ - items:
+ - enum:
+ - mediatek,mt8183-disp-color
+ - mediatek,mt8192-disp-color
+ - mediatek,mt8195-disp-color
+ - enum:
+ - mediatek,mt8173-disp-color
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: COLOR Clock
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ color0: color@14013000 {
+ compatible = "mediatek,mt8173-disp-color";
+ reg = <0 0x14013000 0 0x1000>;
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_COLOR0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x3000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
deleted file mode 100644
index 78044c340e20..000000000000
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ /dev/null
@@ -1,219 +0,0 @@
-Mediatek display subsystem
-==========================
-
-The Mediatek display subsystem consists of various DISP function blocks in the
-MMSYS register space. The connections between them can be configured by output
-and input selectors in the MMSYS_CONFIG register space. Pixel clock and start
-of frame signal are distributed to the other function blocks by a DISP_MUTEX
-function block.
-
-All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
-For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
-
-DISP function blocks
-====================
-
-A display stream starts at a source function block that reads pixel data from
-memory and ends with a sink function block that drives pixels on a display
-interface, or writes pixels back to memory. All DISP function blocks have
-their own register space, interrupt, and clock gate. The blocks that can
-access memory additionally have to list the IOMMU and local arbiter they are
-connected to.
-
-For a description of the display interface sink function blocks, see
-Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt and
-Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml.
-
-Required properties (all function blocks):
-- compatible: "mediatek,<chip>-disp-<function>", one of
- "mediatek,<chip>-disp-ovl" - overlay (4 layers, blending, csc)
- "mediatek,<chip>-disp-ovl-2l" - overlay (2 layers, blending, csc)
- "mediatek,<chip>-disp-rdma" - read DMA / line buffer
- "mediatek,<chip>-disp-wdma" - write DMA
- "mediatek,<chip>-disp-ccorr" - color correction
- "mediatek,<chip>-disp-color" - color processor
- "mediatek,<chip>-disp-dither" - dither
- "mediatek,<chip>-disp-aal" - adaptive ambient light controller
- "mediatek,<chip>-disp-gamma" - gamma correction
- "mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
- "mediatek,<chip>-disp-postmask" - control round corner for display frame
- "mediatek,<chip>-disp-split" - split stream to two encoders
- "mediatek,<chip>-disp-ufoe" - data compression engine
- "mediatek,<chip>-dsi" - DSI controller, see mediatek,dsi.txt
- "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
- "mediatek,<chip>-disp-mutex" - display mutex
- "mediatek,<chip>-disp-od" - overdrive
- the supported chips are mt2701, mt7623, mt2712, mt8167, mt8173, mt8183 and mt8192.
-- reg: Physical base address and length of the function block register space
-- interrupts: The interrupt signal from the function block (required, except for
- merge and split function blocks).
-- clocks: device clocks
- See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- For most function blocks this is just a single clock input. Only the DSI and
- DPI controller nodes have multiple clock inputs. These are documented in
- mediatek,dsi.txt and mediatek,dpi.txt, respectively.
- An exception is that the mt8183 mutex is always free running with no clocks property.
-
-Required properties (DMA function blocks):
-- compatible: Should be one of
- "mediatek,<chip>-disp-ovl"
- "mediatek,<chip>-disp-rdma"
- "mediatek,<chip>-disp-wdma"
- the supported chips are mt2701, mt8167 and mt8173.
-- larb: Should contain a phandle pointing to the local arbiter device as defined
- in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
-- iommus: Should point to the respective IOMMU block with master port as
- argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
- for details.
-
-Optional properties (RDMA function blocks):
-- mediatek,rdma-fifo-size: rdma fifo size may be different even in same SOC, add this
- property to the corresponding rdma
- the value is the Max value which defined in hardware data sheet.
- mediatek,rdma-fifo-size of mt8173-rdma0 is 8K
- mediatek,rdma-fifo-size of mt8183-rdma0 is 5K
- mediatek,rdma-fifo-size of mt8183-rdma1 is 2K
-
-Examples:
-
-mmsys: clock-controller@14000000 {
- compatible = "mediatek,mt8173-mmsys", "syscon";
- reg = <0 0x14000000 0 0x1000>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- #clock-cells = <1>;
-};
-
-ovl0: ovl@1400c000 {
- compatible = "mediatek,mt8173-disp-ovl";
- reg = <0 0x1400c000 0 0x1000>;
- interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_OVL0>;
- iommus = <&iommu M4U_PORT_DISP_OVL0>;
- mediatek,larb = <&larb0>;
-};
-
-ovl1: ovl@1400d000 {
- compatible = "mediatek,mt8173-disp-ovl";
- reg = <0 0x1400d000 0 0x1000>;
- interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_OVL1>;
- iommus = <&iommu M4U_PORT_DISP_OVL1>;
- mediatek,larb = <&larb4>;
-};
-
-rdma0: rdma@1400e000 {
- compatible = "mediatek,mt8173-disp-rdma";
- reg = <0 0x1400e000 0 0x1000>;
- interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_RDMA0>;
- iommus = <&iommu M4U_PORT_DISP_RDMA0>;
- mediatek,larb = <&larb0>;
- mediatek,rdma-fifosize = <8192>;
-};
-
-rdma1: rdma@1400f000 {
- compatible = "mediatek,mt8173-disp-rdma";
- reg = <0 0x1400f000 0 0x1000>;
- interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_RDMA1>;
- iommus = <&iommu M4U_PORT_DISP_RDMA1>;
- mediatek,larb = <&larb4>;
-};
-
-rdma2: rdma@14010000 {
- compatible = "mediatek,mt8173-disp-rdma";
- reg = <0 0x14010000 0 0x1000>;
- interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_RDMA2>;
- iommus = <&iommu M4U_PORT_DISP_RDMA2>;
- mediatek,larb = <&larb4>;
-};
-
-wdma0: wdma@14011000 {
- compatible = "mediatek,mt8173-disp-wdma";
- reg = <0 0x14011000 0 0x1000>;
- interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_WDMA0>;
- iommus = <&iommu M4U_PORT_DISP_WDMA0>;
- mediatek,larb = <&larb0>;
-};
-
-wdma1: wdma@14012000 {
- compatible = "mediatek,mt8173-disp-wdma";
- reg = <0 0x14012000 0 0x1000>;
- interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_WDMA1>;
- iommus = <&iommu M4U_PORT_DISP_WDMA1>;
- mediatek,larb = <&larb4>;
-};
-
-color0: color@14013000 {
- compatible = "mediatek,mt8173-disp-color";
- reg = <0 0x14013000 0 0x1000>;
- interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_COLOR0>;
-};
-
-color1: color@14014000 {
- compatible = "mediatek,mt8173-disp-color";
- reg = <0 0x14014000 0 0x1000>;
- interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_COLOR1>;
-};
-
-aal@14015000 {
- compatible = "mediatek,mt8173-disp-aal";
- reg = <0 0x14015000 0 0x1000>;
- interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_AAL>;
-};
-
-gamma@14016000 {
- compatible = "mediatek,mt8173-disp-gamma";
- reg = <0 0x14016000 0 0x1000>;
- interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_GAMMA>;
-};
-
-ufoe@1401a000 {
- compatible = "mediatek,mt8173-disp-ufoe";
- reg = <0 0x1401a000 0 0x1000>;
- interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_UFOE>;
-};
-
-dsi0: dsi@1401b000 {
- /* See mediatek,dsi.txt for details */
-};
-
-dpi0: dpi@1401d000 {
- /* See mediatek,dpi.txt for details */
-};
-
-mutex: mutex@14020000 {
- compatible = "mediatek,mt8173-disp-mutex";
- reg = <0 0x14020000 0 0x1000>;
- interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_MUTEX_32K>;
-};
-
-od@14023000 {
- compatible = "mediatek,mt8173-disp-od";
- reg = <0 0x14023000 0 0x1000>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_OD>;
-};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml
new file mode 100644
index 000000000000..9d89297f5f1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dither.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,dither.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display dither processor
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display dither processor, namely DITHER, approximates unavailable
+ colors by mixing and matching the colors that are available, mimicking
+ the unavailable ones.
+ The DITHER device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8183-disp-dither
+ - items:
+ - enum:
+ - mediatek,mt8192-disp-dither
+ - mediatek,mt8195-disp-dither
+ - enum:
+ - mediatek,mt8183-disp-dither
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: DITHER Clock
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ dither0: dither@14012000 {
+ compatible = "mediatek,mt8183-disp-dither";
+ reg = <0 0x14012000 0 0x1000>;
+ interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_DISP_DITHER0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml
new file mode 100644
index 000000000000..1ec083eff824
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,dsc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display DSC controller
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ The DSC standard is a specification of the algorithms used for
+ compressing and decompressing image display streams, including
+ the specification of the syntax and semantics of the compressed
+ video bit stream. DSC is designed for real-time systems with
+ real-time compression, transmission, decompression and display.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8195-disp-dsc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: DSC Wrapper Clock
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ mediatek,gce-client-reg:
+ description:
+ The register of the client driver can be configured by the GCE with 4
+ arguments defined in this property: phandle of the GCE, subsys id,
+ register offset and size.
+ Each subsys id maps to a base address of the display function block's
+ registers, which is defined in the gce header
+ include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ dsc0: disp_dsc_wrap@1c009000 {
+ compatible = "mediatek,mt8195-disp-dsc";
+ reg = <0 0x1c009000 0 0x1000>;
+ interrupts = <GIC_SPI 645 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
+ clocks = <&vdosys0 CLK_VDO0_DSC_WRAP0>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1c00XXXX 0x9000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ethdr.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ethdr.yaml
new file mode 100644
index 000000000000..131eed5eeeb7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ethdr.yaml
@@ -0,0 +1,147 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,ethdr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek ETHDR Device Tree Bindings
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ ETHDR is designed for HDR video and graphics conversion in the external
+ display path. It handles multiple HDR input types, performs tone mapping
+ and color space/color format conversion, combines the different layers,
+ and outputs the required HDR or SDR signal to the subsequent display path.
+ The engine is composed of two video frontends, two graphic frontends, one
+ video backend and a mixer. ETHDR has two DMA function blocks, DS and ADL,
+ which read the pre-programmed registers from DRAM and apply them to the
+ hardware in the v-blanking period.
+
+properties:
+ compatible:
+ items:
+ - const: mediatek,mt8195-disp-ethdr
+ reg:
+ maxItems: 7
+ reg-names:
+ items:
+ - const: mixer
+ - const: vdo_fe0
+ - const: vdo_fe1
+ - const: gfx_fe0
+ - const: gfx_fe1
+ - const: vdo_be
+ - const: adl_ds
+ interrupts:
+ minItems: 1
+ iommus:
+ description: This property belongs to the DMA function blocks (DS and
+ ADL). It should point to the respective IOMMU block with the master
+ port as argument, see
+ Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for
+ details.
+ minItems: 1
+ maxItems: 2
+ clocks:
+ items:
+ - description: mixer clock
+ - description: video frontend 0 clock
+ - description: video frontend 1 clock
+ - description: graphic frontend 0 clock
+ - description: graphic frontend 1 clock
+ - description: video backend clock
+ - description: autodownload and menuload clock
+ - description: video frontend 0 async clock
+ - description: video frontend 1 async clock
+ - description: graphic frontend 0 async clock
+ - description: graphic frontend 1 async clock
+ - description: video backend async clock
+ - description: ethdr top clock
+ clock-names:
+ items:
+ - const: mixer
+ - const: vdo_fe0
+ - const: vdo_fe1
+ - const: gfx_fe0
+ - const: gfx_fe1
+ - const: vdo_be
+ - const: adl_ds
+ - const: vdo_fe0_async
+ - const: vdo_fe1_async
+ - const: gfx_fe0_async
+ - const: gfx_fe1_async
+ - const: vdo_be_async
+ - const: ethdr_top
+ power-domains:
+ maxItems: 1
+ resets:
+ maxItems: 5
+ mediatek,gce-client-reg:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: The registers of the display function blocks to be set by
+ the GCE. There are 4 arguments in this property; gce node, subsys id,
+ offset and register size. The subsys id is defined in the gce header
+ include/dt-bindings/gce/<chip>-gce.h of each chip, mapping to the
+ registers of the display function block.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+
+ disp_ethdr@1c114000 {
+ compatible = "mediatek,mt8195-disp-ethdr";
+ reg = <0 0x1c114000 0 0x1000>,
+ <0 0x1c115000 0 0x1000>,
+ <0 0x1c117000 0 0x1000>,
+ <0 0x1c119000 0 0x1000>,
+ <0 0x1c11a000 0 0x1000>,
+ <0 0x1c11b000 0 0x1000>,
+ <0 0x1c11c000 0 0x1000>;
+ reg-names = "mixer", "vdo_fe0", "vdo_fe1", "gfx_fe0", "gfx_fe1",
+ "vdo_be", "adl_ds";
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c11XXXX 0x4000 0x1000>,
+ <&gce0 SUBSYS_1c11XXXX 0x5000 0x1000>,
+ <&gce0 SUBSYS_1c11XXXX 0x7000 0x1000>,
+ <&gce0 SUBSYS_1c11XXXX 0x9000 0x1000>,
+ <&gce0 SUBSYS_1c11XXXX 0xa000 0x1000>,
+ <&gce0 SUBSYS_1c11XXXX 0xb000 0x1000>,
+ <&gce0 SUBSYS_1c11XXXX 0xc000 0x1000>;
+ clocks = <&vdosys1 CLK_VDO1_DISP_MIXER>,
+ <&vdosys1 CLK_VDO1_HDR_VDO_FE0>,
+ <&vdosys1 CLK_VDO1_HDR_VDO_FE1>,
+ <&vdosys1 CLK_VDO1_HDR_GFX_FE0>,
+ <&vdosys1 CLK_VDO1_HDR_GFX_FE1>,
+ <&vdosys1 CLK_VDO1_HDR_VDO_BE>,
+ <&vdosys1 CLK_VDO1_26M_SLOW>,
+ <&vdosys1 CLK_VDO1_HDR_VDO_FE0_DL_ASYNC>,
+ <&vdosys1 CLK_VDO1_HDR_VDO_FE1_DL_ASYNC>,
+ <&vdosys1 CLK_VDO1_HDR_GFX_FE0_DL_ASYNC>,
+ <&vdosys1 CLK_VDO1_HDR_GFX_FE1_DL_ASYNC>,
+ <&vdosys1 CLK_VDO1_HDR_VDO_BE_DL_ASYNC>,
+ <&topckgen CLK_TOP_ETHDR_SEL>;
+ clock-names = "mixer", "vdo_fe0", "vdo_fe1", "gfx_fe0", "gfx_fe1",
+ "vdo_be", "adl_ds", "vdo_fe0_async", "vdo_fe1_async",
+ "gfx_fe0_async", "gfx_fe1_async","vdo_be_async",
+ "ethdr_top";
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vpp M4U_PORT_L3_HDR_DS>,
+ <&iommu_vpp M4U_PORT_L3_HDR_ADL>;
+ interrupts = <GIC_SPI 517 IRQ_TYPE_LEVEL_HIGH 0>; /* disp mixer */
+ resets = <&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0_DL_ASYNC>,
+ <&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1_DL_ASYNC>,
+ <&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0_DL_ASYNC>,
+ <&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1_DL_ASYNC>,
+ <&vdosys1 MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE_DL_ASYNC>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
new file mode 100644
index 000000000000..247baad147b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,gamma.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,gamma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display gamma correction
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display gamma correction, namely GAMMA, provides a nonlinear
+ operation used to adjust luminance in the display system.
+ The GAMMA device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8173-disp-gamma
+ - items:
+ - const: mediatek,mt8183-disp-gamma
+ - items:
+ - enum:
+ - mediatek,mt8192-disp-gamma
+ - mediatek,mt8195-disp-gamma
+ - enum:
+ - mediatek,mt8183-disp-gamma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: GAMMA Clock
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ gamma@14016000 {
+ compatible = "mediatek,mt8173-disp-gamma";
+ reg = <0 0x14016000 0 0x1000>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_GAMMA>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x6000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml
new file mode 100644
index 000000000000..d5cd69b7f501
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,merge.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,merge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display merge
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display merge, namely MERGE, is used to merge two slice-per-line
+ inputs into one side-by-side output.
+ The MERGE device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8173-disp-merge
+ - items:
+ - const: mediatek,mt8195-disp-merge
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ maxItems: 2
+ items:
+ - description: MERGE Clock
+ - description: MERGE Async Clock
+ Used to control the synchronization between MERGE and other display
+ function blocks across clock domains.
+
+ clock-names:
+ maxItems: 2
+ items:
+ - const: merge
+ - const: merge_async
+
+ mediatek,merge-fifo-en:
+ description:
+ The merge fifo setting mainly provides a display latency buffer to
+ ensure that the back-end panel display data will not underrun; a
+ little more data is kept in the fifo for this purpose.
+ According to the merge fifo settings, when the water level is
+ detected to be insufficient, RDMA is triggered to send the ultra
+ and preultra commands to SMI to speed up the data rate (boolean
+ usage is sketched after the examples below).
+ type: boolean
+
+ mediatek,merge-mute:
+ description: Support the mute function, which mutes the content of the
+ merge output.
+ type: boolean
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+ resets:
+ description: Reset controller, see
+ Documentation/devicetree/bindings/reset/reset.txt for details.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ merge@14017000 {
+ compatible = "mediatek,mt8173-disp-merge";
+ reg = <0 0x14017000 0 0x1000>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_MERGE>;
+ };
+
+ merge5: disp_vpp_merge5@1c110000 {
+ compatible = "mediatek,mt8195-disp-merge";
+ reg = <0 0x1c110000 0 0x1000>;
+ interrupts = <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&vdosys1 CLK_VDO1_VPP_MERGE4>,
+ <&vdosys1 CLK_VDO1_MERGE4_DL_ASYNC>;
+ clock-names = "merge","merge_async";
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1c11XXXX 0x0000 0x1000>;
+ mediatek,merge-fifo-en;
+ resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC>;
+ };
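Since mediatek,merge-fifo-en and mediatek,merge-mute are typed boolean, their presence alone enables the feature; no value cells are given. A minimal sketch reusing the node above:

  merge5: disp_vpp_merge5@1c110000 {
      /* ...properties as in the example above... */
      mediatek,merge-fifo-en;  /* present == latency buffer enabled */
      mediatek,merge-mute;     /* present == merge output muted */
  };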
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,mutex.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,mutex.yaml
new file mode 100644
index 000000000000..6eca525eced0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,mutex.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,mutex.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek mutex
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek mutex, namely MUTEX, is used to send the trigger signals, called
+ Start Of Frame (SOF) / End Of Frame (EOF), to each sub-module on the
+ display data path or MDP data path.
+ In some SoCs, such as mt2701, MUTEX can be a hardware mutex which protects
+ the shadow register.
+ The MUTEX device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt2701-disp-mutex
+ - items:
+ - const: mediatek,mt2712-disp-mutex
+ - items:
+ - const: mediatek,mt8167-disp-mutex
+ - items:
+ - const: mediatek,mt8173-disp-mutex
+ - items:
+ - const: mediatek,mt8183-disp-mutex
+ - items:
+ - const: mediatek,mt8192-disp-mutex
+ - items:
+ - const: mediatek,mt8195-disp-mutex
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: MUTEX Clock
+
+ mediatek,gce-events:
+ description:
+ The event ids mapping to the specific hardware event signals sent
+ to the GCE. The event ids are defined in the gce header
+ include/dt-bindings/gce/<chip>-gce.h of each chip.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ mutex: mutex@14020000 {
+ compatible = "mediatek,mt8173-disp-mutex";
+ reg = <0 0x14020000 0 0x1000>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_MUTEX_32K>;
+ mediatek,gce-events = <CMDQ_EVENT_MUTEX0_STREAM_EOF>,
+ <CMDQ_EVENT_MUTEX1_STREAM_EOF>;
+ };
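The removed text binding above noted that the mt8183 mutex is always free running with no clocks property. A sketch of a node under that assumption (the reg and interrupt values here are placeholders, not datasheet values), even though this schema as written still lists clocks as required:

  mutex: mutex@14016000 {
      compatible = "mediatek,mt8183-disp-mutex";
      reg = <0 0x14016000 0 0x1000>;                  /* placeholder */
      interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_LOW>;  /* placeholder */
      power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
      /* no clocks: free running per the old mediatek,disp.txt */
  };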
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,od.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,od.yaml
new file mode 100644
index 000000000000..7519db315217
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,od.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,od.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display overdrive
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display overdrive, namely OD, increases the transition values
+ of pixels between consecutive frames to make the liquid crystals of the
+ LCD rotate faster, shortening the panel's response time.
+ The OD device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt2712-disp-od
+ - items:
+ - const: mediatek,mt8173-disp-od
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: OD Clock
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ od@14023000 {
+ compatible = "mediatek,mt8173-disp-od";
+ reg = <0 0x14023000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_DISP_OD>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl-2l.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl-2l.yaml
new file mode 100644
index 000000000000..611a2dbdefa4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl-2l.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,ovl-2l.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display overlay 2 layer
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display overlay 2 layer, namely OVL-2L, provides two more layers
+ for OVL.
+ The OVL-2L device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8183-disp-ovl-2l
+ - items:
+ - const: mediatek,mt8192-disp-ovl-2l
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: OVL-2L Clock
+
+ iommus:
+ description:
+ This property should point to the respective IOMMU block with master port as argument,
+ see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
+
+ mediatek,larb:
+ description:
+ This property should contain a phandle pointing to the local arbiter devices defined in
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml.
+ It must be sorted according to the local arbiter index, like larb0, larb1, larb2...
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 32
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+ - iommus
+
+additionalProperties: false
+
+examples:
+ - |
+
+ ovl_2l0: ovl@14009000 {
+ compatible = "mediatek,mt8183-disp-ovl-2l";
+ reg = <0 0x14009000 0 0x1000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_DISP_OVL0_2L>;
+ iommus = <&iommu M4U_PORT_DISP_2L_OVL0_LARB0>;
+ mediatek,larb = <&larb0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x9000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml
new file mode 100644
index 000000000000..e71f79bc2dee
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,ovl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display overlay
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display overlay, namely OVL, performs alpha blending of the
+ layers it reads from memory.
+ The OVL device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt2701-disp-ovl
+ - items:
+ - const: mediatek,mt8173-disp-ovl
+ - items:
+ - const: mediatek,mt8183-disp-ovl
+ - items:
+ - const: mediatek,mt8192-disp-ovl
+ - items:
+ - enum:
+ - mediatek,mt7623-disp-ovl
+ - mediatek,mt2712-disp-ovl
+ - enum:
+ - mediatek,mt2701-disp-ovl
+ - items:
+ - enum:
+ - mediatek,mt8195-disp-ovl
+ - enum:
+ - mediatek,mt8183-disp-ovl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: OVL Clock
+
+ iommus:
+ description:
+ This property should point to the respective IOMMU block with master port as argument,
+ see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
+
+ mediatek,larb:
+ description:
+ This property should contain a phandle pointing to the local arbiter devices defined in
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml.
+ It must be sorted according to the local arbiter index, like larb0, larb1, larb2...
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 32
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+ - iommus
+
+additionalProperties: false
+
+examples:
+ - |
+
+ ovl0: ovl@1400c000 {
+ compatible = "mediatek,mt8173-disp-ovl";
+ reg = <0 0x1400c000 0 0x1000>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_OVL0>;
+ iommus = <&iommu M4U_PORT_DISP_OVL0>;
+ mediatek,larb = <&larb0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xc000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml
new file mode 100644
index 000000000000..6ac1da2e8871
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,postmask.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,postmask.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display postmask
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display postmask, namely POSTMASK, generates a rounded-corner
+ pattern for the display frame.
+ The POSTMASK device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8192-disp-postmask
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: POSTMASK Clock
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ postmask0: postmask@1400d000 {
+ compatible = "mediatek,mt8192-disp-postmask";
+ reg = <0 0x1400d000 0 0x1000>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&scpsys MT8192_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_DISP_POSTMASK0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xd000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml
new file mode 100644
index 000000000000..8ef821641672
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,rdma.yaml
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,rdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek Read Direct Memory Access
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ The Mediatek Read Direct Memory Access (RDMA) component reads data from
+ memory through DMA and provides it in real time to the back-end panel
+ driver, such as DSI, DPI and DP_INTF.
+ It contains one line buffer to store sufficient pixel data.
+ The RDMA device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt2701-disp-rdma
+ - items:
+ - const: mediatek,mt8173-disp-rdma
+ - items:
+ - const: mediatek,mt8183-disp-rdma
+ - items:
+ - const: mediatek,mt8195-disp-rdma
+ - items:
+ - enum:
+ - mediatek,mt7623-disp-rdma
+ - mediatek,mt2712-disp-rdma
+ - enum:
+ - mediatek,mt2701-disp-rdma
+ - items:
+ - enum:
+ - mediatek,mt8192-disp-rdma
+ - enum:
+ - mediatek,mt8183-disp-rdma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: RDMA Clock
+
+ iommus:
+ description:
+ This property should point to the respective IOMMU block with master port as argument,
+ see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
+
+ mediatek,larb:
+ description:
+ This property should contain a phandle pointing to the local arbiter devices defined in
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml.
+ It must be sorted according to the local arbiter index, like larb0, larb1, larb2...
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 32
+
+ mediatek,rdma-fifo-size:
+ description:
+ The RDMA fifo size may differ even within the same SoC, so add this
+ property to the corresponding RDMA.
+ The value is the maximum size defined in the hardware data sheet
+ (the mt8183-rdma1 value is sketched after the example below):
+ mediatek,rdma-fifo-size of mt8173-rdma0 is 8K
+ mediatek,rdma-fifo-size of mt8183-rdma0 is 5K
+ mediatek,rdma-fifo-size of mt8183-rdma1 is 2K
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8192, 5120, 2048]
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+ - iommus
+
+additionalProperties: false
+
+examples:
+ - |
+
+ rdma0: rdma@1400e000 {
+ compatible = "mediatek,mt8173-disp-rdma";
+ reg = <0 0x1400e000 0 0x1000>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_RDMA0>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+ mediatek,larb = <&larb0>;
+ mediatek,rdma-fifo-size = <8192>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xe000 0x1000>;
+ };
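A second sketch showing the smaller fifo on another instance; the reg, interrupt and clock values here are illustrative placeholders, only the compatible string and the 2K fifo size come from the list above:

  rdma1: rdma@1401c000 {
      compatible = "mediatek,mt8183-disp-rdma";
      reg = <0 0x1401c000 0 0x1000>;                  /* placeholder */
      interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;  /* placeholder */
      power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
      clocks = <&mmsys CLK_MM_DISP_RDMA1>;            /* placeholder */
      iommus = <&iommu M4U_PORT_DISP_RDMA1>;          /* placeholder */
      mediatek,larb = <&larb0>;
      mediatek,rdma-fifo-size = <2048>;               /* mt8183-rdma1: 2K */
  };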
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml
new file mode 100644
index 000000000000..4f08e89c1067
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,split.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display split
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display split, namely SPLIT, is used to split a display stream
+ to two encoders.
+ The SPLIT device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8173-disp-split
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: SPLIT Clock
+
+required:
+ - compatible
+ - reg
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ split0: split@14018000 {
+ compatible = "mediatek,mt8173-disp-split";
+ reg = <0 0x14018000 0 0x1000>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_SPLIT0>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ufoe.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ufoe.yaml
new file mode 100644
index 000000000000..6e8748529e73
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ufoe.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,ufoe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek display UFOe
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ Mediatek display UFOe stands for Unified Frame Optimization engine.
+ UFOe can cut the data rate for the DSI port, which may reduce power
+ consumption.
+ The UFOe device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8173-disp-ufoe
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: UFOe Clock
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ ufoe@1401a000 {
+ compatible = "mediatek,mt8173-disp-ufoe";
+ reg = <0 0x1401a000 0 0x1000>;
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_UFOE>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,wdma.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,wdma.yaml
new file mode 100644
index 000000000000..aaf5649b6413
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,wdma.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/mediatek/mediatek,wdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek Write Direct Memory Access
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ The Mediatek Write Direct Memory Access (WDMA) component writes display
+ data back to memory through DMA.
+ The WDMA device node must be a sibling of the central MMSYS_CONFIG node.
+ For a description of the MMSYS_CONFIG binding, see
+ Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: mediatek,mt8173-disp-wdma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle. See
+ Documentation/devicetree/bindings/power/power-domain.yaml for details.
+
+ clocks:
+ items:
+ - description: WDMA Clock
+
+ iommus:
+ description:
+ This property should point to the respective IOMMU block with master port as argument,
+ see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
+
+ mediatek,larb:
+ description:
+ This property should contain a phandle pointing to the local arbiter devices defined in
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml.
+ It must be sorted according to the local arbiter index, like larb0, larb1, larb2...
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 32
+
+ mediatek,gce-client-reg:
+ description: The register of the client driver can be configured by the
+ GCE with 4 arguments defined in this property: phandle of the GCE,
+ subsys id, register offset and size. Each GCE subsys id maps to a
+ client defined in the header include/dt-bindings/gce/<chip>-gce.h.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - power-domains
+ - clocks
+ - iommus
+
+additionalProperties: false
+
+examples:
+ - |
+
+ wdma0: wdma@14011000 {
+ compatible = "mediatek,mt8173-disp-wdma";
+ reg = <0 0x14011000 0 0x1000>;
+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_WDMA0>;
+ iommus = <&iommu M4U_PORT_DISP_WDMA0>;
+ mediatek,larb = <&larb0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
index 93878c2cd370..3a8c2c11f9bd 100644
--- a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
@@ -11,13 +11,23 @@ maintainers:
- Thierry Reding <thierry.reding@gmail.com>
allOf:
- - $ref: lvds.yaml#
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/display/lvds.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: advantech,idk-1110wr
+
+ required:
+ - compatible
properties:
compatible:
items:
- const: advantech,idk-1110wr
- - {} # panel-lvds, but not listed here to avoid false select
+ - const: panel-lvds
data-mapping:
const: jeida-24
@@ -35,6 +45,11 @@ additionalProperties: false
required:
- compatible
+ - data-mapping
+ - width-mm
+ - height-mm
+ - panel-timing
+ - port
examples:
- |+
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml b/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml
index a69681e724cb..566e11f6bfc0 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml
+++ b/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml
@@ -11,15 +11,26 @@ maintainers:
- Thierry Reding <thierry.reding@gmail.com>
allOf:
- - $ref: lvds.yaml#
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/display/lvds.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: innolux,ee101ia-01d
+
+ required:
+ - compatible
properties:
compatible:
items:
- const: innolux,ee101ia-01d
- - {} # panel-lvds, but not listed here to avoid false select
+ - const: panel-lvds
backlight: true
+ data-mapping: true
enable-gpios: true
power-supply: true
width-mm: true
@@ -27,5 +38,13 @@ properties:
panel-timing: true
port: true
+required:
+ - compatible
+ - data-mapping
+ - width-mm
+ - height-mm
+ - panel-timing
+ - port
+
additionalProperties: false
...
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml
index b5e7ee230fa6..5cf3c588f46d 100644
--- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml
@@ -11,13 +11,23 @@ maintainers:
- Thierry Reding <thierry.reding@gmail.com>
allOf:
- - $ref: lvds.yaml#
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/display/lvds.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: mitsubishi,aa104xd12
+
+ required:
+ - compatible
properties:
compatible:
items:
- const: mitsubishi,aa104xd12
- - {} # panel-lvds, but not listed here to avoid false select
+ - const: panel-lvds
vcc-supply:
description: Reference to the regulator powering the panel VCC pins.
@@ -39,6 +49,11 @@ additionalProperties: false
required:
- compatible
- vcc-supply
+ - data-mapping
+ - width-mm
+ - height-mm
+ - panel-timing
+ - port
examples:
- |+
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml
index 977c50a85b67..54750cc5440d 100644
--- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml
@@ -11,13 +11,23 @@ maintainers:
- Thierry Reding <thierry.reding@gmail.com>
allOf:
- - $ref: lvds.yaml#
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/display/lvds.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: mitsubishi,aa121td01
+
+ required:
+ - compatible
properties:
compatible:
items:
- const: mitsubishi,aa121td01
- - {} # panel-lvds, but not listed here to avoid false select
+ - const: panel-lvds
vcc-supply:
description: Reference to the regulator powering the panel VCC pins.
@@ -39,6 +49,11 @@ additionalProperties: false
required:
- compatible
- vcc-supply
+ - data-mapping
+ - width-mm
+ - height-mm
+ - panel-timing
+ - port
examples:
- |+
diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
new file mode 100644
index 000000000000..fcc50db6a812
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-lvds.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic LVDS Display Panel Device Tree Bindings
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/display/lvds.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: panel-lvds
+
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - advantech,idk-1110wr
+ - advantech,idk-2121wr
+ - innolux,ee101ia-01d
+ - mitsubishi,aa104xd12
+ - mitsubishi,aa121td01
+ - sgd,gktw70sdae4se
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - auo,b101ew05
+ - tbs,a711-panel
+
+ - const: panel-lvds
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - data-mapping
+ - width-mm
+ - height-mm
+ - panel-timing
+ - port
+
+...
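Unlike the per-panel schemas, this new generic schema ships without an examples block. A minimal conforming node might look like the sketch below; the size and timing numbers are illustrative placeholders, not datasheet values:

  panel {
      compatible = "auo,b101ew05", "panel-lvds";
      width-mm = <223>;           /* placeholder */
      height-mm = <125>;          /* placeholder */
      data-mapping = "jeida-24";

      panel-timing {
          clock-frequency = <71000000>;  /* placeholders throughout */
          hactive = <1280>;
          vactive = <800>;
          hsync-len = <32>;
          hfront-porch = <48>;
          hback-porch = <80>;
          vsync-len = <6>;
          vfront-porch = <3>;
          vback-porch = <14>;
      };

      port {
          panel_in: endpoint {
              remote-endpoint = <&lvds_encoder_out>;
          };
      };
  };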
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 9cf5588a09d8..1eb9dd4f8f58 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -284,6 +284,8 @@ properties:
- sharp,lq101k1ly04
# Sharp 12.3" (2400x1600 pixels) TFT LCD panel
- sharp,lq123p1jx31
+ # Sharp 14" (1920x1080 pixels) TFT LCD panel
+ - sharp,lq140m1jw46
# Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
- sharp,ls020b1dd01d
# Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
index e63a570ae59d..44e02decdf3a 100644
--- a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
@@ -11,13 +11,23 @@ maintainers:
- Thierry Reding <thierry.reding@gmail.com>
allOf:
- - $ref: lvds.yaml#
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/display/lvds.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: sgd,gktw70sdae4se
+
+ required:
+ - compatible
properties:
compatible:
items:
- const: sgd,gktw70sdae4se
- - {} # panel-lvds, but not listed here to avoid false select
+ - const: panel-lvds
data-mapping:
const: jeida-18
@@ -35,6 +45,11 @@ additionalProperties: false
required:
- compatible
+ - data-mapping
+ - width-mm
+ - height-mm
+ - panel-timing
+ - port
examples:
- |+
diff --git a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
index 2ed2a7d0ca2f..9baafd0c42dd 100644
--- a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
+++ b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
@@ -8,6 +8,7 @@ title: Solomon SSD1307 OLED Controller Framebuffer
maintainers:
- Maxime Ripard <mripard@kernel.org>
+ - Javier Martinez Canillas <javierm@redhat.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
index e04349567eeb..427c5873f96a 100644
--- a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive GPIO controller
maintainers:
- - Yash Shah <yash.shah@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
properties:
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 63a08f3f321d..4d6bfae0653c 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -159,6 +159,21 @@ allOf:
power-domains:
maxItems: 1
sram-supply: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3568-mali
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ clock-names:
+ items:
+ - const: gpu
+ - const: bus
+ required:
+ - clock-names
examples:
- |
diff --git a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
index 272832e9f8f2..fa86691ebf16 100644
--- a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
@@ -20,7 +20,7 @@ description: |
maintainers:
- Kishon Vijay Abraham I <kishon@ti.com>
- - Roger Quadros <rogerq@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml b/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
index cbbf5e8b1197..f78d3246fbdc 100644
--- a/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
@@ -8,7 +8,7 @@ title: OMAP USB2 PHY
maintainers:
- Kishon Vijay Abraham I <kishon@ti.com>
- - Roger Quadros <rogerq@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml b/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
index 84e66913d042..db41cd7bf150 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
@@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive PWM controller
maintainers:
- - Yash Shah <yash.shah@sifive.com>
- Sagar Kadam <sagar.kadam@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
index 2b1f91603897..e2d330bd4608 100644
--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
@@ -9,7 +9,6 @@ title: SiFive L2 Cache Controller
maintainers:
- Sagar Kadam <sagar.kadam@sifive.com>
- - Yash Shah <yash.shah@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
description:
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
index 77adbebed824..c3e9f3485449 100644
--- a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -8,6 +8,7 @@ title: Audio codec controlled by ChromeOS EC
maintainers:
- Cheng-Yi Chiang <cychiang@chromium.org>
+ - Tzung-Bi Shih <tzungbi@google.com>
description: |
Google's ChromeOS EC codec is a digital mic codec provided by the
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index a634774c537c..eedde385d299 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -7,7 +7,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller
maintainers:
- - Roger Quadros <rogerq@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
index f6e91a5fd8fe..4f7a212fddd3 100644
--- a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI Keystone Soc USB Controller
maintainers:
- - Roger Quadros <rogerq@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index b7d801993bfa..bcaefc952764 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -539,6 +539,7 @@ GuC ABI
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
HuC
---
diff --git a/Documentation/tools/rtla/common_hist_options.rst b/Documentation/tools/rtla/common_hist_options.rst
index 0266cd08a6c9..df53ff835bfb 100644
--- a/Documentation/tools/rtla/common_hist_options.rst
+++ b/Documentation/tools/rtla/common_hist_options.rst
@@ -2,7 +2,7 @@
Set the histogram bucket size (default *1*).
-**-e**, **--entries** *N*
+**-E**, **--entries** *N*
Set the number of entries of the histogram (default 256).
diff --git a/Documentation/tools/rtla/common_osnoise_description.rst b/Documentation/tools/rtla/common_osnoise_description.rst
index 8973c5df888f..d5d61615b967 100644
--- a/Documentation/tools/rtla/common_osnoise_description.rst
+++ b/Documentation/tools/rtla/common_osnoise_description.rst
@@ -1,7 +1,7 @@
The **rtla osnoise** tool is an interface for the *osnoise* tracer. The
*osnoise* tracer dispatches a kernel thread per-cpu. These threads read the
time in a loop while with preemption, softirq and IRQs enabled, thus
-allowing all the sources of operating systme noise during its execution.
+allowing all the sources of operating system noise during its execution.
The *osnoise*'s tracer threads take note of the delta between each time
read, along with an interference counter of all sources of interference.
At the end of each period, the *osnoise* tracer displays a summary of
diff --git a/Documentation/tools/rtla/rtla-osnoise-hist.rst b/Documentation/tools/rtla/rtla-osnoise-hist.rst
index 52298ddd8701..f2e79d22c4c4 100644
--- a/Documentation/tools/rtla/rtla-osnoise-hist.rst
+++ b/Documentation/tools/rtla/rtla-osnoise-hist.rst
@@ -36,7 +36,7 @@ default). The reason for reducing the runtime is to avoid starving the
**rtla** tool. The tool is also set to run for *one minute*. The output
histogram is set to group outputs in buckets of *10us* and *25* entries::
- [root@f34 ~/]# rtla osnoise hist -P F:1 -c 0-11 -r 900000 -d 1M -b 10 -e 25
+ [root@f34 ~/]# rtla osnoise hist -P F:1 -c 0-11 -r 900000 -d 1M -b 10 -E 25
# RTLA osnoise histogram
# Time unit is microseconds (us)
# Duration: 0 00:01:00
diff --git a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
index 87a36044f828..2ca92042767b 100644
--- a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
+++ b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
@@ -84,6 +84,8 @@ CPUfreq核心层注册一个cpufreq_driver结构体。
.resume - 一个指向per-policy恢复函数的指针,该函数在关中断且在调节器再一次启动前被
调用。
+ .ready - 一个指向per-policy准备函数的指针,该函数在策略完全初始化之后被调用。
+
.attr - 一个指向NULL结尾的"struct freq_attr"列表的指针,该列表允许导出值到
sysfs。
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index a4267104db50..9f3172376ec3 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1394,7 +1394,7 @@ documentation when it pops into existence).
-------------------
:Capability: KVM_CAP_ENABLE_CAP
-:Architectures: mips, ppc, s390
+:Architectures: mips, ppc, s390, x86
:Type: vcpu ioctl
:Parameters: struct kvm_enable_cap (in)
:Returns: 0 on success; -1 on error
@@ -6997,6 +6997,20 @@ indicated by the fd to the VM this is called on.
This is intended to support intra-host migration of VMs between userspace VMMs,
upgrading the VMM process without interrupting the guest.
+7.30 KVM_CAP_PPC_AIL_MODE_3
+-------------------------------
+
+:Capability: KVM_CAP_PPC_AIL_MODE_3
+:Architectures: ppc
+:Type: vm
+
+This capability indicates that the kernel supports the mode 3 setting for the
+"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location"
+resource that is controlled with the H_SET_MODE hypercall.
+
+This capability allows a guest kernel to use a better-performing mode for
+handling interrupts and system calls.
+
8. Other capabilities.
======================
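For context on how a new capability such as KVM_CAP_PPC_AIL_MODE_3 is consumed: a userspace VMM probes it with the KVM_CHECK_EXTENSION ioctl before relying on it. A minimal sketch, assuming a <linux/kvm.h> recent enough to define the constant:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	/* KVM_CHECK_EXTENSION returns a positive value when the host
	 * kernel advertises the capability. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_AIL_MODE_3) > 0)
		printf("AIL mode 3 is supported\n");
	return 0;
}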
diff --git a/MAINTAINERS b/MAINTAINERS
index abfb59afd06d..e0ad1bc5b9d4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3139,11 +3139,9 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
F: drivers/net/wireless/ath/ath5k/
ATHEROS ATH6KL WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath6kl/
ATI_REMOTE2 DRIVER
@@ -4549,6 +4547,7 @@ F: drivers/platform/chrome/
CHROMEOS EC CODEC DRIVER
M: Cheng-Yi Chiang <cychiang@chromium.org>
+M: Tzung-Bi Shih <tzungbi@google.com>
R: Guenter Roeck <groeck@chromium.org>
S: Maintained
F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -4914,7 +4913,8 @@ F: kernel/cgroup/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
-M: Vladimir Davydov <vdavydov.dev@gmail.com>
+M: Roman Gushchin <roman.gushchin@linux.dev>
+M: Shakeel Butt <shakeelb@google.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@@ -6082,7 +6082,8 @@ L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/panel/panel-lvds.c
-F: Documentation/devicetree/bindings/display/panel/lvds.yaml
+F: Documentation/devicetree/bindings/display/lvds.yaml
+F: Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
DRM DRIVER FOR MANTIX MLAF057WE51 PANELS
M: Guido Günther <agx@sigxcpu.org>
@@ -6131,6 +6132,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
+DRM DRIVER FOR NOVATEK NT35560 PANELS
+M: Linus Walleij <linus.walleij@linaro.org>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
+F: drivers/gpu/drm/panel/panel-novatek-nt35560.c
+
DRM DRIVER FOR NOVATEK NT36672A PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
S: Maintained
@@ -6167,6 +6175,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/repaper.txt
F: drivers/gpu/drm/tiny/repaper.c
+DRM DRIVER FOR SOLOMON SSD130X OLED DISPLAYS
+M: Javier Martinez Canillas <javierm@redhat.com>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
+F: drivers/gpu/drm/solomon/ssd130x*
+
DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie <airlied@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
@@ -6255,12 +6270,6 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
F: drivers/gpu/drm/tiny/st7735r.c
-DRM DRIVER FOR SONY ACX424AKP PANELS
-M: Linus Walleij <linus.walleij@linaro.org>
-S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
-F: drivers/gpu/drm/panel/panel-sony-acx424akp.c
-
DRM DRIVER FOR ST-ERICSSON MCDE
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
@@ -7012,12 +7021,6 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c
-EDAC-SIFIVE
-M: Yash Shah <yash.shah@sifive.com>
-L: linux-edac@vger.kernel.org
-S: Supported
-F: drivers/edac/sifive_edac.c
-
EDAC-SKYLAKE
M: Tony Luck <tony.luck@intel.com>
L: linux-edac@vger.kernel.org
@@ -7188,7 +7191,7 @@ F: drivers/net/can/usb/etas_es58x/
ETHERNET BRIDGE
M: Roopa Prabhu <roopa@nvidia.com>
-M: Nikolay Aleksandrov <nikolay@nvidia.com>
+M: Nikolay Aleksandrov <razor@blackwall.org>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
@@ -9264,6 +9267,15 @@ S: Maintained
W: https://github.com/o2genum/ideapad-slidebar
F: drivers/input/misc/ideapad_slidebar.c
+IDMAPPED MOUNTS
+M: Christian Brauner <brauner@kernel.org>
+L: linux-fsdevel@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
+F: Documentation/filesystems/idmappings.rst
+F: tools/testing/selftests/mount_setattr/
+F: include/linux/mnt_idmapping.h
+
IDT VersaClock 5 CLOCK DRIVER
M: Luca Ceresoli <luca@lucaceresoli.net>
S: Maintained
@@ -15154,7 +15166,7 @@ M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Mark Rutland <mark.rutland@arm.com>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-R: Jiri Olsa <jolsa@redhat.com>
+R: Jiri Olsa <jolsa@kernel.org>
R: Namhyung Kim <namhyung@kernel.org>
L: linux-perf-users@vger.kernel.org
L: linux-kernel@vger.kernel.org
@@ -15572,6 +15584,7 @@ M: Iurii Zaikin <yzaikin@google.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux.git sysctl-next
F: fs/proc/proc_sysctl.c
F: include/linux/sysctl.h
F: kernel/sysctl-test.c
@@ -15919,6 +15932,7 @@ S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
@@ -15926,11 +15940,12 @@ L: ath11k@lists.infradead.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath11k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath11k.txt
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M: ath9k-devel@qca.qualcomm.com
+M: Toke Høiland-Jørgensen <toke@toke.dk>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
@@ -16005,14 +16020,6 @@ F: Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
F: drivers/misc/fastrpc.c
F: include/uapi/misc/fastrpc.h
-QUALCOMM GENERIC INTERFACE I2C DRIVER
-M: Akash Asthana <akashast@codeaurora.org>
-M: Mukesh Savaliya <msavaliy@codeaurora.org>
-L: linux-i2c@vger.kernel.org
-L: linux-arm-msm@vger.kernel.org
-S: Supported
-F: drivers/i2c/busses/i2c-qcom-geni.c
-
QUALCOMM HEXAGON ARCHITECTURE
M: Brian Cain <bcain@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@@ -16084,8 +16091,8 @@ F: Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
F: drivers/mtd/nand/raw/qcom_nandc.c
QUALCOMM RMNET DRIVER
-M: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
-M: Sean Tranchetti <stranche@codeaurora.org>
+M: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
+M: Sean Tranchetti <quic_stranche@quicinc.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
@@ -16111,11 +16118,10 @@ F: Documentation/devicetree/bindings/media/*venus*
F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
+M: Loic Poulain <loic.poulain@linaro.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
-T: git git://github.com/KrasnikovEugene/wcn36xx.git
F: drivers/net/wireless/ath/wcn36xx/
QUANTENNA QTNFMAC WIRELESS DRIVER
@@ -16378,6 +16384,7 @@ F: drivers/watchdog/realtek_otto_wdt.c
REALTEK RTL83xx SMI DSA ROUTER CHIPS
M: Linus Walleij <linus.walleij@linaro.org>
+M: Alvin Šipraga <alsi@bang-olufsen.dk>
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
F: drivers/net/dsa/realtek-smi*
@@ -17771,8 +17778,10 @@ M: David Rientjes <rientjes@google.com>
M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
+R: Roman Gushchin <roman.gushchin@linux.dev>
L: linux-mm@kvack.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
F: include/linux/sl?b*.h
F: mm/sl?b*
diff --git a/Makefile b/Makefile
index 51e142f760f7..daeb5c88b50b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Superb Owl
# *DOCUMENTATION*
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 3198acb2aad8..7f3c87f7a0ce 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -106,7 +106,7 @@
msr_s SYS_ICC_SRE_EL2, x0
isb // Make sure SRE is now set
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
- tbz x0, #0, 1f // and check that it sticks
+ tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
index 7068da080799..49837d3a3ef5 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
IRQCHIP_STATE_PENDING,
&val);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ } else if (vgic_irq_is_mapped_level(irq)) {
+ val = vgic_get_phys_line_level(irq);
} else {
val = irq_is_pending(irq);
}
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 0ec9cfc5131f..56ffd260c669 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -12,6 +12,14 @@
#include <asm/barrier.h>
#include <linux/atomic.h>
+/* compiler build environment sanity checks: */
+#if !defined(CONFIG_64BIT) && defined(__LP64__)
+#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
+#endif
+#if defined(CONFIG_64BIT) && !defined(__LP64__)
+#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
+#endif
+
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index ebf8a845b017..123d5f16cd9d 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -89,8 +89,8 @@ struct exception_table_entry {
__asm__("1: " ldx " 0(" sr "%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=r"(__gu_val), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = (__force __typeof__(*(ptr))) __gu_val; \
}
@@ -123,8 +123,8 @@ struct exception_table_entry {
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=&r"(__gu_tmp.l), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = __gu_tmp.t; \
}
@@ -135,13 +135,12 @@ struct exception_table_entry {
#define __put_user_internal(sr, x, ptr) \
({ \
ASM_EXCEPTIONTABLE_VAR(__pu_err); \
- __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
- case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
- case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
- case 8: STD_USER(sr, __x, ptr); break; \
+ case 1: __put_user_asm(sr, "stb", x, ptr); break; \
+ case 2: __put_user_asm(sr, "sth", x, ptr); break; \
+ case 4: __put_user_asm(sr, "stw", x, ptr); break; \
+ case 8: STD_USER(sr, x, ptr); break; \
default: BUILD_BUG(); \
} \
\
@@ -150,7 +149,9 @@ struct exception_table_entry {
#define __put_user(x, ptr) \
({ \
- __put_user_internal("%%sr3,", x, ptr); \
+ __typeof__(&*(ptr)) __ptr = ptr; \
+ __typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \
+ __put_user_internal("%%sr3,", __x, __ptr); \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
@@ -180,8 +181,8 @@ struct exception_table_entry {
"1: " stx " %2,0(" sr "%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err))
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(x))
#if !defined(CONFIG_64BIT)
@@ -193,8 +194,8 @@ struct exception_table_entry {
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err)); \
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(__val)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
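The __put_user() rework above latches ptr into a local (__ptr) before the size dispatch, so the pointer expression is evaluated exactly once even though the macro body mentions it several times. A minimal sketch of the hazard this avoids, using a hypothetical macro name rather than kernel code:

#include <stdio.h>

/* Evaluate the pointer once into a local, as __put_user() now does;
 * a naive macro that expanded p in every case arm would re-run any
 * side effects in the argument. */
#define PUT_ONCE(x, p) do {			\
	__typeof__(&*(p)) __p = (p);		\
	*__p = (x);				\
} while (0)

int main(void)
{
	int buf[2] = { 0, 0 }, *q = buf;

	PUT_ONCE(42, q++);	/* q advances exactly once */
	printf("buf[0]=%d, q-buf=%d\n", buf[0], (int)(q - buf));
	return 0;
}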
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 237d20dd5622..286cec4d86d7 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
- return 0;
+ return ret;
}
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
" zdep %2, 29, 2, %%r19\n"
-" dep %%r0, 31, 2, %2\n"
+" dep %%r0, 31, 2, %3\n"
" mtsar %%r19\n"
" zvdepi -2, 32, %%r19\n"
"1: ldw 0(%%sr1,%3),%%r20\n"
@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
" andcm %%r21, %%r19, %%r21\n"
" or %1, %%r20, %1\n"
" or %2, %%r21, %2\n"
-"3: stw %1,0(%%sr1,%1)\n"
+"3: stw %1,0(%%sr1,%3)\n"
"4: stw %%r1,4(%%sr1,%3)\n"
"5: stw %2,8(%%sr1,%3)\n"
" copy %%r0, %0\n"
@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs)
ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
break;
}
-#ifdef CONFIG_PA20
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:
@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
+#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
- }
#endif
+ }
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
flop=1;
- ret = emulate_ldw(regs, R2(regs->iir),0);
+ ret = emulate_ldw(regs, R2(regs->iir), 1);
break;
case OPCODE_LDW_M:
- ret = emulate_ldw(regs, R2(regs->iir),1);
+ ret = emulate_ldw(regs, R2(regs->iir), 0);
break;
case OPCODE_FSTW_L:
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 367f6397bda7..860385058085 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
return *((u64 *)addr);
}
+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+
u64 ioread64_hi_lo(const void __iomem *addr)
{
u32 low, high;
@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
}
}
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+
void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
iowrite32(val >> 32, addr + sizeof(u32));
@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
EXPORT_SYMBOL(ioread64);
EXPORT_SYMBOL(ioread64be);
+EXPORT_SYMBOL(ioread64_lo_hi);
EXPORT_SYMBOL(ioread64_hi_lo);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
EXPORT_SYMBOL(iowrite64);
EXPORT_SYMBOL(iowrite64be);
+EXPORT_SYMBOL(iowrite64_lo_hi);
EXPORT_SYMBOL(iowrite64_hi_lo);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 1ae31db9988f..1dc2e88e7b04 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -337,9 +337,9 @@ static void __init setup_bootmem(void)
static bool kernel_set_to_readonly;
-static void __init map_pages(unsigned long start_vaddr,
- unsigned long start_paddr, unsigned long size,
- pgprot_t pgprot, int force)
+static void __ref map_pages(unsigned long start_vaddr,
+ unsigned long start_paddr, unsigned long size,
+ pgprot_t pgprot, int force)
{
pmd_t *pmd;
pte_t *pg_table;
@@ -449,7 +449,7 @@ void __init set_kernel_text_rw(int enable_read_write)
flush_tlb_all();
}
-void __ref free_initmem(void)
+void free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
@@ -463,7 +463,6 @@ void __ref free_initmem(void)
/* The init text pages are marked R-X. We have to
* flush the icache and mark them RW-
*
- * This is tricky, because map_pages is in the init section.
* Do a dummy remap of the data section first (the data
* section is already PAGE_KERNEL) to pull in the TLB entries
* for map_kernel */
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index fa84744d6b24..b876ef8c70a7 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -421,14 +421,14 @@ InstructionTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a94b0cd0bdc5..bd3734d5be89 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -3264,12 +3264,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
case BARRIER_EIEIO:
eieio();
break;
+#ifdef CONFIG_PPC64
case BARRIER_LWSYNC:
asm volatile("lwsync" : : : "memory");
break;
case BARRIER_PTESYNC:
asm volatile("ptesync" : : : "memory");
break;
+#endif
}
break;
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index 2a82a3b2992b..af64b95e88cc 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -23,7 +23,7 @@ CONFIG_SLOB=y
CONFIG_SOC_CANAAN=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
+CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
CONFIG_CMDLINE_FORCE=y
# CONFIG_SECCOMP is not set
# CONFIG_STACKPROTECTOR is not set
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 612556faa527..ffc87e76b1dd 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -51,6 +51,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
+obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o
+
obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index ed29e9c8f660..d6a46ed0bf05 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -108,7 +108,7 @@ _save_context:
.option pop
#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_off
+ call __trace_hardirqs_off
#endif
#ifdef CONFIG_CONTEXT_TRACKING
@@ -143,7 +143,7 @@ skip_context_tracking:
li t0, EXC_BREAKPOINT
beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_on
+ call __trace_hardirqs_on
#endif
csrs CSR_STATUS, SR_IE
@@ -234,7 +234,7 @@ ret_from_exception:
REG_L s0, PT_STATUS(sp)
csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_off
+ call __trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
/* the MPP value is too large to be used as an immediate arg for addi */
@@ -270,10 +270,10 @@ restore_all:
REG_L s1, PT_STATUS(sp)
andi t0, s1, SR_PIE
beqz t0, 1f
- call trace_hardirqs_on
+ call __trace_hardirqs_on
j 2f
1:
- call trace_hardirqs_off
+ call __trace_hardirqs_off
2:
#endif
REG_L a0, PT_STATUS(sp)
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index f72527fcb347..775d3322b422 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -5,6 +5,7 @@
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/
+#include <linux/bits.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/reboot.h>
@@ -85,7 +86,7 @@ static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mas
pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
break;
}
- hmask |= 1 << hartid;
+ hmask |= BIT(hartid);
}
return hmask;
@@ -160,7 +161,7 @@ static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
unsigned long hart_mask;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
@@ -176,7 +177,7 @@ static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
int result = 0;
unsigned long hart_mask;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
@@ -249,26 +250,37 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
{
- unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+ unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
struct sbiret ret = {0};
int result;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
for_each_cpu(cpuid, cpu_mask) {
hartid = cpuid_to_hartid_map(cpuid);
- if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
- ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
- hmask, hbase, 0, 0, 0, 0);
- if (ret.error)
- goto ecall_failed;
- hmask = 0;
- hbase = 0;
+ if (hmask) {
+ if (hartid + BITS_PER_LONG <= htop ||
+ hbase + BITS_PER_LONG <= hartid) {
+ ret = sbi_ecall(SBI_EXT_IPI,
+ SBI_EXT_IPI_SEND_IPI, hmask,
+ hbase, 0, 0, 0, 0);
+ if (ret.error)
+ goto ecall_failed;
+ hmask = 0;
+ } else if (hartid < hbase) {
+ /* shift the mask to fit lower hartid */
+ hmask <<= hbase - hartid;
+ hbase = hartid;
+ }
}
- if (!hmask)
+ if (!hmask) {
hbase = hartid;
- hmask |= 1UL << (hartid - hbase);
+ htop = hartid;
+ } else if (hartid > htop) {
+ htop = hartid;
+ }
+ hmask |= BIT(hartid - hbase);
}
if (hmask) {
@@ -344,25 +356,35 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5)
{
- unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+ unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
int result;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
for_each_cpu(cpuid, cpu_mask) {
hartid = cpuid_to_hartid_map(cpuid);
- if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
- result = __sbi_rfence_v02_call(fid, hmask, hbase,
- start, size, arg4, arg5);
- if (result)
- return result;
- hmask = 0;
- hbase = 0;
+ if (hmask) {
+ if (hartid + BITS_PER_LONG <= htop ||
+ hbase + BITS_PER_LONG <= hartid) {
+ result = __sbi_rfence_v02_call(fid, hmask,
+ hbase, start, size, arg4, arg5);
+ if (result)
+ return result;
+ hmask = 0;
+ } else if (hartid < hbase) {
+ /* shift the mask to fit lower hartid */
+ hmask <<= hbase - hartid;
+ hbase = hartid;
+ }
}
- if (!hmask)
+ if (!hmask) {
hbase = hartid;
- hmask |= 1UL << (hartid - hbase);
+ htop = hartid;
+ } else if (hartid > htop) {
+ htop = hartid;
+ }
+ hmask |= BIT(hartid - hbase);
}
if (hmask) {
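Both reworked loops above implement the same batching scheme: hartids accumulate into a BITS_PER_LONG-wide mask window anchored at hbase, htop tracks the highest hartid seen, a hartid below the anchor re-anchors the window by shifting the mask up, and a hartid that cannot fit in the window forces the accumulated mask to be flushed via an ecall first. A standalone sketch of the logic, with flush() standing in for the sbi_ecall() and no assumption that hartids arrive sorted:

#include <limits.h>

#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static void flush(unsigned long mask, unsigned long base)
{
	/* stands in for sbi_ecall(..., mask, base, ...) */
	(void)mask; (void)base;
}

static void send_to_harts(const unsigned long *hartids, int n)
{
	unsigned long hmask = 0, hbase = 0, htop = 0;

	for (int i = 0; i < n; i++) {
		unsigned long hartid = hartids[i];

		if (hmask) {
			if (hartid + WORD_BITS <= htop ||
			    hbase + WORD_BITS <= hartid) {
				/* hartid cannot fit in the current
				 * window: flush and start a new one */
				flush(hmask, hbase);
				hmask = 0;
			} else if (hartid < hbase) {
				/* re-anchor the window at the lower id */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= 1UL << (hartid - hbase);
	}
	if (hmask)
		flush(hmask, hbase);
}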
diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c
new file mode 100644
index 000000000000..095ac976d7da
--- /dev/null
+++ b/arch/riscv/kernel/trace_irq.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+
+#include <linux/irqflags.h>
+#include <linux/kprobes.h>
+#include "trace_irq.h"
+
+/*
+ * trace_hardirqs_on/off require the caller to set up the frame pointer
+ * properly. Otherwise, CALLER_ADDR1 might trigger a paging exception in
+ * the kernel. Here we add one extra call level so they can be safely
+ * called by low-level entry code in which $fp is used for other purposes.
+ */
+
+void __trace_hardirqs_on(void)
+{
+ trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_on);
+
+void __trace_hardirqs_off(void)
+{
+ trace_hardirqs_off();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_off);
diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h
new file mode 100644
index 000000000000..99fe67377e5e
--- /dev/null
+++ b/arch/riscv/kernel/trace_irq.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+#ifndef __TRACE_IRQ_H
+#define __TRACE_IRQ_H
+
+void __trace_hardirqs_on(void);
+void __trace_hardirqs_off(void);
+
+#endif /* __TRACE_IRQ_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6dcccb304775..ec9830d2aabf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -703,7 +703,6 @@ struct kvm_vcpu_arch {
struct fpu_guest guest_fpu;
u64 xcr0;
- u64 guest_supported_xcr0;
struct kvm_pio_request pio;
void *pio_data;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3faf0f97edb1..a4a39c3e0f19 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -476,6 +476,7 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index b00dbc5fac2b..bb2fb78523ce 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -220,6 +220,42 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
+
+/* AVIC */
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
+#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
+
+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
+#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
+#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
+#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
+
+#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
+#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
+#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
+
+enum avic_ipi_failure_cause {
+ AVIC_IPI_FAILURE_INVALID_INT_TYPE,
+ AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+ AVIC_IPI_FAILURE_INVALID_TARGET,
+ AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+};
+
+
+/*
+ * 0xff is broadcast, so the max index allowed for physical APIC ID
+ * table is 0xfe. APIC IDs above 0xff are reserved.
+ */
+#define AVIC_MAX_PHYSICAL_ID_COUNT 0xff
+
+#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
+#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+
+
struct vmcb_seg {
u16 selector;
u16 attrib;
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 4b41efc9e367..8e4bc6453d26 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -344,10 +344,8 @@ static void sgx_reclaim_pages(void)
{
struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
struct sgx_backing backing[SGX_NR_TO_SCAN];
- struct sgx_epc_section *section;
struct sgx_encl_page *encl_page;
struct sgx_epc_page *epc_page;
- struct sgx_numa_node *node;
pgoff_t page_index;
int cnt = 0;
int ret;
@@ -418,13 +416,7 @@ skip:
kref_put(&encl_page->encl->refcount, sgx_encl_release);
epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
- section = &sgx_epc_sections[epc_page->section];
- node = section->node;
-
- spin_lock(&node->lock);
- list_add_tail(&epc_page->list, &node->free_page_list);
- spin_unlock(&node->lock);
- atomic_long_inc(&sgx_nr_free_pages);
+ sgx_free_epc_page(epc_page);
}
}
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 437d7c930c0b..75ffaef8c299 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -91,11 +91,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
- struct user32_fxsr_struct newstate;
+ struct fxregs_state newstate;
int ret;
- BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
-
if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
@@ -116,9 +114,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
/* Copy the state */
memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));
- /* Clear xmm8..15 */
+ /* Clear xmm8..15 for 32-bit callers */
BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
- memset(&fpu->fpstate->regs.fxsave.xmm_space[8], 0, 8 * 16);
+ if (in_ia32_syscall())
+ memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);
/* Mark FP and SSE as in use when XSAVE is enabled */
if (use_xsave())
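Two details change above: the clearing of xmm8..15 now happens only for 32-bit callers, and the starting index becomes 8*4 because fxregs_state declares xmm_space as an array of 32-bit words, so register xmmN begins at word N*4, not N. A standalone sketch of the indexing, modelling the layout with stdint types:

#include <stdint.h>
#include <string.h>

/* 16 XMM registers of 128 bits each, stored as 32-bit words,
 * as in struct fxregs_state. */
static uint32_t xmm_space[16 * 4];

static void clear_xmm8_to_15(void)
{
	/* xmm8 starts at word 8 * 4; clear 8 registers * 16 bytes.
	 * The old xmm_space[8] pointed into the middle of xmm2. */
	memset(&xmm_space[8 * 4], 0, 8 * 16);
}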
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 02b3ddaf4f75..7c7824ae7862 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1558,7 +1558,10 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
fpregs_restore_userregs();
newfps->xfeatures = curfps->xfeatures | xfeatures;
- newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+
+ if (!guest_fpu)
+ newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+
newfps->xfd = curfps->xfd & ~xfeatures;
/* Do the final updates within the locked region */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a438217cbfac..f734e3b0cfec 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -462,19 +462,22 @@ static bool pv_tlb_flush_supported(void)
{
return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+ (num_possible_cpus() != 1));
}
static bool pv_ipi_supported(void)
{
- return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+ return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
+ (num_possible_cpus() != 1));
}
static bool pv_sched_yield_supported(void)
{
return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+ (num_possible_cpus() != 1));
}
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 6d2244c94799..8d2f2f995539 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
- .n = sizeof(struct user_i387_struct) / sizeof(long),
+ .n = sizeof(struct fxregs_state) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},
@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = {
},
[REGSET_XFP] = {
.core_note_type = NT_PRXFPREG,
- .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
+ .n = sizeof(struct fxregs_state) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 494d4d351859..b8f8d268d058 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -282,6 +282,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *best;
+ u64 guest_supported_xcr0;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
if (best && apic) {
@@ -293,9 +294,11 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_apic_set_version(vcpu);
}
- vcpu->arch.guest_supported_xcr0 =
+ guest_supported_xcr0 =
cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+ vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
+
kvm_update_pv_runtime(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d7e6fde82d25..9322e6340a74 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2306,7 +2306,12 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
apic->irr_pending = true;
apic->isr_count = 1;
} else {
- apic->irr_pending = (apic_search_irr(apic) != -1);
+ /*
+ * Don't clear irr_pending, searching the IRR can race with
+ * updates from the CPU as APICv is still active from hardware's
+ * perspective. The flag will be cleared as appropriate when
+ * KVM injects the interrupt.
+ */
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
}
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 593093b52395..8e24f73bf60b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3889,12 +3889,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
walk_shadow_page_lockless_end(vcpu);
}
+static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
+{
+ /* make sure the token value is not 0 */
+ u32 id = vcpu->arch.apf.id;
+
+ if (id << 12 == 0)
+ vcpu->arch.apf.id = 1;
+
+ return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+}
+
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
gfn_t gfn)
{
struct kvm_arch_async_pf arch;
- arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+ arch.token = alloc_apf_token(vcpu);
arch.gfn = gfn;
arch.direct_map = vcpu->arch.mmu->direct_map;
arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
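The token layout packs an incrementing per-vCPU allocation id into bits 12 and up and the vcpu_id into the low 12 bits (assuming vcpu_id fits there); the new helper additionally guarantees the composed token is never 0 (0 means "no async page fault pending") by restarting the id at 1 whenever the shifted value wraps to zero. A standalone sketch with stdint types standing in for the kernel's u32:

#include <stdint.h>

struct vcpu { uint32_t apf_id; uint32_t vcpu_id; };

static uint32_t alloc_token(struct vcpu *v)
{
	/* apf_id << 12 wraps to 0 every 2^20 allocations (and is 0
	 * initially); restart at 1 so the token is always nonzero */
	if ((uint32_t)(v->apf_id << 12) == 0)
		v->apf_id = 1;
	return (v->apf_id++ << 12) | v->vcpu_id;
}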
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index f614f95acc6b..b1a02993782b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -95,7 +95,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
}
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
- unsigned config, bool exclude_user,
+ u64 config, bool exclude_user,
bool exclude_kernel, bool intr,
bool in_tx, bool in_tx_cp)
{
@@ -181,7 +181,8 @@ static int cmp_u64(const void *a, const void *b)
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
- unsigned config, type = PERF_TYPE_RAW;
+ u64 config;
+ u32 type = PERF_TYPE_RAW;
struct kvm *kvm = pmc->vcpu->kvm;
struct kvm_pmu_event_filter *filter;
bool allow_event = true;
@@ -220,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
}
if (type == PERF_TYPE_RAW)
- config = eventsel & X86_RAW_EVENT_MASK;
+ config = eventsel & AMD64_RAW_EVENT_MASK;
if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
return;
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 90364d02f22a..fb3e20791338 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -27,20 +27,6 @@
#include "irq.h"
#include "svm.h"
-#define SVM_AVIC_DOORBELL 0xc001011b
-
-#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
-
-/*
- * 0xff is broadcast, so the max index allowed for physical APIC ID
- * table is 0xfe. APIC IDs above 0xff are reserved.
- */
-#define AVIC_MAX_PHYSICAL_ID_COUNT 255
-
-#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
-#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
-#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
-
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
@@ -73,12 +59,6 @@ struct amd_svm_iommu_ir {
void *data; /* Storing pointer to struct amd_ir_data */
};
-enum avic_ipi_failure_cause {
- AVIC_IPI_FAILURE_INVALID_INT_TYPE,
- AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
- AVIC_IPI_FAILURE_INVALID_TARGET,
- AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
-};
/* Note:
* This function is called from IOMMU driver to notify
@@ -289,6 +269,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return 0;
}
+void avic_ring_doorbell(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any point,
+ * which could result in signalling the wrong/previous pCPU. But if
+ * that happens the vCPU is guaranteed to do a VMRUN (after being
+ * migrated) and thus will process pending interrupts, i.e. a doorbell
+ * is not needed (and the spurious one is harmless).
+ */
+ int cpu = READ_ONCE(vcpu->cpu);
+
+ if (cpu != get_cpu())
+ wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+ put_cpu();
+}
+
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
u32 icrl, u32 icrh)
{
@@ -304,8 +300,13 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
kvm_for_each_vcpu(i, vcpu, kvm) {
if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
GET_APIC_DEST_FIELD(icrh),
- icrl & APIC_DEST_MASK))
- kvm_vcpu_wake_up(vcpu);
+ icrl & APIC_DEST_MASK)) {
+ vcpu->arch.apic->irr_pending = true;
+ svm_complete_interrupt_delivery(vcpu,
+ icrl & APIC_MODE_MASK,
+ icrl & APIC_INT_LEVELTRIG,
+ icrl & APIC_VECTOR_MASK);
+ }
}
}
@@ -345,8 +346,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
break;
case AVIC_IPI_FAILURE_INVALID_TARGET:
- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
- index, vcpu->vcpu_id, icrh, icrl);
break;
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
@@ -669,52 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
return;
}
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
-{
- if (!vcpu->arch.apicv_active)
- return -1;
-
- kvm_lapic_set_irr(vec, vcpu->arch.apic);
-
- /*
- * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
- * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
- * the read of guest_mode, which guarantees that either VMRUN will see
- * and process the new vIRR entry, or that the below code will signal
- * the doorbell if the vCPU is already running in the guest.
- */
- smp_mb__after_atomic();
-
- /*
- * Signal the doorbell to tell hardware to inject the IRQ if the vCPU
- * is in the guest. If the vCPU is not in the guest, hardware will
- * automatically process AVIC interrupts at VMRUN.
- */
- if (vcpu->mode == IN_GUEST_MODE) {
- int cpu = READ_ONCE(vcpu->cpu);
-
- /*
- * Note, the vCPU could get migrated to a different pCPU at any
- * point, which could result in signalling the wrong/previous
- * pCPU. But if that happens the vCPU is guaranteed to do a
- * VMRUN (after being migrated) and thus will process pending
- * interrupts, i.e. a doorbell is not needed (and the spurious
- * one is harmless).
- */
- if (cpu != get_cpu())
- wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
- put_cpu();
- } else {
- /*
- * Wake the vCPU if it was blocking. KVM will then detect the
- * pending IRQ when checking if the vCPU has a wake event.
- */
- kvm_vcpu_wake_up(vcpu);
- }
-
- return 0;
-}
-
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
return false;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 1218b5a342fc..39d280e7e80e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1457,18 +1457,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
!__nested_vmcb_check_save(vcpu, &save_cached))
goto out_free;
- /*
- * While the nested guest CR3 is already checked and set by
- * KVM_SET_SREGS, it was set when nested state was yet loaded,
- * thus MMU might not be initialized correctly.
- * Set it again to fix this.
- */
-
- ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
- nested_npt_enabled(svm), false);
- if (WARN_ON_ONCE(ret))
- goto out_free;
-
/*
* All checks done, we can enter guest mode. Userspace provides
@@ -1494,6 +1482,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm_switch_vmcb(svm, &svm->nested.vmcb02);
nested_vmcb02_prepare_control(svm);
+
+ /*
+ * While the nested guest CR3 is already checked and set by
+ * KVM_SET_SREGS, it was set before the nested state was loaded,
+ * so the MMU might not be initialized correctly.
+ * Set it again to fix this.
+ */
+
+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+ nested_npt_enabled(svm), false);
+ if (WARN_ON_ONCE(ret))
+ goto out_free;
+
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a290efb272ad..fd3a00c892c7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1585,6 +1585,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 hcr0 = cr0;
+ bool old_paging = is_paging(vcpu);
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@@ -1601,8 +1602,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
#endif
vcpu->arch.cr0 = cr0;
- if (!npt_enabled)
+ if (!npt_enabled) {
hcr0 |= X86_CR0_PG | X86_CR0_WP;
+ if (old_paging != is_paging(vcpu))
+ svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ }
/*
* re-enable caching here because the QEMU bios
@@ -1646,8 +1650,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
svm_flush_tlb(vcpu);
vcpu->arch.cr4 = cr4;
- if (!npt_enabled)
+ if (!npt_enabled) {
cr4 |= X86_CR4_PAE;
+
+ if (!is_paging(vcpu))
+ cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
+ }
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -2685,8 +2693,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u64 data = msr->data;
switch (ecx) {
case MSR_AMD64_TSC_RATIO:
- if (!msr->host_initiated && !svm->tsc_scaling_enabled)
- return 1;
+
+ if (!svm->tsc_scaling_enabled) {
+
+ if (!msr->host_initiated)
+ return 1;
+ /*
+ * When TSC scaling is not enabled, always
+ * leave this MSR at its default value.
+ *
+ * Due to a bug, QEMU 6.2.0 tries to set
+ * this MSR to 0 if TSC scaling is not enabled.
+ * Ignore that value as well.
+ */
+ if (data != 0 && data != svm->tsc_ratio_msr)
+ return 1;
+ break;
+ }
if (data & TSC_RATIO_RSVD)
return 1;
@@ -3291,21 +3314,55 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
-static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
- int trig_mode, int vector)
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vector)
{
- struct kvm_vcpu *vcpu = apic->vcpu;
+ /*
+ * vcpu->arch.apicv_active must be read after vcpu->mode.
+ * Pairs with smp_store_release in vcpu_enter_guest.
+ */
+ bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
- if (svm_deliver_avic_intr(vcpu, vector)) {
- kvm_lapic_set_irr(vector, apic);
+ if (!READ_ONCE(vcpu->arch.apicv_active)) {
+ /* Process the interrupt via inject_pending_event */
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
+ return;
+ }
+
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+ if (in_guest_mode) {
+ /*
+ * Signal the doorbell to tell hardware to inject the IRQ. If
+ * the vCPU exits the guest before the doorbell chimes, hardware
+ * will automatically process AVIC interrupts at the next VMRUN.
+ */
+ avic_ring_doorbell(vcpu);
} else {
- trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
- trig_mode, vector);
+ /*
+ * Wake the vCPU if it was blocking. KVM will then detect the
+ * pending IRQ when checking if the vCPU has a wake event.
+ */
+ kvm_vcpu_wake_up(vcpu);
}
}
+static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ kvm_lapic_set_irr(vector, apic);
+
+ /*
+ * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
+ * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
+ * the read of guest_mode. This guarantees that either VMRUN will see
+ * and process the new vIRR entry, or that svm_complete_interrupt_delivery
+ * will signal the doorbell if the CPU has already entered the guest.
+ */
+ smp_mb__after_atomic();
+ svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
+}
+
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3353,11 +3410,13 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_nmi_blocked(vcpu))
+ return 0;
+
/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
return -EBUSY;
-
- return !svm_nmi_blocked(vcpu);
+ return 1;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -3409,9 +3468,13 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
struct vcpu_svm *svm = to_svm(vcpu);
+
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_interrupt_blocked(vcpu))
+ return 0;
+
/*
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
* e.g. if the IRQ arrived asynchronously after checking nested events.
@@ -3419,7 +3482,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
return -EBUSY;
- return !svm_interrupt_blocked(vcpu);
+ return 1;
}
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
@@ -4150,11 +4213,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_smi_blocked(vcpu))
+ return 0;
+
/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
return -EBUSY;
- return !svm_smi_blocked(vcpu);
+ return 1;
}
static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@@ -4248,11 +4314,18 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
* Enter the nested guest now
*/
+ vmcb_mark_all_dirty(svm->vmcb01.ptr);
+
vmcb12 = map.hva;
nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+ if (ret)
+ goto unmap_save;
+
+ svm->nested.nested_run_pending = 1;
+
unmap_save:
kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
@@ -4637,6 +4710,7 @@ static __init void svm_set_cpu_caps(void)
/* CPUID 0x80000001 and 0x8000000A (SVM features) */
if (nested) {
kvm_cpu_cap_set(X86_FEATURE_SVM);
+ kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
if (nrips)
kvm_cpu_cap_set(X86_FEATURE_NRIPS);
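The delivery split above relies on a store-buffering pattern: the sender publishes the vIRR bit and then reads vcpu->mode, while the entry path publishes IN_GUEST_MODE and then (via VMRUN) consumes the vIRR, each side with a full barrier between its store and its load, so at least one side is guaranteed to observe the other's write. An illustrative C11 restatement of the shape (names are mine, not KVM's):

#include <stdatomic.h>
#include <stdbool.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static atomic_int mode = OUTSIDE_GUEST_MODE;
static atomic_bool irr_bit;

static void ring_doorbell(void) { /* hardware injects the IRQ */ }
static void kick_vcpu(void)     { /* wake or kick the vCPU */ }

static void sender(void)                   /* ~ svm_deliver_interrupt() */
{
	atomic_store(&irr_bit, true);              /* kvm_lapic_set_irr() */
	atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
	if (atomic_load(&mode) == IN_GUEST_MODE)
		ring_doorbell();
	else
		kick_vcpu();
}

static void guest_entry(void)              /* ~ vcpu_enter_guest() */
{
	atomic_store(&mode, IN_GUEST_MODE);
	atomic_thread_fence(memory_order_seq_cst); /* barrier before VMRUN */
	if (atomic_load(&irr_bit)) {
		/* VMRUN sees the vIRR entry and injects the interrupt */
	}
}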
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 73525353e424..fa98d6844728 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -489,6 +489,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write);
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vec);
/* nested.c */
@@ -556,17 +558,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
@@ -583,12 +574,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_ring_doorbell(struct kvm_vcpu *vcpu);
/* sev.c */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6c27bd0c89e1..efda5e4d6247 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7659,6 +7659,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
if (ret)
return ret;
+ vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
}
return 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7131d735b1ef..82a9dcd8c67f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -984,6 +984,18 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
+static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
+}
+
+#ifdef CONFIG_X86_64
+static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
+{
+ return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
+}
+#endif
+
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0 = xcr;
@@ -1003,7 +1015,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
* saving. However, xcr0 bit 0 is always set, even if the
* emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
*/
- valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
+ valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
if (xcr0 & ~valid_bits)
return 1;
@@ -2351,10 +2363,12 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
return tsc;
}
+#ifdef CONFIG_X86_64
static inline int gtod_is_based_on_tsc(int mode)
{
return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
}
+#endif
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
@@ -3706,8 +3720,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_cpuid_has(vcpu, X86_FEATURE_XFD))
return 1;
- if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
- vcpu->arch.guest_supported_xcr0))
+ if (data & ~kvm_guest_supported_xfd(vcpu))
return 1;
fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
@@ -3717,8 +3730,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_cpuid_has(vcpu, X86_FEATURE_XFD))
return 1;
- if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
- vcpu->arch.guest_supported_xcr0))
+ if (data & ~kvm_guest_supported_xfd(vcpu))
return 1;
vcpu->arch.guest_fpu.xfd_err = data;
@@ -4233,6 +4245,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
case KVM_CAP_VCPU_ATTRIBUTES:
case KVM_CAP_SYS_ATTRIBUTES:
+ case KVM_CAP_ENABLE_CAP:
r = 1;
break;
case KVM_CAP_EXIT_HYPERCALL:
@@ -8942,6 +8955,13 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
return -KVM_EOPNOTSUPP;
+ /*
+	 * When the TSC is in permanent catchup mode, guests won't be able to
+	 * use the pvclock_read_retry loop to get a consistent view of the pvclock.
+ */
+ if (vcpu->arch.tsc_always_catchup)
+ return -KVM_EOPNOTSUPP;
+
if (!kvm_get_walltime_and_clockread(&ts, &cycle))
return -KVM_EOPNOTSUPP;
@@ -9983,7 +10003,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* result in virtual interrupt delivery.
*/
local_irq_disable();
- vcpu->mode = IN_GUEST_MODE;
+
+ /* Store vcpu->apicv_active before vcpu->mode. */
+ smp_store_release(&vcpu->mode, IN_GUEST_MODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
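
The kvm_guest_supported_xfd() hunks above replace an open-coded mask expression with a helper, but the check itself is a plain supported-bits validation. A standalone toy illustrating the pattern (the bit value mirrors XFEATURE_MASK_USER_DYNAMIC, i.e. the AMX tile-data feature bit, hard-coded here purely for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define XFEATURE_MASK_USER_DYNAMIC (1ULL << 18)  /* XTILE_DATA */

    /* Returns 1 (i.e. inject #GP) if the write enables unsupported bits. */
    static int check_xfd_write(uint64_t data, uint64_t guest_xcr0)
    {
            uint64_t supported = guest_xcr0 & XFEATURE_MASK_USER_DYNAMIC;

            return (data & ~supported) ? 1 : 0;
    }

    int main(void)
    {
            printf("%d\n", check_xfd_write(1ULL << 18, 1ULL << 18)); /* 0: ok    */
            printf("%d\n", check_xfd_write(1ULL << 18, 0));          /* 1: fault */
            return 0;
    }
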
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index bad57535fad0..74be1fda58e3 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -133,32 +133,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
struct kvm_vcpu_xen *vx = &v->arch.xen;
+ struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
+ struct kvm_memslots *slots = kvm_memslots(v->kvm);
+ bool atomic = (state == RUNSTATE_runnable);
uint64_t state_entry_time;
- unsigned int offset;
+ int __user *user_state;
+ uint64_t __user *user_times;
kvm_xen_update_runstate(v, state);
if (!vx->runstate_set)
return;
- BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+ if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
+ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
+ return;
+
+ /* We made sure it fits in a single page */
+ BUG_ON(!ghc->memslot);
+
+ if (atomic)
+ pagefault_disable();
- offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
-#ifdef CONFIG_X86_64
/*
- * The only difference is alignment of uint64_t in 32-bit.
- * So the first field 'state' is accessed directly using
- * offsetof() (where its offset happens to be zero), while the
- * remaining fields which are all uint64_t, start at 'offset'
- * which we tweak here by adding 4.
+	 * The only difference between the 32-bit and 64-bit versions of the
+	 * runstate struct is the alignment of uint64_t in 32-bit, which
+	 * means that the 64-bit version has an additional 4 bytes of
+	 * padding after the first field 'state'.
+	 *
+	 * So we use 'int __user *user_state' to point to the state field,
+	 * and 'uint64_t __user *user_times' for runstate_entry_time, so
+	 * that the actual array of time[] in each state starts at user_times[1].
*/
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
+ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
+ user_state = (int __user *)ghc->hva;
+
+ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct compat_vcpu_runstate_info,
+ state_entry_time));
+#ifdef CONFIG_X86_64
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
offsetof(struct compat_vcpu_runstate_info, time) + 4);
if (v->kvm->arch.xen.long_mode)
- offset = offsetof(struct vcpu_runstate_info, state_entry_time);
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct vcpu_runstate_info,
+ state_entry_time));
#endif
/*
* First write the updated state_entry_time at the appropriate
@@ -172,10 +197,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
sizeof(state_entry_time));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ if (__put_user(state_entry_time, user_times))
+ goto out;
smp_wmb();
/*
@@ -189,11 +212,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
sizeof(vx->current_runstate));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->current_runstate,
- offsetof(struct vcpu_runstate_info, state),
- sizeof(vx->current_runstate)))
- return;
+ if (__put_user(vx->current_runstate, user_state))
+ goto out;
/*
* Write the actual runstate times immediately after the
@@ -208,24 +228,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
sizeof(vx->runstate_times));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->runstate_times[0],
- offset + sizeof(u64),
- sizeof(vx->runstate_times)))
- return;
-
+ if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
+ goto out;
smp_wmb();
/*
* Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
* runstate_entry_time field.
*/
-
state_entry_time &= ~XEN_RUNSTATE_UPDATE;
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ __put_user(state_entry_time, user_times);
+ smp_wmb();
+
+ out:
+ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+ if (atomic)
+ pagefault_enable();
}
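
The atomic path above hinges on pagefault_disable(): when the update runs from the scheduler side (state == RUNSTATE_runnable) it must not sleep, so user accesses are made to fail fast instead of faulting pages in. A condensed sketch of the pattern, with val/uptr standing in for the real fields:

    if (atomic)
            pagefault_disable();    /* __put_user() fails instead of sleeping */

    if (__put_user(val, uptr))      /* page not resident: skip the update */
            goto out;
    smp_wmb();                      /* publish in guest-visible order */
    out:
    if (atomic)
            pagefault_enable();
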
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
@@ -443,6 +462,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_info_cache,
data->u.gpa,
@@ -460,6 +485,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_time_info_cache,
data->u.gpa,
@@ -481,6 +512,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.runstate_cache,
data->u.gpa,
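
All three attribute setters now share the same guard: the gfn-to-hva cache covers exactly one page, so the mapped structure must not straddle a page boundary. Standalone form of the check:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* True if [gpa, gpa + len) stays within a single page. */
    static bool fits_in_one_page(uint64_t gpa, size_t len)
    {
            return ((gpa & ~PAGE_MASK) + len) <= PAGE_SIZE;
    }
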
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0c612a911696..36a66e97e3c2 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7018,6 +7018,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
spin_unlock_irq(&bfqd->lock);
#endif
+ wbt_enable_default(bfqd->queue);
+
kfree(bfqd);
}
diff --git a/block/blk-core.c b/block/blk-core.c
index d93e3bb9a769..1039515c99d6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -284,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
-void blk_set_queue_dying(struct request_queue *q)
-{
- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
- blk_queue_start_drain(q);
-}
-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -308,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q)
WARN_ON_ONCE(blk_queue_registered(q));
/* mark @q DYING, no new request or merges will be allowed afterwards */
- blk_set_queue_dying(q);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+ blk_queue_start_drain(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
diff --git a/block/blk-map.c b/block/blk-map.c
index 4526adde0156..c7f71d83eff1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len)
bytes = len;
- page = alloc_page(GFP_NOIO | gfp_mask);
+ page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
if (!page)
goto cleanup;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1adfe4824ef5..d69ca91fbc8b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -736,6 +736,10 @@ static void blk_complete_request(struct request *req)
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ bio->bi_iter.bi_sector = req->__sector;
+
if (!is_flush)
bio_endio(bio);
bio = next;
diff --git a/block/elevator.c b/block/elevator.c
index ec98aed39c4f..482df2a350fc 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -525,8 +525,6 @@ void elv_unregister_queue(struct request_queue *q)
kobject_del(&e->kobj);
e->registered = 0;
- /* Re-enable throttling in case elevator disabled it */
- wbt_enable_default(q);
}
}
diff --git a/block/fops.c b/block/fops.c
index 4f59e0f5bf30..a18e7fbd97b8 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -289,6 +289,8 @@ static void blkdev_bio_end_io_async(struct bio *bio)
struct kiocb *iocb = dio->iocb;
ssize_t ret;
+ WRITE_ONCE(iocb->private, NULL);
+
if (likely(!bio->bi_status)) {
ret = dio->size;
iocb->ki_pos += ret;
diff --git a/block/genhd.c b/block/genhd.c
index 626c8406f21a..9eca1f7d35c9 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -549,6 +549,20 @@ out_free_ext_minor:
EXPORT_SYMBOL(device_add_disk);
/**
+ * blk_mark_disk_dead - mark a disk as dead
+ * @disk: disk to mark as dead
+ *
+ * Mark the disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * to this disk.
+ */
+void blk_mark_disk_dead(struct gendisk *disk)
+{
+ set_bit(GD_DEAD, &disk->state);
+ blk_queue_start_drain(disk->queue);
+}
+EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
+
+/**
* del_gendisk - remove the gendisk
* @disk: the struct gendisk to remove
*
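
blk_mark_disk_dead() pairs the GD_DEAD flag with queue draining; callers that used to mark the request queue dying now mark the disk instead (see the mtip32xx, rbd and xen-blkfront conversions below). A hypothetical surprise-removal path using it:

    /* 'struct mydrv' and its members are made up for illustration. */
    static void mydrv_surprise_remove(struct mydrv *drv)
    {
            blk_mark_disk_dead(drv->disk);  /* fail new I/O, start draining */
            del_gendisk(drv->disk);         /* then tear the disk down */
    }
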
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index e1ea18536a5f..c8289b7a85ba 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -25,12 +25,9 @@ struct alg_type_list {
struct list_head list;
};
-static atomic_long_t alg_memory_allocated;
-
static struct proto alg_proto = {
.name = "ALG",
.owner = THIS_MODULE,
- .memory_allocated = &alg_memory_allocated,
.obj_size = sizeof(struct alg_sock),
};
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 86560a28751b..f8e9fa82cb9b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
+	/* T40 cannot handle the C3 idle state */
+ { set_max_cstate, "IBM ThinkPad T40", {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
+ (void *)2},
{},
};
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 0741a4933f62..34600b5b9d8e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -400,7 +400,7 @@ int __init_or_acpilib acpi_table_parse_entries_array(
acpi_get_table(id, instance, &table_header);
if (!table_header) {
- pr_warn("%4.4s not present\n", id);
+ pr_debug("%4.4s not present\n", id);
return -ENODEV;
}
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 7abc7e04f656..6fa4a2faf49c 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -920,6 +920,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_write_config_byte(dev, 0x5a, irqmask);
/*
+ * HPT371 chips physically have only one channel, the secondary one,
+ * but the primary channel registers do exist! Go figure...
+ * So, we manually disable the non-existing channel here
+ * (if the BIOS hasn't done this already).
+ */
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
+ u8 mcr1;
+
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ mcr1 &= ~0x04;
+ pci_write_config_byte(dev, 0x50, mcr1);
+ }
+
+ /*
* default to pci clock. make sure MA15/16 are set to output
* to prevent drives having problems with 40-pin cables. Needed
* for some drives such as IBM-DTLA which will not enter ready
@@ -950,14 +964,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if ((freq >> 12) != 0xABCDE) {
int i;
- u8 sr;
+ u16 sr;
u32 total = 0;
dev_warn(&dev->dev, "BIOS has not set timing clocks\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
- pci_read_config_byte(dev, 0x78, &sr);
+ pci_read_config_word(dev, 0x78, &sr);
total += sr & 0x1FF;
udelay(15);
}
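
The widening of sr from u8 to u16 matters because the subsequent mask keeps nine bits: a byte-sized config read silently drops bit 8 and skews the averaged clock measurement. A toy program showing the truncation (values invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t reg = 0x1F0;             /* 9-bit reading with bit 8 set */
            uint8_t  old = (uint8_t)reg;      /* what a byte-wide read kept   */

            printf("u8:  %u\n", old & 0x1FF); /* 240 -- bit 8 lost */
            printf("u16: %u\n", reg & 0x1FF); /* 496 -- correct    */
            return 0;
    }
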
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9eaaff2f556c..f47cab21430f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -629,6 +629,9 @@ re_probe:
drv->remove(dev);
devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -1209,6 +1212,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
devres_release_all(dev);
arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d2656581a608..4a446259a184 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
ret = regmap_write(map, reg, d->mask_buf[i]);
if (d->chip->clear_ack) {
if (d->chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- d->mask_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~d->mask_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
@@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- data->status_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~data->status_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->status_buf[i] & d->mask_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- (d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~(d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
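
All three hunks make the same change: after acking, the register is returned to its idle value outright (all-ones when acks are inverted, zero otherwise) rather than to the complement of the acked bits, which could leave unrelated bits in a stale state. A condensed sketch of the resulting sequence:

    ret = regmap_write(map, reg, status);   /* ack the latched interrupts */
    if (!ret && chip->clear_ack)            /* then reset the ack register */
            ret = regmap_write(map, reg, chip->ack_invert ? UINT_MAX : 0);
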
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 150012ffb387..19fe19eaa50e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -79,6 +79,7 @@
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
+#include <linux/statfs.h>
#include "loop.h"
@@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo)
granularity = 0;
} else {
+ struct kstatfs sbuf;
+
max_discard_sectors = UINT_MAX >> 9;
- granularity = inode->i_sb->s_blocksize;
+ if (!vfs_statfs(&file->f_path, &sbuf))
+ granularity = sbuf.f_bsize;
+ else
+ max_discard_sectors = 0;
}
if (max_discard_sectors) {
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index e6005c232328..2b588b62cbbb 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}
- blk_set_queue_dying(dd->queue);
+ blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
/* Clean up the block layer. */
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4203cdab8abf..b844432bad20 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
* IO to complete/fail.
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
- blk_set_queue_dying(rbd_dev->disk->queue);
+ blk_mark_disk_dead(rbd_dev->disk);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ccd0dd0c6b83..ca71a0585333 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2126,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info)
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- blk_set_queue_dying(info->rq);
+ blk_mark_disk_dead(info->gd);
set_capacity(info->gd, 0);
for_each_rinfo(info, rinfo, i) {
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 744d136b721b..15d61793f53b 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
},
[JZ4725B_CLK_I2S] = {
- "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ "i2s", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 31, 1 },
.div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
- .gate = { CGU_REG_CLKGR, 6 },
},
[JZ4725B_CLK_SPI] = {
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 71aa630fa4bd..f09499999eb3 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -108,42 +108,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
{ .hw = &gpll4.clkr.hw },
};
-static struct clk_rcg2 system_noc_clk_src = {
- .cmd_rcgr = 0x0120,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "system_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 config_noc_clk_src = {
- .cmd_rcgr = 0x0150,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "config_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 periph_noc_clk_src = {
- .cmd_rcgr = 0x0190,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "periph_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
@@ -1150,8 +1114,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1435,8 +1397,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1764,8 +1724,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_lpass_q6_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1778,8 +1736,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1807,9 +1763,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_cfg_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1822,9 +1775,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_mstr_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1854,9 +1804,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_slv_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1884,9 +1831,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_cfg_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1899,9 +1843,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_mstr_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1930,9 +1871,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_slv_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1960,8 +1898,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1989,9 +1925,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2004,9 +1937,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2034,9 +1964,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2064,9 +1991,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2124,8 +2048,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2153,8 +2075,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2198,8 +2118,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_0_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2213,8 +2131,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_1_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2243,8 +2159,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_0_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2258,8 +2172,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_1_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2364,8 +2276,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2488,8 +2398,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2503,8 +2411,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2547,9 +2453,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL4_EARLY] = &gpll4_early.clkr,
[GPLL4] = &gpll4.clkr,
- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
@@ -2696,6 +2599,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+
+ /*
+ * The following clocks should NOT be managed by this driver, but they once were
+	 * mistakenly added. Now they are only here to indicate that they are not defined
+ * on purpose, even though the names will stay in the header file (for ABI sanity).
+ */
+ [CONFIG_NOC_CLK_SRC] = NULL,
+ [PERIPH_NOC_CLK_SRC] = NULL,
+ [SYSTEM_NOC_CLK_SRC] = NULL,
};
static struct gdsc *gcc_msm8994_gdscs[] = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b8d95536ee22..80f535cc8a75 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1518,6 +1518,10 @@ static int cpufreq_online(unsigned int cpu)
kobject_uevent(&policy->kobj, KOBJ_ADD);
+ /* Callback for handling stuff after policy is ready */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 05f3d7876e44..effbb680b453 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -388,7 +388,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
- IRQF_ONESHOT, data->irq_name, data);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
if (ret) {
dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
return 0;
@@ -542,6 +542,14 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
+static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+
+ if (data->throttle_irq >= 0)
+ enable_irq(data->throttle_irq);
+}
+
static struct freq_attr *qcom_cpufreq_hw_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_scaling_boost_freqs,
@@ -561,6 +569,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
.attr = qcom_cpufreq_hw_attr,
+ .ready = qcom_cpufreq_ready,
};
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
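
The combination above is an ordering fix: the IRQ is requested with IRQF_NO_AUTOEN so it stays masked while the policy is still half-initialised, and the new ->ready() hook (invoked by cpufreq_online() once the policy is live, per the cpufreq.c hunk earlier) unmasks it. Sketch of the two halves:

    /* probe/init path: register the handler but leave the line masked */
    ret = request_threaded_irq(irq, NULL, handler,
                               IRQF_ONESHOT | IRQF_NO_AUTOEN, name, data);

    /* ->ready(policy) callback, after cpufreq_online() has finished: */
    if (data->throttle_irq >= 0)
            enable_irq(data->throttle_irq);
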
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 3e07f961e2f3..cb1bacb5a42b 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -176,6 +176,20 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
array->base.error = PENDING_ERROR;
+ /*
+ * dma_fence_array objects should never contain any other fence
+ * containers or otherwise we run into recursion and potential kernel
+ * stack overflow on operations on the dma_fence_array.
+ *
+ * The correct way of handling this is to flatten out the array by the
+ * caller instead.
+ *
+ * Enforce this here by checking that we don't create a dma_fence_array
+ * with any container inside.
+ */
+ while (num_fences--)
+ WARN_ON(dma_fence_is_container(fences[num_fences]));
+
return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 1b4cb3e5cec9..06f8ef97c6e8 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -148,8 +148,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
dma_fence_get(&head->base);
dma_fence_chain_for_each(fence, &head->base) {
- struct dma_fence_chain *chain = to_dma_fence_chain(fence);
- struct dma_fence *f = chain ? chain->fence : fence;
+ struct dma_fence *f = dma_fence_chain_contained(fence);
dma_fence_get(f);
if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
@@ -165,8 +164,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
dma_fence_chain_for_each(fence, fence) {
- struct dma_fence_chain *chain = to_dma_fence_chain(fence);
- struct dma_fence *f = chain ? chain->fence : fence;
+ struct dma_fence *f = dma_fence_chain_contained(fence);
if (!dma_fence_is_signaled(f)) {
dma_fence_put(fence);
@@ -254,5 +252,14 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
dma_fence_init(&chain->base, &dma_fence_chain_ops,
&chain->lock, context, seqno);
+
+ /*
+ * Chaining dma_fence_chain container together is only allowed through
+ * the prev fence and not through the contained fence.
+ *
+ * The correct way of handling this is to flatten out the fence
+ * structure into a dma_fence_array by the caller instead.
+ */
+ WARN_ON(dma_fence_is_chain(fence));
}
EXPORT_SYMBOL(dma_fence_chain_init);
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 6dd9a40b55d4..b51416405e86 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -256,6 +256,11 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
dma_resv_assert_held(obj);
+ /* Drivers should not add containers here, instead add each fence
+ * individually.
+ */
+ WARN_ON(dma_fence_is_container(fence));
+
fobj = dma_resv_shared_list(obj);
count = fobj->shared_count;
@@ -323,12 +328,8 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
-/**
- * dma_resv_iter_restart_unlocked - restart the unlocked iterator
- * @cursor: The dma_resv_iter object to restart
- *
- * Restart the unlocked iteration by initializing the cursor object.
- */
+/* Restart the iterator by initializing all the necessary fields, but not the
+ * relation to the dma_resv object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
cursor->seq = read_seqcount_begin(&cursor->obj->seq);
@@ -344,14 +345,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
cursor->is_restarted = true;
}
-/**
- * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
- * @cursor: cursor to record the current position
- *
- * Return all the fences in the dma_resv object which are not yet signaled.
- * The returned fence has an extra local reference so will stay alive.
- * If a concurrent modify is detected the whole iteration is started over again.
- */
+/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
struct dma_resv *obj = cursor->obj;
@@ -387,6 +381,12 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
* dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
* @cursor: the cursor with the current position
*
+ * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
+ *
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_first() whenever possible.
+ *
* Returns the first fence from an unlocked dma_resv obj.
*/
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
@@ -406,6 +406,10 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
* dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
* @cursor: the cursor with the current position
*
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_next() whenever possible.
+ *
* Returns the next fence from an unlocked dma_resv obj.
*/
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
@@ -431,6 +435,8 @@ EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
* dma_resv_iter_first - first fence from a locked dma_resv object
* @cursor: cursor to record the current position
*
+ * Subsequent fences are iterated with dma_resv_iter_next().
+ *
* Return the first fence in the dma_resv object while holding the
* &dma_resv.lock.
*/
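
The added kerneldoc keeps warning about one pitfall: an unlocked walk may restart whenever a concurrent writer touches the object, so any state accumulated across fences must be thrown away on restart. A usage sketch (fence_cost() is a made-up per-fence metric):

    struct dma_resv_iter cursor;
    struct dma_fence *fence;
    u64 total = 0;

    dma_resv_iter_begin(&cursor, obj, true);    /* include shared fences */
    dma_resv_for_each_fence_unlocked(&cursor, fence) {
            if (dma_resv_iter_is_restarted(&cursor))
                    total = 0;                  /* drop the aborted pass */
            total += fence_cost(fence);
    }
    dma_resv_iter_end(&cursor);
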
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a1da2b4b6d73..1476156af74b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
__func__, atchan->irq_status);
if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
- !(atchan->irq_status & error_mask))
+ !(atchan->irq_status & error_mask)) {
+ spin_unlock_irq(&atchan->lock);
return;
+ }
if (atchan->irq_status & error_mask)
at_xdmac_handle_error(atchan);
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 8a6bf291a73f..daafea5bc35d 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
- goto e_dma_alloc;
+ goto e_destroy_pool;
}
cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
/* Request an irq */
ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
- if (ret)
- goto e_pool;
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_free_dma;
+ }
/* Update the device registers with queue information. */
cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
/* Register the DMA engine support */
ret = pt_dmaengine_register(pt);
if (ret)
- goto e_dmaengine;
+ goto e_free_irq;
/* Set up debugfs entries */
ptdma_debugfs_setup(pt);
return 0;
-e_dmaengine:
+e_free_irq:
free_irq(pt->pt_irq, pt);
-e_dma_alloc:
+e_free_dma:
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
-e_pool:
- dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
dma_pool_destroy(pt->cmd_q.dma_pool);
return ret;
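
The relabelling restores the conventional unwind ladder: labels are named for the resource they release, and each failure jumps to the label that undoes exactly what had succeeded so far, in reverse order of setup. Generic shape of the convention (setup_*/teardown_* are placeholders):

    ret = setup_a();
    if (ret)
            return ret;
    ret = setup_b();
    if (ret)
            goto undo_a;
    ret = setup_c();
    if (ret)
            goto undo_b;
    return 0;

    undo_b:
            teardown_b();
    undo_a:
            teardown_a();
            return ret;
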
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 481f45c77ce1..13d12d660cc2 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
if (ret < 0)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 158e5e7defae..b26ed690f03c 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
- if (ret < 0)
+ if (ret < 0) {
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+ pm_runtime_put(schan->dev);
+ }
pm_runtime_barrier(schan->dev);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index a42164389ebc..d5d55732adba 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
if (ret)
- goto err_clk;
+ goto pm_disable;
return 0;
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_clk:
clk_disable_unprepare(stm32_dmamux->clk);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9d9aabdec96b..f5677d81bd2d 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
else
return (char *)ptr;
- r = (unsigned long)p % align;
+ r = (unsigned long)ptr % align;
if (r == 0)
return (char *)ptr;
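
The fix is one character but easy to miss: p is the address of the holder variable, ptr is the candidate allocation being aligned, and the modulo must act on the latter. A toy reproduction:

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            void *ptr = buf + 3;            /* deliberately misaligned */
            void **p = &ptr;
            unsigned long align = 8;

            printf("buggy: %lu\n", (unsigned long)p % align);   /* offset of 'p' itself */
            printf("fixed: %lu\n", (unsigned long)ptr % align); /* offset of the data   */
            return 0;
    }
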
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index a4c4e4584f5b..099e358d2491 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
- switch (type) {
- case IRQ_TYPE_EDGE_BOTH:
+ if (type == IRQ_TYPE_EDGE_BOTH) {
if (bank->gpio_type == GPIO_TYPE_V2) {
- bank->toggle_edge_mode &= ~mask;
rockchip_gpio_writel_bit(bank, d->hwirq, 1,
bank->gpio_regs->int_bothedge);
goto out;
@@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
else
polarity |= mask;
}
- break;
- case IRQ_TYPE_EDGE_RISING:
- bank->toggle_edge_mode &= ~mask;
- level |= mask;
- polarity |= mask;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- bank->toggle_edge_mode &= ~mask;
- level |= mask;
- polarity &= ~mask;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- bank->toggle_edge_mode &= ~mask;
- level &= ~mask;
- polarity |= mask;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- bank->toggle_edge_mode &= ~mask;
- level &= ~mask;
- polarity &= ~mask;
- break;
- default:
- ret = -EINVAL;
- goto out;
+ } else {
+ if (bank->gpio_type == GPIO_TYPE_V2) {
+ rockchip_gpio_writel_bit(bank, d->hwirq, 0,
+ bank->gpio_regs->int_bothedge);
+ } else {
+ bank->toggle_edge_mode &= ~mask;
+ }
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ level |= mask;
+ polarity |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ level |= mask;
+ polarity &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ level &= ~mask;
+ polarity |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ level &= ~mask;
+ polarity &= ~mask;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
}
rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 34b36a8c035f..8d298beffd86 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -343,9 +343,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
return offset + pin;
}
+#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio)
+
static void tegra186_irq_ack(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
base = tegra186_gpio_get_base(gpio, data->hwirq);
@@ -357,7 +360,8 @@ static void tegra186_irq_ack(struct irq_data *data)
static void tegra186_irq_mask(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@@ -372,7 +376,8 @@ static void tegra186_irq_mask(struct irq_data *data)
static void tegra186_irq_unmask(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@@ -387,7 +392,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3859911b61e9..a3d14277f17c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3147,6 +3147,16 @@ int gpiod_to_irq(const struct gpio_desc *desc)
return retirq;
}
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+ if (gc->irq.chip) {
+ /*
+ * Avoid race condition with other code, which tries to lookup
+ * an IRQ before the irqchip has been properly registered,
+ * i.e. while gpiochip is still being brought up.
+ */
+ return -EPROBE_DEFER;
+ }
+#endif
return -ENXIO;
}
EXPORT_SYMBOL_GPL(gpiod_to_irq);
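
For consumers the visible change is the error code: probing against a gpiochip whose irqchip has not finished registering now yields -EPROBE_DEFER, which the driver core treats as "retry later" rather than a hard failure. Typical caller pattern (sketch):

    irq = gpiod_to_irq(desc);
    if (irq < 0)
            return irq;     /* -EPROBE_DEFER: the probe is retried once
                             * the gpiochip registers its irqchip */
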
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index dfdd3ec5f793..f1422bee3dcc 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -71,6 +71,7 @@ config DRM_DEBUG_SELFTEST
select DRM_DP_HELPER
select DRM_LIB_RANDOM
select DRM_KMS_HELPER
+ select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
default n
help
@@ -403,6 +404,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/solomon/Kconfig"
+
source "drivers/gpu/drm/sprd/Kconfig"
config DRM_HYPERV
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 8675c2af7ae1..c2ef5f9fce54 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -132,4 +132,5 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d990ebfd2afc..37ff8cf5bbed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -808,6 +808,8 @@ struct ip_discovery_top;
#define AMDGPU_RESET_MAGIC_NUM 64
#define AMDGPU_MAX_DF_PERFMONS 4
#define AMDGPU_PRODUCT_NAME_LEN 64
+struct amdgpu_reset_domain;
+
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -1043,9 +1045,7 @@ struct amdgpu_device {
bool in_s4;
bool in_s0ix;
- atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;
- struct rw_semaphore reset_sem;
struct amdgpu_doorbell_index doorbell_index;
struct mutex notifier_lock;
@@ -1094,6 +1094,8 @@ struct amdgpu_device {
struct ip_discovery_top *ip_top;
+ struct amdgpu_reset_domain *reset_domain;
+
struct mutex benchmark_mutex;
/* reset dump register */
@@ -1292,6 +1294,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job* job);
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+ struct amdgpu_job *job);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
@@ -1478,8 +1482,6 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
return adev->gmc.tmz_enabled;
}
-static inline int amdgpu_in_reset(struct amdgpu_device *adev)
-{
- return atomic_read(&adev->in_gpu_reset);
-}
+int amdgpu_in_reset(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 10b9e99c8941..e762e45b7b85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -312,7 +312,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
}
total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
- used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+ used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 9eb9b440bd43..426b63e4f1f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -37,6 +37,8 @@
#include "amdgpu_fw_attestation.h"
#include "amdgpu_umr.h"
+#include "amdgpu_reset.h"
+
#if defined(CONFIG_DEBUG_FS)
/**
@@ -1284,7 +1286,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
}
/* Avoid accidently unparking the sched thread during GPU reset */
- r = down_write_killable(&adev->reset_sem);
+ r = down_write_killable(&adev->reset_domain->sem);
if (r)
return r;
@@ -1313,7 +1315,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
kthread_unpark(ring->sched.thread);
}
- up_write(&adev->reset_sem);
+ up_write(&adev->reset_domain->sem);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1543,7 +1545,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
return -ENOMEM;
/* Avoid accidently unparking the sched thread during GPU reset */
- r = down_read_killable(&adev->reset_sem);
+ r = down_read_killable(&adev->reset_domain->sem);
if (r)
goto pro_end;
@@ -1586,7 +1588,7 @@ failure:
/* restart the scheduler */
kthread_unpark(ring->sched.thread);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
@@ -1649,23 +1651,23 @@ static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
return 0;
memset(reg_offset, 0, 12);
- ret = down_read_killable(&adev->reset_sem);
+ ret = down_read_killable(&adev->reset_domain->sem);
if (ret)
return ret;
for (i = 0; i < adev->num_regs; i++) {
sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
return -EFAULT;
len += strlen(reg_offset);
- ret = down_read_killable(&adev->reset_sem);
+ ret = down_read_killable(&adev->reset_domain->sem);
if (ret)
return ret;
}
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
*pos += len;
return len;
@@ -1697,13 +1699,13 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
i++;
} while (len < size);
- ret = down_write_killable(&adev->reset_sem);
+ ret = down_write_killable(&adev->reset_domain->sem);
if (ret)
goto error_free;
swap(adev->reset_dump_reg_list, tmp);
adev->num_regs = i;
- up_write(&adev->reset_sem);
+ up_write(&adev->reset_domain->sem);
ret = size;
error_free:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6113ddc765a7..ca854626a108 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -426,10 +426,10 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
* the lock.
*/
if (in_task()) {
- if (down_read_trylock(&adev->reset_sem))
- up_read(&adev->reset_sem);
+ if (down_read_trylock(&adev->reset_domain->sem))
+ up_read(&adev->reset_domain->sem);
else
- lockdep_assert_held(&adev->reset_sem);
+ lockdep_assert_held(&adev->reset_domain->sem);
}
#endif
return false;
@@ -455,9 +455,9 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
if ((reg * 4) < adev->rmmio_size) {
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
- down_read_trylock(&adev->reset_sem)) {
+ down_read_trylock(&adev->reset_domain->sem)) {
ret = amdgpu_kiq_rreg(adev, reg);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
} else {
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -540,9 +540,9 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
if ((reg * 4) < adev->rmmio_size) {
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
- down_read_trylock(&adev->reset_sem)) {
+ down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_kiq_wreg(adev, reg, v);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -2336,6 +2336,49 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
return r;
}
+static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
+{
+ long timeout;
+ int r, i;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ /* No need to setup the GPU scheduler for rings that don't need it */
+ if (!ring || ring->no_scheduler)
+ continue;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ timeout = adev->gfx_timeout;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ timeout = adev->compute_timeout;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ timeout = adev->sdma_timeout;
+ break;
+ default:
+ timeout = adev->video_timeout;
+ break;
+ }
+
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ ring->num_hw_submission, amdgpu_job_hang_limit,
+ timeout, adev->reset_domain->wq,
+ ring->sched_score, ring->name,
+ adev->dev);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+
/**
* amdgpu_device_ip_init - run init for hardware IPs
*
@@ -2447,8 +2490,28 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
- if (adev->gmc.xgmi.num_physical_nodes > 1)
- amdgpu_xgmi_add_device(adev);
+	/*
+	 * In the case of XGMI, grab an extra reference to the reset domain
+	 * for this device.
+	 */
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (amdgpu_xgmi_add_device(adev) == 0) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+ if (!hive->reset_domain ||
+ !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+ r = -ENOENT;
+ goto init_failed;
+ }
+
+ /* Drop the early temporary reset domain we created for device */
+ amdgpu_reset_put_reset_domain(adev->reset_domain);
+ adev->reset_domain = hive->reset_domain;
+ }
+ }
+
+ r = amdgpu_device_init_schedulers(adev);
+ if (r)
+ goto init_failed;
/* Don't init kfd if whole hive need to be reset during init */
if (!adev->gmc.xgmi.pending_reset)
@@ -3548,8 +3611,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
- atomic_set(&adev->in_gpu_reset, 0);
- init_rwsem(&adev->reset_sem);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
mutex_init(&adev->pm.stable_pstate_ctx_lock);
@@ -3636,6 +3697,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
+ /*
+	 * The reset domain needs to be present early, before the XGMI hive is
+	 * discovered (if any) and initialized, as it provides the reset sem and
+	 * in_gpu_reset flag used early on during init.
+ */
+	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
+ if (!adev->reset_domain)
+ return -ENOMEM;
+
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
@@ -3999,6 +4069,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
if (adev->mman.discovery_bin)
amdgpu_discovery_fini(adev);
+ amdgpu_reset_put_reset_domain(adev->reset_domain);
+ adev->reset_domain = NULL;
+
kfree(adev->pci_state);
}
@@ -4648,7 +4721,7 @@ static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
uint32_t reg_value;
int i;
- lockdep_assert_held(&adev->reset_sem);
+ lockdep_assert_held(&adev->reset_domain->sem);
dump_stack();
for (i = 0; i < adev->num_regs; i++) {
@@ -4827,17 +4900,8 @@ end:
return r;
}
-static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
- struct amdgpu_hive_info *hive)
+static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
{
- if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
- return false;
-
- if (hive) {
- down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
- } else {
- down_write(&adev->reset_sem);
- }
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
@@ -4850,56 +4914,12 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
adev->mp1_state = PP_MP1_STATE_NONE;
break;
}
-
- return true;
}
-static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
{
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
- atomic_set(&adev->in_gpu_reset, 0);
- up_write(&adev->reset_sem);
-}
-
-/*
- * to lockup a list of amdgpu devices in a hive safely, if not a hive
- * with multiple nodes, it will be similar as amdgpu_device_lock_adev.
- *
- * unlock won't require roll back.
- */
-static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
-{
- struct amdgpu_device *tmp_adev = NULL;
-
- if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
- if (!hive) {
- dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
- return -ENODEV;
- }
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- if (!amdgpu_device_lock_adev(tmp_adev, hive))
- goto roll_back;
- }
- } else if (!amdgpu_device_lock_adev(adev, hive))
- return -EAGAIN;
-
- return 0;
-roll_back:
- if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
- /*
- * if the lockup iteration break in the middle of a hive,
- * it may means there may has a race issue,
- * or a hive device locked up independently.
- * we may be in trouble and may not, so will try to roll back
- * the lock and give out a warnning.
- */
- dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
- list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- amdgpu_device_unlock_adev(tmp_adev);
- }
- }
- return -EAGAIN;
}
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
@@ -5033,7 +5053,7 @@ retry:
}
/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ * amdgpu_device_gpu_recover_imp - reset the asic and recover scheduler
*
* @adev: amdgpu_device pointer
* @job: which job trigger hang
@@ -5043,7 +5063,7 @@ retry:
* Returns 0 for success or an error on failure.
*/
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
struct amdgpu_job *job)
{
struct list_head device_list, *device_list_handle = NULL;
@@ -5077,26 +5097,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU %s begin!\n",
need_emergency_restart ? "jobs stop":"reset");
- /*
- * Here we trylock to avoid chain of resets executing from
- * either trigger by jobs on different adevs in XGMI hive or jobs on
- * different schedulers for same device while this TO handler is running.
- * We always reset all schedulers for device and all devices for XGMI
- * hive so that should take care of them too.
- */
if (!amdgpu_sriov_vf(adev))
hive = amdgpu_get_xgmi_hive(adev);
- if (hive) {
- if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
- DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
- job ? job->base.id : -1, hive->hive_id);
- amdgpu_put_xgmi_hive(hive);
- if (job && job->vm)
- drm_sched_increase_karma(&job->base);
- return 0;
- }
+ if (hive)
mutex_lock(&hive->hive_lock);
- }
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -5105,22 +5109,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
/*
- * lock the device before we try to operate the linked list
- * if didn't get the device lock, don't touch the linked list since
- * others may iterating it.
- */
- r = amdgpu_device_lock_hive_adev(adev, hive);
- if (r) {
- dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
- job ? job->base.id : -1);
-
- /* even we skipped this reset, still need to set the job to guilty */
- if (job && job->vm)
- drm_sched_increase_karma(&job->base);
- goto skip_recovery;
- }
-
- /*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
@@ -5137,8 +5125,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
+ /* We need to lock the reset domain only once, for both XGMI and single device */
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+ amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+
+ amdgpu_device_set_mp1_state(tmp_adev);
+
/*
* Try to put the audio codec into suspend state
* before gpu reset started.
@@ -5290,21 +5286,55 @@ skip_sched_resume:
if (audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
- amdgpu_device_unlock_adev(tmp_adev);
+
+ amdgpu_device_unset_mp1_state(tmp_adev);
}
-skip_recovery:
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+
if (hive) {
- atomic_set(&hive->in_reset, 0);
mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
}
- if (r && r != -EAGAIN)
+ if (r)
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
+struct amdgpu_recover_work_struct {
+ struct work_struct base;
+ struct amdgpu_device *adev;
+ struct amdgpu_job *job;
+ int ret;
+};
+
+static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
+{
+ struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
+
+ recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
+}
+
+/*
+ * Serialize GPU recovery through the reset domain's single-threaded
+ * workqueue.
+ */
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
+
+ INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
+
+ if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
+ return -EAGAIN;
+
+ flush_work(&work.base);
+
+ return work.ret;
+}
+
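
Note: the wrapper above serializes recovery by queuing an on-stack work item
on the reset domain's single-threaded workqueue and waiting for it to finish.
A minimal sketch of the same pattern, with illustrative names (my_work,
my_handler, my_run_serialized) that are not part of this patch:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_work {
            struct work_struct base;
            int ret;
    };

    static void my_handler(struct work_struct *work)
    {
            struct my_work *w = container_of(work, struct my_work, base);

            w->ret = 0;     /* the serialized operation runs here */
    }

    static int my_run_serialized(struct workqueue_struct *wq)
    {
            struct my_work w = { .ret = -EINVAL };

            /* INIT_WORK_ONSTACK is the variant meant for stack-allocated works */
            INIT_WORK_ONSTACK(&w.base, my_handler);
            if (!queue_work(wq, &w.base)) {
                    destroy_work_on_stack(&w.base);
                    return -EAGAIN;
            }
            flush_work(&w.base);
            destroy_work_on_stack(&w.base);

            return w.ret;
    }

Because the workqueue is single-threaded, two callers racing on the same
domain execute their handlers strictly one after the other.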
/**
* amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
*
@@ -5492,20 +5522,6 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
return 0;
}
-static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
-
- cancel_delayed_work_sync(&ring->sched.work_tdr);
- }
-}
-
/**
* amdgpu_pci_error_detected - Called when a PCI error is detected.
* @pdev: PCI device struct
@@ -5536,14 +5552,11 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
/* Fatal error, prepare for slot reset */
case pci_channel_io_frozen:
/*
- * Cancel and wait for all TDRs in progress if failing to
- * set adev->in_gpu_reset in amdgpu_device_lock_adev
- *
- * Locking adev->reset_sem will prevent any external access
+ * Locking adev->reset_domain->sem will prevent any external access
* to GPU during PCI error recovery
*/
- while (!amdgpu_device_lock_adev(adev, NULL))
- amdgpu_cancel_all_tdr(adev);
+ amdgpu_device_lock_reset_domain(adev->reset_domain);
+ amdgpu_device_set_mp1_state(adev);
/*
* Block any work scheduling as we do for regular GPU reset
@@ -5650,7 +5663,8 @@ out:
DRM_INFO("PCIe error recovery succeeded\n");
} else {
DRM_ERROR("PCIe error recovery failed, err:%d", r);
- amdgpu_device_unlock_adev(adev);
+ amdgpu_device_unset_mp1_state(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
}
return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
@@ -5687,7 +5701,8 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
drm_sched_start(&ring->sched, true);
}
- amdgpu_device_unlock_adev(adev);
+ amdgpu_device_unset_mp1_state(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
}
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
@@ -5764,6 +5779,11 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
amdgpu_asic_invalidate_hdp(adev, ring);
}
+int amdgpu_in_reset(struct amdgpu_device *adev)
+{
+ return atomic_read(&adev->reset_domain->in_gpu_reset);
+}
+
/**
* amdgpu_device_halt() - bring hardware to some kind of halt state
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 448e9b46417c..cae57d3b317a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -937,7 +937,7 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
int ret;
unsigned int i, block_width, block_height, block_size_log2;
- if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+ if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
return 0;
for (i = 0; i < format_info->num_planes; ++i) {
@@ -1124,7 +1124,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret)
return ret;
- if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) {
+ if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);
@@ -1132,7 +1132,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
return ret;
}
- if (dev->mode_config.allow_fb_modifiers &&
+ if (!dev->mode_config.fb_modifiers_not_supported &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
ret = convert_tiling_flags_to_modifier(rfb);
if (ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 415ecf8b2e05..2ab675123ae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2000,6 +2000,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
+ amdgpu_aspm = 0;
+
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 45977a72b5dd..5d13ed376ab4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -446,24 +446,18 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* for the requested ring.
*
* @ring: ring to init the fence driver on
- * @num_hw_submission: number of entries on the hardware queue
- * @sched_score: optional score atomic shared with other schedulers
*
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
*/
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission,
- atomic_t *sched_score)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- long timeout;
- int r;
if (!adev)
return -EINVAL;
- if (!is_power_of_2(num_hw_submission))
+ if (!is_power_of_2(ring->num_hw_submission))
return -EINVAL;
ring->fence_drv.cpu_addr = NULL;
@@ -474,41 +468,14 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
- ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
+ ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
- ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
+ ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
GFP_KERNEL);
+
if (!ring->fence_drv.fences)
return -ENOMEM;
- /* No need to setup the GPU scheduler for rings that don't need it */
- if (ring->no_scheduler)
- return 0;
-
- switch (ring->funcs->type) {
- case AMDGPU_RING_TYPE_GFX:
- timeout = adev->gfx_timeout;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- timeout = adev->compute_timeout;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- timeout = adev->sdma_timeout;
- break;
- default:
- timeout = adev->video_timeout;
- break;
- }
-
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission, amdgpu_job_hang_limit,
- timeout, NULL, sched_score, ring->name);
- if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
- return r;
- }
-
return 0;
}
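
Note: the is_power_of_2() requirement above exists because the fence slot
array of size 2 * num_hw_submission is indexed by masking rather than a
modulo. A sketch of the arithmetic (fence_slot is illustrative, not from
the patch):

    /* num_hw_submission must be a power of two for the mask to work */
    static unsigned int fence_slot(unsigned int seq,
                                   unsigned int num_hw_submission)
    {
            unsigned int mask = num_hw_submission * 2 - 1;  /* num_fences_mask */

            return seq & mask;      /* cheap "seq % (2 * num_hw_submission)" */
    }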
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 899a47011a67..dd78402e3cb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -60,7 +60,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
struct ttm_resource_manager *man;
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
+ return sysfs_emit(buf, "%llu\n", man->size);
}
/**
@@ -77,8 +77,9 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = &adev->mman.gtt_mgr.manager;
- return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr));
+ return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
@@ -130,20 +131,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct amdgpu_gtt_node *node;
int r;
- if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
- atomic64_add_return(num_pages, &mgr->used) > man->size) {
- atomic64_sub(num_pages, &mgr->used);
- return -ENOSPC;
- }
-
node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
- if (!node) {
- r = -ENOMEM;
- goto err_out;
- }
+ if (!node)
+ return -ENOMEM;
node->tbo = tbo;
ttm_resource_init(tbo, place, &node->base.base);
+ if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
+ ttm_resource_manager_usage(man) > man->size) {
+ r = -ENOSPC;
+ goto err_free;
+ }
if (place->lpfn) {
spin_lock(&mgr->lock);
@@ -169,11 +167,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
err_free:
ttm_resource_fini(man, &node->base.base);
kfree(node);
-
-err_out:
- if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
- atomic64_sub(num_pages, &mgr->used);
-
return r;
}
@@ -196,26 +189,11 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
drm_mm_remove_node(&node->base.mm_nodes[0]);
spin_unlock(&mgr->lock);
- if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
- atomic64_sub(res->num_pages, &mgr->used);
-
ttm_resource_fini(man, res);
kfree(node);
}
/**
- * amdgpu_gtt_mgr_usage - return usage of GTT domain
- *
- * @mgr: amdgpu_gtt_mgr pointer
- *
- * Return how many bytes are used in the GTT domain
- */
-uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
-{
- return atomic64_read(&mgr->used) * PAGE_SIZE;
-}
-
-/**
* amdgpu_gtt_mgr_recover - re-init gart
*
* @mgr: amdgpu_gtt_mgr pointer
@@ -255,9 +233,6 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
spin_lock(&mgr->lock);
drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
-
- drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n",
- man->size, atomic64_read(&mgr->used));
}
static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
@@ -283,14 +258,12 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
man->use_tt = true;
man->func = &amdgpu_gtt_mgr_func;
- ttm_resource_manager_init(man, &adev->mman.bdev,
- gtt_size >> PAGE_SHIFT);
+ ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
- atomic64_set(&mgr->used, 0);
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
ttm_resource_manager_set_used(man, true);
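
Note: with the driver-private atomic gone, GTT usage is read back through
TTM's own accessor. A minimal sketch, assuming only a valid manager pointer
(gtt_bytes_used is illustrative):

    #include <drm/ttm/ttm_resource.h>

    /* bytes currently allocated from this manager, tracked by TTM itself */
    static uint64_t gtt_bytes_used(struct ttm_resource_manager *man)
    {
            return ttm_resource_manager_usage(man);
    }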
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4870e093213d..d970336d2261 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -64,10 +64,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti.process_name, ti.tgid, ti.task_name, ti.pid);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
- r = amdgpu_device_gpu_recover(ring->adev, job);
+ r = amdgpu_device_gpu_recover_imp(ring->adev, job);
if (r)
DRM_ERROR("GPU Recovery Failed: %d\n", r);
-
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9f985bd463be..6b626c293e72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -604,13 +604,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+ ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
+ ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -642,14 +642,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
struct ttm_resource_manager *gtt_man =
- ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+ &adev->mman.gtt_mgr.manager;
+ struct ttm_resource_manager *vram_man =
+ &adev->mman.vram_mgr.manager;
+
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
- amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+ ttm_resource_manager_usage(vram_man);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -667,8 +670,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.gtt.total_heap_size *= PAGE_SIZE;
mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
atomic64_read(&adev->gart_pin_size);
- mem.gtt.heap_usage =
- amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
+ mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 23c9a60693ee..25731719c627 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -451,7 +451,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- if (size < (man->size << PAGE_SHIFT))
+ if (size < man->size)
return true;
else
goto fail;
@@ -460,7 +460,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- if (size < (man->size << PAGE_SHIFT))
+ if (size < man->size)
return true;
else
goto fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index 0d85c2096ab5..e8adfd0a570a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -25,12 +25,6 @@
#include "amdgpu.h"
-static inline struct amdgpu_preempt_mgr *
-to_preempt_mgr(struct ttm_resource_manager *man)
-{
- return container_of(man, struct amdgpu_preempt_mgr, manager);
-}
-
/**
* DOC: mem_info_preempt_used
*
@@ -45,10 +39,9 @@ static ssize_t mem_info_preempt_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- struct ttm_resource_manager *man;
+ struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
- man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT);
- return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}
static DEVICE_ATTR_RO(mem_info_preempt_used);
@@ -68,16 +61,12 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_resource **res)
{
- struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
-
*res = kzalloc(sizeof(**res), GFP_KERNEL);
if (!*res)
return -ENOMEM;
ttm_resource_init(tbo, place, *res);
(*res)->start = AMDGPU_BO_INVALID_OFFSET;
-
- atomic64_add((*res)->num_pages, &mgr->used);
return 0;
}
@@ -92,49 +81,13 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
- struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
-
- atomic64_sub(res->num_pages, &mgr->used);
ttm_resource_fini(man, res);
kfree(res);
}
-/**
- * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain
- *
- * @man: TTM memory type manager
- *
- * Return how many bytes are used in the GTT domain
- */
-uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man)
-{
- struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
- s64 result = atomic64_read(&mgr->used);
-
- return (result > 0 ? result : 0) * PAGE_SIZE;
-}
-
-/**
- * amdgpu_preempt_mgr_debug - dump VRAM table
- *
- * @man: TTM memory type manager
- * @printer: DRM printer to use
- *
- * Dump the table content using printk.
- */
-static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
-{
- struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
-
- drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n",
- man->size, (u64)atomic64_read(&mgr->used));
-}
-
static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
.alloc = amdgpu_preempt_mgr_new,
.free = amdgpu_preempt_mgr_del,
- .debug = amdgpu_preempt_mgr_debug
};
/**
@@ -146,8 +99,7 @@ static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
*/
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
{
- struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
- struct ttm_resource_manager *man = &mgr->manager;
+ struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
int ret;
man->use_tt = true;
@@ -155,16 +107,13 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30));
- atomic64_set(&mgr->used, 0);
-
ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
return ret;
}
- ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT,
- &mgr->manager);
+ ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, man);
ttm_resource_manager_set_used(man, true);
return 0;
}
@@ -179,8 +128,7 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
*/
void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
{
- struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
- struct ttm_resource_manager *man = &mgr->manager;
+ struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
int ret;
ttm_resource_manager_set_used(man, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2b844a5aafdb..a44f2eeed6ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -31,6 +31,8 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include "amdgpu_reset.h"
+
#define EEPROM_I2C_MADDR_VEGA20 0x0
#define EEPROM_I2C_MADDR_ARCTURUS 0x40000
#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0
@@ -193,12 +195,12 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
__encode_table_header_to_buf(&control->tbl_hdr, buf);
/* i2c may be unstable in gpu reset */
- down_read(&adev->reset_sem);
+ down_read(&adev->reset_domain->sem);
res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
if (res < 0) {
DRM_ERROR("Failed to write EEPROM table header:%d", res);
@@ -390,13 +392,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
int res;
/* i2c may be unstable in gpu reset */
- down_read(&adev->reset_sem);
+ down_read(&adev->reset_domain->sem);
buf_size = num * RAS_TABLE_RECORD_SIZE;
res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
RAS_INDEX_TO_OFFSET(control, fri),
buf, buf_size);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
if (res < 0) {
DRM_ERROR("Writing %d EEPROM table records error:%d",
num, res);
@@ -550,12 +552,12 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
goto Out;
}
- down_read(&adev->reset_sem);
+ down_read(&adev->reset_domain->sem);
res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
control->ras_record_offset,
buf, buf_size);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
if (res < 0) {
DRM_ERROR("EEPROM failed reading records:%d\n",
res);
@@ -645,13 +647,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
int res;
/* i2c may be unstable in gpu reset */
- down_read(&adev->reset_sem);
+ down_read(&adev->reset_domain->sem);
buf_size = num * RAS_TABLE_RECORD_SIZE;
res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
RAS_INDEX_TO_OFFSET(control, fri),
buf, buf_size);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
if (res < 0) {
DRM_ERROR("Reading %d EEPROM table records error:%d",
num, res);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 02afd4115675..248d64158721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -96,3 +96,59 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
return reset_handler->restore_hwcontext(adev->reset_cntl,
reset_context);
}
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref)
+{
+ struct amdgpu_reset_domain *reset_domain = container_of(ref,
+ struct amdgpu_reset_domain,
+ refcount);
+ if (reset_domain->wq)
+ destroy_workqueue(reset_domain->wq);
+
+ kvfree(reset_domain);
+}
+
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+ char *wq_name)
+{
+ struct amdgpu_reset_domain *reset_domain;
+
+ reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
+ if (!reset_domain) {
+ DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
+ return NULL;
+ }
+
+ reset_domain->type = type;
+ kref_init(&reset_domain->refcount);
+
+ reset_domain->wq = create_singlethread_workqueue(wq_name);
+ if (!reset_domain->wq) {
+ DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
+ amdgpu_reset_put_reset_domain(reset_domain);
+ return NULL;
+
+ }
+
+ atomic_set(&reset_domain->in_gpu_reset, 0);
+ init_rwsem(&reset_domain->sem);
+
+ return reset_domain;
+}
+
+void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
+{
+ atomic_set(&reset_domain->in_gpu_reset, 1);
+ down_write(&reset_domain->sem);
+}
+
+void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
+{
+ atomic_set(&reset_domain->in_gpu_reset, 0);
+ up_write(&reset_domain->sem);
+}
+
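
Note: together the helpers above give callers a simple bracket around a
reset. A hedged usage sketch built only from functions introduced in this
patch (do_locked_reset is illustrative):

    static void do_locked_reset(struct amdgpu_device *adev)
    {
            amdgpu_device_lock_reset_domain(adev->reset_domain);
            /* readers of reset_domain->sem are excluded here */
            amdgpu_device_unlock_reset_domain(adev->reset_domain);
    }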
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index e00d38d9160a..1949dbe28a86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -70,6 +70,21 @@ struct amdgpu_reset_control {
void (*async_reset)(struct work_struct *work);
};
+
+enum amdgpu_reset_domain_type {
+ SINGLE_DEVICE,
+ XGMI_HIVE
+};
+
+struct amdgpu_reset_domain {
+ struct kref refcount;
+ struct workqueue_struct *wq;
+ enum amdgpu_reset_domain_type type;
+ struct rw_semaphore sem;
+ atomic_t in_gpu_reset;
+};
+
int amdgpu_reset_init(struct amdgpu_device *adev);
int amdgpu_reset_fini(struct amdgpu_device *adev);
@@ -82,4 +97,29 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_handler *handler);
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+ char *wq_name);
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref);
+
+static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *domain)
+{
+ return kref_get_unless_zero(&domain->refcount) != 0;
+}
+
+static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
+{
+ kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+}
+
+static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
+ struct work_struct *work)
+{
+ return queue_work(domain->wq, work);
+}
+
+void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
+
+void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
+
#endif
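
Note: amdgpu_reset_get_reset_domain() uses kref_get_unless_zero(), so a late
joiner can detect a domain that is already being torn down. A sketch of the
join path, mirroring the XGMI hunk earlier in this patch (adopt_hive_domain
is illustrative):

    static int adopt_hive_domain(struct amdgpu_device *adev,
                                 struct amdgpu_hive_info *hive)
    {
            if (!hive->reset_domain ||
                !amdgpu_reset_get_reset_domain(hive->reset_domain))
                    return -ENOENT; /* domain gone or being destroyed */

            /* drop the device's temporary SINGLE_DEVICE domain */
            amdgpu_reset_put_reset_domain(adev->reset_domain);
            adev->reset_domain = hive->reset_domain;

            return 0;
    }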
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index ab2351ba9574..35bcb6dc1816 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -191,8 +191,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
- sched_score);
+ ring->num_hw_submission = sched_hw_submission;
+ ring->sched_score = sched_score;
+ r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index fae7d185ad0d..48365da213dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -114,9 +114,7 @@ struct amdgpu_fence_driver {
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission,
- atomic_t *sched_score);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -251,6 +249,8 @@ struct amdgpu_ring {
bool has_compute_vm_bug;
bool no_scheduler;
int hw_prio;
+ unsigned num_hw_submission;
+ atomic_t *sched_score;
};
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index f7d8487799b2..40e06745fae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -261,10 +261,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
dma_resv_for_each_fence(&cursor, resv, true, f) {
dma_fence_chain_for_each(f, f) {
- struct dma_fence_chain *chain = to_dma_fence_chain(f);
+ struct dma_fence *tmp = dma_fence_chain_contained(f);
- if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
- chain->fence : f)) {
+ if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
r = amdgpu_sync_fence(sync, f);
dma_fence_put(f);
if (r)
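
Note: dma_fence_chain_contained() replaces the open-coded chain test removed
above; its effect is equivalent to this sketch:

    #include <linux/dma-fence-chain.h>

    /* return the fence wrapped by a chain node, or the fence itself */
    static struct dma_fence *contained_fence(struct dma_fence *f)
    {
            struct dma_fence_chain *chain = to_dma_fence_chain(f);

            return chain ? chain->fence : f;
    }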
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 414a22dddc78..4b9ee6e27f74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1941,7 +1941,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
size = adev->gmc.real_vram_size;
else
size = adev->gmc.visible_vram_size;
- man->size = size >> PAGE_SHIFT;
+ man->size = size;
adev->mman.buffer_funcs_enabled = enable;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0e4ecc77db3f..9120ae80ef52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -44,7 +44,6 @@ struct amdgpu_vram_mgr {
spinlock_t lock;
struct list_head reservations_pending;
struct list_head reserved_pages;
- atomic64_t usage;
atomic64_t vis_usage;
};
@@ -52,12 +51,6 @@ struct amdgpu_gtt_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
spinlock_t lock;
- atomic64_t used;
-};
-
-struct amdgpu_preempt_mgr {
- struct ttm_resource_manager manager;
- atomic64_t used;
};
struct amdgpu_mman {
@@ -76,7 +69,7 @@ struct amdgpu_mman {
struct amdgpu_vram_mgr vram_mgr;
struct amdgpu_gtt_mgr gtt_mgr;
- struct amdgpu_preempt_mgr preempt_mgr;
+ struct ttm_resource_manager preempt_mgr;
uint64_t stolen_vga_size;
struct amdgpu_bo *stolen_vga_memory;
@@ -118,7 +111,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
@@ -133,7 +125,6 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
void amdgpu_vram_mgr_free_sgt(struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 5656bf7d9267..a025f080aa6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -575,8 +575,10 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->driver_cert = 0;
vf2pf_info->os_info.all = 0;
- vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
- vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
+ vf2pf_info->fb_usage =
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+ vf2pf_info->fb_vis_usage =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index fce9a13a6ba1..0a7611648573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -96,9 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
- return sysfs_emit(buf, "%llu\n",
- amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
+ return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}
/**
@@ -253,7 +253,9 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
atomic64_add(vis_usage, &mgr->vis_usage);
- atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
+ spin_lock(&man->bdev->lru_lock);
+ man->usage += rsv->mm_node.size << PAGE_SHIFT;
+ spin_unlock(&man->bdev->lru_lock);
list_move(&rsv->node, &mgr->reserved_pages);
}
}
@@ -378,19 +380,13 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
lpfn = place->lpfn;
if (!lpfn)
- lpfn = man->size;
+ lpfn = man->size >> PAGE_SHIFT;
max_bytes = adev->gmc.mc_vram_size;
if (tbo->type != ttm_bo_type_kernel)
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
- /* bail out quickly if there's likely not enough VRAM for this BO */
mem_bytes = tbo->base.size;
- if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
- r = -ENOSPC;
- goto error_sub;
- }
-
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
pages_per_node = ~0ul;
num_nodes = 1;
@@ -408,13 +404,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
GFP_KERNEL | __GFP_ZERO);
- if (!node) {
- r = -ENOMEM;
- goto error_sub;
- }
+ if (!node)
+ return -ENOMEM;
ttm_resource_init(tbo, place, &node->base);
+ /* bail out quickly if there's likely not enough VRAM for this BO */
+ if (ttm_resource_manager_usage(man) > max_bytes) {
+ r = -ENOSPC;
+ goto error_fini;
+ }
+
mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
@@ -472,11 +472,10 @@ error_free:
while (i--)
drm_mm_remove_node(&node->mm_nodes[i]);
spin_unlock(&mgr->lock);
+error_fini:
ttm_resource_fini(man, &node->base);
kvfree(node);
-error_sub:
- atomic64_sub(mem_bytes, &mgr->usage);
return r;
}
@@ -494,7 +493,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- uint64_t usage = 0, vis_usage = 0;
+ uint64_t vis_usage = 0;
unsigned i, pages;
spin_lock(&mgr->lock);
@@ -503,13 +502,11 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
struct drm_mm_node *mm = &node->mm_nodes[i];
drm_mm_remove_node(mm);
- usage += mm->size << PAGE_SHIFT;
vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
}
amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
- atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
ttm_resource_fini(man, res);
@@ -628,18 +625,6 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
}
/**
- * amdgpu_vram_mgr_usage - how many bytes are used in this domain
- *
- * @mgr: amdgpu_vram_mgr pointer
- *
- * Returns how many bytes are used in this domain.
- */
-uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
-{
- return atomic64_read(&mgr->usage);
-}
-
-/**
* amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
*
* @mgr: amdgpu_vram_mgr pointer
@@ -664,13 +649,12 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ drm_printf(printer, " vis usage:%llu\n",
+ amdgpu_vram_mgr_vis_usage(mgr));
+
spin_lock(&mgr->lock);
drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
-
- drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
- amdgpu_vram_mgr_vis_usage(mgr) >> 20);
}
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -692,11 +676,11 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
struct ttm_resource_manager *man = &mgr->manager;
ttm_resource_manager_init(man, &adev->mman.bdev,
- adev->gmc.real_vram_size >> PAGE_SHIFT);
+ adev->gmc.real_vram_size);
man->func = &amdgpu_vram_mgr_func;
- drm_mm_init(&mgr->mm, 0, man->size);
+ drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
spin_lock_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
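
Note: man->usage is protected by the TTM device's LRU lock rather than an
atomic, which is why the reservation path above takes bdev->lru_lock. The
pattern, as a sketch (account_bytes is illustrative):

    static void account_bytes(struct ttm_resource_manager *man, u64 size)
    {
            spin_lock(&man->bdev->lru_lock);
            man->usage += size;     /* usage is kept in bytes after this series */
            spin_unlock(&man->bdev->lru_lock);
    }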
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 77b65434ccc2..91817a31f3e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -32,6 +32,8 @@
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"
+#include "amdgpu_reset.h"
+
#define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
@@ -227,6 +229,9 @@ static void amdgpu_xgmi_hive_release(struct kobject *kobj)
struct amdgpu_hive_info *hive = container_of(
kobj, struct amdgpu_hive_info, kobj);
+ amdgpu_reset_put_reset_domain(hive->reset_domain);
+ hive->reset_domain = NULL;
+
mutex_destroy(&hive->hive_lock);
kfree(hive);
}
@@ -398,15 +403,35 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
goto pro_end;
}
+ /*
+ * Avoid recreating the reset domain when the hive is reconstructed for
+ * the case of resetting the devices in the XGMI hive during probe for
+ * SRIOV.
+ * See https://www.spinics.net/lists/amd-gfx/msg58836.html
+ */
+ if (adev->reset_domain->type != XGMI_HIVE) {
+ hive->reset_domain = amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
+ if (!hive->reset_domain) {
+ dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
+ ret = -ENOMEM;
+ kobject_put(&hive->kobj);
+ kfree(hive);
+ hive = NULL;
+ goto pro_end;
+ }
+ } else {
+ amdgpu_reset_get_reset_domain(adev->reset_domain);
+ hive->reset_domain = adev->reset_domain;
+ }
+
hive->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&hive->device_list);
INIT_LIST_HEAD(&hive->node);
mutex_init(&hive->hive_lock);
- atomic_set(&hive->in_reset, 0);
atomic_set(&hive->number_devices, 0);
task_barrier_init(&hive->tb);
hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
hive->hi_req_gpu = NULL;
+
/*
* hive pstate on boot is high in vega20 so we have to go to low
* pstate on after boot.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 095921851fb5..552e6fb55aa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -33,7 +33,6 @@ struct amdgpu_hive_info {
struct list_head node;
atomic_t number_devices;
struct mutex hive_lock;
- atomic_t in_reset;
int hi_req_count;
struct amdgpu_device *hi_req_gpu;
struct task_barrier tb;
@@ -42,6 +41,8 @@ struct amdgpu_hive_info {
AMDGPU_XGMI_PSTATE_MAX_VEGA20,
AMDGPU_XGMI_PSTATE_UNKNOWN
} pstate;
+
+ struct amdgpu_reset_domain *reset_domain;
};
struct amdgpu_pcs_ras_field {
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 6fa2229b7229..1c5d9388ad0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -25,6 +25,8 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
+
#include <asm/unaligned.h>
#include <drm/drm_util.h>
@@ -740,7 +742,7 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
break;
}
if (arg != ATOM_COND_ALWAYS)
- SDEBUG(" taken: %s\n", execute ? "yes" : "no");
+ SDEBUG(" taken: %s\n", str_yes_no(execute));
SDEBUG(" target: 0x%04X\n", target);
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5d5205870861..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2798,6 +2798,8 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
+
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 4d812b22c54f..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2916,6 +2916,8 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
+
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b90bc2adf778..982855e6cf52 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2674,6 +2674,7 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 7c1379b02f94..84440741c60b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2695,6 +2695,8 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
+
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index e7add2020d48..3dcd82b49481 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -48,6 +48,8 @@
#include "athub_v2_0.h"
#include "athub_v2_1.h"
+#include "amdgpu_reset.h"
+
#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
@@ -328,7 +330,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*/
if (adev->gfx.kiq.ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- down_read_trylock(&adev->reset_sem)) {
+ down_read_trylock(&adev->reset_domain->sem)) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
@@ -338,7 +340,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 412e44af1608..df35f0252eea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -62,6 +62,8 @@
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_reset.h"
+
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -787,13 +789,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*/
if (adev->gfx.kiq.ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- down_read_trylock(&adev->reset_sem)) {
+ down_read_trylock(&adev->reset_domain->sem)) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
return;
}
@@ -900,7 +902,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (amdgpu_in_reset(adev))
return -EIO;
- if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
+ if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent
@@ -927,7 +929,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (r) {
amdgpu_ring_undo(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
return -ETIME;
}
@@ -936,10 +938,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
return -ETIME;
}
- up_read(&adev->reset_sem);
+ up_read(&adev->reset_domain->sem);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 56da5ab82987..b81acf59870c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -32,6 +32,8 @@
#include "soc15_common.h"
#include "mxgpu_ai.h"
+#include "amdgpu_reset.h"
+
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
@@ -257,10 +259,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reseted by
* the VF FLR.
*/
- if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
+ if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
return;
- down_write(&adev->reset_sem);
+ down_write(&adev->reset_domain->sem);
amdgpu_virt_fini_data_exchange(adev);
@@ -275,14 +277,14 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);
flr_done:
- atomic_set(&adev->in_gpu_reset, 0);
- up_write(&adev->reset_sem);
+ atomic_set(&adev->reset_domain->in_gpu_reset, 0);
+ up_write(&adev->reset_domain->sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
- amdgpu_device_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover_imp(adev, NULL);
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -307,8 +309,11 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
switch (event) {
case IDH_FLR_NOTIFICATION:
- if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.flr_work);
+ if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
break;
case IDH_QUERY_ALIVE:
xgpu_ai_mailbox_send_ack(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 477d0dde19c5..22c10b97ea81 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -31,6 +31,8 @@
#include "soc15_common.h"
#include "mxgpu_nv.h"
+#include "amdgpu_reset.h"
+
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
@@ -281,10 +283,10 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reseted by
* the VF FLR.
*/
- if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
+ if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
return;
- down_write(&adev->reset_sem);
+ down_write(&adev->reset_domain->sem);
amdgpu_virt_fini_data_exchange(adev);
@@ -299,8 +301,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);
flr_done:
- atomic_set(&adev->in_gpu_reset, 0);
- up_write(&adev->reset_sem);
+ atomic_set(&adev->reset_domain->in_gpu_reset, 0);
+ up_write(&adev->reset_domain->sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
@@ -309,7 +311,7 @@ flr_done:
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
- amdgpu_device_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover_imp(adev, NULL);
}
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -337,8 +339,11 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
switch (event) {
case IDH_FLR_NOTIFICATION:
- if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.flr_work);
+ if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
* it by far since that polling thread will handle it,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index a642c04cf17d..7b63d30b9b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -42,6 +42,8 @@
#include "smu/smu_7_1_3_d.h"
#include "mxgpu_vi.h"
+#include "amdgpu_reset.h"
+
/* VI golden setting */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
@@ -521,7 +523,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
/* Trigger recovery due to world switch failure */
if (amdgpu_device_should_recover_gpu(adev))
- amdgpu_device_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover_imp(adev, NULL);
}
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -550,8 +552,11 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
/* only handle FLR_NOTIFY now */
- if (!r)
- schedule_work(&adev->virt.flr_work);
+ if (!r && !amdgpu_in_reset(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
}
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6b2e46ed6184..e1d3db3fe8de 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7924,6 +7924,9 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
if (res)
return res;
+ if (modifiers == NULL)
+ adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
+
res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
&dm_plane_funcs, formats, num_formats,
modifiers, plane->type, NULL);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 77f9d94743a1..777210811311 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include "dc.h"
@@ -49,11 +50,6 @@ struct dmub_debugfs_trace_entry {
uint32_t param1;
};
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
/* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array
*
* Function takes in attributes passed to debugfs write entry
@@ -857,12 +853,12 @@ static int psr_capability_show(struct seq_file *m, void *data)
if (!(link->connector_signal & SIGNAL_TYPE_EDP))
return -ENODEV;
- seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_info.psr_version != 0));
+ seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_info.psr_version != 0));
if (link->dpcd_caps.psr_info.psr_version)
seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_info.psr_version);
seq_puts(m, "\n");
- seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled));
+ seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled));
if (link->psr_settings.psr_version)
seq_printf(m, " [0x%02x]", link->psr_settings.psr_version);
seq_puts(m, "\n");
@@ -1211,8 +1207,8 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported));
- seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported));
+ seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported));
+ seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported));
return ret;
}
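
Note: str_yes_no() from <linux/string_helpers.h> is a drop-in for the local
helper deleted above; it behaves like this sketch:

    /* equivalent to the removed local yesno() */
    static inline const char *yes_no(bool v)
    {
            return v ? "yes" : "no";
    }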
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 8e3e98f13db4..4f9b0a9f13e3 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -273,6 +273,9 @@ static int __init armada_drm_init(void)
{
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
ret = platform_driver_register(&armada_lcd_platform_driver);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 65f172807a0d..13f496473b9e 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -359,7 +360,7 @@ static struct platform_driver aspeed_gfx_platform_driver = {
},
};
-module_platform_driver(aspeed_gfx_platform_driver);
+drm_module_platform_driver(aspeed_gfx_platform_driver);
MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver");
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index cd93c44f2662..204c926a18ea 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -272,64 +272,6 @@ static bool ast_launch_m68k(struct drm_device *dev)
return true;
}
-u8 ast_get_dp501_max_clk(struct drm_device *dev)
-{
- struct ast_private *ast = to_ast_private(dev);
- u32 boot_address, offset, data;
- u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
- u32 *plinkcap;
-
- if (ast->config_mode == ast_use_p2a) {
- boot_address = get_fw_base(ast);
-
- /* validate FW version */
- offset = AST_DP501_GBL_VERSION;
- data = ast_mindwm(ast, boot_address + offset);
- if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
- return maxclk;
-
- /* Read Link Capability */
- offset = AST_DP501_LINKRATE;
- plinkcap = (u32 *)linkcap;
- *plinkcap = ast_mindwm(ast, boot_address + offset);
- if (linkcap[2] == 0) {
- linkrate = linkcap[0];
- linklanes = linkcap[1];
- data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
- if (data > 0xff)
- data = 0xff;
- maxclk = (u8)data;
- }
- } else {
- if (!ast->dp501_fw_buf)
- return AST_DP501_DEFAULT_DCLK; /* 1024x768 as default */
-
- /* dummy read */
- offset = 0x0000;
- data = readl(ast->dp501_fw_buf + offset);
-
- /* validate FW version */
- offset = AST_DP501_GBL_VERSION;
- data = readl(ast->dp501_fw_buf + offset);
- if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
- return maxclk;
-
- /* Read Link Capability */
- offset = AST_DP501_LINKRATE;
- plinkcap = (u32 *)linkcap;
- *plinkcap = readl(ast->dp501_fw_buf + offset);
- if (linkcap[2] == 0) {
- linkrate = linkcap[0];
- linklanes = linkcap[1];
- data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
- if (data > 0xff)
- data = 0xff;
- maxclk = (u8)data;
- }
- }
- return maxclk;
-}
-
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 9c8d56b0a41b..a19315b2f7e5 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -69,7 +69,6 @@ enum ast_chip {
enum ast_tx_chip {
AST_TX_NONE,
AST_TX_SIL164,
- AST_TX_ITE66121,
AST_TX_DP501,
};
@@ -130,15 +129,26 @@ struct ast_i2c_chan {
struct i2c_algo_bit_data bit;
};
-struct ast_connector {
+struct ast_vga_connector {
struct drm_connector base;
struct ast_i2c_chan *i2c;
};
-static inline struct ast_connector *
-to_ast_connector(struct drm_connector *connector)
+static inline struct ast_vga_connector *
+to_ast_vga_connector(struct drm_connector *connector)
{
- return container_of(connector, struct ast_connector, base);
+ return container_of(connector, struct ast_vga_connector, base);
+}
+
+struct ast_sil164_connector {
+ struct drm_connector base;
+ struct ast_i2c_chan *i2c;
+};
+
+static inline struct ast_sil164_connector *
+to_ast_sil164_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct ast_sil164_connector, base);
}
/*
@@ -161,8 +171,20 @@ struct ast_private {
struct drm_plane primary_plane;
struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
- struct drm_encoder encoder;
- struct ast_connector connector;
+ union {
+ struct {
+ struct drm_encoder encoder;
+ struct ast_vga_connector vga_connector;
+ } vga;
+ struct {
+ struct drm_encoder encoder;
+ struct ast_sil164_connector sil164_connector;
+ } sil164;
+ struct {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ } dp501;
+ } output;
bool support_wide_screen;
enum {
@@ -172,7 +194,6 @@ struct ast_private {
} config_mode;
enum ast_tx_chip tx_chip_type;
- u8 dp501_maxclk;
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
};
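
Note: with the new union, only the member matching tx_chip_type is ever initialized, so callers must dispatch on that field. A hypothetical helper (not part of the patch) illustrating the access pattern:

	/* Illustrative only: return the encoder set up by ast_mode_config_init() */
	static struct drm_encoder *ast_active_encoder(struct ast_private *ast)
	{
		switch (ast->tx_chip_type) {
		case AST_TX_NONE:
			return &ast->output.vga.encoder;
		case AST_TX_SIL164:
			return &ast->output.sil164.encoder;
		case AST_TX_DP501:
			return &ast->output.dp501.encoder;
		}
		return NULL;
	}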
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 2c7115a4d81f..45b56b39ad47 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -40,6 +40,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -1005,6 +1006,71 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static enum drm_mode_status
+ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct ast_private *ast = to_ast_private(crtc->dev);
+ enum drm_mode_status status;
+ uint32_t jtemp;
+
+ if (ast->support_wide_screen) {
+ if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
+ return MODE_OK;
+ if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
+ return MODE_OK;
+ if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
+ return MODE_OK;
+ if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
+ return MODE_OK;
+ if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
+ return MODE_OK;
+
+ if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
+ (ast->chip == AST2300) || (ast->chip == AST2400) ||
+ (ast->chip == AST2500)) {
+ if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
+ return MODE_OK;
+
+ if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (jtemp & 0x01)
+ return MODE_NOMODE;
+ else
+ return MODE_OK;
+ }
+ }
+ }
+
+ status = MODE_NOMODE;
+
+ switch (mode->hdisplay) {
+ case 640:
+ if (mode->vdisplay == 480)
+ status = MODE_OK;
+ break;
+ case 800:
+ if (mode->vdisplay == 600)
+ status = MODE_OK;
+ break;
+ case 1024:
+ if (mode->vdisplay == 768)
+ status = MODE_OK;
+ break;
+ case 1280:
+ if (mode->vdisplay == 1024)
+ status = MODE_OK;
+ break;
+ case 1600:
+ if (mode->vdisplay == 1200)
+ status = MODE_OK;
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1107,6 +1173,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+ .mode_valid = ast_crtc_helper_mode_valid,
.atomic_check = ast_crtc_helper_atomic_check,
.atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
@@ -1187,128 +1254,229 @@ static int ast_crtc_init(struct drm_device *dev)
}
/*
- * Encoder
+ * VGA Connector
*/
-static int ast_encoder_init(struct drm_device *dev)
+static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
{
- struct ast_private *ast = to_ast_private(dev);
- struct drm_encoder *encoder = &ast->encoder;
+ struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
+ struct edid *edid;
+ int count;
+
+ if (!ast_vga_connector->i2c)
+ goto err_drm_connector_update_edid_property;
+
+ edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
+ if (!edid)
+ goto err_drm_connector_update_edid_property;
+
+ count = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return count;
+
+err_drm_connector_update_edid_property:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
+ .get_modes = ast_vga_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs ast_vga_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_vga_connector_init(struct drm_device *dev,
+ struct ast_vga_connector *ast_vga_connector)
+{
+ struct drm_connector *connector = &ast_vga_connector->base;
+ int ret;
+
+ ast_vga_connector->i2c = ast_i2c_create(dev);
+ if (!ast_vga_connector->i2c)
+ drm_err(dev, "failed to add ddc bus for connector\n");
+
+ if (ast_vga_connector->i2c)
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &ast_vga_connector->i2c->adapter);
+ else
+ ret = drm_connector_init(dev, connector, &ast_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ return 0;
+}
+
+static int ast_vga_output_init(struct ast_private *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.vga.encoder;
+ struct ast_vga_connector *ast_vga_connector = &ast->output.vga.vga_connector;
+ struct drm_connector *connector = &ast_vga_connector->base;
int ret;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret)
return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
- encoder->possible_crtcs = 1;
+ ret = ast_vga_connector_init(dev, ast_vga_connector);
+ if (ret)
+ return ret;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
return 0;
}
/*
- * Connector
+ * SIL164 Connector
*/
-static int ast_get_modes(struct drm_connector *connector)
+static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
+ struct edid *edid;
+ int count;
+
+ if (!ast_sil164_connector->i2c)
+ goto err_drm_connector_update_edid_property;
+
+ edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
+ if (!edid)
+ goto err_drm_connector_update_edid_property;
+
+ count = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return count;
+
+err_drm_connector_update_edid_property:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
+ .get_modes = ast_sil164_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs ast_sil164_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_sil164_connector_init(struct drm_device *dev,
+ struct ast_sil164_connector *ast_sil164_connector)
{
- struct ast_connector *ast_connector = to_ast_connector(connector);
- struct ast_private *ast = to_ast_private(connector->dev);
- struct edid *edid = NULL;
- bool flags = false;
+ struct drm_connector *connector = &ast_sil164_connector->base;
int ret;
- if (ast->tx_chip_type == AST_TX_DP501) {
- ast->dp501_maxclk = 0xff;
- edid = kmalloc(128, GFP_KERNEL);
- if (!edid)
- return -ENOMEM;
+ ast_sil164_connector->i2c = ast_i2c_create(dev);
+ if (!ast_sil164_connector->i2c)
+ drm_err(dev, "failed to add ddc bus for connector\n");
- flags = ast_dp501_read_edid(connector->dev, (u8 *)edid);
- if (flags)
- ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev);
- else
- kfree(edid);
- }
- if (!flags && ast_connector->i2c)
- edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
- if (edid) {
- drm_connector_update_edid_property(&ast_connector->base, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ if (ast_sil164_connector->i2c)
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII,
+ &ast_sil164_connector->i2c->adapter);
+ else
+ ret = drm_connector_init(dev, connector, &ast_sil164_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
+ if (ret)
return ret;
- }
- drm_connector_update_edid_property(&ast_connector->base, NULL);
+
+ drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
return 0;
}
-static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static int ast_sil164_output_init(struct ast_private *ast)
{
- struct ast_private *ast = to_ast_private(connector->dev);
- int flags = MODE_NOMODE;
- uint32_t jtemp;
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.sil164.encoder;
+ struct ast_sil164_connector *ast_sil164_connector = &ast->output.sil164.sil164_connector;
+ struct drm_connector *connector = &ast_sil164_connector->base;
+ int ret;
- if (ast->support_wide_screen) {
- if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
- return MODE_OK;
- if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
- return MODE_OK;
- if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
- return MODE_OK;
- if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
- return MODE_OK;
- if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
- return MODE_OK;
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
- if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
- (ast->chip == AST2300) || (ast->chip == AST2400) ||
- (ast->chip == AST2500)) {
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
- return MODE_OK;
+ ret = ast_sil164_connector_init(dev, ast_sil164_connector);
+ if (ret)
+ return ret;
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
- jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
- if (jtemp & 0x01)
- return MODE_NOMODE;
- else
- return MODE_OK;
- }
- }
- }
- switch (mode->hdisplay) {
- case 640:
- if (mode->vdisplay == 480)
- flags = MODE_OK;
- break;
- case 800:
- if (mode->vdisplay == 600)
- flags = MODE_OK;
- break;
- case 1024:
- if (mode->vdisplay == 768)
- flags = MODE_OK;
- break;
- case 1280:
- if (mode->vdisplay == 1024)
- flags = MODE_OK;
- break;
- case 1600:
- if (mode->vdisplay == 1200)
- flags = MODE_OK;
- break;
- default:
- return flags;
- }
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
- return flags;
+ return 0;
}
-static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
- .get_modes = ast_get_modes,
- .mode_valid = ast_mode_valid,
+/*
+ * DP501 Connector
+ */
+
+static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector)
+{
+ void *edid;
+ bool succ;
+ int count;
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid)
+ goto err_drm_connector_update_edid_property;
+
+ succ = ast_dp501_read_edid(connector->dev, edid);
+ if (!succ)
+ goto err_kfree;
+
+ drm_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return count;
+
+err_kfree:
+ kfree(edid);
+err_drm_connector_update_edid_property:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
+ .get_modes = ast_dp501_connector_helper_get_modes,
};
-static const struct drm_connector_funcs ast_connector_funcs = {
+static const struct drm_connector_funcs ast_dp501_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -1316,33 +1484,45 @@ static const struct drm_connector_funcs ast_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_connector_init(struct drm_device *dev)
+static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
{
- struct ast_private *ast = to_ast_private(dev);
- struct ast_connector *ast_connector = &ast->connector;
- struct drm_connector *connector = &ast_connector->base;
- struct drm_encoder *encoder = &ast->encoder;
-
- ast_connector->i2c = ast_i2c_create(dev);
- if (!ast_connector->i2c)
- drm_err(dev, "failed to add ddc bus for connector\n");
+ int ret;
- if (ast_connector->i2c)
- drm_connector_init_with_ddc(dev, connector, &ast_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &ast_connector->i2c->adapter);
- else
- drm_connector_init(dev, connector, &ast_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret)
+ return ret;
- drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+ drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- drm_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+static int ast_dp501_output_init(struct ast_private *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.dp501.encoder;
+ struct drm_connector *connector = &ast->output.dp501.connector;
+ int ret;
+
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_dp501_connector_init(dev, connector);
+ if (ret)
+ return ret;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
return 0;
}
@@ -1351,8 +1531,7 @@ static int ast_connector_init(struct drm_device *dev)
* Mode config
*/
-static const struct drm_mode_config_helper_funcs
-ast_mode_config_helper_funcs = {
+static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
@@ -1404,8 +1583,20 @@ int ast_mode_config_init(struct ast_private *ast)
return ret;
ast_crtc_init(dev);
- ast_encoder_init(dev);
- ast_connector_init(dev);
+
+ switch (ast->tx_chip_type) {
+ case AST_TX_NONE:
+ ret = ast_vga_output_init(ast);
+ break;
+ case AST_TX_SIL164:
+ ret = ast_sil164_output_init(ast);
+ break;
+ case AST_TX_DP501:
+ ret = ast_dp501_output_init(ast);
+ break;
+ }
+ if (ret)
+ return ret;
drm_mode_config_reset(dev);
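
Note: besides moving mode validation from the connector to the CRTC helper, the patch replaces the hard-coded encoder->possible_crtcs = 1 with drm_crtc_mask(). The helper derives the bit from the CRTC's registration index, so the mask stays correct if CRTCs are ever added or reordered:

	#include <drm/drm_crtc.h>

	/* drm_crtc_mask(crtc) == BIT(drm_crtc_index(crtc)) */
	encoder->possible_crtcs = drm_crtc_mask(crtc);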
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 1656d27b78b6..651e3c109360 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -22,6 +22,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -833,7 +834,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.of_match_table = atmel_hlcdc_dc_of_match,
},
};
-module_platform_driver(atmel_hlcdc_dc_platform_driver);
+drm_module_platform_driver(atmel_hlcdc_dc_platform_driver);
MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index fcd93f1aec90..c86f5be4dfe0 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -75,6 +75,14 @@ config DRM_DISPLAY_CONNECTOR
on ARM-based platforms. Saying Y here when this driver is not needed
will not cause any issue.
+config DRM_ITE_IT6505
+ tristate "ITE IT6505 DisplayPort bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select EXTCON
+ help
+ ITE IT6505 DisplayPort bridge chip driver.
+
config DRM_LONTIUM_LT8912B
tristate "Lontium LT8912B DSI/HDMI bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index f2c73683cfcb..425844c30495 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
+obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index 319ba0df57be..cc0aa6572d98 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -32,6 +32,8 @@ config DRM_ANALOGIX_ANX7625
tristate "Analogix Anx7625 MIPI to DP interface support"
depends on DRM
depends on OF
+ select DRM_DP_AUX_BUS
+ select DRM_DP_HELPER
select DRM_MIPI_DSI
help
ANX7625 is an ultra-low power 4K mobile HD transmitter
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 76662fce4ce6..633618bafd75 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -24,6 +24,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
@@ -129,6 +130,23 @@ static int anx7625_reg_write(struct anx7625_data *ctx,
return ret;
}
+static int anx7625_reg_block_write(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 reg_addr, u8 len, u8 *buf)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ i2c_access_workaround(ctx, client);
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf);
+ if (ret < 0)
+ dev_err(dev, "write i2c block failed id=%x\n:%x",
+ client->addr, reg_addr);
+
+ return ret;
+}
+
static int anx7625_write_or(struct anx7625_data *ctx,
struct i2c_client *client,
u8 offset, u8 mask)
@@ -214,25 +232,28 @@ static int wait_aux_op_finish(struct anx7625_data *ctx)
return 0;
}
-static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
- u32 address, u8 len, u8 *buf)
+static int anx7625_aux_trans(struct anx7625_data *ctx, u8 op, u32 address,
+ u8 len, u8 *buf)
{
struct device *dev = &ctx->client->dev;
int ret;
u8 addrh, addrm, addrl;
u8 cmd;
+ bool is_write = !(op & DP_AUX_I2C_READ);
- if (len > MAX_DPCD_BUFFER_SIZE) {
+ if (len > DP_AUX_MAX_PAYLOAD_BYTES) {
dev_err(dev, "exceed aux buffer len.\n");
return -EINVAL;
}
+ if (!len)
+ return len;
+
addrl = address & 0xFF;
addrm = (address >> 8) & 0xFF;
addrh = (address >> 16) & 0xFF;
- cmd = DPCD_CMD(len, DPCD_READ);
- cmd = ((len - 1) << 4) | 0x09;
+ cmd = DPCD_CMD(len, op);
/* Set command and length */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -246,6 +267,9 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
AP_AUX_ADDR_19_16, addrh);
+ if (is_write)
+ ret |= anx7625_reg_block_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START, len, buf);
/* Enable aux access */
ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
@@ -255,14 +279,17 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
return -EIO;
}
- usleep_range(2000, 2100);
-
ret = wait_aux_op_finish(ctx);
- if (ret) {
+ if (ret < 0) {
dev_err(dev, "aux IO error: wait aux op finish.\n");
return ret;
}
+ /* Write done */
+ if (is_write)
+ return len;
+
+ /* Read done, read out dpcd data */
ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
AP_AUX_BUFF_START, len, buf);
if (ret < 0) {
@@ -270,7 +297,7 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx,
return -EIO;
}
- return 0;
+ return len;
}
static int anx7625_video_mute_control(struct anx7625_data *ctx,
@@ -845,7 +872,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
}
/* Read downstream capability */
- anx7625_aux_dpcd_read(ctx, 0x68028, 1, &bcap);
+ anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
if (!(bcap & 0x01)) {
pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
return 0;
@@ -918,6 +945,7 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
{
struct device *dev = &ctx->client->dev;
int ret;
+ u8 data;
DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n");
@@ -929,6 +957,11 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f);
ret |= anx7625_video_mute_control(ctx, 1);
+
+ dev_dbg(dev, "notify downstream enter into standby\n");
+ /* Downstream monitor enter into standby mode */
+ data = 2;
+ ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
if (ret < 0)
DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
@@ -1076,7 +1109,8 @@ static int segments_edid_read(struct anx7625_data *ctx,
static int sp_tx_edid_read(struct anx7625_data *ctx,
u8 *pedid_blocks_buf)
{
- u8 offset, edid_pos;
+ u8 offset;
+ int edid_pos;
int count, blocks_num;
u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
u8 i, j;
@@ -1627,11 +1661,56 @@ static int anx7625_parse_dt(struct device *dev,
return 0;
}
+static bool anx7625_of_panel_on_aux_bus(struct device *dev)
+{
+ struct device_node *bus, *panel;
+
+ bus = of_get_child_by_name(dev->of_node, "aux-bus");
+ if (!bus)
+ return false;
+
+ panel = of_get_child_by_name(bus, "panel");
+ of_node_put(bus);
+ if (!panel)
+ return false;
+ of_node_put(panel);
+
+ return true;
+}
+
static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge)
{
return container_of(bridge, struct anx7625_data, bridge);
}
+static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
+ struct device *dev = &ctx->client->dev;
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ int ret = 0;
+
+ pm_runtime_get_sync(dev);
+ msg->reply = 0;
+ switch (request) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ ret = anx7625_aux_trans(ctx, msg->request, msg->address,
+ msg->size, msg->buffer);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
{
struct device *dev = &ctx->client->dev;
@@ -2038,6 +2117,13 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
return -ENODEV;
}
+ ctx->aux.drm_dev = bridge->dev;
+ err = drm_dp_aux_register(&ctx->aux);
+ if (err) {
+ dev_err(dev, "failed to register aux channel: %d\n", err);
+ return err;
+ }
+
if (ctx->pdata.panel_bridge) {
err = drm_bridge_attach(bridge->encoder,
ctx->pdata.panel_bridge,
@@ -2051,6 +2137,13 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
return 0;
}
+static void anx7625_bridge_detach(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+
+ drm_dp_aux_unregister(&ctx->aux);
+}
+
static enum drm_mode_status
anx7625_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
@@ -2316,6 +2409,7 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
+ .detach = anx7625_bridge_detach,
.mode_valid = anx7625_bridge_mode_valid,
.mode_set = anx7625_bridge_mode_set,
.atomic_check = anx7625_bridge_atomic_check,
@@ -2473,6 +2567,12 @@ static const struct dev_pm_ops anx7625_pm_ops = {
anx7625_runtime_pm_resume, NULL)
};
+static void anx7625_runtime_disable(void *data)
+{
+ pm_runtime_dont_use_autosuspend(data);
+ pm_runtime_disable(data);
+}
+
static int anx7625_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -2487,7 +2587,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
return -ENODEV;
}
- platform = kzalloc(sizeof(*platform), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
if (!platform) {
DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
return -ENOMEM;
@@ -2495,13 +2595,6 @@ static int anx7625_i2c_probe(struct i2c_client *client,
pdata = &platform->pdata;
- ret = anx7625_parse_dt(dev, pdata);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
- goto free_platform;
- }
-
platform->client = client;
i2c_set_clientdata(client, platform);
@@ -2524,7 +2617,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
if (!platform->hdcp_workqueue) {
dev_err(dev, "fail to create work queue\n");
ret = -ENOMEM;
- goto free_platform;
+ return ret;
}
platform->pdata.intp_irq = client->irq;
@@ -2549,6 +2642,19 @@ static int anx7625_i2c_probe(struct i2c_client *client,
}
}
+ platform->aux.name = "anx7625-aux";
+ platform->aux.dev = dev;
+ platform->aux.transfer = anx7625_aux_transfer;
+ drm_dp_aux_init(&platform->aux);
+ devm_of_dp_aux_populate_ep_devices(&platform->aux);
+
+ ret = anx7625_parse_dt(dev, pdata);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+ return ret;
+ }
+
if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
ret = -ENOMEM;
DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n");
@@ -2556,6 +2662,12 @@ static int anx7625_i2c_probe(struct i2c_client *client,
}
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ pm_suspend_ignore_children(dev, true);
+ ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
+ if (ret)
+ return ret;
if (!platform->pdata.low_power_mode) {
anx7625_disable_pd_protocol(platform);
@@ -2568,7 +2680,8 @@ static int anx7625_i2c_probe(struct i2c_client *client,
platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = client->dev.of_node;
- platform->bridge.ops = DRM_BRIDGE_OP_EDID;
+ if (!anx7625_of_panel_on_aux_bus(&client->dev))
+ platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
if (!platform->pdata.panel_bridge)
platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT;
@@ -2609,9 +2722,6 @@ free_hdcp_wq:
if (platform->hdcp_workqueue)
destroy_workqueue(platform->hdcp_workqueue);
-free_platform:
- kfree(platform);
-
return ret;
}
@@ -2638,7 +2748,6 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (platform->pdata.audio_en)
anx7625_unregister_audio(platform);
- kfree(platform);
return 0;
}
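
Note: the AUX channel follows the usual two-step DRM pattern: drm_dp_aux_init() plus devm_of_dp_aux_populate_ep_devices() at probe time (no drm_device exists yet), then drm_dp_aux_register()/drm_dp_aux_unregister() from bridge attach/detach. Condensed from the hunks above:

	/* probe: init only, and spawn any panel on the aux-bus DT node */
	platform->aux.name = "anx7625-aux";
	platform->aux.dev = dev;
	platform->aux.transfer = anx7625_aux_transfer;
	drm_dp_aux_init(&platform->aux);
	devm_of_dp_aux_populate_ep_devices(&platform->aux);

	/* bridge attach: a drm_device is now available, finish registration */
	ctx->aux.drm_dev = bridge->dev;
	err = drm_dp_aux_register(&ctx->aux);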
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 56165f5b254c..edbbfe410a56 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -242,8 +242,6 @@
#define AP_AUX_COMMAND 0x27 /* com+len */
#define LENGTH_SHIFT 4
-#define DPCD_READ 0x09
-#define DPCD_WRITE 0x08
#define DPCD_CMD(len, cmd) ((((len) - 1) << LENGTH_SHIFT) | (cmd))
/* Bit 0&1: 3D video structure */
@@ -474,6 +472,7 @@ struct anx7625_data {
u8 bridge_attached;
struct drm_connector *connector;
struct mipi_dsi_device *dsi;
+ struct drm_dp_aux aux;
};
#endif /* __ANX7625_H__ */
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
new file mode 100644
index 000000000000..fb16a176822d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -0,0 +1,3352 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/fs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <crypto/hash.h>
+
+#include <drm/dp/drm_dp_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_hdcp.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include <sound/hdmi-codec.h>
+
+#define REG_IC_VER 0x04
+
+#define REG_RESET_CTRL 0x05
+#define VIDEO_RESET BIT(0)
+#define AUDIO_RESET BIT(1)
+#define ALL_LOGIC_RESET BIT(2)
+#define AUX_RESET BIT(3)
+#define HDCP_RESET BIT(4)
+
+#define INT_STATUS_01 0x06
+#define INT_MASK_01 0x09
+#define INT_HPD_CHANGE 0
+#define INT_RECEIVE_HPD_IRQ 1
+#define INT_SCDT_CHANGE 2
+#define INT_HDCP_FAIL 3
+#define INT_HDCP_DONE 4
+#define BIT_OFFSET(x) (((x) - INT_STATUS_01) * BITS_PER_BYTE)
+#define BIT_INT_HPD INT_HPD_CHANGE
+#define BIT_INT_HPD_IRQ INT_RECEIVE_HPD_IRQ
+#define BIT_INT_SCDT INT_SCDT_CHANGE
+#define BIT_INT_HDCP_FAIL INT_HDCP_FAIL
+#define BIT_INT_HDCP_DONE INT_HDCP_DONE
+
+#define INT_STATUS_02 0x07
+#define INT_MASK_02 0x0A
+#define INT_AUX_CMD_FAIL 0
+#define INT_HDCP_KSV_CHECK 1
+#define INT_AUDIO_FIFO_ERROR 2
+#define BIT_INT_AUX_CMD_FAIL (BIT_OFFSET(0x07) + INT_AUX_CMD_FAIL)
+#define BIT_INT_HDCP_KSV_CHECK (BIT_OFFSET(0x07) + INT_HDCP_KSV_CHECK)
+#define BIT_INT_AUDIO_FIFO_ERROR (BIT_OFFSET(0x07) + INT_AUDIO_FIFO_ERROR)
+
+#define INT_STATUS_03 0x08
+#define INT_MASK_03 0x0B
+#define INT_LINK_TRAIN_FAIL 4
+#define INT_VID_FIFO_ERROR 5
+#define INT_IO_LATCH_FIFO_OVERFLOW 7
+#define BIT_INT_LINK_TRAIN_FAIL (BIT_OFFSET(0x08) + INT_LINK_TRAIN_FAIL)
+#define BIT_INT_VID_FIFO_ERROR (BIT_OFFSET(0x08) + INT_VID_FIFO_ERROR)
+#define BIT_INT_IO_FIFO_OVERFLOW (BIT_OFFSET(0x08) + INT_IO_LATCH_FIFO_OVERFLOW)
+
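Note: the three status registers (0x06..0x08) are folded into one flat bit-index space: BIT_OFFSET(reg) is (reg - INT_STATUS_01) * 8, so indices 0..7 live in 0x06, 8..15 in 0x07 and 16..23 in 0x08 (e.g. BIT_INT_AUX_CMD_FAIL == 8). A hypothetical decoder, for illustration only:

	/* Illustrative only: map a flat BIT_INT_* index back to (reg, bit) */
	static void it6505_int_index(unsigned int idx,
				     unsigned int *reg, unsigned int *bit)
	{
		*reg = INT_STATUS_01 + idx / BITS_PER_BYTE;
		*bit = idx % BITS_PER_BYTE;
	}
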
+#define REG_SYSTEM_STS 0x0D
+#define INT_STS BIT(0)
+#define HPD_STS BIT(1)
+#define VIDEO_STB BIT(2)
+
+#define REG_LINK_TRAIN_STS 0x0E
+#define LINK_STATE_CR BIT(2)
+#define LINK_STATE_EQ BIT(3)
+#define LINK_STATE_NORP BIT(4)
+
+#define REG_BANK_SEL 0x0F
+#define REG_CLK_CTRL0 0x10
+#define M_PCLK_DELAY 0x03
+
+#define REG_AUX_OPT 0x11
+#define AUX_AUTO_RST BIT(0)
+#define AUX_FIX_FREQ BIT(3)
+
+#define REG_DATA_CTRL0 0x12
+#define VIDEO_LATCH_EDGE BIT(4)
+#define ENABLE_PCLK_COUNTER BIT(7)
+
+#define REG_PCLK_COUNTER_VALUE 0x13
+
+#define REG_501_FIFO_CTRL 0x15
+#define RST_501_FIFO BIT(1)
+
+#define REG_TRAIN_CTRL0 0x16
+#define FORCE_LBR BIT(0)
+#define LANE_COUNT_MASK 0x06
+#define LANE_SWAP BIT(3)
+#define SPREAD_AMP_5 BIT(4)
+#define FORCE_CR_DONE BIT(5)
+#define FORCE_EQ_DONE BIT(6)
+
+#define REG_TRAIN_CTRL1 0x17
+#define AUTO_TRAIN BIT(0)
+#define MANUAL_TRAIN BIT(1)
+#define FORCE_RETRAIN BIT(2)
+
+#define REG_AUX_CTRL 0x23
+#define CLR_EDID_FIFO BIT(0)
+#define AUX_USER_MODE BIT(1)
+#define AUX_NO_SEGMENT_WR BIT(6)
+#define AUX_EN_FIFO_READ BIT(7)
+
+#define REG_AUX_ADR_0_7 0x24
+#define REG_AUX_ADR_8_15 0x25
+#define REG_AUX_ADR_16_19 0x26
+#define REG_AUX_OUT_DATA0 0x27
+
+#define REG_AUX_CMD_REQ 0x2B
+#define AUX_BUSY BIT(5)
+
+#define REG_AUX_DATA_0_7 0x2C
+#define REG_AUX_DATA_8_15 0x2D
+#define REG_AUX_DATA_16_23 0x2E
+#define REG_AUX_DATA_24_31 0x2F
+
+#define REG_AUX_DATA_FIFO 0x2F
+
+#define REG_AUX_ERROR_STS 0x9F
+#define M_AUX_REQ_FAIL 0x03
+
+#define REG_HDCP_CTRL1 0x38
+#define HDCP_CP_ENABLE BIT(0)
+
+#define REG_HDCP_TRIGGER 0x39
+#define HDCP_TRIGGER_START BIT(0)
+#define HDCP_TRIGGER_CPIRQ BIT(1)
+#define HDCP_TRIGGER_KSV_DONE BIT(4)
+#define HDCP_TRIGGER_KSV_FAIL BIT(5)
+
+#define REG_HDCP_CTRL2 0x3A
+#define HDCP_AN_SEL BIT(0)
+#define HDCP_AN_GEN BIT(1)
+#define HDCP_HW_HPDIRQ_ACT BIT(2)
+#define HDCP_EN_M0_READ BIT(5)
+
+#define REG_M0_0_7 0x4C
+#define REG_AN_0_7 0x4C
+#define REG_SP_CTRL0 0x58
+#define REG_IP_CTRL1 0x59
+#define REG_IP_CTRL2 0x5A
+
+#define REG_LINK_DRV 0x5C
+#define DRV_HS BIT(1)
+
+#define REG_DRV_LN_DATA_SEL 0x5D
+
+#define REG_AUX 0x5E
+
+#define REG_VID_BUS_CTRL0 0x60
+#define IN_DDR BIT(2)
+#define DDR_CD (0x01 << 6)
+
+#define REG_VID_BUS_CTRL1 0x61
+#define TX_FIFO_RESET BIT(1)
+
+#define REG_INPUT_CTRL 0xA0
+#define INPUT_HSYNC_POL BIT(0)
+#define INPUT_VSYNC_POL BIT(2)
+#define INPUT_INTERLACED BIT(4)
+
+#define REG_INPUT_HTOTAL 0xA1
+#define REG_INPUT_HACTIVE_START 0xA3
+#define REG_INPUT_HACTIVE_WIDTH 0xA5
+#define REG_INPUT_HFRONT_PORCH 0xA7
+#define REG_INPUT_HSYNC_WIDTH 0xA9
+#define REG_INPUT_VTOTAL 0xAB
+#define REG_INPUT_VACTIVE_START 0xAD
+#define REG_INPUT_VACTIVE_WIDTH 0xAF
+#define REG_INPUT_VFRONT_PORCH 0xB1
+#define REG_INPUT_VSYNC_WIDTH 0xB3
+
+#define REG_AUDIO_SRC_CTRL 0xB8
+#define M_AUDIO_I2S_EN 0x0F
+#define EN_I2S0 BIT(0)
+#define EN_I2S1 BIT(1)
+#define EN_I2S2 BIT(2)
+#define EN_I2S3 BIT(3)
+#define AUDIO_FIFO_RESET BIT(7)
+
+#define REG_AUDIO_FMT 0xB9
+#define REG_AUDIO_FIFO_SEL 0xBA
+
+#define REG_AUDIO_CTRL0 0xBB
+#define AUDIO_FULL_PKT BIT(4)
+#define AUDIO_16B_BOUND BIT(5)
+
+#define REG_AUDIO_CTRL1 0xBC
+#define REG_AUDIO_INPUT_FREQ 0xBE
+
+#define REG_IEC958_STS0 0xBF
+#define REG_IEC958_STS1 0xC0
+#define REG_IEC958_STS2 0xC1
+#define REG_IEC958_STS3 0xC2
+#define REG_IEC958_STS4 0xC3
+
+#define REG_HPD_IRQ_TIME 0xC9
+#define REG_AUX_DEBUG_MODE 0xCA
+#define REG_AUX_OPT2 0xCB
+#define REG_HDCP_OPT 0xCE
+#define REG_USER_DRV_PRE 0xCF
+
+#define REG_DATA_MUTE_CTRL 0xD3
+#define ENABLE_ENHANCED_FRAME BIT(0)
+#define ENABLE_AUTO_VIDEO_FIFO_RESET BIT(1)
+#define EN_VID_MUTE BIT(4)
+#define EN_AUD_MUTE BIT(5)
+
+#define REG_TIME_STMP_CTRL 0xD4
+#define EN_ENHANCE_VID_STMP BIT(0)
+#define EN_ENHANCE_AUD_STMP BIT(2)
+#define M_STAMP_STEP 0x30
+#define EN_SSC_GAT BIT(6)
+
+#define REG_INFOFRAME_CTRL 0xE8
+#define EN_AVI_PKT BIT(0)
+#define EN_AUD_PKT BIT(1)
+#define EN_MPG_PKT BIT(2)
+#define EN_GEN_PKT BIT(3)
+#define EN_VID_TIME_STMP BIT(4)
+#define EN_AUD_TIME_STMP BIT(5)
+#define EN_VID_CTRL_PKT (EN_AVI_PKT | EN_VID_TIME_STMP)
+#define EN_AUD_CTRL_PKT (EN_AUD_PKT | EN_AUD_TIME_STMP)
+
+#define REG_AUDIO_N_0_7 0xDE
+#define REG_AUDIO_N_8_15 0xDF
+#define REG_AUDIO_N_16_23 0xE0
+
+#define REG_AVI_INFO_DB1 0xE9
+#define REG_AVI_INFO_DB2 0xEA
+#define REG_AVI_INFO_DB3 0xEB
+#define REG_AVI_INFO_DB4 0xEC
+#define REG_AVI_INFO_DB5 0xED
+#define REG_AVI_INFO_SUM 0xF6
+
+#define REG_AUD_INFOFRAM_DB1 0xF7
+#define REG_AUD_INFOFRAM_DB2 0xF8
+#define REG_AUD_INFOFRAM_DB3 0xF9
+#define REG_AUD_INFOFRAM_DB4 0xFA
+#define REG_AUD_INFOFRAM_SUM 0xFB
+
+/* the following six registers are in bank1 */
+#define REG_DRV_0_DB_800_MV 0x7E
+#define REG_PRE_0_DB_800_MV 0x7F
+#define REG_PRE_3P5_DB_800_MV 0x81
+#define REG_SSC_CTRL0 0x88
+#define REG_SSC_CTRL1 0x89
+#define REG_SSC_CTRL2 0x8A
+
+#define RBR DP_LINK_BW_1_62
+#define HBR DP_LINK_BW_2_7
+#define HBR2 DP_LINK_BW_5_4
+#define HBR3 DP_LINK_BW_8_1
+
+#define DPCD_V_1_1 0x11
+#define MISC_VERB 0xF0
+#define MISC_VERC 0x70
+#define I2S_INPUT_FORMAT_STANDARD 0
+#define I2S_INPUT_FORMAT_32BIT 1
+#define I2S_INPUT_LEFT_JUSTIFIED 0
+#define I2S_INPUT_RIGHT_JUSTIFIED 1
+#define I2S_DATA_1T_DELAY 0
+#define I2S_DATA_NO_DELAY 1
+#define I2S_WS_LEFT_CHANNEL 0
+#define I2S_WS_RIGHT_CHANNEL 1
+#define I2S_DATA_MSB_FIRST 0
+#define I2S_DATA_LSB_FIRST 1
+#define WORD_LENGTH_16BIT 0
+#define WORD_LENGTH_18BIT 1
+#define WORD_LENGTH_20BIT 2
+#define WORD_LENGTH_24BIT 3
+#define DEBUGFS_DIR_NAME "it6505-debugfs"
+#define READ_BUFFER_SIZE 200
+
+/* Vendor option */
+#define HDCP_DESIRED 1
+#define MAX_LANE_COUNT 4
+#define MAX_LINK_RATE HBR
+#define AUTO_TRAIN_RETRY 3
+#define MAX_HDCP_DOWN_STREAM_COUNT 10
+#define MAX_CR_LEVEL 0x03
+#define MAX_EQ_LEVEL 0x03
+#define AUX_WAIT_TIMEOUT_MS 15
+#define AUX_FIFO_MAX_SIZE 32
+#define PIXEL_CLK_DELAY 1
+#define PIXEL_CLK_INVERSE 0
+#define ADJUST_PHASE_THRESHOLD 80000
+#define DPI_PIXEL_CLK_MAX 95000
+#define HDCP_SHA1_FIFO_LEN (MAX_HDCP_DOWN_STREAM_COUNT * 5 + 10)
+#define DEFAULT_PWR_ON 0
+#define DEFAULT_DRV_HOLD 0
+
+#define AUDIO_SELECT I2S
+#define AUDIO_TYPE LPCM
+#define AUDIO_SAMPLE_RATE SAMPLE_RATE_48K
+#define AUDIO_CHANNEL_COUNT 2
+#define I2S_INPUT_FORMAT I2S_INPUT_FORMAT_32BIT
+#define I2S_JUSTIFIED I2S_INPUT_LEFT_JUSTIFIED
+#define I2S_DATA_DELAY I2S_DATA_1T_DELAY
+#define I2S_WS_CHANNEL I2S_WS_LEFT_CHANNEL
+#define I2S_DATA_SEQUENCE I2S_DATA_MSB_FIRST
+#define AUDIO_WORD_LENGTH WORD_LENGTH_24BIT
+
+enum aux_cmd_type {
+ CMD_AUX_NATIVE_READ = 0x0,
+ CMD_AUX_NATIVE_WRITE = 0x5,
+ CMD_AUX_I2C_EDID_READ = 0xB,
+};
+
+enum aux_cmd_reply {
+ REPLY_ACK,
+ REPLY_NACK,
+ REPLY_DEFER,
+};
+
+enum link_train_status {
+ LINK_IDLE,
+ LINK_BUSY,
+ LINK_OK,
+};
+
+enum hdcp_state {
+ HDCP_AUTH_IDLE,
+ HDCP_AUTH_GOING,
+ HDCP_AUTH_DONE,
+};
+
+struct it6505_platform_data {
+ struct regulator *pwr18;
+ struct regulator *ovdd;
+ struct gpio_desc *gpiod_reset;
+};
+
+enum it6505_audio_select {
+ I2S = 0,
+ SPDIF,
+};
+
+enum it6505_audio_sample_rate {
+ SAMPLE_RATE_24K = 0x6,
+ SAMPLE_RATE_32K = 0x3,
+ SAMPLE_RATE_48K = 0x2,
+ SAMPLE_RATE_96K = 0xA,
+ SAMPLE_RATE_192K = 0xE,
+ SAMPLE_RATE_44_1K = 0x0,
+ SAMPLE_RATE_88_2K = 0x8,
+ SAMPLE_RATE_176_4K = 0xC,
+};
+
+enum it6505_audio_type {
+ LPCM = 0,
+ NLPCM,
+ DSS,
+};
+
+struct it6505_audio_data {
+ enum it6505_audio_select select;
+ enum it6505_audio_sample_rate sample_rate;
+ enum it6505_audio_type type;
+ u8 word_length;
+ u8 channel_count;
+ u8 i2s_input_format;
+ u8 i2s_justified;
+ u8 i2s_data_delay;
+ u8 i2s_ws_channel;
+ u8 i2s_data_sequence;
+};
+
+struct it6505_audio_sample_rate_map {
+ enum it6505_audio_sample_rate rate;
+ int sample_rate_value;
+};
+
+struct it6505_drm_dp_link {
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int num_lanes;
+ unsigned long capabilities;
+};
+
+struct debugfs_entries {
+ char *name;
+ const struct file_operations *fops;
+};
+
+struct it6505 {
+ struct drm_dp_aux aux;
+ struct drm_bridge bridge;
+ struct i2c_client *client;
+ struct it6505_drm_dp_link link;
+ struct it6505_platform_data pdata;
+ /*
+ * Mutex protects extcon and interrupt functions from interfering
+ * with each other.
+ */
+ struct mutex extcon_lock;
+ struct mutex mode_lock; /* used by bridge_detect */
+ struct mutex aux_lock; /* used for aux data transfers */
+ struct regmap *regmap;
+ struct drm_display_mode source_output_mode;
+ struct drm_display_mode video_info;
+ struct notifier_block event_nb;
+ struct extcon_dev *extcon;
+ struct work_struct extcon_wq;
+ enum drm_connector_status connector_status;
+ enum link_train_status link_state;
+ struct work_struct link_works;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 lane_count;
+ u8 link_rate_bw_code;
+ u8 sink_count;
+ bool step_train;
+ bool branch_device;
+ bool enable_ssc;
+ bool lane_swap_disabled;
+ bool lane_swap;
+ bool powered;
+ bool hpd_state;
+ u32 afe_setting;
+ enum hdcp_state hdcp_status;
+ struct delayed_work hdcp_work;
+ struct work_struct hdcp_wait_ksv_list;
+ struct completion wait_edid_complete;
+ u8 auto_train_retry;
+ bool hdcp_desired;
+ bool is_repeater;
+ u8 hdcp_down_stream_count;
+ u8 bksvs[DRM_HDCP_KSV_LEN];
+ u8 sha1_input[HDCP_SHA1_FIFO_LEN];
+ bool enable_enhanced_frame;
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ struct delayed_work delayed_audio;
+ struct it6505_audio_data audio;
+ struct dentry *debugfs;
+
+ /* it6505 driver hold option */
+ bool enable_drv_hold;
+};
+
+struct it6505_step_train_para {
+ u8 voltage_swing[MAX_LANE_COUNT];
+ u8 pre_emphasis[MAX_LANE_COUNT];
+};
+
+/*
+ * Vendor option afe settings for different platforms
+ * 0: without FPC cable
+ * 1: with FPC cable
+ */
+
+static const u8 afe_setting_table[][3] = {
+ {0x82, 0x00, 0x45},
+ {0x93, 0x2A, 0x85}
+};
+
+static const struct it6505_audio_sample_rate_map audio_sample_rate_map[] = {
+ {SAMPLE_RATE_24K, 24000},
+ {SAMPLE_RATE_32K, 32000},
+ {SAMPLE_RATE_48K, 48000},
+ {SAMPLE_RATE_96K, 96000},
+ {SAMPLE_RATE_192K, 192000},
+ {SAMPLE_RATE_44_1K, 44100},
+ {SAMPLE_RATE_88_2K, 88200},
+ {SAMPLE_RATE_176_4K, 176400},
+};
+
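Note: audio_sample_rate_map pairs the hardware enum with the plain rate in Hz; the consumer (the audio hw_params handling) is outside this excerpt, so the lookup below is only a sketch of how the table is meant to be scanned:

	/* Sketch: Hz -> hardware enum, negative errno if unsupported */
	static int it6505_rate_to_reg(int hz)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(audio_sample_rate_map); i++)
			if (audio_sample_rate_map[i].sample_rate_value == hz)
				return audio_sample_rate_map[i].rate;
		return -EINVAL;
	}
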
+static const struct regmap_range it6505_bridge_volatile_ranges[] = {
+ { .range_min = 0, .range_max = 0xFF },
+};
+
+static const struct regmap_access_table it6505_bridge_volatile_table = {
+ .yes_ranges = it6505_bridge_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(it6505_bridge_volatile_ranges),
+};
+
+static const struct regmap_config it6505_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &it6505_bridge_volatile_table,
+ .cache_type = REGCACHE_NONE,
+};
+
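Note: every register is marked volatile and caching is disabled (REGCACHE_NONE), so each regmap_read() really goes out on the I2C bus. The probe function lies outside this excerpt; the standard wiring for such a config would be roughly:

	/* Assumed probe-side setup via the stock regmap I2C API */
	it6505->regmap = devm_regmap_init_i2c(client, &it6505_regmap_config);
	if (IS_ERR(it6505->regmap))
		return PTR_ERR(it6505->regmap);
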
+static int it6505_read(struct it6505 *it6505, unsigned int reg_addr)
+{
+ unsigned int value;
+ int err;
+ struct device *dev = &it6505->client->dev;
+
+ err = regmap_read(it6505->regmap, reg_addr, &value);
+ if (err < 0) {
+ dev_err(dev, "read failed reg[0x%x] err: %d", reg_addr, err);
+ return err;
+ }
+
+ return value;
+}
+
+static int it6505_write(struct it6505 *it6505, unsigned int reg_addr,
+ unsigned int reg_val)
+{
+ int err;
+ struct device *dev = &it6505->client->dev;
+
+ err = regmap_write(it6505->regmap, reg_addr, reg_val);
+
+ if (err < 0) {
+ dev_err(dev, "write failed reg[0x%x] = 0x%x err = %d",
+ reg_addr, reg_val, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int it6505_set_bits(struct it6505 *it6505, unsigned int reg,
+ unsigned int mask, unsigned int value)
+{
+ int err;
+ struct device *dev = &it6505->client->dev;
+
+ err = regmap_update_bits(it6505->regmap, reg, mask, value);
+ if (err < 0) {
+ dev_err(dev, "write reg[0x%x] = 0x%x mask = 0x%x failed err %d",
+ reg, value, mask, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void it6505_debug_print(struct it6505 *it6505, unsigned int reg,
+ const char *prefix)
+{
+ struct device *dev = &it6505->client->dev;
+ int val;
+
+ if (likely(!(__drm_debug & DRM_UT_DRIVER)))
+ return;
+
+ val = it6505_read(it6505, reg);
+ if (val < 0)
+ DRM_DEV_DEBUG_DRIVER(dev, "%s reg[%02x] read error (%d)",
+ prefix, reg, val);
+ else
+ DRM_DEV_DEBUG_DRIVER(dev, "%s reg[%02x] = 0x%02x", prefix, reg,
+ val);
+}
+
+static int it6505_dpcd_read(struct it6505 *it6505, unsigned long offset)
+{
+ u8 value;
+ int ret;
+ struct device *dev = &it6505->client->dev;
+
+ ret = drm_dp_dpcd_readb(&it6505->aux, offset, &value);
+ if (ret < 0) {
+ dev_err(dev, "DPCD read failed [0x%lx] ret: %d", offset, ret);
+ return ret;
+ }
+ return value;
+}
+
+static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset,
+ u8 datain)
+{
+ int ret;
+ struct device *dev = &it6505->client->dev;
+
+ ret = drm_dp_dpcd_writeb(&it6505->aux, offset, datain);
+ if (ret < 0) {
+ dev_err(dev, "DPCD write failed [0x%lx] ret: %d", offset, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int it6505_get_dpcd(struct it6505 *it6505, int offset, u8 *dpcd, int num)
+{
+ int ret;
+ struct device *dev = &it6505->client->dev;
+
+ ret = drm_dp_dpcd_read(&it6505->aux, offset, dpcd, num);
+
+ if (ret < 0)
+ return ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "ret = %d DPCD[0x%x] = 0x%*ph", ret, offset,
+ num, dpcd);
+
+ return 0;
+}
+
+static void it6505_dump(struct it6505 *it6505)
+{
+ unsigned int i, j;
+ u8 regs[16];
+ struct device *dev = &it6505->client->dev;
+
+ for (i = 0; i <= 0xff; i += 16) {
+ for (j = 0; j < 16; j++)
+ regs[j] = it6505_read(it6505, i + j);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x] = %16ph", i, regs);
+ }
+}
+
+static bool it6505_get_sink_hpd_status(struct it6505 *it6505)
+{
+ int reg_0d;
+
+ reg_0d = it6505_read(it6505, REG_SYSTEM_STS);
+
+ if (reg_0d < 0)
+ return false;
+
+ return reg_0d & HPD_STS;
+}
+
+static int it6505_read_word(struct it6505 *it6505, unsigned int reg)
+{
+ int val0, val1;
+
+ val0 = it6505_read(it6505, reg);
+ if (val0 < 0)
+ return val0;
+
+ val1 = it6505_read(it6505, reg + 1);
+ if (val1 < 0)
+ return val1;
+
+ return (val1 << 8) | val0;
+}
+
+static void it6505_calc_video_info(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ int hsync_pol, vsync_pol, interlaced;
+ int htotal, hdes, hdew, hfph, hsyncw;
+ int vtotal, vdes, vdew, vfph, vsyncw;
+ int rddata, i, pclk, sum = 0;
+
+ usleep_range(10000, 15000);
+ rddata = it6505_read(it6505, REG_INPUT_CTRL);
+ hsync_pol = rddata & INPUT_HSYNC_POL;
+ vsync_pol = (rddata & INPUT_VSYNC_POL) >> 2;
+ interlaced = (rddata & INPUT_INTERLACED) >> 4;
+
+ htotal = it6505_read_word(it6505, REG_INPUT_HTOTAL) & 0x1FFF;
+ hdes = it6505_read_word(it6505, REG_INPUT_HACTIVE_START) & 0x1FFF;
+ hdew = it6505_read_word(it6505, REG_INPUT_HACTIVE_WIDTH) & 0x1FFF;
+ hfph = it6505_read_word(it6505, REG_INPUT_HFRONT_PORCH) & 0x1FFF;
+ hsyncw = it6505_read_word(it6505, REG_INPUT_HSYNC_WIDTH) & 0x1FFF;
+
+ vtotal = it6505_read_word(it6505, REG_INPUT_VTOTAL) & 0xFFF;
+ vdes = it6505_read_word(it6505, REG_INPUT_VACTIVE_START) & 0xFFF;
+ vdew = it6505_read_word(it6505, REG_INPUT_VACTIVE_WIDTH) & 0xFFF;
+ vfph = it6505_read_word(it6505, REG_INPUT_VFRONT_PORCH) & 0xFFF;
+ vsyncw = it6505_read_word(it6505, REG_INPUT_VSYNC_WIDTH) & 0xFFF;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync_pol:%d, vsync_pol:%d, interlaced:%d",
+ hsync_pol, vsync_pol, interlaced);
+ DRM_DEV_DEBUG_DRIVER(dev, "hactive_start:%d, vactive_start:%d",
+ hdes, vdes);
+
+ for (i = 0; i < 10; i++) {
+ it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER,
+ ENABLE_PCLK_COUNTER);
+ usleep_range(10000, 15000);
+ it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER,
+ 0x00);
+ rddata = it6505_read_word(it6505, REG_PCLK_COUNTER_VALUE) &
+ 0xFFF;
+
+ sum += rddata;
+ }
+
+ if (sum == 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, "calc video timing error");
+ return;
+ }
+
+ sum /= 10;
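+ /*
+ * Vendor formula: derive pclk (in kHz) from the averaged counter
+ * value; the 13500 and 2048 constants suggest a 13.5 MHz reference
+ * counted over a 2048-clock window (assumption, not documented here).
+ */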
+ pclk = 13500 * 2048 / sum;
+ it6505->video_info.clock = pclk;
+ it6505->video_info.hdisplay = hdew;
+ it6505->video_info.hsync_start = hdew + hfph;
+ it6505->video_info.hsync_end = hdew + hfph + hsyncw;
+ it6505->video_info.htotal = htotal;
+ it6505->video_info.vdisplay = vdew;
+ it6505->video_info.vsync_start = vdew + vfph;
+ it6505->video_info.vsync_end = vdew + vfph + vsyncw;
+ it6505->video_info.vtotal = vtotal;
+
+ DRM_DEV_DEBUG_DRIVER(dev, DRM_MODE_FMT,
+ DRM_MODE_ARG(&it6505->video_info));
+}
+
+static int it6505_drm_dp_link_probe(struct drm_dp_aux *aux,
+ struct it6505_drm_dp_link *link)
+{
+ u8 values[3];
+ int err;
+
+ memset(link, 0, sizeof(*link));
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ link->revision = values[0];
+ link->rate = drm_dp_bw_code_to_link_rate(values[1]);
+ link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
+
+ if (values[2] & DP_ENHANCED_FRAME_CAP)
+ link->capabilities = DP_ENHANCED_FRAME_CAP;
+
+ return 0;
+}
+
+static int it6505_drm_dp_link_power_up(struct drm_dp_aux *aux,
+ struct it6505_drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < DPCD_V_1_1)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static void it6505_clear_int(struct it6505 *it6505)
+{
+ it6505_write(it6505, INT_STATUS_01, 0xFF);
+ it6505_write(it6505, INT_STATUS_02, 0xFF);
+ it6505_write(it6505, INT_STATUS_03, 0xFF);
+}
+
+static void it6505_int_mask_enable(struct it6505 *it6505)
+{
+ it6505_write(it6505, INT_MASK_01, BIT(INT_HPD_CHANGE) |
+ BIT(INT_RECEIVE_HPD_IRQ) | BIT(INT_SCDT_CHANGE) |
+ BIT(INT_HDCP_FAIL) | BIT(INT_HDCP_DONE));
+
+ it6505_write(it6505, INT_MASK_02, BIT(INT_AUX_CMD_FAIL) |
+ BIT(INT_HDCP_KSV_CHECK) | BIT(INT_AUDIO_FIFO_ERROR));
+
+ it6505_write(it6505, INT_MASK_03, BIT(INT_LINK_TRAIN_FAIL) |
+ BIT(INT_VID_FIFO_ERROR) | BIT(INT_IO_LATCH_FIFO_OVERFLOW));
+}
+
+static void it6505_int_mask_disable(struct it6505 *it6505)
+{
+ it6505_write(it6505, INT_MASK_01, 0x00);
+ it6505_write(it6505, INT_MASK_02, 0x00);
+ it6505_write(it6505, INT_MASK_03, 0x00);
+}
+
+static void it6505_lane_termination_on(struct it6505 *it6505)
+{
+ int regcf;
+
+ regcf = it6505_read(it6505, REG_USER_DRV_PRE);
+
+ if (regcf == MISC_VERB)
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x00);
+
+ if (regcf == MISC_VERC) {
+ if (it6505->lane_swap) {
+ switch (it6505->lane_count) {
+ case 1:
+ case 2:
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
+ 0x0C, 0x08);
+ break;
+ default:
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
+ 0x0C, 0x0C);
+ break;
+ }
+ } else {
+ switch (it6505->lane_count) {
+ case 1:
+ case 2:
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
+ 0x0C, 0x04);
+ break;
+ default:
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
+ 0x0C, 0x0C);
+ break;
+ }
+ }
+ }
+}
+
+static void it6505_lane_termination_off(struct it6505 *it6505)
+{
+ int regcf;
+
+ regcf = it6505_read(it6505, REG_USER_DRV_PRE);
+
+ if (regcf == MISC_VERB)
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80);
+
+ if (regcf == MISC_VERC)
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x0C, 0x00);
+}
+
+static void it6505_lane_power_on(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_LINK_DRV, 0xF1,
+ (it6505->lane_swap ?
+ GENMASK(7, 8 - it6505->lane_count) :
+ GENMASK(3 + it6505->lane_count, 4)) |
+ 0x01);
+}
+
+static void it6505_lane_power_off(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_LINK_DRV, 0xF0, 0x00);
+}
+
+static void it6505_lane_off(struct it6505 *it6505)
+{
+ it6505_lane_power_off(it6505);
+ it6505_lane_termination_off(it6505);
+}
+
+static void it6505_aux_termination_on(struct it6505 *it6505)
+{
+ int regcf;
+
+ regcf = it6505_read(it6505, REG_USER_DRV_PRE);
+
+ if (regcf == MISC_VERB)
+ it6505_lane_termination_on(it6505);
+
+ if (regcf == MISC_VERC)
+ it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80);
+}
+
+static void it6505_aux_power_on(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_AUX, 0x02, 0x02);
+}
+
+static void it6505_aux_on(struct it6505 *it6505)
+{
+ it6505_aux_power_on(it6505);
+ it6505_aux_termination_on(it6505);
+}
+
+static void it6505_aux_reset(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, AUX_RESET);
+ it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, 0x00);
+}
+
+static void it6505_reset_logic(struct it6505 *it6505)
+{
+ regmap_write(it6505->regmap, REG_RESET_CTRL, ALL_LOGIC_RESET);
+ usleep_range(1000, 1500);
+}
+
+static bool it6505_aux_op_finished(struct it6505 *it6505)
+{
+ int reg2b = it6505_read(it6505, REG_AUX_CMD_REQ);
+
+ if (reg2b < 0)
+ return false;
+
+ return (reg2b & AUX_BUSY) == 0;
+}
+
+static int it6505_aux_wait(struct it6505 *it6505)
+{
+ int status;
+ unsigned long timeout;
+ struct device *dev = &it6505->client->dev;
+
+ timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+ while (!it6505_aux_op_finished(it6505)) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "Timed out waiting AUX to finish");
+ return -ETIMEDOUT;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ status = it6505_read(it6505, REG_AUX_ERROR_STS);
+ if (status < 0) {
+ dev_err(dev, "Failed to read AUX channel: %d", status);
+ return status;
+ }
+
+ return 0;
+}
+
+static ssize_t it6505_aux_operation(struct it6505 *it6505,
+ enum aux_cmd_type cmd,
+ unsigned int address, u8 *buffer,
+ size_t size, enum aux_cmd_reply *reply)
+{
+ int i, ret;
+ bool aux_write_check = false;
+
+ if (!it6505_get_sink_hpd_status(it6505))
+ return -EIO;
+
+ /* set AUX user mode */
+ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, AUX_USER_MODE);
+
+aux_op_start:
+ if (cmd == CMD_AUX_I2C_EDID_READ) {
+ /* The AUX EDID FIFO has a maximum length of AUX_FIFO_MAX_SIZE bytes. */
+ size = min_t(size_t, size, AUX_FIFO_MAX_SIZE);
+ /* Enable AUX FIFO read back and clear FIFO */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO);
+
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO,
+ AUX_EN_FIFO_READ);
+ } else {
+ /* The DP AUX transmit buffer has 4 bytes. */
+ size = min_t(size_t, size, 4);
+ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR,
+ AUX_NO_SEGMENT_WR);
+ }
+
+ /* Start Address[7:0] */
+ it6505_write(it6505, REG_AUX_ADR_0_7, (address >> 0) & 0xFF);
+ /* Start Address[15:8] */
+ it6505_write(it6505, REG_AUX_ADR_8_15, (address >> 8) & 0xFF);
+ /* WriteNum[3:0]+StartAdr[19:16] */
+ it6505_write(it6505, REG_AUX_ADR_16_19,
+ ((address >> 16) & 0x0F) | ((size - 1) << 4));
+
+ if (cmd == CMD_AUX_NATIVE_WRITE)
+ regmap_bulk_write(it6505->regmap, REG_AUX_OUT_DATA0, buffer,
+ size);
+
+ /* Aux Fire */
+ it6505_write(it6505, REG_AUX_CMD_REQ, cmd);
+
+ ret = it6505_aux_wait(it6505);
+ if (ret < 0)
+ goto aux_op_err;
+
+ ret = it6505_read(it6505, REG_AUX_ERROR_STS);
+ if (ret < 0)
+ goto aux_op_err;
+
+ switch ((ret >> 6) & 0x3) {
+ case 0:
+ *reply = REPLY_ACK;
+ break;
+ case 1:
+ *reply = REPLY_DEFER;
+ ret = -EAGAIN;
+ goto aux_op_err;
+ case 2:
+ *reply = REPLY_NACK;
+ ret = -EIO;
+ goto aux_op_err;
+ case 3:
+ ret = -ETIMEDOUT;
+ goto aux_op_err;
+ }
+
+ /* Read back Native Write data */
+ if (cmd == CMD_AUX_NATIVE_WRITE) {
+ aux_write_check = true;
+ cmd = CMD_AUX_NATIVE_READ;
+ goto aux_op_start;
+ }
+
+ if (cmd == CMD_AUX_I2C_EDID_READ) {
+ for (i = 0; i < size; i++) {
+ ret = it6505_read(it6505, REG_AUX_DATA_FIFO);
+ if (ret < 0)
+ goto aux_op_err;
+ buffer[i] = ret;
+ }
+ } else {
+ for (i = 0; i < size; i++) {
+ ret = it6505_read(it6505, REG_AUX_DATA_0_7 + i);
+ if (ret < 0)
+ goto aux_op_err;
+
+ if (aux_write_check && buffer[size - 1 - i] != ret) {
+ ret = -EINVAL;
+ goto aux_op_err;
+ }
+
+ buffer[size - 1 - i] = ret;
+ }
+ }
+
+ ret = i;
+
+aux_op_err:
+ if (cmd == CMD_AUX_I2C_EDID_READ) {
+ /* clear AUX FIFO */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO);
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO, 0x00);
+ }
+
+ /* Leave AUX user mode */
+ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, 0);
+
+ return ret;
+}
+
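+/* Split transfers into 4-byte chunks, the size of the AUX data buffer. */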
+static ssize_t it6505_aux_do_transfer(struct it6505 *it6505,
+ enum aux_cmd_type cmd,
+ unsigned int address, u8 *buffer,
+ size_t size, enum aux_cmd_reply *reply)
+{
+ int i, ret_size, ret = 0, request_size;
+
+ mutex_lock(&it6505->aux_lock);
+ for (i = 0; i < size; i += 4) {
+ request_size = min((int)size - i, 4);
+ ret_size = it6505_aux_operation(it6505, cmd, address + i,
+ buffer + i, request_size,
+ reply);
+ if (ret_size < 0) {
+ ret = ret_size;
+ goto aux_op_err;
+ }
+
+ ret += ret_size;
+ }
+
+aux_op_err:
+ mutex_unlock(&it6505->aux_lock);
+ return ret;
+}
+
+static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct it6505 *it6505 = container_of(aux, struct it6505, aux);
+ u8 cmd;
+ bool is_i2c = !(msg->request & DP_AUX_NATIVE_WRITE);
+ int ret;
+ enum aux_cmd_reply reply;
+
+ /* IT6505 doesn't support arbitrary I2C read / write. */
+ if (is_i2c)
+ return -EINVAL;
+
+ switch (msg->request) {
+ case DP_AUX_NATIVE_READ:
+ cmd = CMD_AUX_NATIVE_READ;
+ break;
+ case DP_AUX_NATIVE_WRITE:
+ cmd = CMD_AUX_NATIVE_WRITE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = it6505_aux_do_transfer(it6505, cmd, msg->address, msg->buffer,
+ msg->size, &reply);
+ if (ret < 0)
+ return ret;
+
+ switch (reply) {
+ case REPLY_ACK:
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ break;
+ case REPLY_NACK:
+ msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ break;
+ case REPLY_DEFER:
+ msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ break;
+ }
+
+ return ret;
+}
+
+static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+{
+ struct it6505 *it6505 = data;
+ struct device *dev = &it6505->client->dev;
+ enum aux_cmd_reply reply;
+ int offset, ret, aux_retry = 100;
+
+ it6505_aux_reset(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev, "block number = %d", block);
+
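+ /* Fetch the EDID 8 bytes per transaction; retry DEFER up to 100 times. */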
+ for (offset = 0; offset < EDID_LENGTH;) {
+ ret = it6505_aux_do_transfer(it6505, CMD_AUX_I2C_EDID_READ,
+ block * EDID_LENGTH + offset,
+ buf + offset, 8, &reply);
+
+ if (ret < 0 && ret != -EAGAIN)
+ return ret;
+
+ switch (reply) {
+ case REPLY_ACK:
+ DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x]: %8ph", offset,
+ buf + offset);
+ offset += 8;
+ aux_retry = 100;
+ break;
+ case REPLY_NACK:
+ return -EIO;
+ case REPLY_DEFER:
+ msleep(20);
+ if (!(--aux_retry))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void it6505_variable_config(struct it6505 *it6505)
+{
+ it6505->link_rate_bw_code = HBR;
+ it6505->lane_count = MAX_LANE_COUNT;
+ it6505->link_state = LINK_IDLE;
+ it6505->hdcp_desired = HDCP_DESIRED;
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+ it6505->audio.select = AUDIO_SELECT;
+ it6505->audio.sample_rate = AUDIO_SAMPLE_RATE;
+ it6505->audio.channel_count = AUDIO_CHANNEL_COUNT;
+ it6505->audio.type = AUDIO_TYPE;
+ it6505->audio.i2s_input_format = I2S_INPUT_FORMAT;
+ it6505->audio.i2s_justified = I2S_JUSTIFIED;
+ it6505->audio.i2s_data_delay = I2S_DATA_DELAY;
+ it6505->audio.i2s_ws_channel = I2S_WS_CHANNEL;
+ it6505->audio.i2s_data_sequence = I2S_DATA_SEQUENCE;
+ it6505->audio.word_length = AUDIO_WORD_LENGTH;
+ memset(it6505->sha1_input, 0, sizeof(it6505->sha1_input));
+ memset(it6505->bksvs, 0, sizeof(it6505->bksvs));
+}
+
+static int it6505_send_video_infoframe(struct it6505 *it6505,
+ struct hdmi_avi_infoframe *frame)
+{
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ int err;
+ struct device *dev = &it6505->client->dev;
+
+ err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(dev, "Failed to pack AVI infoframe: %d", err);
+ return err;
+ }
+
+ err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT, 0x00);
+ if (err)
+ return err;
+
+ err = regmap_bulk_write(it6505->regmap, REG_AVI_INFO_DB1,
+ buffer + HDMI_INFOFRAME_HEADER_SIZE,
+ frame->length);
+ if (err)
+ return err;
+
+ err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT,
+ EN_AVI_PKT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void it6505_get_extcon_property(struct it6505 *it6505)
+{
+ int err;
+ union extcon_property_value property;
+ struct device *dev = &it6505->client->dev;
+
+ if (it6505->extcon && !it6505->lane_swap_disabled) {
+ err = extcon_get_property(it6505->extcon, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ &property);
+ if (err) {
+ dev_err(dev, "get property fail!");
+ return;
+ }
+ it6505->lane_swap = property.intval;
+ }
+}
+
+static void it6505_clk_phase_adjustment(struct it6505 *it6505,
+ const struct drm_display_mode *mode)
+{
+ int clock = mode->clock;
+
+ it6505_set_bits(it6505, REG_CLK_CTRL0, M_PCLK_DELAY,
+ clock < ADJUST_PHASE_THRESHOLD ? PIXEL_CLK_DELAY : 0);
+ it6505_set_bits(it6505, REG_DATA_CTRL0, VIDEO_LATCH_EDGE,
+ PIXEL_CLK_INVERSE << 4);
+}
+
+static void it6505_link_reset_step_train(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0,
+ FORCE_CR_DONE | FORCE_EQ_DONE, 0x00);
+ it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void it6505_init(struct it6505 *it6505)
+{
+ it6505_write(it6505, REG_AUX_OPT, AUX_AUTO_RST | AUX_FIX_FREQ);
+ it6505_write(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR);
+ it6505_write(it6505, REG_HDCP_CTRL2, HDCP_AN_SEL | HDCP_HW_HPDIRQ_ACT);
+ it6505_write(it6505, REG_VID_BUS_CTRL0, IN_DDR | DDR_CD);
+ it6505_write(it6505, REG_VID_BUS_CTRL1, 0x01);
+ it6505_write(it6505, REG_AUDIO_CTRL0, AUDIO_16B_BOUND);
+
+ /* chip internal setting, don't modify */
+ it6505_write(it6505, REG_HPD_IRQ_TIME, 0xF5);
+ it6505_write(it6505, REG_AUX_DEBUG_MODE, 0x4D);
+ it6505_write(it6505, REG_AUX_OPT2, 0x17);
+ it6505_write(it6505, REG_HDCP_OPT, 0x60);
+ it6505_write(it6505, REG_DATA_MUTE_CTRL,
+ EN_VID_MUTE | EN_AUD_MUTE | ENABLE_AUTO_VIDEO_FIFO_RESET);
+ it6505_write(it6505, REG_TIME_STMP_CTRL,
+ EN_SSC_GAT | EN_ENHANCE_VID_STMP | EN_ENHANCE_AUD_STMP);
+ it6505_write(it6505, REG_INFOFRAME_CTRL, 0x00);
+ it6505_write(it6505, REG_BANK_SEL, 0x01);
+ it6505_write(it6505, REG_DRV_0_DB_800_MV,
+ afe_setting_table[it6505->afe_setting][0]);
+ it6505_write(it6505, REG_PRE_0_DB_800_MV,
+ afe_setting_table[it6505->afe_setting][1]);
+ it6505_write(it6505, REG_PRE_3P5_DB_800_MV,
+ afe_setting_table[it6505->afe_setting][2]);
+ it6505_write(it6505, REG_SSC_CTRL0, 0x9E);
+ it6505_write(it6505, REG_SSC_CTRL1, 0x1C);
+ it6505_write(it6505, REG_SSC_CTRL2, 0x42);
+ it6505_write(it6505, REG_BANK_SEL, 0x00);
+}
+
+static void it6505_video_disable(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE);
+ it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00);
+ it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET);
+}
+
+static void it6505_video_reset(struct it6505 *it6505)
+{
+ it6505_link_reset_step_train(it6505);
+ it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE);
+ it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00);
+ it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET);
+ it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO);
+ it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00);
+ it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00);
+}
+
+static void it6505_update_video_parameter(struct it6505 *it6505,
+ const struct drm_display_mode *mode)
+{
+ it6505_clk_phase_adjustment(it6505, mode);
+ it6505_video_disable(it6505);
+}
+
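+/*
+ * Detect audio input by briefly releasing the audio reset and sampling
+ * the input frequency register; 0xFF means no audio clock was detected.
+ */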
+static bool it6505_audio_input(struct it6505 *it6505)
+{
+ int reg05, regbe;
+
+ reg05 = it6505_read(it6505, REG_RESET_CTRL);
+ it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00);
+ usleep_range(3000, 4000);
+ regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ);
+ it6505_write(it6505, REG_RESET_CTRL, reg05);
+
+ return regbe != 0xFF;
+}
+
+static void it6505_setup_audio_channel_status(struct it6505 *it6505)
+{
+ enum it6505_audio_sample_rate sample_rate = it6505->audio.sample_rate;
+ u8 audio_word_length_map[] = { 0x02, 0x04, 0x03, 0x0B };
+
+ /* Channel Status */
+ it6505_write(it6505, REG_IEC958_STS0, it6505->audio.type << 1);
+ it6505_write(it6505, REG_IEC958_STS1, 0x00);
+ it6505_write(it6505, REG_IEC958_STS2, 0x00);
+ it6505_write(it6505, REG_IEC958_STS3, sample_rate);
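+ /*
+ * STS4: word length in the low nibble; the upper nibble is written as
+ * the complement of the sample rate code (per IEC 60958, presumably
+ * the original sample rate field).
+ */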
+ it6505_write(it6505, REG_IEC958_STS4, (~sample_rate << 4) |
+ audio_word_length_map[it6505->audio.word_length]);
+}
+
+static void it6505_setup_audio_format(struct it6505 *it6505)
+{
+ /* I2S MODE */
+ it6505_write(it6505, REG_AUDIO_FMT,
+ (it6505->audio.word_length << 5) |
+ (it6505->audio.i2s_data_sequence << 4) |
+ (it6505->audio.i2s_ws_channel << 3) |
+ (it6505->audio.i2s_data_delay << 2) |
+ (it6505->audio.i2s_justified << 1) |
+ it6505->audio.i2s_input_format);
+ if (it6505->audio.select == SPDIF) {
+ it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0x00);
+ /* 0x30 = 128*FS */
+ it6505_set_bits(it6505, REG_AUX_OPT, 0xF0, 0x30);
+ } else {
+ it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0xE4);
+ }
+
+ it6505_write(it6505, REG_AUDIO_CTRL0, 0x20);
+ it6505_write(it6505, REG_AUDIO_CTRL1, 0x00);
+}
+
+static void it6505_enable_audio_source(struct it6505 *it6505)
+{
+ unsigned int audio_source_count;
+
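+ /*
+ * BIT(ceil(channels / 2)) - 1 builds a contiguous enable mask with one
+ * bit per channel pair; the audio source select sits in bits [7:4].
+ */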
+ audio_source_count = BIT(DIV_ROUND_UP(it6505->audio.channel_count, 2))
+ - 1;
+
+ audio_source_count |= it6505->audio.select << 4;
+
+ it6505_write(it6505, REG_AUDIO_SRC_CTRL, audio_source_count);
+}
+
+static void it6505_enable_audio_infoframe(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ u8 audio_info_ca[] = { 0x00, 0x00, 0x01, 0x03, 0x07, 0x0B, 0x0F, 0x1F };
+
+ DRM_DEV_DEBUG_DRIVER(dev, "infoframe channel_allocation:0x%02x",
+ audio_info_ca[it6505->audio.channel_count - 1]);
+
+ it6505_write(it6505, REG_AUD_INFOFRAM_DB1, it6505->audio.channel_count
+ - 1);
+ it6505_write(it6505, REG_AUD_INFOFRAM_DB2, 0x00);
+ it6505_write(it6505, REG_AUD_INFOFRAM_DB3,
+ audio_info_ca[it6505->audio.channel_count - 1]);
+ it6505_write(it6505, REG_AUD_INFOFRAM_DB4, 0x00);
+ it6505_write(it6505, REG_AUD_INFOFRAM_SUM, 0x00);
+
+ /* Enable Audio InfoFrame */
+ it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT,
+ EN_AUD_CTRL_PKT);
+}
+
+static void it6505_disable_audio(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, EN_AUD_MUTE);
+ it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, M_AUDIO_I2S_EN, 0x00);
+ it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT, 0x00);
+ it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, AUDIO_RESET);
+}
+
+static void it6505_enable_audio(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ int regbe;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "start");
+ it6505_disable_audio(it6505);
+
+ it6505_setup_audio_channel_status(it6505);
+ it6505_setup_audio_format(it6505);
+ it6505_enable_audio_source(it6505);
+ it6505_enable_audio_infoframe(it6505);
+
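+ /*
+ * Program a fixed audio N value of 0x008000, presumably for audio
+ * clock regeneration.
+ */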
+ it6505_write(it6505, REG_AUDIO_N_0_7, 0x00);
+ it6505_write(it6505, REG_AUDIO_N_8_15, 0x80);
+ it6505_write(it6505, REG_AUDIO_N_16_23, 0x00);
+
+ it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET,
+ AUDIO_FIFO_RESET);
+ it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET, 0x00);
+ it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00);
+ regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ);
+ DRM_DEV_DEBUG_DRIVER(dev, "regbe:0x%02x audio input fs: %d.%d kHz",
+ regbe, 6750 / regbe, (6750 % regbe) * 10 / regbe);
+ it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, 0x00);
+}
+
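+/*
+ * Sinks below DPCD 1.2 always use step-by-step training; 1.2+ sinks
+ * use it when they report a non-default TRAINING_AUX_RD_INTERVAL.
+ */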
+static bool it6505_use_step_train_check(struct it6505 *it6505)
+{
+ if (it6505->link.revision >= 0x12)
+ return it6505->dpcd[DP_TRAINING_AUX_RD_INTERVAL] >= 0x01;
+
+ return true;
+}
+
+static void it6505_parse_link_capabilities(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ struct it6505_drm_dp_link *link = &it6505->link;
+ int bcaps;
+
+ if (it6505->dpcd[0] == 0) {
+ it6505_aux_on(it6505);
+ it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
+ ARRAY_SIZE(it6505->dpcd));
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "DPCD Rev.: %d.%d",
+ link->revision >> 4, link->revision & 0x0F);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Sink max link rate: %d.%02d Gbps per lane",
+ link->rate / 100000, link->rate / 1000 % 100);
+
+ it6505->link_rate_bw_code = drm_dp_link_rate_to_bw_code(link->rate);
+ DRM_DEV_DEBUG_DRIVER(dev, "link rate bw code:0x%02x",
+ it6505->link_rate_bw_code);
+ it6505->link_rate_bw_code = min_t(int, it6505->link_rate_bw_code,
+ MAX_LINK_RATE);
+
+ it6505->lane_count = link->num_lanes;
+ DRM_DEV_DEBUG_DRIVER(dev, "Sink support %d lanes training",
+ it6505->lane_count);
+ it6505->lane_count = min_t(int, it6505->lane_count, MAX_LANE_COUNT);
+
+ it6505->branch_device = drm_dp_is_branch(it6505->dpcd);
+ DRM_DEV_DEBUG_DRIVER(dev, "Sink %sbranch device",
+ it6505->branch_device ? "" : "Not ");
+
+ it6505->enable_enhanced_frame = link->capabilities;
+ DRM_DEV_DEBUG_DRIVER(dev, "Sink %sSupport Enhanced Framing",
+ it6505->enable_enhanced_frame ? "" : "Not ");
+
+ it6505->enable_ssc = (it6505->dpcd[DP_MAX_DOWNSPREAD] &
+ DP_MAX_DOWNSPREAD_0_5);
+ DRM_DEV_DEBUG_DRIVER(dev, "Maximum Down-Spread: %s, %ssupport SSC!",
+ it6505->enable_ssc ? "0.5" : "0",
+ it6505->enable_ssc ? "" : "Not ");
+
+ it6505->step_train = it6505_use_step_train_check(it6505);
+ if (it6505->step_train)
+ DRM_DEV_DEBUG_DRIVER(dev, "auto train fail, will step train");
+
+ bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS);
+ DRM_DEV_DEBUG_DRIVER(dev, "bcaps:0x%02x", bcaps);
+ if (bcaps & DP_BCAPS_HDCP_CAPABLE) {
+ it6505->is_repeater = (bcaps & DP_BCAPS_REPEATER_PRESENT);
+ DRM_DEV_DEBUG_DRIVER(dev, "Support HDCP! Downstream is %s!",
+ it6505->is_repeater ? "repeater" :
+ "receiver");
+ } else {
+ DRM_DEV_DEBUG_DRIVER(dev, "Sink not support HDCP!");
+ it6505->hdcp_desired = false;
+ }
+ DRM_DEV_DEBUG_DRIVER(dev, "HDCP %s",
+ it6505->hdcp_desired ? "desired" : "undesired");
+}
+
+static void it6505_setup_ssc(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0, SPREAD_AMP_5,
+ it6505->enable_ssc ? SPREAD_AMP_5 : 0x00);
+ if (it6505->enable_ssc) {
+ it6505_write(it6505, REG_BANK_SEL, 0x01);
+ it6505_write(it6505, REG_SSC_CTRL0, 0x9E);
+ it6505_write(it6505, REG_SSC_CTRL1, 0x1C);
+ it6505_write(it6505, REG_SSC_CTRL2, 0x42);
+ it6505_write(it6505, REG_BANK_SEL, 0x00);
+ it6505_write(it6505, REG_SP_CTRL0, 0x07);
+ it6505_write(it6505, REG_IP_CTRL1, 0x29);
+ it6505_write(it6505, REG_IP_CTRL2, 0x03);
+ /* Stamp Interrupt Step */
+ it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP,
+ 0x10);
+ it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ } else {
+ it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, 0x00);
+ it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP,
+ 0x00);
+ }
+}
+
+static inline void it6505_link_rate_setup(struct it6505 *it6505)
+{
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_LBR,
+ (it6505->link_rate_bw_code == RBR) ? FORCE_LBR : 0x00);
+ it6505_set_bits(it6505, REG_LINK_DRV, DRV_HS,
+ (it6505->link_rate_bw_code == RBR) ? 0x00 : DRV_HS);
+}
+
+static void it6505_lane_count_setup(struct it6505 *it6505)
+{
+ it6505_get_extcon_property(it6505);
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_SWAP,
+ it6505->lane_swap ? LANE_SWAP : 0x00);
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_COUNT_MASK,
+ (it6505->lane_count - 1) << 1);
+}
+
+static void it6505_link_training_setup(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ if (it6505->enable_enhanced_frame)
+ it6505_set_bits(it6505, REG_DATA_MUTE_CTRL,
+ ENABLE_ENHANCED_FRAME, ENABLE_ENHANCED_FRAME);
+
+ it6505_link_rate_setup(it6505);
+ it6505_lane_count_setup(it6505);
+ it6505_setup_ssc(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev,
+ "%s, %d lanes, %sable ssc, %sable enhanced frame",
+ it6505->link_rate_bw_code != RBR ? "HBR" : "RBR",
+ it6505->lane_count,
+ it6505->enable_ssc ? "en" : "dis",
+ it6505->enable_enhanced_frame ? "en" : "dis");
+}
+
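+/* Kick the hardware auto-trainer and poll its status for up to ~1 s. */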
+static bool it6505_link_start_auto_train(struct it6505 *it6505)
+{
+ int timeout = 500, link_training_state;
+ bool state = false;
+
+ mutex_lock(&it6505->aux_lock);
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0,
+ FORCE_CR_DONE | FORCE_EQ_DONE, 0x00);
+ it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN);
+ it6505_write(it6505, REG_TRAIN_CTRL1, AUTO_TRAIN);
+
+ while (timeout > 0) {
+ usleep_range(1000, 2000);
+ link_training_state = it6505_read(it6505, REG_LINK_TRAIN_STS);
+
+ if (link_training_state > 0 &&
+ (link_training_state & LINK_STATE_NORP)) {
+ state = true;
+ goto unlock;
+ }
+
+ timeout--;
+ }
+unlock:
+ mutex_unlock(&it6505->aux_lock);
+
+ return state;
+}
+
+static int it6505_drm_dp_link_configure(struct it6505 *it6505)
+{
+ u8 values[2];
+ int err;
+ struct drm_dp_aux *aux = &it6505->aux;
+
+ values[0] = it6505->link_rate_bw_code;
+ values[1] = it6505->lane_count;
+
+ if (it6505->enable_enhanced_frame)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static bool it6505_check_voltage_swing_max(u8 lane_voltage_swing_pre_emphasis)
+{
+ return ((lane_voltage_swing_pre_emphasis & 0x03) == MAX_CR_LEVEL);
+}
+
+static bool it6505_check_pre_emphasis_max(u8 lane_voltage_swing_pre_emphasis)
+{
+ return ((lane_voltage_swing_pre_emphasis & 0x03) == MAX_EQ_LEVEL);
+}
+
+static bool it6505_check_max_voltage_swing_reached(u8 *lane_voltage_swing,
+ u8 lane_count)
+{
+ u8 i;
+
+ for (i = 0; i < lane_count; i++) {
+ if (lane_voltage_swing[i] & DP_TRAIN_MAX_SWING_REACHED)
+ return true;
+ }
+
+ return false;
+}
+
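+/*
+ * Program each lane's requested swing/pre-emphasis into
+ * DP_TRAINING_LANEx_SET, flagging max-reached levels, then read the
+ * value back to verify the write landed.
+ */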
+static bool
+step_train_lane_voltage_para_set(struct it6505 *it6505,
+ struct it6505_step_train_para
+ *lane_voltage_pre_emphasis,
+ u8 *lane_voltage_pre_emphasis_set)
+{
+ u8 *voltage_swing = lane_voltage_pre_emphasis->voltage_swing;
+ u8 *pre_emphasis = lane_voltage_pre_emphasis->pre_emphasis;
+ u8 i;
+
+ for (i = 0; i < it6505->lane_count; i++) {
+ voltage_swing[i] &= 0x03;
+ lane_voltage_pre_emphasis_set[i] = voltage_swing[i];
+ if (it6505_check_voltage_swing_max(voltage_swing[i]))
+ lane_voltage_pre_emphasis_set[i] |=
+ DP_TRAIN_MAX_SWING_REACHED;
+
+ pre_emphasis[i] &= 0x03;
+ lane_voltage_pre_emphasis_set[i] |= pre_emphasis[i]
+ << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ if (it6505_check_pre_emphasis_max(pre_emphasis[i]))
+ lane_voltage_pre_emphasis_set[i] |=
+ DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ it6505_dpcd_write(it6505, DP_TRAINING_LANE0_SET + i,
+ lane_voltage_pre_emphasis_set[i]);
+
+ if (lane_voltage_pre_emphasis_set[i] !=
+ it6505_dpcd_read(it6505, DP_TRAINING_LANE0_SET + i))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+it6505_step_cr_train(struct it6505 *it6505,
+ struct it6505_step_train_para *lane_voltage_pre_emphasis)
+{
+ u8 loop_count = 0, i = 0, j;
+ u8 link_status[DP_LINK_STATUS_SIZE] = { 0 };
+ u8 lane_level_config[MAX_LANE_COUNT] = { 0 };
+ int pre_emphasis_adjust = -1, voltage_swing_adjust = -1;
+ const struct drm_dp_aux *aux = &it6505->aux;
+
+ it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL,
+ it6505->enable_ssc ? DP_SPREAD_AMP_0_5 : 0x00);
+ it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+
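+ /*
+ * Loop until clock recovery succeeds, the sink repeats the same adjust
+ * request five times in a row, or ten attempts elapse.
+ */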
+ while (loop_count < 5 && i < 10) {
+ i++;
+ if (!step_train_lane_voltage_para_set(it6505,
+ lane_voltage_pre_emphasis,
+ lane_level_config))
+ continue;
+ drm_dp_link_train_clock_recovery_delay(aux, it6505->dpcd);
+ drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
+
+ if (drm_dp_clock_recovery_ok(link_status, it6505->lane_count)) {
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_CR_DONE,
+ FORCE_CR_DONE);
+ return true;
+ }
+ DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "cr not done");
+
+ if (it6505_check_max_voltage_swing_reached(lane_level_config,
+ it6505->lane_count))
+ goto cr_train_fail;
+
+ for (j = 0; j < it6505->lane_count; j++) {
+ lane_voltage_pre_emphasis->voltage_swing[j] =
+ drm_dp_get_adjust_request_voltage(link_status,
+ j) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ lane_voltage_pre_emphasis->pre_emphasis[j] =
+ drm_dp_get_adjust_request_pre_emphasis(link_status,
+ j) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ if (voltage_swing_adjust ==
+ lane_voltage_pre_emphasis->voltage_swing[j] &&
+ pre_emphasis_adjust ==
+ lane_voltage_pre_emphasis->pre_emphasis[j]) {
+ loop_count++;
+ continue;
+ }
+
+ voltage_swing_adjust =
+ lane_voltage_pre_emphasis->voltage_swing[j];
+ pre_emphasis_adjust =
+ lane_voltage_pre_emphasis->pre_emphasis[j];
+ loop_count = 0;
+
+ if (voltage_swing_adjust + pre_emphasis_adjust >
+ MAX_EQ_LEVEL)
+ lane_voltage_pre_emphasis->voltage_swing[j] =
+ MAX_EQ_LEVEL -
+ lane_voltage_pre_emphasis
+ ->pre_emphasis[j];
+ }
+ }
+
+cr_train_fail:
+ it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ return false;
+}
+
+static bool
+it6505_step_eq_train(struct it6505 *it6505,
+ struct it6505_step_train_para *lane_voltage_pre_emphasis)
+{
+ u8 loop_count = 0, i, link_status[DP_LINK_STATUS_SIZE] = { 0 };
+ u8 lane_level_config[MAX_LANE_COUNT] = { 0 };
+ const struct drm_dp_aux *aux = &it6505->aux;
+
+ it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_2);
+
+ while (loop_count < 6) {
+ loop_count++;
+
+ if (!step_train_lane_voltage_para_set(it6505,
+ lane_voltage_pre_emphasis,
+ lane_level_config))
+ continue;
+
+ drm_dp_link_train_channel_eq_delay(aux, it6505->dpcd);
+ drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
+
+ if (!drm_dp_clock_recovery_ok(link_status, it6505->lane_count))
+ goto eq_train_fail;
+
+ if (drm_dp_channel_eq_ok(link_status, it6505->lane_count)) {
+ it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE,
+ FORCE_EQ_DONE);
+ return true;
+ }
+ DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "eq not done");
+
+ for (i = 0; i < it6505->lane_count; i++) {
+ lane_voltage_pre_emphasis->voltage_swing[i] =
+ drm_dp_get_adjust_request_voltage(link_status,
+ i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ lane_voltage_pre_emphasis->pre_emphasis[i] =
+ drm_dp_get_adjust_request_pre_emphasis(link_status,
+ i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (lane_voltage_pre_emphasis->voltage_swing[i] +
+ lane_voltage_pre_emphasis->pre_emphasis[i] >
+ MAX_EQ_LEVEL)
+ lane_voltage_pre_emphasis->voltage_swing[i] =
+ 0x03 - lane_voltage_pre_emphasis
+ ->pre_emphasis[i];
+ }
+ }
+
+eq_train_fail:
+ it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ return false;
+}
+
+static bool it6505_link_start_step_train(struct it6505 *it6505)
+{
+ int err;
+ struct it6505_step_train_para lane_voltage_pre_emphasis = {
+ .voltage_swing = { 0 },
+ .pre_emphasis = { 0 },
+ };
+
+ DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start");
+ err = it6505_drm_dp_link_configure(it6505);
+
+ if (err < 0)
+ return false;
+ if (!it6505_step_cr_train(it6505, &lane_voltage_pre_emphasis))
+ return false;
+ if (!it6505_step_eq_train(it6505, &lane_voltage_pre_emphasis))
+ return false;
+ return true;
+}
+
+static bool it6505_get_video_status(struct it6505 *it6505)
+{
+ int reg_0d;
+
+ reg_0d = it6505_read(it6505, REG_SYSTEM_STS);
+
+ if (reg_0d < 0)
+ return false;
+
+ return reg_0d & VIDEO_STB;
+}
+
+static void it6505_reset_hdcp(struct it6505 *it6505)
+{
+ it6505->hdcp_status = HDCP_AUTH_IDLE;
+ /* Disable CP_Desired */
+ it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00);
+ it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, HDCP_RESET);
+}
+
+static void it6505_start_hdcp(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "start");
+ it6505_reset_hdcp(it6505);
+ queue_delayed_work(system_wq, &it6505->hdcp_work,
+ msecs_to_jiffies(2400));
+}
+
+static void it6505_stop_hdcp(struct it6505 *it6505)
+{
+ it6505_reset_hdcp(it6505);
+ cancel_delayed_work(&it6505->hdcp_work);
+}
+
+static bool it6505_hdcp_is_ksv_valid(u8 *ksv)
+{
+ int i, ones = 0;
+
+ /* KSV has 20 1's and 20 0's */
+ for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+ ones += hweight8(ksv[i]);
+ if (ones != 20)
+ return false;
+ return true;
+}
+
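+/*
+ * First part of HDCP 1.x authentication: generate An, enable
+ * CP_Desired and let the hardware state machine run.
+ */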
+static void it6505_hdcp_part1_auth(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ u8 hdcp_bcaps;
+
+ it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, 0x00);
+ /* Disable CP_Desired */
+ it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00);
+
+ usleep_range(1000, 1500);
+ hdcp_bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS);
+ DRM_DEV_DEBUG_DRIVER(dev, "DPCD[0x68028]: 0x%02x",
+ hdcp_bcaps);
+
+ if (!hdcp_bcaps)
+ return;
+
+ /* clear the repeater List Chk Done and fail bit */
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
+ 0x00);
+
+ /* Enable An Generator */
+ it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, HDCP_AN_GEN);
+ /* delay1ms(10);*/
+ usleep_range(10000, 15000);
+ /* Stop An Generator */
+ it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, 0x00);
+
+ it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, HDCP_CP_ENABLE);
+
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_START,
+ HDCP_TRIGGER_START);
+
+ it6505->hdcp_status = HDCP_AUTH_GOING;
+}
+
+static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
+ unsigned int size, u8 *output_av)
+{
+ struct shash_desc *desc;
+ struct crypto_shash *tfm;
+ int err;
+ struct device *dev = &it6505->client->dev;
+
+ tfm = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(tfm)) {
+ dev_err(dev, "crypto_alloc_shash sha1 failed");
+ return PTR_ERR(tfm);
+ }
+ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+ if (!desc) {
+ crypto_free_shash(tfm);
+ return -ENOMEM;
+ }
+
+ desc->tfm = tfm;
+ err = crypto_shash_digest(desc, sha1_input, size, output_av);
+ if (err)
+ dev_err(dev, "crypto_shash_digest sha1 failed");
+
+ crypto_free_shash(tfm);
+ kfree(desc);
+ return err;
+}
+
+static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
+{
+ struct device *dev = &it6505->client->dev;
+ u8 binfo[2];
+ int down_stream_count, i, err, msg_count = 0;
+
+ err = it6505_get_dpcd(it6505, DP_AUX_HDCP_BINFO, binfo,
+ ARRAY_SIZE(binfo));
+
+ if (err < 0) {
+ dev_err(dev, "Read binfo value Fail");
+ return err;
+ }
+
+ down_stream_count = binfo[0] & 0x7F;
+ DRM_DEV_DEBUG_DRIVER(dev, "binfo:0x%*ph", (int)ARRAY_SIZE(binfo),
+ binfo);
+
+ if ((binfo[0] & BIT(7)) || (binfo[1] & BIT(3))) {
+ dev_err(dev, "HDCP max cascade device exceed");
+ return 0;
+ }
+
+ if (!down_stream_count ||
+ down_stream_count > MAX_HDCP_DOWN_STREAM_COUNT) {
+ dev_err(dev, "HDCP down stream count Error %d",
+ down_stream_count);
+ return 0;
+ }
+
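+ /*
+ * Read one 5-byte KSV per pass; the read offset wraps every three
+ * KSVs, matching the DPCD KSV FIFO window.
+ */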
+ for (i = 0; i < down_stream_count; i++) {
+ err = it6505_get_dpcd(it6505, DP_AUX_HDCP_KSV_FIFO +
+ (i % 3) * DRM_HDCP_KSV_LEN,
+ sha1_input + msg_count,
+ DRM_HDCP_KSV_LEN);
+
+ if (err < 0)
+ return err;
+
+ msg_count += 5;
+ }
+
+ it6505->hdcp_down_stream_count = down_stream_count;
+ sha1_input[msg_count++] = binfo[0];
+ sha1_input[msg_count++] = binfo[1];
+
+ it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ,
+ HDCP_EN_M0_READ);
+
+ err = regmap_bulk_read(it6505->regmap, REG_M0_0_7,
+ sha1_input + msg_count, 8);
+
+ it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ, 0x00);
+
+ if (err < 0) {
+ dev_err(dev, " Warning, Read M value Fail");
+ return err;
+ }
+
+ msg_count += 8;
+
+ return msg_count;
+}
+
+static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ u8 av[5][4], bv[5][4];
+ int i, err;
+
+ i = it6505_setup_sha1_input(it6505, it6505->sha1_input);
+ if (i <= 0) {
+ dev_err(dev, "SHA-1 Input length error %d", i);
+ return false;
+ }
+
+ it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+
+ err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
+ sizeof(bv));
+
+ if (err < 0) {
+ dev_err(dev, "Read V' value Fail");
+ return false;
+ }
+
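+ /*
+ * Each 32-bit word of V' is stored byte-swapped relative to the SHA-1
+ * digest, so compare with the byte order reversed.
+ */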
+ for (i = 0; i < 5; i++)
+ if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
+ bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ return false;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!");
+ return true;
+}
+
+static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
+{
+ struct it6505 *it6505 = container_of(work, struct it6505,
+ hdcp_wait_ksv_list);
+ struct device *dev = &it6505->client->dev;
+ unsigned int timeout = 5000;
+ u8 bstatus = 0;
+ bool ksv_list_check;
+
+ timeout /= 20;
+ while (timeout > 0) {
+ if (!it6505_get_sink_hpd_status(it6505))
+ return;
+
+ bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS);
+
+ if (bstatus & DP_BSTATUS_READY)
+ break;
+
+ msleep(20);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed");
+ goto timeout;
+ }
+
+ ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s",
+ ksv_list_check ? "pass" : "fail");
+ if (ksv_list_check) {
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
+ return;
+ }
+timeout:
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
+ HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL);
+}
+
+static void it6505_hdcp_work(struct work_struct *work)
+{
+ struct it6505 *it6505 = container_of(work, struct it6505,
+ hdcp_work.work);
+ struct device *dev = &it6505->client->dev;
+ int ret;
+ u8 link_status[DP_LINK_STATUS_SIZE] = { 0 };
+
+ DRM_DEV_DEBUG_DRIVER(dev, "start");
+
+ if (!it6505_get_sink_hpd_status(it6505))
+ return;
+
+ ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
+ DRM_DEV_DEBUG_DRIVER(dev, "ret: %d link_status: %*ph", ret,
+ (int)sizeof(link_status), link_status);
+
+ if (ret < 0 || !drm_dp_channel_eq_ok(link_status, it6505->lane_count) ||
+ !it6505_get_video_status(it6505)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "link train not done or no video");
+ return;
+ }
+
+ ret = it6505_get_dpcd(it6505, DP_AUX_HDCP_BKSV, it6505->bksvs,
+ ARRAY_SIZE(it6505->bksvs));
+ if (ret < 0) {
+ dev_err(dev, "fail to get bksv ret: %d", ret);
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL);
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph",
+ (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs);
+
+ if (!it6505_hdcp_is_ksv_valid(it6505->bksvs)) {
+ dev_err(dev, "Display Port bksv not valid");
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL);
+ }
+
+ it6505_hdcp_part1_auth(it6505);
+}
+
+static void it6505_show_hdcp_info(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ int i;
+ u8 *sha1 = it6505->sha1_input;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "hdcp_status: %d is_repeater: %d",
+ it6505->hdcp_status, it6505->is_repeater);
+ DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph",
+ (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs);
+
+ if (it6505->is_repeater) {
+ DRM_DEV_DEBUG_DRIVER(dev, "hdcp_down_stream_count: %d",
+ it6505->hdcp_down_stream_count);
+ DRM_DEV_DEBUG_DRIVER(dev, "sha1_input: 0x%*ph",
+ (int)ARRAY_SIZE(it6505->sha1_input),
+ it6505->sha1_input);
+ for (i = 0; i < it6505->hdcp_down_stream_count; i++) {
+ DRM_DEV_DEBUG_DRIVER(dev, "KSV_%d = 0x%*ph", i,
+ DRM_HDCP_KSV_LEN, sha1);
+ sha1 += DRM_HDCP_KSV_LEN;
+ }
+ DRM_DEV_DEBUG_DRIVER(dev, "binfo: 0x%2ph M0: 0x%8ph",
+ sha1, sha1 + 2);
+ }
+}
+
+static void it6505_stop_link_train(struct it6505 *it6505)
+{
+ it6505->link_state = LINK_IDLE;
+ cancel_work_sync(&it6505->link_works);
+ it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN);
+}
+
+static void it6505_link_train_ok(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ it6505->link_state = LINK_OK;
+ /* unmute video and enable the AVI info frame */
+ it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, 0x00);
+ it6505_set_bits(it6505, REG_INFOFRAME_CTRL,
+ EN_VID_CTRL_PKT, EN_VID_CTRL_PKT);
+
+ if (it6505_audio_input(it6505)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "Enable audio!");
+ it6505_enable_audio(it6505);
+ }
+
+ if (it6505->hdcp_desired)
+ it6505_start_hdcp(it6505);
+}
+
+static void it6505_link_step_train_process(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ int ret, i, step_retry = 3;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Start step train");
+
+ if (it6505->sink_count == 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d, force eq",
+ it6505->sink_count);
+ it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE,
+ FORCE_EQ_DONE);
+ return;
+ }
+
+ if (!it6505->step_train) {
+ DRM_DEV_DEBUG_DRIVER(dev, "not support step train");
+ return;
+ }
+
+ /* step training start here */
+ for (i = 0; i < step_retry; i++) {
+ it6505_link_reset_step_train(it6505);
+ ret = it6505_link_start_step_train(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev, "step train %s, retry:%d times",
+ ret ? "pass" : "failed", i + 1);
+ if (ret) {
+ it6505_link_train_ok(it6505);
+ return;
+ }
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "training fail");
+ it6505->link_state = LINK_IDLE;
+ it6505_video_reset(it6505);
+}
+
+static void it6505_link_training_work(struct work_struct *work)
+{
+ struct it6505 *it6505 = container_of(work, struct it6505, link_works);
+ struct device *dev = &it6505->client->dev;
+ int ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
+ it6505->sink_count);
+
+ if (!it6505_get_sink_hpd_status(it6505))
+ return;
+
+ it6505_link_training_setup(it6505);
+ it6505_reset_hdcp(it6505);
+ it6505_aux_reset(it6505);
+
+ if (it6505->auto_train_retry < 1) {
+ it6505_link_step_train_process(it6505);
+ return;
+ }
+
+ ret = it6505_link_start_auto_train(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev, "auto train %s, auto_train_retry: %d",
+ ret ? "pass" : "failed", it6505->auto_train_retry);
+ it6505->auto_train_retry--;
+
+ if (ret) {
+ it6505_link_train_ok(it6505);
+ return;
+ }
+
+ it6505_dump(it6505);
+}
+
+static void it6505_plugged_status_to_codec(struct it6505 *it6505)
+{
+ enum drm_connector_status status = it6505->connector_status;
+
+ if (it6505->plugged_cb && it6505->codec_dev)
+ it6505->plugged_cb(it6505->codec_dev,
+ status == connector_status_connected);
+}
+
+static int it6505_process_hpd_irq(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ int ret, dpcd_sink_count, dp_irq_vector, bstatus;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!it6505_get_sink_hpd_status(it6505)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "HPD_IRQ HPD low");
+ it6505->sink_count = 0;
+ return 0;
+ }
+
+ ret = it6505_dpcd_read(it6505, DP_SINK_COUNT);
+ if (ret < 0)
+ return ret;
+
+ dpcd_sink_count = DP_GET_SINK_COUNT(ret);
+ DRM_DEV_DEBUG_DRIVER(dev, "dpcd_sink_count: %d it6505->sink_count:%d",
+ dpcd_sink_count, it6505->sink_count);
+
+ if (it6505->branch_device && dpcd_sink_count != it6505->sink_count) {
+ memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+ it6505->sink_count = dpcd_sink_count;
+ it6505_reset_logic(it6505);
+ it6505_int_mask_enable(it6505);
+ it6505_init(it6505);
+ return 0;
+ }
+
+ dp_irq_vector = it6505_dpcd_read(it6505, DP_DEVICE_SERVICE_IRQ_VECTOR);
+ if (dp_irq_vector < 0)
+ return dp_irq_vector;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector);
+
+ if (dp_irq_vector & DP_CP_IRQ) {
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
+ HDCP_TRIGGER_CPIRQ);
+
+ bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS);
+ if (bstatus < 0)
+ return bstatus;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus);
+ }
+
+ ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
+ if (ret < 0) {
+ dev_err(dev, "Fail to read link status ret: %d", ret);
+ return ret;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "link status = 0x%*ph",
+ (int)ARRAY_SIZE(link_status), link_status);
+
+ if (!drm_dp_channel_eq_ok(link_status, it6505->lane_count)) {
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+ it6505_video_reset(it6505);
+ }
+
+ return 0;
+}
+
+static void it6505_irq_hpd(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ it6505->hpd_state = it6505_get_sink_hpd_status(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev, "hpd change interrupt, change to %s",
+ it6505->hpd_state ? "high" : "low");
+
+ if (it6505->bridge.dev)
+ drm_helper_hpd_irq_event(it6505->bridge.dev);
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
+ it6505->sink_count);
+
+ if (it6505->hpd_state) {
+ wait_for_completion_timeout(&it6505->wait_edid_complete,
+ msecs_to_jiffies(6000));
+ it6505_lane_termination_on(it6505);
+ it6505_lane_power_on(it6505);
+
+ /*
+ * for some dongle which issue HPD_irq
+ * when sink count change from 0->1
+ * it6505 not able to receive HPD_IRQ
+ * if HW never go into trainig done
+ */
+
+ if (it6505->branch_device && it6505->sink_count == 0)
+ schedule_work(&it6505->link_works);
+
+ if (!it6505_get_video_status(it6505))
+ it6505_video_reset(it6505);
+
+ it6505_calc_video_info(it6505);
+ } else {
+ memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+
+ if (it6505->hdcp_desired)
+ it6505_stop_hdcp(it6505);
+
+ it6505_video_disable(it6505);
+ it6505_disable_audio(it6505);
+ it6505_stop_link_train(it6505);
+ it6505_lane_off(it6505);
+ it6505_link_reset_step_train(it6505);
+ }
+}
+
+static void it6505_irq_hpd_irq(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "hpd_irq interrupt");
+
+ if (it6505_process_hpd_irq(it6505) < 0)
+ DRM_DEV_DEBUG_DRIVER(dev, "process hpd_irq fail!");
+}
+
+static void it6505_irq_scdt(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ bool data;
+
+ data = it6505_get_video_status(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev, "video stable change interrupt, %s",
+ data ? "stable" : "unstable");
+ it6505_calc_video_info(it6505);
+ it6505_link_reset_step_train(it6505);
+
+ if (data)
+ schedule_work(&it6505->link_works);
+}
+
+static void it6505_irq_hdcp_done(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "hdcp done interrupt");
+ it6505->hdcp_status = HDCP_AUTH_DONE;
+ it6505_show_hdcp_info(it6505);
+}
+
+static void it6505_irq_hdcp_fail(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "hdcp fail interrupt");
+ it6505->hdcp_status = HDCP_AUTH_IDLE;
+ it6505_show_hdcp_info(it6505);
+ it6505_start_hdcp(it6505);
+}
+
+static void it6505_irq_aux_cmd_fail(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "AUX PC Request Fail Interrupt");
+}
+
+static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
+ schedule_work(&it6505->hdcp_wait_ksv_list);
+}
+
+static void it6505_irq_audio_fifo_error(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "audio fifo error Interrupt");
+
+ if (it6505_audio_input(it6505))
+ it6505_enable_audio(it6505);
+}
+
+static void it6505_irq_link_train_fail(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "link training fail interrupt");
+ schedule_work(&it6505->link_works);
+}
+
+static void it6505_irq_video_fifo_error(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt");
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+ flush_work(&it6505->link_works);
+ it6505_stop_hdcp(it6505);
+ it6505_video_reset(it6505);
+}
+
+static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt");
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+ flush_work(&it6505->link_works);
+ it6505_stop_hdcp(it6505);
+ it6505_video_reset(it6505);
+}
+
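+/*
+ * Each interrupt status register holds 8 valid bits and is stored in
+ * its own int, so index the array in byte-wide strides.
+ */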
+static bool it6505_test_bit(unsigned int bit, const unsigned int *addr)
+{
+ return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE));
+}
+
+static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
+{
+ struct it6505 *it6505 = data;
+ struct device *dev = &it6505->client->dev;
+ static const struct {
+ int bit;
+ void (*handler)(struct it6505 *it6505);
+ } irq_vec[] = {
+ { BIT_INT_HPD, it6505_irq_hpd },
+ { BIT_INT_HPD_IRQ, it6505_irq_hpd_irq },
+ { BIT_INT_SCDT, it6505_irq_scdt },
+ { BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail },
+ { BIT_INT_HDCP_DONE, it6505_irq_hdcp_done },
+ { BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail },
+ { BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check },
+ { BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error },
+ { BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail },
+ { BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error },
+ { BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow },
+ };
+ int int_status[3], i;
+
+ msleep(100);
+ mutex_lock(&it6505->extcon_lock);
+
+ if (it6505->enable_drv_hold || !it6505->powered)
+ goto unlock;
+
+ int_status[0] = it6505_read(it6505, INT_STATUS_01);
+ int_status[1] = it6505_read(it6505, INT_STATUS_02);
+ int_status[2] = it6505_read(it6505, INT_STATUS_03);
+
+ it6505_write(it6505, INT_STATUS_01, int_status[0]);
+ it6505_write(it6505, INT_STATUS_02, int_status[1]);
+ it6505_write(it6505, INT_STATUS_03, int_status[2]);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "reg06 = 0x%02x", int_status[0]);
+ DRM_DEV_DEBUG_DRIVER(dev, "reg07 = 0x%02x", int_status[1]);
+ DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", int_status[2]);
+ it6505_debug_print(it6505, REG_SYSTEM_STS, "");
+
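+ /* Handle HPD changes first; the other events only matter while HPD is high. */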
+ if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status))
+ irq_vec[0].handler(it6505);
+
+ if (!it6505->hpd_state)
+ goto unlock;
+
+ for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
+ if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
+ irq_vec[i].handler(it6505);
+ }
+
+unlock:
+ mutex_unlock(&it6505->extcon_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int it6505_poweron(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ struct it6505_platform_data *pdata = &it6505->pdata;
+ int err;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505 start powered on");
+
+ if (it6505->powered) {
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505 already powered on");
+ return 0;
+ }
+
+ if (pdata->pwr18) {
+ err = regulator_enable(pdata->pwr18);
+ if (err) {
+ DRM_DEV_DEBUG_DRIVER(dev, "Failed to enable VDD18: %d",
+ err);
+ return err;
+ }
+ }
+
+ if (pdata->ovdd) {
+ /* time interval between IVDD and OVDD at least be 1ms */
+ usleep_range(1000, 2000);
+ err = regulator_enable(pdata->ovdd);
+ if (err) {
+ regulator_disable(pdata->pwr18);
+ return err;
+ }
+ }
+ /* time interval between OVDD and SYSRSTN at least be 10ms */
+ if (pdata->gpiod_reset) {
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+ usleep_range(10000, 20000);
+ }
+
+ it6505_reset_logic(it6505);
+ it6505_int_mask_enable(it6505);
+ it6505_init(it6505);
+ it6505_lane_off(it6505);
+
+ it6505->powered = true;
+
+ return 0;
+}
+
+static int it6505_poweroff(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ struct it6505_platform_data *pdata = &it6505->pdata;
+ int err;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505 start power off");
+
+ if (!it6505->powered) {
+ DRM_DEV_DEBUG_DRIVER(dev, "power had been already off");
+ return 0;
+ }
+
+ if (pdata->gpiod_reset)
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+
+ if (pdata->pwr18) {
+ err = regulator_disable(pdata->pwr18);
+ if (err)
+ return err;
+ }
+
+ if (pdata->ovdd) {
+ err = regulator_disable(pdata->ovdd);
+ if (err)
+ return err;
+ }
+
+ it6505->powered = false;
+ it6505->sink_count = 0;
+
+ return 0;
+}
+
+static enum drm_connector_status it6505_detect(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ enum drm_connector_status status = connector_status_disconnected;
+ int dp_sink_count;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d powered:%d",
+ it6505->sink_count, it6505->powered);
+
+ mutex_lock(&it6505->mode_lock);
+
+ if (!it6505->powered)
+ goto unlock;
+
+ if (it6505->enable_drv_hold) {
+ status = it6505_get_sink_hpd_status(it6505) ?
+ connector_status_connected :
+ connector_status_disconnected;
+ goto unlock;
+ }
+
+ if (it6505_get_sink_hpd_status(it6505)) {
+ it6505_aux_on(it6505);
+ it6505_drm_dp_link_probe(&it6505->aux, &it6505->link);
+ it6505_drm_dp_link_power_up(&it6505->aux, &it6505->link);
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+
+ if (it6505->dpcd[0] == 0) {
+ it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
+ ARRAY_SIZE(it6505->dpcd));
+ it6505_variable_config(it6505);
+ it6505_parse_link_capabilities(it6505);
+ }
+
+ dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
+ it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
+ it6505->sink_count, it6505->branch_device);
+
+ if (it6505->branch_device) {
+ status = (it6505->sink_count != 0) ?
+ connector_status_connected :
+ connector_status_disconnected;
+ } else {
+ status = connector_status_connected;
+ }
+ } else {
+ it6505->sink_count = 0;
+ memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+ }
+
+unlock:
+ if (it6505->connector_status != status) {
+ it6505->connector_status = status;
+ it6505_plugged_status_to_codec(it6505);
+ }
+
+ mutex_unlock(&it6505->mode_lock);
+
+ return status;
+}
+
+static int it6505_extcon_notifier(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct it6505 *it6505 = container_of(self, struct it6505, event_nb);
+
+ schedule_work(&it6505->extcon_wq);
+ return NOTIFY_DONE;
+}
+
+static void it6505_extcon_work(struct work_struct *work)
+{
+ struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq);
+ struct device *dev = &it6505->client->dev;
+ int state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP);
+ unsigned int pwroffretry = 0;
+
+ if (it6505->enable_drv_hold)
+ return;
+
+ mutex_lock(&it6505->extcon_lock);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "EXTCON_DISP_DP = 0x%02x", state);
+ if (state > 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, "start to power on");
+ msleep(100);
+ it6505_poweron(it6505);
+ } else {
+ DRM_DEV_DEBUG_DRIVER(dev, "start to power off");
+ while (it6505_poweroff(it6505) && pwroffretry++ < 5) {
+ DRM_DEV_DEBUG_DRIVER(dev, "power off fail %d times",
+ pwroffretry);
+ }
+
+ drm_helper_hpd_irq_event(it6505->bridge.dev);
+ memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+ DRM_DEV_DEBUG_DRIVER(dev, "power off it6505 success!");
+ }
+
+ mutex_unlock(&it6505->extcon_lock);
+}
+
+static int it6505_use_notifier_module(struct it6505 *it6505)
+{
+ int ret;
+ struct device *dev = &it6505->client->dev;
+
+ it6505->event_nb.notifier_call = it6505_extcon_notifier;
+ INIT_WORK(&it6505->extcon_wq, it6505_extcon_work);
+ ret = devm_extcon_register_notifier(&it6505->client->dev,
+ it6505->extcon, EXTCON_DISP_DP,
+ &it6505->event_nb);
+ if (ret) {
+ dev_err(dev, "failed to register notifier for DP");
+ return ret;
+ }
+
+ schedule_work(&it6505->extcon_wq);
+
+ return 0;
+}
+
+static void it6505_remove_notifier_module(struct it6505 *it6505)
+{
+ if (it6505->extcon) {
+ devm_extcon_unregister_notifier(&it6505->client->dev,
+ it6505->extcon, EXTCON_DISP_DP,
+ &it6505->event_nb);
+
+ flush_work(&it6505->extcon_wq);
+ }
+}
+
+static void __maybe_unused it6505_delayed_audio(struct work_struct *work)
+{
+ struct it6505 *it6505 = container_of(work, struct it6505,
+ delayed_audio.work);
+
+ DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start");
+
+ if (!it6505->powered)
+ return;
+
+ if (!it6505->enable_drv_hold)
+ it6505_enable_audio(it6505);
+}
+
+static int __maybe_unused it6505_audio_setup_hw_params(struct it6505 *it6505,
+ struct hdmi_codec_params
+ *params)
+{
+ struct device *dev = &it6505->client->dev;
+ int i = 0;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "%s %d Hz, %d bit, %d channels\n", __func__,
+ params->sample_rate, params->sample_width,
+ params->cea.channels);
+
+ if (!it6505->bridge.encoder)
+ return -ENODEV;
+
+ if (params->cea.channels <= 1 || params->cea.channels > 8) {
+ DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not support",
+ it6505->audio.channel_count);
+ return -EINVAL;
+ }
+
+ it6505->audio.channel_count = params->cea.channels;
+
+ while (i < ARRAY_SIZE(audio_sample_rate_map) &&
+ params->sample_rate !=
+ audio_sample_rate_map[i].sample_rate_value) {
+ i++;
+ }
+ if (i == ARRAY_SIZE(audio_sample_rate_map)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d Hz not support",
+ params->sample_rate);
+ return -EINVAL;
+ }
+ it6505->audio.sample_rate = audio_sample_rate_map[i].rate;
+
+ switch (params->sample_width) {
+ case 16:
+ it6505->audio.word_length = WORD_LENGTH_16BIT;
+ break;
+ case 18:
+ it6505->audio.word_length = WORD_LENGTH_18BIT;
+ break;
+ case 20:
+ it6505->audio.word_length = WORD_LENGTH_20BIT;
+ break;
+ case 24:
+ case 32:
+ it6505->audio.word_length = WORD_LENGTH_24BIT;
+ break;
+ default:
+ DRM_DEV_DEBUG_DRIVER(dev, "wordlength: %d bit not support",
+ params->sample_width);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __maybe_unused it6505_audio_shutdown(struct device *dev, void *data)
+{
+ struct it6505 *it6505 = dev_get_drvdata(dev);
+
+ if (it6505->powered)
+ it6505_disable_audio(it6505);
+}
+
+static int __maybe_unused it6505_audio_hook_plugged_cb(struct device *dev,
+ void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct it6505 *it6505 = data;
+
+ it6505->plugged_cb = fn;
+ it6505->codec_dev = codec_dev;
+ it6505_plugged_status_to_codec(it6505);
+
+ return 0;
+}
+
+static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct it6505, bridge);
+}
+
+static int it6505_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+ struct device *dev = &it6505->client->dev;
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ DRM_ERROR("DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ dev_err(dev, "Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ /* Register aux channel */
+ it6505->aux.name = "DP-AUX";
+ it6505->aux.dev = dev;
+ it6505->aux.drm_dev = bridge->dev;
+ it6505->aux.transfer = it6505_aux_transfer;
+
+ ret = drm_dp_aux_register(&it6505->aux);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to register aux: %d", ret);
+ return ret;
+ }
+
+ if (it6505->extcon) {
+ ret = it6505_use_notifier_module(it6505);
+ if (ret < 0) {
+ dev_err(dev, "use notifier module failed");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void it6505_bridge_detach(struct drm_bridge *bridge)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+
+ flush_work(&it6505->link_works);
+ it6505_remove_notifier_module(it6505);
+}
+
+static enum drm_mode_status
+it6505_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (mode->clock > DPI_PIXEL_CLK_MAX)
+ return MODE_CLOCK_HIGH;
+
+ it6505->video_info.clock = mode->clock;
+
+ return MODE_OK;
+}
+
+static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+ struct device *dev = &it6505->client->dev;
+ struct drm_atomic_state *state = old_state->base.state;
+ struct hdmi_avi_infoframe frame;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ int ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "start");
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ if (WARN_ON(!mode))
+ return;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ connector,
+ mode);
+ if (ret)
+ dev_err(dev, "Failed to setup AVI infoframe: %d", ret);
+
+ it6505_update_video_parameter(it6505, mode);
+
+ ret = it6505_send_video_infoframe(it6505, &frame);
+
+ if (ret)
+ dev_err(dev, "Failed to send AVI infoframe: %d", ret);
+
+ it6505_int_mask_enable(it6505);
+ it6505_video_reset(it6505);
+}
+
+static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "start");
+
+ if (it6505->powered)
+ it6505_video_disable(it6505);
+}
+
+static enum drm_connector_status
+it6505_bridge_detect(struct drm_bridge *bridge)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+
+ return it6505_detect(it6505);
+}
+
+static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+ struct device *dev = &it6505->client->dev;
+ struct edid *edid;
+
+ edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505);
+
+ if (!edid) {
+ DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
+ return NULL;
+ }
+
+ return edid;
+}
+
+static const struct drm_bridge_funcs it6505_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .attach = it6505_bridge_attach,
+ .detach = it6505_bridge_detach,
+ .mode_valid = it6505_bridge_mode_valid,
+ .atomic_enable = it6505_bridge_atomic_enable,
+ .atomic_disable = it6505_bridge_atomic_disable,
+ .detect = it6505_bridge_detect,
+ .get_edid = it6505_bridge_get_edid,
+};
+
+static __maybe_unused int it6505_bridge_resume(struct device *dev)
+{
+ struct it6505 *it6505 = dev_get_drvdata(dev);
+
+ return it6505_poweron(it6505);
+}
+
+static __maybe_unused int it6505_bridge_suspend(struct device *dev)
+{
+ struct it6505 *it6505 = dev_get_drvdata(dev);
+
+ return it6505_poweroff(it6505);
+}
+
+static SIMPLE_DEV_PM_OPS(it6505_bridge_pm_ops, it6505_bridge_suspend,
+ it6505_bridge_resume);
+
+static int it6505_init_pdata(struct it6505 *it6505)
+{
+ struct it6505_platform_data *pdata = &it6505->pdata;
+ struct device *dev = &it6505->client->dev;
+
+ /* 1.0V digital core power regulator */
+ pdata->pwr18 = devm_regulator_get(dev, "pwr18");
+ if (IS_ERR(pdata->pwr18)) {
+ dev_err(dev, "pwr18 regulator not found");
+ return PTR_ERR(pdata->pwr18);
+ }
+
+ pdata->ovdd = devm_regulator_get(dev, "ovdd");
+ if (IS_ERR(pdata->ovdd)) {
+ dev_err(dev, "ovdd regulator not found");
+ return PTR_ERR(pdata->ovdd);
+ }
+
+ pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pdata->gpiod_reset)) {
+ dev_err(dev, "gpiod_reset gpio not found");
+ return PTR_ERR(pdata->gpiod_reset);
+ }
+
+ return 0;
+}
+
+static void it6505_parse_dt(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+ u32 *afe_setting = &it6505->afe_setting;
+
+ it6505->lane_swap_disabled =
+ device_property_read_bool(dev, "no-laneswap");
+
+ if (it6505->lane_swap_disabled)
+ it6505->lane_swap = false;
+
+ if (device_property_read_u32(dev, "afe-setting", afe_setting) == 0) {
+ if (*afe_setting >= ARRAY_SIZE(afe_setting_table)) {
+ dev_err(dev, "afe setting error, use default");
+ *afe_setting = 0;
+ }
+ } else {
+ *afe_setting = 0;
+ }
+ DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %d", *afe_setting);
+}
+
+static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct it6505 *it6505 = file->private_data;
+ struct drm_display_mode *vid = &it6505->video_info;
+ u8 read_buf[READ_BUFFER_SIZE];
+ u8 *str = read_buf, *end = read_buf + PAGE_SIZE;
+ ssize_t ret, count;
+
+ if (!it6505)
+ return -ENODEV;
+
+ it6505_calc_video_info(it6505);
+ str += scnprintf(str, end - str, "---video timing---\n");
+ str += scnprintf(str, end - str, "PCLK:%d.%03dMHz\n",
+ vid->clock / 1000, vid->clock % 1000);
+ str += scnprintf(str, end - str, "HTotal:%d\n", vid->htotal);
+ str += scnprintf(str, end - str, "HActive:%d\n", vid->hdisplay);
+ str += scnprintf(str, end - str, "HFrontPorch:%d\n",
+ vid->hsync_start - vid->hdisplay);
+ str += scnprintf(str, end - str, "HSyncWidth:%d\n",
+ vid->hsync_end - vid->hsync_start);
+ str += scnprintf(str, end - str, "HBackPorch:%d\n",
+ vid->htotal - vid->hsync_end);
+ str += scnprintf(str, end - str, "VTotal:%d\n", vid->vtotal);
+ str += scnprintf(str, end - str, "VActive:%d\n", vid->vdisplay);
+ str += scnprintf(str, end - str, "VFrontPorch:%d\n",
+ vid->vsync_start - vid->vdisplay);
+ str += scnprintf(str, end - str, "VSyncWidth:%d\n",
+ vid->vsync_end - vid->vsync_start);
+ str += scnprintf(str, end - str, "VBackPorch:%d\n",
+ vid->vtotal - vid->vsync_end);
+
+ count = str - read_buf;
+ ret = simple_read_from_buffer(buf, len, ppos, read_buf, count);
+
+ return ret;
+}
+
+static int force_power_on_off_debugfs_write(void *data, u64 value)
+{
+ struct it6505 *it6505 = data;
+
+ if (!it6505)
+ return -ENODEV;
+
+ if (value)
+ it6505_poweron(it6505);
+ else
+ it6505_poweroff(it6505);
+
+ return 0;
+}
+
+static int enable_drv_hold_debugfs_show(void *data, u64 *buf)
+{
+ struct it6505 *it6505 = data;
+
+ if (!it6505)
+ return -ENODEV;
+
+ *buf = it6505->enable_drv_hold;
+
+ return 0;
+}
+
+static int enable_drv_hold_debugfs_write(void *data, u64 drv_hold)
+{
+ struct it6505 *it6505 = data;
+
+ if (!it6505)
+ return -ENODEV;
+
+ it6505->enable_drv_hold = drv_hold;
+
+ if (it6505->enable_drv_hold) {
+ it6505_int_mask_disable(it6505);
+ } else {
+ it6505_clear_int(it6505);
+ it6505_int_mask_enable(it6505);
+
+ if (it6505->powered) {
+ it6505->connector_status =
+ it6505_get_sink_hpd_status(it6505) ?
+ connector_status_connected :
+ connector_status_disconnected;
+ } else {
+ it6505->connector_status =
+ connector_status_disconnected;
+ }
+ }
+
+ return 0;
+}
+
+static const struct file_operations receive_timing_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = receive_timing_debugfs_show,
+ .llseek = default_llseek,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_force_power, NULL,
+ force_power_on_off_debugfs_write, "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_enable_drv_hold, enable_drv_hold_debugfs_show,
+ enable_drv_hold_debugfs_write, "%llu\n");
+
+static const struct debugfs_entries debugfs_entry[] = {
+ { "receive_timing", &receive_timing_fops },
+ { "force_power_on_off", &fops_force_power },
+ { "enable_drv_hold", &fops_enable_drv_hold },
+ { NULL, NULL },
+};
+
+static void debugfs_create_files(struct it6505 *it6505)
+{
+ int i = 0;
+
+ while (debugfs_entry[i].name && debugfs_entry[i].fops) {
+ debugfs_create_file(debugfs_entry[i].name, 0644,
+ it6505->debugfs, it6505,
+ debugfs_entry[i].fops);
+ i++;
+ }
+}
+
+static void debugfs_init(struct it6505 *it6505)
+{
+ struct device *dev = &it6505->client->dev;
+
+ it6505->debugfs = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+
+ if (IS_ERR(it6505->debugfs)) {
+ dev_err(dev, "failed to create debugfs root");
+ return;
+ }
+
+ debugfs_create_files(it6505);
+}
+
+static void it6505_debugfs_remove(struct it6505 *it6505)
+{
+ debugfs_remove_recursive(it6505->debugfs);
+}
+
+static void it6505_shutdown(struct i2c_client *client)
+{
+ struct it6505 *it6505 = dev_get_drvdata(&client->dev);
+
+ if (it6505->powered)
+ it6505_lane_off(it6505);
+}
+
+static int it6505_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct it6505 *it6505;
+ struct device *dev = &client->dev;
+ struct extcon_dev *extcon;
+ int err, intp_irq;
+
+ it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
+ if (!it6505)
+ return -ENOMEM;
+
+ mutex_init(&it6505->extcon_lock);
+ mutex_init(&it6505->mode_lock);
+ mutex_init(&it6505->aux_lock);
+
+ it6505->bridge.of_node = client->dev.of_node;
+ it6505->connector_status = connector_status_disconnected;
+ it6505->client = client;
+ i2c_set_clientdata(client, it6505);
+
+ /* get extcon device from DTS */
+ extcon = extcon_get_edev_by_phandle(dev, 0);
+ if (PTR_ERR(extcon) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (IS_ERR(extcon)) {
+ dev_err(dev, "can not get extcon device!");
+ return PTR_ERR(extcon);
+ }
+
+ it6505->extcon = extcon;
+
+ it6505->regmap = devm_regmap_init_i2c(client, &it6505_regmap_config);
+ if (IS_ERR(it6505->regmap)) {
+ dev_err(dev, "regmap i2c init failed");
+ err = PTR_ERR(it6505->regmap);
+ return err;
+ }
+
+ err = it6505_init_pdata(it6505);
+ if (err) {
+ dev_err(dev, "Failed to initialize pdata: %d", err);
+ return err;
+ }
+
+ it6505_parse_dt(it6505);
+
+ intp_irq = client->irq;
+
+ if (!intp_irq) {
+ dev_err(dev, "Failed to get INTP IRQ");
+ err = -ENODEV;
+ return err;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, intp_irq, NULL,
+ it6505_int_threaded_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "it6505-intp", it6505);
+ if (err) {
+ dev_err(dev, "Failed to request INTP threaded IRQ: %d", err);
+ return err;
+ }
+
+ INIT_WORK(&it6505->link_works, it6505_link_training_work);
+ INIT_WORK(&it6505->hdcp_wait_ksv_list, it6505_hdcp_wait_ksv_list);
+ INIT_DELAYED_WORK(&it6505->hdcp_work, it6505_hdcp_work);
+ init_completion(&it6505->wait_edid_complete);
+ memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+ it6505->powered = false;
+ it6505->enable_drv_hold = DEFAULT_DRV_HOLD;
+
+ if (DEFAULT_PWR_ON)
+ it6505_poweron(it6505);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev));
+ debugfs_init(it6505);
+
+ it6505->bridge.funcs = &it6505_bridge_funcs;
+ it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HPD;
+ drm_bridge_add(&it6505->bridge);
+
+ return 0;
+}
+
+static int it6505_i2c_remove(struct i2c_client *client)
+{
+ struct it6505 *it6505 = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&it6505->bridge);
+ drm_dp_aux_unregister(&it6505->aux);
+ it6505_debugfs_remove(it6505);
+ it6505_poweroff(it6505);
+
+ return 0;
+}
+
+static const struct i2c_device_id it6505_id[] = {
+ { "it6505", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, it6505_id);
+
+static const struct of_device_id it6505_of_match[] = {
+ { .compatible = "ite,it6505" },
+ { }
+};
+
+static struct i2c_driver it6505_i2c_driver = {
+ .driver = {
+ .name = "it6505",
+ .of_match_table = it6505_of_match,
+ .pm = &it6505_bridge_pm_ops,
+ },
+ .probe = it6505_i2c_probe,
+ .remove = it6505_i2c_remove,
+ .shutdown = it6505_shutdown,
+ .id_table = it6505_id,
+};
+
+module_i2c_driver(it6505_i2c_driver);
+
+MODULE_AUTHOR("Allen Chen <allen.chen@ite.com.tw>");
+MODULE_DESCRIPTION("IT6505 DisplayPort Transmitter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index feb128a4557d..63df2e8a8abc 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -1164,7 +1164,11 @@ static int lt9611_probe(struct i2c_client *client,
lt9611_enable_hpd_interrupts(lt9611);
- return lt9611_audio_init(dev, lt9611);
+ ret = lt9611_audio_init(dev, lt9611);
+ if (ret)
+ goto err_remove_bridge;
+
+ return 0;
err_remove_bridge:
drm_bridge_remove(&lt9611->bridge);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 5abb5ec3de46..963a6794735f 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -860,18 +860,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
drm_mode_debug_printmodeline(adjusted_mode);
- pm_runtime_get_sync(dev);
+ if (pm_runtime_resume_and_get(dev) < 0)
+ return;
if (clk_prepare_enable(dsi->lcdif_clk) < 0)
- return;
+ goto runtime_put;
if (clk_prepare_enable(dsi->core_clk) < 0)
- return;
+ goto runtime_put;
/* Step 1 from DSI reset-out instructions */
ret = reset_control_deassert(dsi->rst_pclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
- return;
+ goto runtime_put;
}
/* Step 2 from DSI reset-out instructions */
@@ -881,13 +882,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
ret = reset_control_deassert(dsi->rst_esc);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
- return;
+ goto runtime_put;
}
ret = reset_control_deassert(dsi->rst_byte);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
- return;
+ goto runtime_put;
}
+
+ return;
+
+runtime_put:
+ pm_runtime_put_sync(dev);
}
static void
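
The nwl-dsi change above converts pm_runtime_get_sync() to
pm_runtime_resume_and_get() and routes every later failure through a
single runtime_put label. A minimal sketch of that balanced pattern,
with do_hw_setup() as a hypothetical stand-in for the clock/reset steps:

static int example_enable(struct device *dev)
{
	int ret;

	/* On failure this drops the usage count itself, so no put here. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	ret = do_hw_setup(dev);	/* hypothetical clock/reset sequence */
	if (ret)
		goto err_put;

	return 0;

err_put:
	pm_runtime_put_sync(dev);
	return ret;
}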
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index b32295abd9e7..5be057575183 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -138,6 +138,17 @@ static int panel_bridge_get_modes(struct drm_bridge *bridge,
return drm_panel_get_modes(panel_bridge->panel, connector);
}
+static void panel_bridge_debugfs_init(struct drm_bridge *bridge,
+ struct dentry *root)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct drm_panel *panel = panel_bridge->panel;
+
+ root = debugfs_create_dir("panel", root);
+ if (panel->funcs->debugfs_init)
+ panel->funcs->debugfs_init(panel, root);
+}
+
static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.attach = panel_bridge_attach,
.detach = panel_bridge_detach,
@@ -150,6 +161,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+ .debugfs_init = panel_bridge_debugfs_init,
};
/**
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index b0d8110dd412..4befc104d220 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2551,8 +2551,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
if (!output_fmts)
return NULL;
- /* If dw-hdmi is the only bridge, avoid negociating with ourselves */
- if (list_is_singular(&bridge->encoder->bridge_chain)) {
+ /* If dw-hdmi is the first or only bridge, avoid negotiating with ourselves */
+ if (list_is_singular(&bridge->encoder->bridge_chain) ||
+ list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) {
*num_output_fmts = 1;
output_fmts[0] = MEDIA_BUS_FMT_FIXED;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index ba136a188be7..38616aab12ac 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/dp/drm_dp_aux_bus.h>
#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
@@ -174,7 +175,7 @@ struct ti_sn65dsi86 {
struct regmap *regmap;
struct drm_dp_aux aux;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_connector *connector;
struct device_node *host_node;
struct mipi_dsi_device *dsi;
struct clk *refclk;
@@ -646,54 +647,6 @@ static struct auxiliary_driver ti_sn_aux_driver = {
.id_table = ti_sn_aux_id_table,
};
-/* -----------------------------------------------------------------------------
- * DRM Connector Operations
- */
-
-static struct ti_sn65dsi86 *
-connector_to_ti_sn65dsi86(struct drm_connector *connector)
-{
- return container_of(connector, struct ti_sn65dsi86, connector);
-}
-
-static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
-{
- struct ti_sn65dsi86 *pdata = connector_to_ti_sn65dsi86(connector);
-
- return drm_bridge_get_modes(pdata->next_bridge, connector);
-}
-
-static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
- .get_modes = ti_sn_bridge_connector_get_modes,
-};
-
-static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ti_sn_bridge_connector_init(struct ti_sn65dsi86 *pdata)
-{
- int ret;
-
- ret = drm_connector_init(pdata->bridge.dev, &pdata->connector,
- &ti_sn_bridge_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
-
- drm_connector_helper_add(&pdata->connector,
- &ti_sn_bridge_connector_helper_funcs);
- drm_connector_attach_encoder(&pdata->connector, pdata->bridge.encoder);
-
- return 0;
-}
-
/*------------------------------------------------------------------------------
* DRM Bridge
*/
@@ -757,10 +710,6 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- ret = ti_sn_bridge_connector_init(pdata);
- if (ret < 0)
- goto err_conn_init;
-
/* We never want the next bridge to *also* create a connector: */
flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR;
@@ -768,13 +717,20 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
&pdata->bridge, flags);
if (ret < 0)
- goto err_dsi_host;
+ goto err_initted_aux;
+
+ pdata->connector = drm_bridge_connector_init(pdata->bridge.dev,
+ pdata->bridge.encoder);
+ if (IS_ERR(pdata->connector)) {
+ ret = PTR_ERR(pdata->connector);
+ goto err_initted_aux;
+ }
+
+ drm_connector_attach_encoder(pdata->connector, pdata->bridge.encoder);
return 0;
-err_dsi_host:
- drm_connector_cleanup(&pdata->connector);
-err_conn_init:
+err_initted_aux:
drm_dp_aux_unregister(&pdata->aux);
return ret;
}
@@ -824,7 +780,7 @@ static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
static unsigned int ti_sn_bridge_get_bpp(struct ti_sn65dsi86 *pdata)
{
- if (pdata->connector.display_info.bpc <= 6)
+ if (pdata->connector->display_info.bpc <= 6)
return 18;
else
return 24;
diff --git a/drivers/gpu/drm/dp/drm_dp.c b/drivers/gpu/drm/dp/drm_dp.c
index 6d43325acca5..e159b81800d4 100644
--- a/drivers/gpu/drm/dp/drm_dp.c
+++ b/drivers/gpu/drm/dp/drm_dp.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/string_helpers.h>
#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_print.h>
@@ -144,6 +145,69 @@ u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset);
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align, lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+ if (!(lane_align & DP_INTERLANE_ALIGN_DONE))
+ return false;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if (!(lane_status & DP_LANE_CHANNEL_EQ_DONE))
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_lane_channel_eq_done);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_status;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if (!(lane_status & DP_LANE_SYMBOL_LOCKED))
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_lane_symbol_locked);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return status & DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_eq_interlane_align_done);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return status & DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_cds_interlane_align_done);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return status & DP_128B132B_LT_FAILED;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_link_training_failed);
+
u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
unsigned int lane)
{
@@ -281,6 +345,26 @@ int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIV
}
EXPORT_SYMBOL(drm_dp_read_channel_eq_delay);
+/* Per DP 2.0 Errata */
+int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
+{
+ int unit;
+ u8 val;
+
+ if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ drm_err(aux->drm_dev, "%s: failed rd interval read\n",
+ aux->name);
+ /* default to max */
+ val = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
+ }
+
+ unit = (val & DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT) ? 1 : 2;
+ val &= DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
+
+ return (val + 1) * unit * 1000;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_read_aux_rd_interval);
+
void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
@@ -1239,7 +1323,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
bool branch_device = drm_dp_is_branch(dpcd);
seq_printf(m, "\tDP branch device present: %s\n",
- branch_device ? "yes" : "no");
+ str_yes_no(branch_device));
if (!branch_device)
return;
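
The rd-interval helper above decodes one DPCD byte into microseconds:
the unit bit selects 1 ms or 2 ms granularity, and the remaining 7-bit
value is a zero-based count. A sketch of just the arithmetic, following
the masks used above:

/* val is the 7-bit count after masking; one_ms_unit reflects the
 * DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT bit. */
static int example_128b132b_interval_us(u8 val, bool one_ms_unit)
{
	int unit = one_ms_unit ? 1 : 2;		/* ms per step */

	return (val + 1) * unit * 1000;		/* val=5, 2 ms units -> 12000 us */
}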
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 9781722519c3..54d62fdb4ef9 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
state->mode_blob = NULL;
if (mode) {
+ struct drm_property_blob *blob;
+
drm_mode_convert_to_umode(&umode, mode);
- state->mode_blob =
- drm_property_create_blob(state->crtc->dev,
- sizeof(umode),
- &umode);
- if (IS_ERR(state->mode_blob))
- return PTR_ERR(state->mode_blob);
+ blob = drm_property_create_blob(crtc->dev,
+ sizeof(umode), &umode);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
drm_mode_copy(&state->mode, mode);
+
+ state->mode_blob = blob;
state->enable = true;
drm_dbg_atomic(crtc->dev,
"Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 791379816837..60923cdfe8e1 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -216,6 +216,20 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector)
kfree(bridge_connector);
}
+static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
+ struct dentry *root)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_encoder *encoder = bridge_connector->encoder;
+ struct drm_bridge *bridge;
+
+ list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->debugfs_init)
+ bridge->funcs->debugfs_init(bridge, root);
+ }
+}
+
static const struct drm_connector_funcs drm_bridge_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = drm_bridge_connector_detect,
@@ -223,6 +237,7 @@ static const struct drm_connector_funcs drm_bridge_connector_funcs = {
.destroy = drm_bridge_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .debugfs_init = drm_bridge_connector_debugfs_init,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index d60878bc9c20..72f52f293249 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -211,7 +211,7 @@ static int split_block(struct drm_buddy *mm,
}
static struct drm_buddy_block *
-get_buddy(struct drm_buddy_block *block)
+__get_buddy(struct drm_buddy_block *block)
{
struct drm_buddy_block *parent;
@@ -225,6 +225,23 @@ get_buddy(struct drm_buddy_block *block)
return parent->left;
}
+/**
+ * drm_get_buddy - get buddy address
+ *
+ * @block: DRM buddy block
+ *
+ * Returns the corresponding buddy block for @block, or NULL
+ * if this is a root block and can't be merged further.
+ * Requires some kind of locking to protect against
+ * any concurrent allocate and free operations.
+ */
+struct drm_buddy_block *
+drm_get_buddy(struct drm_buddy_block *block)
+{
+ return __get_buddy(block);
+}
+EXPORT_SYMBOL(drm_get_buddy);
+
static void __drm_buddy_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
@@ -233,7 +250,7 @@ static void __drm_buddy_free(struct drm_buddy *mm,
while ((parent = block->parent)) {
struct drm_buddy_block *buddy;
- buddy = get_buddy(block);
+ buddy = __get_buddy(block);
if (!drm_buddy_block_is_free(buddy))
break;
@@ -282,34 +299,134 @@ void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
}
EXPORT_SYMBOL(drm_buddy_free_list);
-/**
- * drm_buddy_alloc_blocks - allocate power-of-two blocks
- *
- * @mm: DRM buddy manager to allocate from
- * @order: size of the allocation
- *
- * The order value here translates to:
- *
- * 0 = 2^0 * mm->chunk_size
- * 1 = 2^1 * mm->chunk_size
- * 2 = 2^2 * mm->chunk_size
- *
- * Returns:
- * allocated ptr to the &drm_buddy_block on success
- */
-struct drm_buddy_block *
-drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order)
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
+
+static struct drm_buddy_block *
+alloc_range_bias(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order)
+{
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *buddy;
+ LIST_HEAD(dfs);
+ int err;
+ int i;
+
+ end = end - 1;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ do {
+ u64 block_start;
+ u64 block_end;
+
+ block = list_first_entry_or_null(&dfs,
+ struct drm_buddy_block,
+ tmp_link);
+ if (!block)
+ break;
+
+ list_del(&block->tmp_link);
+
+ if (drm_buddy_block_order(block) < order)
+ continue;
+
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+
+ if (!overlaps(start, end, block_start, block_end))
+ continue;
+
+ if (drm_buddy_block_is_allocated(block))
+ continue;
+
+ if (contains(start, end, block_start, block_end) &&
+ order == drm_buddy_block_order(block)) {
+ /*
+ * Find the free block within the range.
+ */
+ if (drm_buddy_block_is_free(block))
+ return block;
+
+ continue;
+ }
+
+ if (!drm_buddy_block_is_split(block)) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto err_undo;
+ }
+
+ list_add(&block->right->tmp_link, &dfs);
+ list_add(&block->left->tmp_link, &dfs);
+ } while (1);
+
+ return ERR_PTR(-ENOSPC);
+
+err_undo:
+ /*
+ * We really don't want to leave around a bunch of split blocks, since
+ * bigger is better, so make sure we merge everything back before we
+ * free the allocated blocks.
+ */
+ buddy = __get_buddy(block);
+ if (buddy &&
+ (drm_buddy_block_is_free(block) &&
+ drm_buddy_block_is_free(buddy)))
+ __drm_buddy_free(mm, block);
+ return ERR_PTR(err);
+}
+
+static struct drm_buddy_block *
+get_maxblock(struct list_head *head)
+{
+ struct drm_buddy_block *max_block = NULL, *node;
+
+ max_block = list_first_entry_or_null(head,
+ struct drm_buddy_block,
+ link);
+ if (!max_block)
+ return NULL;
+
+ list_for_each_entry(node, head, link) {
+ if (drm_buddy_block_offset(node) >
+ drm_buddy_block_offset(max_block))
+ max_block = node;
+ }
+
+ return max_block;
+}
+
+static struct drm_buddy_block *
+alloc_from_freelist(struct drm_buddy *mm,
+ unsigned int order,
+ unsigned long flags)
{
struct drm_buddy_block *block = NULL;
unsigned int i;
int err;
for (i = order; i <= mm->max_order; ++i) {
- block = list_first_entry_or_null(&mm->free_list[i],
- struct drm_buddy_block,
- link);
- if (block)
- break;
+ if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+ block = get_maxblock(&mm->free_list[i]);
+ if (block)
+ break;
+ } else {
+ block = list_first_entry_or_null(&mm->free_list[i],
+ struct drm_buddy_block,
+ link);
+ if (block)
+ break;
+ }
}
if (!block)
@@ -320,78 +437,29 @@ drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order)
while (i != order) {
err = split_block(mm, block);
if (unlikely(err))
- goto out_free;
+ goto err_undo;
- /* Go low */
- block = block->left;
+ block = block->right;
i--;
}
-
- mark_allocated(block);
- mm->avail -= drm_buddy_block_size(mm, block);
- kmemleak_update_trace(block);
return block;
-out_free:
+err_undo:
if (i != order)
__drm_buddy_free(mm, block);
return ERR_PTR(err);
}
-EXPORT_SYMBOL(drm_buddy_alloc_blocks);
-
-static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= e2 && e1 >= s2;
-}
-static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= s2 && e1 >= e2;
-}
-
-/**
- * drm_buddy_alloc_range - allocate range
- *
- * @mm: DRM buddy manager to allocate from
- * @blocks: output list head to add allocated blocks
- * @start: start of the allowed range for this block
- * @size: size of the allocation
- *
- * Intended for pre-allocating portions of the address space, for example to
- * reserve a block for the initial framebuffer or similar, hence the expectation
- * here is that drm_buddy_alloc_blocks() is still the main vehicle for
- * allocations, so if that's not the case then the drm_mm range allocator is
- * probably a much better fit, and so you should probably go use that instead.
- *
- * Note that it's safe to chain together multiple alloc_ranges
- * with the same blocks list
- *
- * Returns:
- * 0 on success, error code on failure.
- */
-int drm_buddy_alloc_range(struct drm_buddy *mm,
- struct list_head *blocks,
- u64 start, u64 size)
+static int __alloc_range(struct drm_buddy *mm,
+ struct list_head *dfs,
+ u64 start, u64 size,
+ struct list_head *blocks)
{
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(allocated);
- LIST_HEAD(dfs);
u64 end;
int err;
- int i;
-
- if (size < mm->chunk_size)
- return -EINVAL;
-
- if (!IS_ALIGNED(size | start, mm->chunk_size))
- return -EINVAL;
-
- if (range_overflows(start, size, mm->size))
- return -EINVAL;
-
- for (i = 0; i < mm->n_roots; ++i)
- list_add_tail(&mm->roots[i]->tmp_link, &dfs);
end = start + size - 1;
@@ -399,7 +467,7 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
u64 block_start;
u64 block_end;
- block = list_first_entry_or_null(&dfs,
+ block = list_first_entry_or_null(dfs,
struct drm_buddy_block,
tmp_link);
if (!block)
@@ -436,8 +504,8 @@ int drm_buddy_alloc_range(struct drm_buddy *mm,
goto err_undo;
}
- list_add(&block->right->tmp_link, &dfs);
- list_add(&block->left->tmp_link, &dfs);
+ list_add(&block->right->tmp_link, dfs);
+ list_add(&block->left->tmp_link, dfs);
} while (1);
list_splice_tail(&allocated, blocks);
@@ -449,7 +517,7 @@ err_undo:
* bigger is better, so make sure we merge everything back before we
* free the allocated blocks.
*/
- buddy = get_buddy(block);
+ buddy = __get_buddy(block);
if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
@@ -459,7 +527,189 @@ err_free:
drm_buddy_free_list(mm, &allocated);
return err;
}
-EXPORT_SYMBOL(drm_buddy_alloc_range);
+
+static int __drm_buddy_alloc_range(struct drm_buddy *mm,
+ u64 start,
+ u64 size,
+ struct list_head *blocks)
+{
+ LIST_HEAD(dfs);
+ int i;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ return __alloc_range(mm, &dfs, start, size, blocks);
+}
+
+/**
+ * drm_buddy_block_trim - free unused pages
+ *
+ * @mm: DRM buddy manager
+ * @new_size: original size requested
+ * @blocks: Input and output list of allocated blocks.
+ * MUST contain single block as input to be trimmed.
+ * On success will contain the newly allocated blocks
+ * making up the @new_size. Blocks always appear in
+ * ascending order
+ *
+ * For contiguous allocation, we round up the size to the nearest
+ * power of two value, drivers consume *actual* size, so remaining
+ * portions are unused and can be optionally freed with this function
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_block_trim(struct drm_buddy *mm,
+ u64 new_size,
+ struct list_head *blocks)
+{
+ struct drm_buddy_block *parent;
+ struct drm_buddy_block *block;
+ LIST_HEAD(dfs);
+ u64 new_start;
+ int err;
+
+ if (!list_is_singular(blocks))
+ return -EINVAL;
+
+ block = list_first_entry(blocks,
+ struct drm_buddy_block,
+ link);
+
+ if (WARN_ON(!drm_buddy_block_is_allocated(block)))
+ return -EINVAL;
+
+ if (new_size > drm_buddy_block_size(mm, block))
+ return -EINVAL;
+
+ if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
+ return -EINVAL;
+
+ if (new_size == drm_buddy_block_size(mm, block))
+ return 0;
+
+ list_del(&block->link);
+ mark_free(mm, block);
+ mm->avail += drm_buddy_block_size(mm, block);
+
+ /* Prevent recursively freeing this node */
+ parent = block->parent;
+ block->parent = NULL;
+
+ new_start = drm_buddy_block_offset(block);
+ list_add(&block->tmp_link, &dfs);
+ err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
+ if (err) {
+ mark_allocated(block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ list_add(&block->link, blocks);
+ }
+
+ block->parent = parent;
+ return err;
+}
+EXPORT_SYMBOL(drm_buddy_block_trim);
+
+/**
+ * drm_buddy_alloc_blocks - allocate power-of-two blocks
+ *
+ * @mm: DRM buddy manager to allocate from
+ * @start: start of the allowed range for this block
+ * @end: end of the allowed range for this block
+ * @size: size of the allocation
+ * @min_page_size: alignment of the allocation
+ * @blocks: output list head to add allocated blocks
+ * @flags: DRM_BUDDY_*_ALLOCATION flags
+ *
+ * alloc_range_bias() is called when range limitations are enforced;
+ * it traverses the tree and returns the desired block.
+ *
+ * alloc_from_freelist() is called when *no* range restrictions
+ * are enforced; it picks the block from the freelist.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags)
+{
+ struct drm_buddy_block *block = NULL;
+ unsigned int min_order, order;
+ unsigned long pages;
+ LIST_HEAD(allocated);
+ int err;
+
+ if (size < mm->chunk_size)
+ return -EINVAL;
+
+ if (min_page_size < mm->chunk_size)
+ return -EINVAL;
+
+ if (!is_power_of_2(min_page_size))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(start | end | size, mm->chunk_size))
+ return -EINVAL;
+
+ if (end > mm->size)
+ return -EINVAL;
+
+ if (range_overflows(start, size, mm->size))
+ return -EINVAL;
+
+ /* Actual range allocation */
+ if (start + size == end)
+ return __drm_buddy_alloc_range(mm, start, size, blocks);
+
+ pages = size >> ilog2(mm->chunk_size);
+ order = fls(pages) - 1;
+ min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+
+ do {
+ order = min(order, (unsigned int)fls(pages) - 1);
+ BUG_ON(order > mm->max_order);
+ BUG_ON(order < min_order);
+
+ do {
+ if (flags & DRM_BUDDY_RANGE_ALLOCATION)
+ /* Allocate traversing within the range */
+ block = alloc_range_bias(mm, start, end, order);
+ else
+ /* Allocate from freelist */
+ block = alloc_from_freelist(mm, order, flags);
+
+ if (!IS_ERR(block))
+ break;
+
+ if (order-- == min_order) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+ } while (1);
+
+ mark_allocated(block);
+ mm->avail -= drm_buddy_block_size(mm, block);
+ kmemleak_update_trace(block);
+ list_add_tail(&block->link, &allocated);
+
+ pages -= BIT(order);
+
+ if (!pages)
+ break;
+ } while (1);
+
+ list_splice_tail(&allocated, blocks);
+ return 0;
+
+err_free:
+ drm_buddy_free_list(mm, &allocated);
+ return err;
+}
+EXPORT_SYMBOL(drm_buddy_alloc_blocks);
/**
* drm_buddy_block_print - print block information
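
As a usage illustration of the reworked entry points, the sketch below
allocates 1 MiB with no placement restriction, shows where trimming fits,
and frees the blocks. Error handling is minimal and the size constant is
illustrative, assuming it is chunk-size aligned.

static int example_buddy_user(struct drm_buddy *mm)
{
	LIST_HEAD(blocks);
	u64 size = 1024 * 1024;	/* 1 MiB */
	int err;

	/* start/end span the whole space; without DRM_BUDDY_RANGE_ALLOCATION
	 * the blocks come straight from the freelist. */
	err = drm_buddy_alloc_blocks(mm, 0, mm->size, size,
				     mm->chunk_size, &blocks, 0);
	if (err)
		return err;

	/* Shown for API shape: trims a single block back to size
	 * (a no-op when the block already matches, as it does here). */
	if (list_is_singular(&blocks))
		drm_buddy_block_trim(mm, size, &blocks);

	drm_buddy_free_list(mm, &blocks);
	return 0;
}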
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 4bb093ccf1b8..c3e6e615bf09 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -112,8 +112,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
kunmap_atomic(page_virtual);
}
#else
- pr_err("Architecture has no drm_cache.c support\n");
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
@@ -143,8 +142,7 @@ drm_clflush_sg(struct sg_table *st)
if (wbinvd_on_all_cpus())
pr_err("Timed out waiting for cache flush\n");
#else
- pr_err("Architecture has no drm_cache.c support\n");
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
@@ -177,8 +175,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
if (wbinvd_on_all_cpus())
pr_err("Timed out waiting for cache flush\n");
#else
- pr_err("Architecture has no drm_cache.c support\n");
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
@@ -221,7 +218,7 @@ static void memcpy_fallback(struct iosys_map *dst,
if (!dst->is_iomem && !src->is_iomem) {
memcpy(dst->vaddr, src->vaddr, len);
} else if (!src->is_iomem) {
- iosys_map_memcpy_to(dst, src->vaddr, len);
+ iosys_map_memcpy_to(dst, 0, src->vaddr, len);
} else if (!dst->is_iomem) {
memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
} else {
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index ced09c7c06f9..e6346a67cd98 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <drm/drm_atomic.h>
#include <drm/drm_client.h>
@@ -241,7 +242,7 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
connector = connectors[i];
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+ connector->display_info.non_desktop ? "non desktop" : str_yes_no(enabled[i]));
any_enabled |= enabled[i];
}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b0a826489488..7f1b82dbaebb 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -436,6 +436,9 @@ void drm_debugfs_connector_add(struct drm_connector *connector)
/* vrr range */
debugfs_create_file("vrr_range", S_IRUGO, root, connector,
&vrr_range_fops);
+
+ if (connector->funcs->debugfs_init)
+ connector->funcs->debugfs_init(connector, root);
}
void drm_debugfs_connector_remove(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a504542238ed..561f53831e29 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5340,6 +5340,9 @@ drm_reset_display_info(struct drm_connector *connector)
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
+ info->edid_hdmi_rgb444_dc_modes = 0;
+ info->edid_hdmi_ycbcr444_dc_modes = 0;
+
info->non_desktop = 0;
memset(&info->monitor_range, 0, sizeof(info->monitor_range));
@@ -5366,6 +5369,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
goto out;
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, edid);
/*
@@ -5414,7 +5418,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->name, info->bpc);
- info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e9a9d35fbf5e..d265a73313c9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -385,7 +385,7 @@ static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
for (y = clip->y1; y < clip->y2; y++) {
- iosys_map_memcpy_to(dst, src, len);
+ iosys_map_memcpy_to(dst, 0, src, len);
iosys_map_incr(dst, fb->pitches[0]);
src += fb->pitches[0];
}
@@ -680,6 +680,31 @@ static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
schedule_work(&helper->damage_work);
}
+/* Convert memory region into area of scanlines and pixels per scanline */
+static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
+ struct drm_rect *clip)
+{
+ off_t end = off + len;
+ u32 x1 = 0;
+ u32 y1 = off / info->fix.line_length;
+ u32 x2 = info->var.xres;
+ u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
+
+ if ((y2 - y1) == 1) {
+ /*
+ * We've only written to a single scanline. Try to reduce
+ * the number of horizontal pixels that need an update.
+ */
+ off_t bit_off = (off % info->fix.line_length) * 8;
+ /* end is exclusive; avoid wrapping to 0 at an exact line boundary */
+ off_t bit_end = ((end - 1) % info->fix.line_length + 1) * 8;
+
+ x1 = bit_off / info->var.bits_per_pixel;
+ x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
+ }
+
+ drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1);
+}
+
/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
@@ -693,23 +718,23 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
{
unsigned long start, end, min, max;
struct page *page;
- u32 y1, y2;
+ struct drm_rect damage_area;
min = ULONG_MAX;
max = 0;
list_for_each_entry(page, pagelist, lru) {
start = page->index << PAGE_SHIFT;
- end = start + PAGE_SIZE - 1;
+ end = start + PAGE_SIZE;
min = min(min, start);
max = max(max, end);
}
+ if (min >= max)
+ return;
- if (min < max) {
- y1 = min / info->fix.line_length;
- y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
- info->var.yres);
- drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1);
- }
+ drm_fb_helper_memory_range_to_clip(info, min, max - min, &damage_area);
+ drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
+ drm_rect_width(&damage_area),
+ drm_rect_height(&damage_area));
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
@@ -741,11 +766,18 @@ EXPORT_SYMBOL(drm_fb_helper_sys_read);
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
+ loff_t pos = *ppos;
ssize_t ret;
+ struct drm_rect damage_area;
ret = fb_sys_write(info, buf, count, ppos);
- if (ret > 0)
- drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres);
+ if (ret <= 0)
+ return ret;
+
+ drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
+ drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
+ drm_rect_width(&damage_area),
+ drm_rect_height(&damage_area));
return ret;
}
@@ -2224,6 +2256,7 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
loff_t pos = *ppos;
size_t total_size;
ssize_t ret;
+ struct drm_rect damage_area;
int err = 0;
if (info->screen_size)
@@ -2252,13 +2285,19 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
else
ret = fb_write_screen_buffer(info, buf, count, pos);
- if (ret > 0)
- *ppos += ret;
+ if (ret < 0)
+ return ret; /* return last error, if any */
+ else if (!ret)
+ return err; /* return previous error, if any */
- if (ret > 0)
- drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual);
+ *ppos += ret;
- return ret ? ret : err;
+ drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
+ drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
+ drm_rect_width(&damage_area),
+ drm_rect_height(&damage_area));
+
+ return ret;
}
static void drm_fbdev_fb_fillrect(struct fb_info *info,
@@ -2346,6 +2385,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = sizes->surface_height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
+ fbi->flags = FBINFO_DEFAULT;
drm_fb_helper_fill_info(fbi, fb_helper, sizes);
@@ -2353,19 +2393,21 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->screen_buffer = vzalloc(fbi->screen_size);
if (!fbi->screen_buffer)
return -ENOMEM;
+ fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
fbi->fbdefio = &drm_fbdev_defio;
-
fb_deferred_io_init(fbi);
} else {
/* buffer is mapped for HW framebuffer */
ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
if (ret)
return ret;
- if (map.is_iomem)
+ if (map.is_iomem) {
fbi->screen_base = map.vaddr_iomem;
- else
+ } else {
fbi->screen_buffer = map.vaddr;
+ fbi->flags |= FBINFO_VIRTFB;
+ }
/*
* Shamelessly leak the physical address to user-space. As
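
Worked numbers for drm_fb_helper_memory_range_to_clip() above, assuming a
1024x768 XRGB8888 framebuffer (line_length = 4096 bytes, bits_per_pixel =
32) and a 64-byte write at offset 4224:

/*
 * y1 = 4224 / 4096                       = 1
 * y2 = DIV_ROUND_UP(4224 + 64, 4096)     = 2    -> one scanline touched
 * bit_off = (4224 % 4096) * 8            = 1024
 * bit_end = ((4288 - 1) % 4096 + 1) * 8  = 1536
 * x1 = 1024 / 32                         = 32
 * x2 = DIV_ROUND_UP(1536, 32)            = 48
 *
 * Damage clip: 16 pixels on scanline 1 instead of the full screen.
 */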
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 0f28dd2bdd72..bc0f49773868 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -12,9 +12,11 @@
#include <linux/slab.h>
#include <linux/io.h>
+#include <drm/drm_device.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp)
@@ -464,6 +466,21 @@ void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst,
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
+static void drm_fb_xrgb8888_to_gray8_line(u8 *dst, const u32 *src, unsigned int pixels)
+{
+ unsigned int x;
+
+ for (x = 0; x < pixels; x++) {
+ u8 r = (*src & 0x00ff0000) >> 16;
+ u8 g = (*src & 0x0000ff00) >> 8;
+ u8 b = *src & 0x000000ff;
+
+ /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ *dst++ = (3 * r + 6 * g + b) / 10;
+ src++;
+ }
+}
+
/**
* drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
* @dst: 8-bit grayscale destination buffer
@@ -484,8 +501,9 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
const struct drm_framebuffer *fb, const struct drm_rect *clip)
{
- unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
- unsigned int x, y;
+ unsigned int linepixels = clip->x2 - clip->x1;
+ unsigned int len = linepixels * sizeof(u32);
+ unsigned int y;
void *buf;
u8 *dst8;
u32 *src32;
@@ -508,16 +526,7 @@ void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vad
for (y = clip->y1; y < clip->y2; y++) {
dst8 = dst;
src32 = memcpy(buf, vaddr, len);
- for (x = clip->x1; x < clip->x2; x++) {
- u8 r = (*src32 & 0x00ff0000) >> 16;
- u8 g = (*src32 & 0x0000ff00) >> 8;
- u8 b = *src32 & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dst8++ = (3 * r + 6 * g + b) / 10;
- src32++;
- }
-
+ drm_fb_xrgb8888_to_gray8_line(dst8, src32, linepixels);
vaddr += fb->pitches[0];
dst += dst_pitch;
}
@@ -584,3 +593,111 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
return -EINVAL;
}
EXPORT_SYMBOL(drm_fb_blit_toio);
+
+static void drm_fb_gray8_to_mono_reversed_line(u8 *dst, const u8 *src, unsigned int pixels,
+ unsigned int start_offset, unsigned int end_len)
+{
+ unsigned int xb, i;
+
+ for (xb = 0; xb < pixels; xb++) {
+ unsigned int start = 0, end = 8;
+ u8 byte = 0x00;
+
+ if (xb == 0 && start_offset)
+ start = start_offset;
+
+ if (xb == pixels - 1 && end_len)
+ end = end_len;
+
+ for (i = start; i < end; i++) {
+ unsigned int x = xb * 8 + i;
+
+ byte >>= 1;
+ if (src[x] >> 7)
+ byte |= BIT(7);
+ }
+ *dst++ = byte;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_mono_reversed - Convert XRGB8888 to reversed monochrome
+ * @dst: reversed monochrome destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * DRM doesn't have native monochrome support. Drivers for monochrome
+ * hardware can announce the commonly supported XR24 format to userspace
+ * and use this function to convert to the native format.
+ *
+ * This function uses drm_fb_xrgb8888_to_gray8() to convert to grayscale and
+ * then converts the result from grayscale to reversed monochrome.
+ */
+void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *vaddr,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip)
+{
+ unsigned int linepixels = drm_rect_width(clip);
+ unsigned int lines = clip->y2 - clip->y1;
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int len_src32 = linepixels * cpp;
+ struct drm_device *dev = fb->dev;
+ unsigned int start_offset, end_len;
+ unsigned int y;
+ u8 *mono = dst, *gray8;
+ u32 *src32;
+
+ if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+
+ /*
+ * The reversed mono destination buffer contains 1 bit per pixel
+ * and destination scanlines have to be a multiple of 8 pixels.
+ */
+ if (!dst_pitch)
+ dst_pitch = DIV_ROUND_UP(linepixels, 8);
+
+ drm_WARN_ONCE(dev, dst_pitch % 8 != 0, "dst_pitch is not a multiple of 8\n");
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ *
+ * Also, format conversion from XR24 to reversed monochrome
+ * is done line by line, using 8-bit grayscale as an
+ * intermediate step.
+ *
+ * Allocate a buffer to be used for both copying from the cma
+ * memory and to store the intermediate grayscale line pixels.
+ */
+ src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL);
+ if (!src32)
+ return;
+
+ gray8 = (u8 *)src32 + len_src32;
+
+ /*
+ * For damage handling, it is possible that only part of the source
+ * buffer is copied, which could lead to start and end pixels that
+ * are not aligned to a multiple of 8.
+ *
+ * Calculate if the start and end pixels are not aligned and set the
+ * offsets for the reversed mono line conversion function to adjust.
+ */
+ start_offset = clip->x1 % 8;
+ end_len = clip->x2 % 8;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ src32 = memcpy(src32, vaddr, len_src32);
+ drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
+ drm_fb_gray8_to_mono_reversed_line(mono, gray8, dst_pitch,
+ start_offset, end_len);
+ vaddr += fb->pitches[0];
+ mono += dst_pitch;
+ }
+
+ kfree(src32);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono_reversed);
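
The line converters above use an integer approximation of the BT.601 luma
weights; a few worked values show how close (3r + 6g + b) / 10 stays to
0.299 R + 0.587 G + 0.114 B, and how the mono pass then thresholds on the
top bit:

/*
 * white (255, 255, 255): (765 + 1530 + 255) / 10 = 255   exact: 255
 * red   (255,   0,   0):  765 / 10               =  76   exact: ~76
 * green (  0, 255,   0): 1530 / 10               = 153   exact: ~150
 * blue  (  0,   0, 255):  255 / 10               =  25   exact: ~29
 *
 * drm_fb_gray8_to_mono_reversed_line() then tests src[x] >> 7,
 * i.e. a 50% threshold: gray >= 128 sets the bit.
 */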
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 07f5abc875e9..4562a8b86579 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -309,7 +309,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
- !dev->mode_config.allow_fb_modifiers) {
+ dev->mode_config.fb_modifiers_not_supported) {
DRM_DEBUG_KMS("driver does not support fb modifiers\n");
return ERR_PTR(-EINVAL);
}
@@ -594,7 +594,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
r->pixel_format = fb->format->format;
r->flags = 0;
- if (dev->mode_config.allow_fb_modifiers)
+ if (!dev->mode_config.fb_modifiers_not_supported)
r->flags |= DRM_MODE_FB_MODIFIERS;
for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
@@ -607,7 +607,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
for (i = 0; i < fb->format->num_planes; i++) {
r->pitches[i] = fb->pitches[i];
r->offsets[i] = fb->offsets[i];
- if (dev->mode_config.allow_fb_modifiers)
+ if (!dev->mode_config.fb_modifiers_not_supported)
r->modifier[i] = fb->modifier;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8c7b24f4b0e4..56fb87885146 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -25,20 +25,21 @@
*
*/
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <linux/file.h>
+#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
#include <linux/pagevec.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
@@ -1145,7 +1146,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_vma_node_start(&obj->vma_node));
drm_printf_indent(p, indent, "size=%zu\n", obj->size);
drm_printf_indent(p, indent, "imported=%s\n",
- obj->import_attach ? "yes" : "no");
+ str_yes_no(obj->import_attach));
if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 88c432a7cb3c..f36734c2c9e1 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -513,6 +513,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_DONTEXPAND;
if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 3e738aea2664..8ad0e02991ca 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -46,6 +46,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
static struct drm_gem_shmem_object *
@@ -588,11 +589,12 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
-static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
@@ -624,11 +626,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8b8744dcf691..51fcf1298023 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -297,7 +297,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
req->value = 64;
break;
case DRM_CAP_ADDFB2_MODIFIERS:
- req->value = dev->mode_config.allow_fb_modifiers;
+ req->value = !dev->mode_config.fb_modifiers_not_supported;
break;
case DRM_CAP_CRTC_IN_VBLANK_EVENT:
req->value = 1;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 1c72208d8133..96b13e36293c 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -880,7 +880,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
* @dst: mode to overwrite
* @src: mode to copy
*
- * Copy an existing mode into another mode, preserving the object id and
+ * Copy an existing mode into another mode, preserving the
* list head of the destination mode.
*/
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index deeec60a3315..bf0daa8d9bbd 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -237,6 +237,9 @@ static int __drm_universal_plane_init(struct drm_device *dev,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
+ static const uint64_t default_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ };
unsigned int format_modifier_count = 0;
int ret;
@@ -277,16 +280,16 @@ static int __drm_universal_plane_init(struct drm_device *dev,
while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID)
format_modifier_count++;
+ } else {
+ if (!dev->mode_config.fb_modifiers_not_supported) {
+ format_modifiers = default_modifiers;
+ format_modifier_count = ARRAY_SIZE(default_modifiers);
+ }
}
/* autoset the cap and check for consistency across all planes */
- if (format_modifier_count) {
- drm_WARN_ON(dev, !config->allow_fb_modifiers &&
- !list_empty(&config->plane_list));
- config->allow_fb_modifiers = true;
- } else {
- drm_WARN_ON(dev, config->allow_fb_modifiers);
- }
+ drm_WARN_ON(dev, config->fb_modifiers_not_supported &&
+ format_modifier_count);
plane->modifier_count = format_modifier_count;
plane->modifiers = kmalloc_array(format_modifier_count,
@@ -341,7 +344,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
- if (config->allow_fb_modifiers)
+ if (format_modifier_count)
create_in_format_blob(dev, plane);
return 0;
@@ -368,8 +371,8 @@ static int __drm_universal_plane_init(struct drm_device *dev,
* drm_universal_plane_init() to let the DRM managed resource infrastructure
* take care of cleanup and deallocation.
*
- * Drivers supporting modifiers must set @format_modifiers on all their planes,
- * even those that only support DRM_FORMAT_MOD_LINEAR.
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
*
* Returns:
* Zero on success, error code on failure.
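
With the documentation change above, a linear-only driver no longer has
to spell out its modifier list. A hedged sketch of the call, where
my_plane_funcs and my_formats are assumed driver definitions:

ret = drm_universal_plane_init(dev, plane, 0x1 /* CRTC 0 */,
			       &my_plane_funcs,
			       my_formats, ARRAY_SIZE(my_formats),
			       NULL, /* modifiers: linear is implied now */
			       DRM_PLANE_TYPE_PRIMARY, NULL);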
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index 2b082e5e9732..6cc39e30781f 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -379,6 +379,7 @@ static void drm_privacy_screen_device_release(struct device *dev)
* drm_privacy_screen_register - register a privacy-screen
* @parent: parent-device for the privacy-screen
* @ops: &struct drm_privacy_screen_ops pointer with ops for the privacy-screen
+ * @data: Private data owned by the privacy screen provider
*
* Create and register a privacy-screen.
*
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index c313a5b4549c..7e48dcd1bee4 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -853,12 +853,57 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
&args->handle);
}
+
+/*
+ * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be
+ * added as a timeline fence to a chain again.
+ */
+static int drm_syncobj_flatten_chain(struct dma_fence **f)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(*f);
+ struct dma_fence *tmp, **fences;
+ struct dma_fence_array *array;
+ unsigned int count;
+
+ if (!chain)
+ return 0;
+
+ count = 0;
+ dma_fence_chain_for_each(tmp, &chain->base)
+ ++count;
+
+ fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ count = 0;
+ dma_fence_chain_for_each(tmp, &chain->base)
+ fences[count++] = dma_fence_get(tmp);
+
+ array = dma_fence_array_create(count, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!array)
+ goto free_fences;
+
+ dma_fence_put(*f);
+ *f = &array->base;
+ return 0;
+
+free_fences:
+ while (count--)
+ dma_fence_put(fences[count]);
+
+ kfree(fences);
+ return -ENOMEM;
+}
+
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
struct drm_syncobj_transfer *args)
{
struct drm_syncobj *timeline_syncobj = NULL;
- struct dma_fence *fence;
struct dma_fence_chain *chain;
+ struct dma_fence *fence;
int ret;
timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
@@ -869,16 +914,22 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
args->src_point, args->flags,
&fence);
if (ret)
- goto err;
+ goto err_put_timeline;
+
+ ret = drm_syncobj_flatten_chain(&fence);
+ if (ret)
+ goto err_free_fence;
+
chain = dma_fence_chain_alloc();
if (!chain) {
ret = -ENOMEM;
- goto err1;
+ goto err_free_fence;
}
+
drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
-err1:
+err_free_fence:
dma_fence_put(fence);
-err:
+err_put_timeline:
drm_syncobj_put(timeline_syncobj);
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 58f593b278c1..35e5ef7dbdcc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -195,7 +195,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
msecs_to_jiffies(500), NULL, NULL,
- dev_name(gpu->dev));
+ dev_name(gpu->dev), gpu->dev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9743b6b17447..c68498497c0b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -455,6 +455,9 @@ static int exynos_drm_init(void)
{
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
ret = exynos_drm_register_devices();
if (ret)
return ret;
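The guard added here implements the nomodeset policy: drm_firmware_drivers_only()
(a real helper declared in drm_drv.h) reports whether the user asked for firmware
framebuffer drivers only, and native drivers are expected to refuse to load in
that case. The same check is added to gma500 and hyperv further down. In
isolation the pattern is simply (my_drm_init/my_register are placeholders):

    static int __init my_drm_init(void)
    {
            if (drm_firmware_drivers_only())
                    return -ENODEV; /* leave the firmware fb driver in charge */

            return my_register();
    }
    module_init(my_drm_init);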
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 660fe573db96..7a503bf08d0f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -368,7 +369,7 @@ static struct platform_driver fsl_dcu_drm_platform_driver = {
},
};
-module_platform_driver(fsl_dcu_drm_platform_driver);
+drm_module_platform_driver(fsl_dcu_drm_platform_driver);
MODULE_DESCRIPTION("Freescale DCU DRM Driver");
MODULE_LICENSE("GPL");
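drm_module_platform_driver() bundles that same nomodeset check into the usual
module boilerplate. Conceptually (a simplified sketch, not the verbatim macro
expansion) the conversion above is equivalent to:

    static int __init fsl_dcu_init(void)
    {
            if (drm_firmware_drivers_only())
                    return -ENODEV;
            return platform_driver_register(&fsl_dcu_drm_platform_driver);
    }
    module_init(fsl_dcu_init);

    static void __exit fsl_dcu_exit(void)
    {
            platform_driver_unregister(&fsl_dcu_drm_platform_driver);
    }
    module_exit(fsl_dcu_exit);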
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 65cf1c79dd7c..eeb681be9c95 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -538,6 +538,9 @@ static struct pci_driver psb_pci_driver = {
static int __init psb_init(void)
{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
return pci_register_driver(&psb_pci_driver);
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 43943e980203..073adfe438dd 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && ARM64
+ depends on DRM && PCI && (ARM64 || COMPILE_TEST)
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 98ae9a48f3fe..3cf057269f2a 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -23,6 +23,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -307,7 +308,7 @@ static struct platform_driver kirin_drm_platform_driver = {
},
};
-module_platform_driver(kirin_drm_platform_driver);
+drm_module_platform_driver(kirin_drm_platform_driver);
MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 00e53de4812b..4a8941fa0815 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -305,6 +305,9 @@ static int __init hyperv_init(void)
{
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
ret = pci_register_driver(&hyperv_pci_driver);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 2ac220bfd0ed..42b5400f0fef 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -103,6 +103,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on X86
depends on 64BIT
default n
help
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 139e0e7eba94..9d588d936e3d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,7 @@
# will most likely get a sudden build breakage... Hopefully we will fix
# new warnings before CI updates!
subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y += -Wno-format-security
subdir-ccflags-y += -Wno-unused-parameter
subdir-ccflags-y += -Wno-type-limits
subdir-ccflags-y += -Wno-missing-field-initializers
@@ -174,7 +175,7 @@ i915-y += \
i915_trace_points.o \
i915_ttm_buddy_manager.o \
i915_vma.o \
- i915_vma_snapshot.o \
+ i915_vma_resource.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -197,6 +198,7 @@ i915-y += gt/uc/intel_uc.o \
# modesetting core code
i915-y += \
+ display/hsw_ips.o \
display/intel_atomic.o \
display/intel_atomic_plane.o \
display/intel_audio.o \
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
new file mode 100644
index 000000000000..38014e0cc9ad
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "hsw_ips.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_pcode.h"
+
+static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!crtc_state->ips_enabled)
+ return;
+
+ /*
+ * We can only enable IPS after we enable a plane and wait for a vblank.
+ * This function is called from post_plane_update, which is run after
+ * a vblank wait.
+ */
+ drm_WARN_ON(&i915->drm,
+ !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+
+ if (IS_BROADWELL(i915)) {
+ drm_WARN_ON(&i915->drm,
+ snb_pcode_write(i915, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
+ /*
+ * Quoting Art Runyan: "it's not safe to expect any particular
+ * value in IPS_CTL bit 31 after enabling IPS through the
+ * mailbox." Moreover, the mailbox may return a bogus state,
+ * so we need to just enable it and continue on.
+ */
+ } else {
+ intel_de_write(i915, IPS_CTL, IPS_ENABLE);
+ /*
+ * The bit only becomes 1 in the next vblank, so this wait here
+ * is essentially intel_wait_for_vblank. If we don't have this
+ * and don't wait for vblanks until the end of crtc_enable, then
+ * the HW state readout code will complain that the expected
+ * IPS_CTL value is not the one we read.
+ */
+ if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50))
+ drm_err(&i915->drm,
+ "Timed out waiting for IPS enable\n");
+ }
+}
+
+bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ bool need_vblank_wait = false;
+
+ if (!crtc_state->ips_enabled)
+ return need_vblank_wait;
+
+ if (IS_BROADWELL(i915)) {
+ drm_WARN_ON(&i915->drm,
+ snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0));
+ /*
+ * Wait for PCODE to finish disabling IPS. The BSpec-specified
+ * 42ms timeout value leads to occasional timeouts, so use 100ms
+ * instead.
+ */
+ if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100))
+ drm_err(&i915->drm,
+ "Timed out waiting for IPS disable\n");
+ } else {
+ intel_de_write(i915, IPS_CTL, 0);
+ intel_de_posting_read(i915, IPS_CTL);
+ }
+
+ /* We need to wait for a vblank before we can disable the plane. */
+ need_vblank_wait = true;
+
+ return need_vblank_wait;
+}
+
+static bool hsw_ips_need_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!old_crtc_state->ips_enabled)
+ return false;
+
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Disable IPS before we program the LUT.
+ */
+ if (IS_HASWELL(i915) &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
+ return !new_crtc_state->ips_enabled;
+}
+
+bool hsw_ips_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!hsw_ips_need_disable(state, crtc))
+ return false;
+
+ return hsw_ips_disable(old_crtc_state);
+}
+
+static bool hsw_ips_need_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!new_crtc_state->ips_enabled)
+ return false;
+
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Re-enable IPS after the LUT has been programmed.
+ */
+ if (IS_HASWELL(i915) &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
+ /*
+ * We can't read out IPS on Broadwell, so assume the worst and
+ * forcibly enable IPS on the first fastset.
+ */
+ if (new_crtc_state->update_pipe && old_crtc_state->inherited)
+ return true;
+
+ return !old_crtc_state->ips_enabled;
+}
+
+void hsw_ips_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!hsw_ips_need_enable(state, crtc))
+ return;
+
+ hsw_ips_enable(new_crtc_state);
+}
+
+/* IPS only exists on ULT machines and is tied to pipe A. */
+bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+ return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
+}
+
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* IPS only exists on ULT machines and is tied to pipe A. */
+ if (!hsw_crtc_supports_ips(crtc))
+ return false;
+
+ if (!i915->params.enable_ips)
+ return false;
+
+ if (crtc_state->pipe_bpp > 24)
+ return false;
+
+ /*
+ * We compare against max, which means we must take the increased
+ * cdclk requirement into account when calculating the new cdclk.
+ *
+ * We should measure whether using a lower cdclk without IPS would
+ * be preferable.
+ */
+ if (IS_BROADWELL(i915) &&
+ crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100)
+ return false;
+
+ return true;
+}
+
+int hsw_ips_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->ips_enabled = false;
+
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return 0;
+
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ if (crtc_state->crc_enabled)
+ return 0;
+
+ /* IPS should be fine as long as at least one plane is enabled. */
+ if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
+ return 0;
+
+ if (IS_BROADWELL(i915)) {
+ const struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ return 0;
+ }
+
+ crtc_state->ips_enabled = true;
+
+ return 0;
+}
+
+void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!hsw_crtc_supports_ips(crtc))
+ return;
+
+ if (IS_HASWELL(i915)) {
+ crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE;
+ } else {
+ /*
+ * We cannot read out the IPS state on Broadwell; set it to
+ * true so we can force it to a defined state on the first
+ * commit.
+ */
+ crtc_state->ips_enabled = true;
+ }
+}
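The exported pre/post pair is meant to bracket the plane and LUT programming in
the atomic commit path (the caller is not part of this hunk). A hedged sketch of
the intended ordering, with wait_for_vblank() standing in for whatever
vblank-wait primitive the caller uses:

    static void commit_pipe_update(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
    {
            /* Returns true when IPS was disabled and a vblank must pass. */
            if (hsw_ips_pre_update(state, crtc))
                    wait_for_vblank(crtc);  /* placeholder primitive */

            /* ... program planes, LUTs, etc. ... */

            /* Re-enables IPS only if the new state still allows it. */
            hsw_ips_post_update(state, crtc);
    }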
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
new file mode 100644
index 000000000000..4564dee497d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __HSW_IPS_H__
+#define __HSW_IPS_H__
+
+#include <linux/types.h>
+
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+bool hsw_ips_disable(const struct intel_crtc_state *crtc_state);
+bool hsw_ips_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void hsw_ips_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+bool hsw_crtc_supports_ips(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+int hsw_ips_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
+
+#endif /* __HSW_IPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 2d5bb9195b20..13b07c6fd6be 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_mipi_dsi.h>
#include "icl_dsi.h"
+#include "icl_dsi_regs.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_combo_phy.h"
@@ -570,7 +571,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
- tmp &= ~MASTER_INIT_TIMER_MASK;
+ tmp &= ~DSI_T_INIT_MASTER_MASK;
tmp |= intel_dsi->init_count;
intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
}
@@ -788,14 +789,14 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* program DSI operation mode */
if (is_vid_mode(intel_dsi)) {
tmp &= ~OP_MODE_MASK;
- switch (intel_dsi->video_mode_format) {
+ switch (intel_dsi->video_mode) {
default:
- MISSING_CASE(intel_dsi->video_mode_format);
+ MISSING_CASE(intel_dsi->video_mode);
fallthrough;
- case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
+ case NON_BURST_SYNC_EVENTS:
tmp |= VIDEO_MODE_SYNC_EVENT;
break;
- case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
+ case NON_BURST_SYNC_PULSE:
tmp |= VIDEO_MODE_SYNC_PULSE;
break;
}
@@ -960,8 +961,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* TRANS_HSYNC register to be programmed only for video mode */
if (is_vid_mode(intel_dsi)) {
- if (intel_dsi->video_mode_format ==
- VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+ if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) {
+ /* BSPEC: hsync size should be at least 16 pixels */
if (hsync_size < 16)
drm_err(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/icl_dsi_regs.h b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
new file mode 100644
index 000000000000..f78f28b8dd94
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __ICL_DSI_REGS_H__
+#define __ICL_DSI_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* Gen11 DSI */
+#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
+ dsi0, dsi1)
+#define _ICL_DSI_ESC_CLK_DIV0 0x6b090
+#define _ICL_DSI_ESC_CLK_DIV1 0x6b890
+#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DSI_ESC_CLK_DIV0, \
+ _ICL_DSI_ESC_CLK_DIV1)
+#define _ICL_DPHY_ESC_CLK_DIV0 0x162190
+#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190
+#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DPHY_ESC_CLK_DIV0, \
+ _ICL_DPHY_ESC_CLK_DIV1)
+#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16)
+#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16
+#define ICL_ESC_CLK_DIV_MASK 0x1ff
+#define ICL_ESC_CLK_DIV_SHIFT 0
+#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+
+#define _ADL_MIPIO_REG 0x180
+#define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw))
+#define TX_ESC_CLK_DIV_PHY_SEL REG_BIT(16)
+#define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16)
+#define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f)
+
+#define _DSI_CMD_FRMCTL_0 0x6b034
+#define _DSI_CMD_FRMCTL_1 0x6b834
+#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
+ _DSI_CMD_FRMCTL_0,\
+ _DSI_CMD_FRMCTL_1)
+#define DSI_FRAME_UPDATE_REQUEST (1 << 31)
+#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29)
+#define DSI_NULL_PACKET_ENABLE (1 << 28)
+#define DSI_FRAME_IN_PROGRESS (1 << 0)
+
+#define _DSI_INTR_MASK_REG_0 0x6b070
+#define _DSI_INTR_MASK_REG_1 0x6b870
+#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_MASK_REG_0,\
+ _DSI_INTR_MASK_REG_1)
+
+#define _DSI_INTR_IDENT_REG_0 0x6b074
+#define _DSI_INTR_IDENT_REG_1 0x6b874
+#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_IDENT_REG_0,\
+ _DSI_INTR_IDENT_REG_1)
+#define DSI_TE_EVENT (1 << 31)
+#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30)
+#define DSI_TX_DATA (1 << 29)
+#define DSI_ULPS_ENTRY_DONE (1 << 28)
+#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27)
+#define DSI_HOST_CHKSUM_ERROR (1 << 26)
+#define DSI_HOST_MULTI_ECC_ERROR (1 << 25)
+#define DSI_HOST_SINGL_ECC_ERROR (1 << 24)
+#define DSI_HOST_CONTENTION_DETECTED (1 << 23)
+#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22)
+#define DSI_HOST_TIMEOUT_ERROR (1 << 21)
+#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20)
+#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19)
+#define DSI_FRAME_UPDATE_DONE (1 << 16)
+#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15)
+#define DSI_INVALID_TX_LENGTH (1 << 13)
+#define DSI_INVALID_VC (1 << 12)
+#define DSI_INVALID_DATA_TYPE (1 << 11)
+#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10)
+#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9)
+#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8)
+#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7)
+#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6)
+#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5)
+#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4)
+#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3)
+#define DSI_EOT_SYNC_ERROR (1 << 2)
+#define DSI_SOT_SYNC_ERROR (1 << 1)
+#define DSI_SOT_ERROR (1 << 0)
+
+/* ICL DSI MODE control */
+#define _ICL_DSI_IO_MODECTL_0 0x6B094
+#define _ICL_DSI_IO_MODECTL_1 0x6B894
+#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \
+ _ICL_DSI_IO_MODECTL_0, \
+ _ICL_DSI_IO_MODECTL_1)
+#define COMBO_PHY_MODE_DSI (1 << 0)
+
+/* TGL DSI Chicken register */
+#define _TGL_DSI_CHKN_REG_0 0x6B0C0
+#define _TGL_DSI_CHKN_REG_1 0x6B8C0
+#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
+ _TGL_DSI_CHKN_REG_0, \
+ _TGL_DSI_CHKN_REG_1)
+#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \
+ (byte_clocks))
+#define _ICL_DSI_T_INIT_MASTER_0 0x6b088
+#define _ICL_DSI_T_INIT_MASTER_1 0x6b888
+#define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \
+ _ICL_DSI_T_INIT_MASTER_0,\
+ _ICL_DSI_T_INIT_MASTER_1)
+#define DSI_T_INIT_MASTER_MASK REG_GENMASK(15, 0)
+
+#define _DPHY_CLK_TIMING_PARAM_0 0x162180
+#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
+#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_CLK_TIMING_PARAM_0,\
+ _DPHY_CLK_TIMING_PARAM_1)
+#define _DSI_CLK_TIMING_PARAM_0 0x6b080
+#define _DSI_CLK_TIMING_PARAM_1 0x6b880
+#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_CLK_TIMING_PARAM_0,\
+ _DSI_CLK_TIMING_PARAM_1)
+#define CLK_PREPARE_OVERRIDE (1 << 31)
+#define CLK_PREPARE(x) ((x) << 28)
+#define CLK_PREPARE_MASK (0x7 << 28)
+#define CLK_PREPARE_SHIFT 28
+#define CLK_ZERO_OVERRIDE (1 << 27)
+#define CLK_ZERO(x) ((x) << 20)
+#define CLK_ZERO_MASK (0xf << 20)
+#define CLK_ZERO_SHIFT 20
+#define CLK_PRE_OVERRIDE (1 << 19)
+#define CLK_PRE(x) ((x) << 16)
+#define CLK_PRE_MASK (0x3 << 16)
+#define CLK_PRE_SHIFT 16
+#define CLK_POST_OVERRIDE (1 << 15)
+#define CLK_POST(x) ((x) << 8)
+#define CLK_POST_MASK (0x7 << 8)
+#define CLK_POST_SHIFT 8
+#define CLK_TRAIL_OVERRIDE (1 << 7)
+#define CLK_TRAIL(x) ((x) << 0)
+#define CLK_TRAIL_MASK (0xf << 0)
+#define CLK_TRAIL_SHIFT 0
+
+#define _DPHY_DATA_TIMING_PARAM_0 0x162184
+#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
+#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_DATA_TIMING_PARAM_0,\
+ _DPHY_DATA_TIMING_PARAM_1)
+#define _DSI_DATA_TIMING_PARAM_0 0x6B084
+#define _DSI_DATA_TIMING_PARAM_1 0x6B884
+#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_DATA_TIMING_PARAM_0,\
+ _DSI_DATA_TIMING_PARAM_1)
+#define HS_PREPARE_OVERRIDE (1 << 31)
+#define HS_PREPARE(x) ((x) << 24)
+#define HS_PREPARE_MASK (0x7 << 24)
+#define HS_PREPARE_SHIFT 24
+#define HS_ZERO_OVERRIDE (1 << 23)
+#define HS_ZERO(x) ((x) << 16)
+#define HS_ZERO_MASK (0xf << 16)
+#define HS_ZERO_SHIFT 16
+#define HS_TRAIL_OVERRIDE (1 << 15)
+#define HS_TRAIL(x) ((x) << 8)
+#define HS_TRAIL_MASK (0x7 << 8)
+#define HS_TRAIL_SHIFT 8
+#define HS_EXIT_OVERRIDE (1 << 7)
+#define HS_EXIT(x) ((x) << 0)
+#define HS_EXIT_MASK (0x7 << 0)
+#define HS_EXIT_SHIFT 0
+
+#define _DPHY_TA_TIMING_PARAM_0 0x162188
+#define _DPHY_TA_TIMING_PARAM_1 0x6c188
+#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_TA_TIMING_PARAM_0,\
+ _DPHY_TA_TIMING_PARAM_1)
+#define _DSI_TA_TIMING_PARAM_0 0x6b098
+#define _DSI_TA_TIMING_PARAM_1 0x6b898
+#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_TA_TIMING_PARAM_0,\
+ _DSI_TA_TIMING_PARAM_1)
+#define TA_SURE_OVERRIDE (1 << 31)
+#define TA_SURE(x) ((x) << 16)
+#define TA_SURE_MASK (0x1f << 16)
+#define TA_SURE_SHIFT 16
+#define TA_GO_OVERRIDE (1 << 15)
+#define TA_GO(x) ((x) << 8)
+#define TA_GO_MASK (0xf << 8)
+#define TA_GO_SHIFT 8
+#define TA_GET_OVERRIDE (1 << 7)
+#define TA_GET(x) ((x) << 0)
+#define TA_GET_MASK (0xf << 0)
+#define TA_GET_SHIFT 0
+
+/* DSI transcoder configuration */
+#define _DSI_TRANS_FUNC_CONF_0 0x6b030
+#define _DSI_TRANS_FUNC_CONF_1 0x6b830
+#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
+ _DSI_TRANS_FUNC_CONF_0,\
+ _DSI_TRANS_FUNC_CONF_1)
+#define OP_MODE_MASK (0x3 << 28)
+#define OP_MODE_SHIFT 28
+#define CMD_MODE_NO_GATE (0x0 << 28)
+#define CMD_MODE_TE_GATE (0x1 << 28)
+#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
+#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define TE_SOURCE_GPIO (1 << 27)
+#define LINK_READY (1 << 20)
+#define PIX_FMT_MASK (0x3 << 16)
+#define PIX_FMT_SHIFT 16
+#define PIX_FMT_RGB565 (0x0 << 16)
+#define PIX_FMT_RGB666_PACKED (0x1 << 16)
+#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
+#define PIX_FMT_RGB888 (0x3 << 16)
+#define PIX_FMT_RGB101010 (0x4 << 16)
+#define PIX_FMT_RGB121212 (0x5 << 16)
+#define PIX_FMT_COMPRESSED (0x6 << 16)
+#define BGR_TRANSMISSION (1 << 15)
+#define PIX_VIRT_CHAN(x) ((x) << 12)
+#define PIX_VIRT_CHAN_MASK (0x3 << 12)
+#define PIX_VIRT_CHAN_SHIFT 12
+#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
+#define PIX_BUF_THRESHOLD_SHIFT 10
+#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
+#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
+#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
+#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
+#define CONTINUOUS_CLK_MASK (0x3 << 8)
+#define CONTINUOUS_CLK_SHIFT 8
+#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
+#define CLK_HS_OR_LP (0x2 << 8)
+#define CLK_HS_CONTINUOUS (0x3 << 8)
+#define LINK_CALIBRATION_MASK (0x3 << 4)
+#define LINK_CALIBRATION_SHIFT 4
+#define CALIBRATION_DISABLED (0x0 << 4)
+#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
+#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define BLANKING_PACKET_ENABLE (1 << 2)
+#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
+#define EOTP_DISABLED (1 << 0)
+
+#define _DSI_CMD_RXCTL_0 0x6b0d4
+#define _DSI_CMD_RXCTL_1 0x6b8d4
+#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_RXCTL_0,\
+ _DSI_CMD_RXCTL_1)
+#define READ_UNLOADS_DW (1 << 16)
+#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
+#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
+#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
+#define RECEIVED_RESET_TRIGGER (1 << 12)
+#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
+#define RECEIVED_CRC_WAS_LOST (1 << 10)
+#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
+#define NUMBER_RX_PLOAD_DW_SHIFT 0
+
+#define _DSI_CMD_TXCTL_0 0x6b0d0
+#define _DSI_CMD_TXCTL_1 0x6b8d0
+#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXCTL_0,\
+ _DSI_CMD_TXCTL_1)
+#define KEEP_LINK_IN_HS (1 << 24)
+#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
+#define FREE_HEADER_CREDIT_SHIFT 0x8
+#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
+#define FREE_PLOAD_CREDIT_SHIFT 0
+#define MAX_HEADER_CREDIT 0x10
+#define MAX_PLOAD_CREDIT 0x40
+
+#define _DSI_CMD_TXHDR_0 0x6b100
+#define _DSI_CMD_TXHDR_1 0x6b900
+#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXHDR_0,\
+ _DSI_CMD_TXHDR_1)
+#define PAYLOAD_PRESENT (1 << 31)
+#define LP_DATA_TRANSFER (1 << 30)
+#define VBLANK_FENCE (1 << 29)
+#define PARAM_WC_MASK (0xffff << 8)
+#define PARAM_WC_LOWER_SHIFT 8
+#define PARAM_WC_UPPER_SHIFT 16
+#define VC_MASK (0x3 << 6)
+#define VC_SHIFT 6
+#define DT_MASK (0x3f << 0)
+#define DT_SHIFT 0
+
+#define _DSI_CMD_TXPYLD_0 0x6b104
+#define _DSI_CMD_TXPYLD_1 0x6b904
+#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXPYLD_0,\
+ _DSI_CMD_TXPYLD_1)
+
+#define _DSI_LP_MSG_0 0x6b0d8
+#define _DSI_LP_MSG_1 0x6b8d8
+#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
+ _DSI_LP_MSG_0,\
+ _DSI_LP_MSG_1)
+#define LPTX_IN_PROGRESS (1 << 17)
+#define LINK_IN_ULPS (1 << 16)
+#define LINK_ULPS_TYPE_LP11 (1 << 8)
+#define LINK_ENTER_ULPS (1 << 0)
+
+/* DSI timeout registers */
+#define _DSI_HSTX_TO_0 0x6b044
+#define _DSI_HSTX_TO_1 0x6b844
+#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
+ _DSI_HSTX_TO_0,\
+ _DSI_HSTX_TO_1)
+#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define HSTX_TIMEOUT_VALUE_SHIFT 16
+#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
+#define HSTX_TIMED_OUT (1 << 0)
+
+#define _DSI_LPRX_HOST_TO_0 0x6b048
+#define _DSI_LPRX_HOST_TO_1 0x6b848
+#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
+ _DSI_LPRX_HOST_TO_0,\
+ _DSI_LPRX_HOST_TO_1)
+#define LPRX_TIMED_OUT (1 << 16)
+#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define LPRX_TIMEOUT_VALUE_SHIFT 0
+#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_PWAIT_TO_0 0x6b040
+#define _DSI_PWAIT_TO_1 0x6b840
+#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
+ _DSI_PWAIT_TO_0,\
+ _DSI_PWAIT_TO_1)
+#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define PRESET_TIMEOUT_VALUE_SHIFT 16
+#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
+#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
+#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_TA_TO_0 0x6b04c
+#define _DSI_TA_TO_1 0x6b84c
+#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
+ _DSI_TA_TO_0,\
+ _DSI_TA_TO_1)
+#define TA_TIMED_OUT (1 << 16)
+#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define TA_TIMEOUT_VALUE_SHIFT 0
+#define TA_TIMEOUT_VALUE(x) ((x) << 0)
+
+#endif /* __ICL_DSI_REGS_H__ */
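Usage-wise, the per-port macros pick one of the two fixed MMIO offsets and the
masks support the usual read-modify-write flow. A fragment mirroring the
icl_dsi.c hunk above (intel_de_read/intel_de_write are the real i915 accessors;
dev_priv, port and init_count come from the surrounding code):

    u32 tmp;

    tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
    tmp &= ~DSI_T_INIT_MASTER_MASK;         /* REG_GENMASK(15, 0) */
    tmp |= init_count;                      /* new 16-bit timer value */
    intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);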
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 093904065112..e0667d163266 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -281,17 +281,6 @@ void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
intel_crtc_put_color_blobs(crtc_state);
}
-void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state,
- const struct intel_crtc_state *from_crtc_state)
-{
- drm_property_replace_blob(&crtc_state->hw.degamma_lut,
- from_crtc_state->uapi.degamma_lut);
- drm_property_replace_blob(&crtc_state->hw.gamma_lut,
- from_crtc_state->uapi.gamma_lut);
- drm_property_replace_blob(&crtc_state->hw.ctm,
- from_crtc_state->uapi.ctm);
-}
-
/**
* intel_crtc_destroy_state - destroy crtc state
* @crtc: drm crtc
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index d2700c74c9da..1dc439983dd9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -44,8 +44,6 @@ struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
-void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state,
- const struct intel_crtc_state *from_crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_free(struct drm_atomic_state *state);
void intel_atomic_state_clear(struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index bec02333bdeb..c53aa6a4c7a0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -45,6 +45,7 @@
#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
+#include "skl_scaler.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
@@ -322,6 +323,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->scaled_planes &= ~BIT(plane->id);
crtc_state->nv12_planes &= ~BIT(plane->id);
crtc_state->c8_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
@@ -330,6 +332,185 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
plane_state->uapi.visible = false;
}
+/* FIXME nuke when all wm code is atomic */
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
+ struct intel_plane_state *new)
+{
+ /* Update watermarks on tiling or size changes. */
+ if (new->uapi.visible != cur->uapi.visible)
+ return true;
+
+ if (!cur->hw.fb || !new->hw.fb)
+ return false;
+
+ if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+ cur->hw.rotation != new->hw.rotation ||
+ drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+ drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+ drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+ drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
+ return true;
+
+ return false;
+}
+
+static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state)
+{
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ int dst_w = drm_rect_width(&plane_state->uapi.dst);
+ int dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ return src_w != dst_w || src_h != dst_h;
+}
+
+static bool intel_plane_do_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ if (!plane->async_flip)
+ return false;
+
+ if (!new_crtc_state->uapi.async_flip)
+ return false;
+
+ /*
+ * On display version 13 and newer platforms we might need to
+ * override the first async flip in order to change watermark
+ * levels as part of an optimization, so for those we check
+ * whether this is the first async flip. For platforms earlier
+ * than display version 13 we always do the async flip.
+ */
+ return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
+}
+
+static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *new_plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
+ bool was_crtc_enabled = old_crtc_state->hw.active;
+ bool is_crtc_enabled = new_crtc_state->hw.active;
+ bool turn_off, turn_on, visible, was_visible;
+ int ret;
+
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+ ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
+ if (ret)
+ return ret;
+ }
+
+ was_visible = old_plane_state->uapi.visible;
+ visible = new_plane_state->uapi.visible;
+
+ if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
+ was_visible = false;
+
+ /*
+ * Visibility is calculated as if the crtc was on, but
+ * after scaler setup everything depends on it being off
+ * when the crtc isn't active.
+ *
+ * FIXME this is wrong for watermarks. Watermarks should also
+ * be computed as if the pipe would be active. Perhaps move
+ * per-plane wm computation to the .check_plane() hook, and
+ * only combine the results from all planes in the current place?
+ */
+ if (!is_crtc_enabled) {
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ visible = false;
+ }
+
+ if (!was_visible && !visible)
+ return 0;
+
+ turn_off = was_visible && (!visible || mode_changed);
+ turn_on = visible && (!was_visible || mode_changed);
+
+ drm_dbg_atomic(&dev_priv->drm,
+ "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ was_visible, visible,
+ turn_off, turn_on, mode_changed);
+
+ if (turn_on) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ new_crtc_state->update_wm_pre = true;
+
+ /* must disable cxsr around plane enable/disable */
+ if (plane->id != PLANE_CURSOR)
+ new_crtc_state->disable_cxsr = true;
+ } else if (turn_off) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ new_crtc_state->update_wm_post = true;
+
+ /* must disable cxsr around plane enable/disable */
+ if (plane->id != PLANE_CURSOR)
+ new_crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+ /* FIXME bollocks */
+ new_crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_post = true;
+ }
+ }
+
+ if (visible || was_visible)
+ new_crtc_state->fb_bits |= plane->frontbuffer_bit;
+
+ /*
+ * ILK/SNB DVSACNTR/Sprite Enable
+ * IVB SPR_CTL/Sprite Enable
+ * "When in Self Refresh Big FIFO mode, a write to enable the
+ * plane will be internally buffered and delayed while Big FIFO
+ * mode is exiting."
+ *
+ * Which means that enabling the sprite can take an extra frame
+ * when we start in big FIFO mode (LP1+). Thus we need to drop
+ * down to LP0 and wait for vblank in order to make sure the
+ * sprite gets enabled on the next vblank after the register write.
+ * Doing otherwise would risk enabling the sprite one frame after
+ * we've already signalled flip completion. We can resume LP1+
+ * once the sprite has been enabled.
+ *
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ * IVB SPR_SCALE/Scaling Enable
+ * "Low Power watermarks must be disabled for at least one
+ * frame before enabling sprite scaling, and kept disabled
+ * until sprite scaling is disabled."
+ *
+ * ILK/SNB DVSASCALE/Scaling Enable
+ * "When in Self Refresh Big FIFO mode, scaling enable will be
+ * masked off while Big FIFO mode is exiting."
+ *
+ * Despite the w/a only being listed for IVB we assume that
+ * the ILK/SNB note has similar ramifications, hence we apply
+ * the w/a on all three platforms.
+ *
+ * Based on experimental results this seems to be needed also
+ * for the primary plane, not only the sprite plane.
+ */
+ if (plane->id != PLANE_CURSOR &&
+ (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)) &&
+ (turn_on || (!intel_plane_is_scaled(old_plane_state) &&
+ intel_plane_is_scaled(new_plane_state))))
+ new_crtc_state->disable_lp_wm = true;
+
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ new_plane_state->do_async_flip = true;
+
+ return 0;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -357,6 +538,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->uapi.visible &&
+ intel_plane_is_scaled(new_plane_state))
+ new_crtc_state->scaled_planes |= BIT(plane->id);
+
+ if (new_plane_state->uapi.visible &&
intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
new_crtc_state->nv12_planes |= BIT(plane->id);
@@ -403,10 +588,11 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state && new_crtc_state->bigjoiner_slave) {
+ if (new_crtc_state && intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ struct intel_crtc *master_crtc =
+ intel_master_crtc(new_crtc_state);
struct intel_plane *master_plane =
- intel_crtc_get_plane(new_crtc_state->bigjoiner_linked_crtc,
- plane->id);
+ intel_crtc_get_plane(master_crtc, plane->id);
new_master_plane_state =
intel_atomic_get_new_plane_state(state, master_plane);
@@ -507,8 +693,8 @@ void intel_plane_disable_arm(struct intel_plane *plane,
plane->disable_arm(plane, crtc_state);
}
-void intel_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -536,8 +722,8 @@ void intel_update_planes_on_crtc(struct intel_atomic_state *state,
}
}
-void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -571,8 +757,8 @@ void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
}
}
-void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -597,6 +783,17 @@ void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
}
}
+void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (DISPLAY_VER(i915) >= 9)
+ skl_crtc_planes_update_arm(state, crtc);
+ else
+ i9xx_crtc_planes_update_arm(state, crtc);
+}
+
int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
@@ -633,7 +830,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
}
/* right side of the image is on the slave crtc, adjust dst to match */
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
drm_rect_translate(dst, -crtc_state->pipe_src_w, 0);
/*
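One detail worth spelling out from intel_plane_is_scaled(): plane source
rectangles in uapi.src are kept in 16.16 fixed point while destination
rectangles are plain pixels, hence the >> 16. A worked fragment:

    /* A 1920-pixel-wide source stored as 16.16 fixed point: */
    int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; /* (1920 << 16) >> 16 == 1920 */
    int dst_w = drm_rect_width(&plane_state->uapi.dst);       /* already integer pixels */

    bool scaled = src_w != dst_w; /* e.g. 1920 -> 1280 downscale => true */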
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index ead789709477..f4763a53541e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -44,22 +44,16 @@ void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
-void intel_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
int intel_plane_atomic_check(struct intel_atomic_state *state,
struct intel_plane *plane);
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *plane_state);
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index aec0efd5350e..40b5e7ed12c2 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -596,6 +596,12 @@ parse_general_features(struct drm_i915_private *i915,
} else {
i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
+
+ if (bdb->version >= 249 && general->afc_startup_config) {
+ i915->vbt.override_afc_startup = true;
+ i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ }
+
drm_dbg_kms(&i915->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
i915->vbt.int_tv_support,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 5dce3cf0ed12..ad1564ca7269 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -10,6 +10,7 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_pm.h"
@@ -673,6 +674,49 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
+ enum plane_id plane_id;
+
+ memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+ unsigned int data_rate = crtc_state->data_rate[plane_id];
+ unsigned int dbuf_mask = 0;
+ enum dbuf_slice slice;
+
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_y);
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_uv);
+
+ /*
+ * FIXME: To calculate this more properly we probably need
+ * to split the per-plane data_rate into data_rate_y and
+ * data_rate_uv for multiplanar formats so that planes do
+ * not get accounted twice if they happen to reside on
+ * different slices.
+ * However, for pre-icl this works anyway because we have
+ * only a single slice, and for icl+ the uv plane has a
+ * non-zero data rate.
+ * So in the worst case these calculations are a bit
+ * pessimistic, which shouldn't pose any significant
+ * problem anyway.
+ */
+ for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask)
+ crtc_bw->used_bw[slice] += data_rate;
+ }
+}
+
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -685,50 +729,13 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- enum plane_id plane_id;
- struct intel_dbuf_bw *crtc_bw;
-
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
- crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];
-
- memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
-
- if (!crtc_state->hw.active)
- continue;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *plane_alloc =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_ddb_entry *uv_plane_alloc =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
- unsigned int data_rate = crtc_state->data_rate[plane_id];
- unsigned int dbuf_mask = 0;
- enum dbuf_slice slice;
-
- dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
- dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
-
- /*
- * FIXME: To calculate that more properly we probably
- * need to to split per plane data_rate into data_rate_y
- * and data_rate_uv for multiplanar formats in order not
- * to get accounted those twice if they happen to reside
- * on different slices.
- * However for pre-icl this would work anyway because
- * we have only single slice and for icl+ uv plane has
- * non-zero data rate.
- * So in worst case those calculation are a bit
- * pessimistic, which shouldn't pose any significant
- * problem anyway.
- */
- for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask)
- crtc_bw->used_bw[slice] += data_rate;
- }
+ skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
}
if (!old_bw_state)
@@ -809,25 +816,11 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state)
+static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *new_crtc_state, *old_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- unsigned int data_rate;
- unsigned int num_active_planes;
- struct intel_crtc *crtc;
- int i, ret;
- u32 allowed_points = 0;
- unsigned int max_bw_point = 0, max_bw = 0;
- unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
- unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
- u32 mask = 0;
-
- /* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(dev_priv) < 11)
- return 0;
+ unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points;
+ u16 mask = 0;
/*
* We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
@@ -840,6 +833,16 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (num_psf_gv_points > 0)
mask |= REG_GENMASK(num_psf_gv_points - 1, 0) << ADLS_PSF_PT_SHIFT;
+ return mask;
+}
+
+static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
unsigned int old_data_rate =
@@ -850,6 +853,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
intel_bw_crtc_num_active_planes(old_crtc_state);
unsigned int new_active_planes =
intel_bw_crtc_num_active_planes(new_crtc_state);
+ struct intel_bw_state *new_bw_state;
/*
* Avoid locking the bw state when
@@ -866,14 +870,53 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state->data_rate[crtc->pipe] = new_data_rate;
new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
- drm_dbg_kms(&dev_priv->drm,
- "pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
+ *changed = true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] data rate %u num active planes %u\n",
+ crtc->base.base.id, crtc->base.name,
new_bw_state->data_rate[crtc->pipe],
new_bw_state->num_active_planes[crtc->pipe]);
}
- if (!new_bw_state)
+ return 0;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+ unsigned int data_rate;
+ unsigned int num_active_planes;
+ int i, ret;
+ u32 allowed_points = 0;
+ unsigned int max_bw_point = 0, max_bw = 0;
+ unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
+ unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+ bool changed = false;
+
+ /* FIXME earlier gens need some checks too */
+ if (DISPLAY_VER(dev_priv) < 11)
+ return 0;
+
+ ret = intel_bw_check_data_rate(state, &changed);
+ if (ret)
+ return ret;
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+
+ if (new_bw_state &&
+ intel_can_enable_sagv(dev_priv, old_bw_state) !=
+ intel_can_enable_sagv(dev_priv, new_bw_state))
+ changed = true;
+
+ /*
+ * If none of our inputs (data rates, number of active
+ * planes, SAGV yes/no) changed, there is nothing to do here.
+ */
+ if (!changed)
return 0;
ret = intel_atomic_lock_global_state(&new_bw_state->base);
@@ -957,9 +1000,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = ~allowed_points & mask;
+ new_bw_state->qgv_points_mask = ~allowed_points &
+ icl_qgv_points_mask(dev_priv);
- old_bw_state = intel_atomic_get_old_bw_state(state);
/*
* If the actual mask had changed we need to make sure that
* the commits are serialized(in case this is a nomodeset, nonblocking)
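For reference, the mask icl_qgv_points_mask() builds keeps QGV points in the low
bits and PSF GV points above them. A worked example with illustrative counts
(4 QGV points, 2 PSF GV points) and assuming ADLS_PSF_PT_SHIFT is 8:

    u16 mask = 0;

    mask |= REG_GENMASK(4 - 1, 0);          /* QGV points: 0x000f */
    mask |= REG_GENMASK(2 - 1, 0) << 8;     /* PSF points: 0x0300 */
    /*
     * mask == 0x030f -- more than 8 significant bits, which is why
     * qgv_points_mask is widened from u8 to u16 in intel_bw.h below.
     */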
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 46c6eecbd917..0ceaed1c9656 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -30,19 +30,19 @@ struct intel_bw_state {
*/
u8 pipe_sagv_reject;
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
/*
* Current QGV points mask, which restricts
* some particular SAGV states, not to confuse
* with pipe_sagv_mask.
*/
- u8 qgv_points_mask;
+ u16 qgv_points_mask;
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
- /* bitmask of active pipes */
- u8 active_pipes;
-
int min_cdclk;
};
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 4b140a014ca8..8888fda8b701 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -23,6 +23,7 @@
#include <linux/time.h>
+#include "hsw_ips.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
@@ -31,6 +32,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_psr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index de3ded1e327a..e94ec57260f1 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -28,6 +28,25 @@
#include "intel_dpll.h"
#include "vlv_dsi_pll.h"
+struct intel_color_funcs {
+ int (*color_check)(struct intel_crtc_state *crtc_state);
+ /*
+ * Program double buffered color management registers during
+ * vblank evasion. The registers should then latch during the
+ * next vblank start, alongside any other double buffered registers
+ * involved with the same commit.
+ */
+ void (*color_commit)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Load LUTs (and other single buffered color management
+ * registers). Will (hopefully) be called during the vblank
+ * following the latching of any double buffered registers
+ * involved with the same commit.
+ */
+ void (*load_luts)(const struct intel_crtc_state *crtc_state);
+ void (*read_luts)(struct intel_crtc_state *crtc_state);
+};
+
#define CTM_COEFF_SIGN (1ULL << 63)
#define CTM_COEFF_1_0 (1ULL << 32)
@@ -160,29 +179,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
- coeff[0] << 16 | coeff[1]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
- coeff[3] << 16 | coeff[4]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
- coeff[6] << 16 | coeff[7]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
if (DISPLAY_VER(dev_priv) >= 7) {
- intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
- postoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
- postoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
- postoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
+ postoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
+ postoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
+ postoff[2]);
}
}
@@ -194,28 +213,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
- coeff[0] << 16 | coeff[1]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
- coeff[2] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+ coeff[2] << 16);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
- coeff[3] << 16 | coeff[4]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
- coeff[5] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+ coeff[5] << 16);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
- coeff[6] << 16 | coeff[7]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
- coeff[8] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+ coeff[8] << 16);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
@@ -319,8 +338,8 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_off_zero);
}
- intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
@@ -346,8 +365,8 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_postoff_limited_range);
}
- intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void chv_load_cgm_csc(struct intel_crtc *crtc,
@@ -377,16 +396,16 @@ static void chv_load_cgm_csc(struct intel_crtc *crtc,
coeffs[i] |= (abs_coeff >> 20) & 0xfff;
}
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
- coeffs[8]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
}
/* convert hw value with given bit_precision to lut property val */
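The intel_color.c hunks above convert the pipe CSC programming from intel_de_write() to intel_de_write_fw(). The _fw variants skip the per-access forcewake bookkeeping that the plain helpers perform, which adds up when a long run of coefficient registers is written back to back. A minimal sketch of the wrapper, assuming the intel_de.h definition current at the time of this series:

static inline void
intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
{
	/* raw MMIO write: no forcewake get/put around the access */
	intel_uncore_write_fw(&i915->uncore, reg, val);
}

The caller is responsible for ensuring the register is accessible without forcewake; display registers generally sit outside the GT forcewake domains, so the cheaper write is safe here.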
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 354b08d6f81d..e4260806c2a4 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2703,6 +2703,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+ struct intel_crtc *slave_crtc;
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -2721,9 +2722,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
ilk_pfit_disable(old_crtc_state);
}
- if (old_crtc_state->bigjoiner_linked_crtc) {
- struct intel_crtc *slave_crtc =
- old_crtc_state->bigjoiner_linked_crtc;
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) {
const struct intel_crtc_state *old_slave_crtc_state =
intel_atomic_get_old_crtc_state(state, slave_crtc);
@@ -2926,7 +2926,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
{
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
- if (!crtc_state->bigjoiner_slave)
+ if (!intel_crtc_is_bigjoiner_slave(crtc_state))
intel_ddi_enable_transcoder_func(encoder, crtc_state);
intel_vrr_enable(encoder, crtc_state);
@@ -3041,6 +3041,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
int required_lanes = crtc_state ? crtc_state->lane_count : 1;
@@ -3050,11 +3051,12 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
intel_tc_port_get_link(enc_to_dig_port(encoder),
required_lanes);
if (crtc_state && crtc_state->hw.active) {
- struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc;
+ struct intel_crtc *slave_crtc;
intel_update_active_dpll(state, crtc, encoder);
- if (slave_crtc)
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state))
intel_update_active_dpll(state, slave_crtc, encoder);
}
}
@@ -3099,10 +3101,23 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
crtc_state->lane_lat_optim_mask);
}
+static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ int ln;
+
+ for (ln = 0; ln < 2; ln++) {
+ intel_de_write(i915, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+ intel_de_rmw(i915, DKL_PCS_DW5(tc_port), DKL_PCS_DW5_CORE_SOFTRESET, 0);
+ }
+}
+
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u32 dp_tp_ctl, ddi_buf_ctl;
@@ -3138,6 +3153,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ if (IS_ALDERLAKE_P(dev_priv) &&
+ (intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
+ adlp_tbt_to_dp_alt_switch_wa(encoder);
+
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
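The new adlp_tbt_to_dp_alt_switch_wa() above touches the Dekel PHY, whose per-lane registers are banked: a write to HIP_INDEX_REG selects which lane's bank subsequent DKL_* accesses hit. intel_de_rmw(i915, reg, clear, set) is a read-modify-write helper, so the loop expands to roughly the following open-coded equivalent (for illustration only):

	int ln;

	for (ln = 0; ln < 2; ln++) {
		u32 val;

		/* select lane ln's register bank on this TC port */
		intel_de_write(i915, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, ln));

		/* clear only the PCS soft-reset bit, preserving the rest */
		val = intel_de_read(i915, DKL_PCS_DW5(tc_port));
		val &= ~DKL_PCS_DW5_CORE_SOFTRESET;
		intel_de_write(i915, DKL_PCS_DW5(tc_port), val);
	}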
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7f512f9e9e5c..80b19c304c43 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -74,6 +74,7 @@
#include "g4x_dp.h"
#include "g4x_hdmi.h"
+#include "hsw_ips.h"
#include "i915_drv.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
@@ -112,9 +113,10 @@
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
+#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
-#include "vlv_dsi.h"
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
@@ -337,10 +339,38 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
is_trans_port_sync_slave(crtc_state);
}
-static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
+static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
+{
+ return ffs(crtc_state->bigjoiner_pipes) - 1;
+}
+
+u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
+}
+
+bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->bigjoiner_slave)
- return crtc_state->bigjoiner_linked_crtc;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return crtc_state->bigjoiner_pipes &&
+ crtc->pipe != bigjoiner_master_pipe(crtc_state);
+}
+
+bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return crtc_state->bigjoiner_pipes &&
+ crtc->pipe == bigjoiner_master_pipe(crtc_state);
+}
+
+struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
else
return to_intel_crtc(crtc_state->uapi.crtc);
}
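The bigjoiner bookkeeping above replaces the bigjoiner_slave flag and bigjoiner_linked_crtc pointer with a single bigjoiner_pipes bitmask, stored identically in every crtc state of the joined set; the master is by convention the lowest set bit. A worked example with a hypothetical pipe pair (pipes chosen for illustration, not taken from this diff):

	u8 pipes = BIT(PIPE_B) | BIT(PIPE_C);	/* 0b0110, same mask in both states */
	enum pipe master = ffs(pipes) - 1;	/* lowest set bit -> PIPE_B */
	u8 slaves = pipes & ~BIT(master);	/* BIT(PIPE_C) */

Note that ffs() is 1-based (hence the -1) and returns 0 for an empty mask, which is why intel_crtc_is_bigjoiner_slave()/_master() test bigjoiner_pipes for non-zero before comparing pipes.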
@@ -752,8 +782,11 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
- if (plane->id == PLANE_PRIMARY)
- hsw_disable_ips(crtc_state);
+ if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
+ hsw_ips_disable(crtc_state)) {
+ crtc_state->ips_enabled = false;
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
/*
* Vblank time updates from the shadow to live plane control register
@@ -1090,72 +1123,6 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!crtc_state->ips_enabled)
- return;
-
- /*
- * We can only enable IPS after we enable a plane and wait for a vblank
- * This function is called from post_plane_update, which is run after
- * a vblank wait.
- */
- drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
-
- if (IS_BROADWELL(dev_priv)) {
- drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
- /* Quoting Art Runyan: "its not safe to expect any particular
- * value in IPS_CTL bit 31 after enabling IPS through the
- * mailbox." Moreover, the mailbox may return a bogus state,
- * so we need to just enable it and continue on.
- */
- } else {
- intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
- /* The bit only becomes 1 in the next vblank, so this wait here
- * is essentially intel_wait_for_vblank. If we don't have this
- * and don't wait for vblanks until the end of crtc_enable, then
- * the HW state readout code will complain that the expected
- * IPS_CTL value is not the one we read. */
- if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
- drm_err(&dev_priv->drm,
- "Timed out waiting for IPS enable\n");
- }
-}
-
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!crtc_state->ips_enabled)
- return;
-
- if (IS_BROADWELL(dev_priv)) {
- drm_WARN_ON(dev,
- snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
- /*
- * Wait for PCODE to finish disabling IPS. The BSpec specified
- * 42ms timeout value leads to occasional timeouts so use 100ms
- * instead.
- */
- if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
- drm_err(&dev_priv->drm,
- "Timed out waiting for IPS disable\n");
- } else {
- intel_de_write(dev_priv, IPS_CTL, 0);
- intel_de_posting_read(dev_priv, IPS_CTL);
- }
-
- /* We need to wait for a vblank before we can disable the plane. */
- intel_crtc_wait_for_next_vblank(crtc);
-}
-
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
if (crtc->overlay)
@@ -1166,67 +1133,6 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
*/
}
-static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!old_crtc_state->ips_enabled)
- return false;
-
- if (intel_crtc_needs_modeset(new_crtc_state))
- return true;
-
- /*
- * Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- *
- * Disable IPS before we program the LUT.
- */
- if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe) &&
- new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
- return true;
-
- return !new_crtc_state->ips_enabled;
-}
-
-static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!new_crtc_state->ips_enabled)
- return false;
-
- if (intel_crtc_needs_modeset(new_crtc_state))
- return true;
-
- /*
- * Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- *
- * Re-enable IPS after the LUT has been programmed.
- */
- if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe) &&
- new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
- return true;
-
- /*
- * We can't read out IPS on broadwell, assume the worst and
- * forcibly enable IPS on the first fastset.
- */
- if (new_crtc_state->update_pipe && old_crtc_state->inherited)
- return true;
-
- return !old_crtc_state->ips_enabled;
-}
-
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -1321,9 +1227,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
intel_update_watermarks(dev_priv);
- if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
- hsw_enable_ips(new_crtc_state);
-
+ hsw_ips_post_update(state, crtc);
intel_fbc_post_update(state, crtc);
intel_drrs_page_flip(state, crtc);
@@ -1426,8 +1330,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_psr_pre_plane_update(state, crtc);
- if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
- hsw_disable_ips(old_crtc_state);
+ if (hsw_ips_pre_update(state, crtc))
+ intel_crtc_wait_for_next_vblank(crtc);
if (intel_fbc_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
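The HSW/BDW IPS code removed from this file moves into a new hsw_ips.c, added elsewhere in the series and not part of this excerpt. The interface can be reconstructed from the call sites above; the boolean returns hand the required vblank waits back to the caller, mirroring how intel_fbc_pre_update() is already handled:

/* hsw_ips.h, as inferred from the call sites in this diff */
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state);
bool hsw_ips_pre_update(struct intel_atomic_state *state,
			struct intel_crtc *crtc);
void hsw_ips_post_update(struct intel_atomic_state *state,
			 struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
			   struct intel_crtc *crtc);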
@@ -1905,12 +1809,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-/* IPS only exists on ULT machines and is tied to pipe A. */
-static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
-{
- return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
-}
-
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
enum pipe pipe, bool apply)
{
@@ -1974,34 +1872,18 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_state *master_crtc_state;
- struct intel_crtc *master_crtc;
- struct drm_connector_state *conn_state;
- struct drm_connector *conn;
- struct intel_encoder *encoder = NULL;
- int i;
-
- master_crtc = intel_master_crtc(crtc_state);
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
-
- for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
- if (conn_state->crtc != &master_crtc->base)
- continue;
-
- encoder = to_intel_encoder(conn_state->best_encoder);
- break;
- }
+ struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
/*
* Enable sequence steps 1-7 on bigjoiner master
*/
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
intel_encoders_pre_pll_enable(state, master_crtc);
if (crtc_state->shared_dpll)
intel_enable_shared_dpll(crtc_state);
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
intel_encoders_pre_enable(state, master_crtc);
}
@@ -2065,7 +1947,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
- if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder))
+ if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
+ !transcoder_is_dsi(cpu_transcoder))
hsw_configure_cpu_transcoder(new_crtc_state);
crtc->active = true;
@@ -2105,7 +1988,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
}
- if (new_crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
@@ -2190,7 +2073,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
* FIXME collapse everything to one hook.
* Need care with mst->ddi interactions.
*/
- if (!old_crtc_state->bigjoiner_slave) {
+ if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
}
@@ -2778,77 +2661,6 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
}
}
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* IPS only exists on ULT machines and is tied to pipe A. */
- if (!hsw_crtc_supports_ips(crtc))
- return false;
-
- if (!dev_priv->params.enable_ips)
- return false;
-
- if (crtc_state->pipe_bpp > 24)
- return false;
-
- /*
- * We compare against max which means we must take
- * the increased cdclk requirement into account when
- * calculating the new cdclk.
- *
- * Should measure whether using a lower cdclk w/o IPS
- */
- if (IS_BROADWELL(dev_priv) &&
- crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
- return false;
-
- return true;
-}
-
-static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->uapi.crtc->dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
-
- crtc_state->ips_enabled = false;
-
- if (!hsw_crtc_state_ips_capable(crtc_state))
- return 0;
-
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- if (crtc_state->crc_enabled)
- return 0;
-
- /* IPS should be fine as long as at least one plane is enabled. */
- if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
- return 0;
-
- if (IS_BROADWELL(dev_priv)) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
- return 0;
- }
-
- crtc_state->ips_enabled = true;
-
- return 0;
-}
-
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -3347,13 +3159,11 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 pipeconf;
-
- pipeconf = 0;
+ u32 pipeconf = 0;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= PIPECONF_ENABLE;
if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -4069,19 +3879,20 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
return tmp & TRANS_DDI_FUNC_ENABLE;
}
-static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
+static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
+ u8 *master_pipes, u8 *slave_pipes)
{
- u8 master_pipes = 0, slave_pipes = 0;
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ *master_pipes = 0;
+ *slave_pipes = 0;
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
+ bigjoiner_pipes(dev_priv)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
- if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
- continue;
-
power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
@@ -4090,9 +3901,9 @@ static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
continue;
if (tmp & MASTER_BIG_JOINER_ENABLE)
- master_pipes |= BIT(pipe);
+ *master_pipes |= BIT(pipe);
else
- slave_pipes |= BIT(pipe);
+ *slave_pipes |= BIT(pipe);
}
if (DISPLAY_VER(dev_priv) < 13)
@@ -4103,18 +3914,47 @@ static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
if (tmp & UNCOMPRESSED_JOINER_MASTER)
- master_pipes |= BIT(pipe);
+ *master_pipes |= BIT(pipe);
if (tmp & UNCOMPRESSED_JOINER_SLAVE)
- slave_pipes |= BIT(pipe);
+ *slave_pipes |= BIT(pipe);
}
}
/* Bigjoiner pipes should always be consecutive master and slave */
- drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
+ drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
"Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
- master_pipes, slave_pipes);
+ *master_pipes, *slave_pipes);
+}
- return slave_pipes;
+static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
+{
+ if ((slave_pipes & BIT(pipe)) == 0)
+ return pipe;
+
+	/* ignore our own pipe and everything above it */
+ master_pipes &= ~GENMASK(7, pipe);
+
+ /* highest remaining bit should be our master pipe */
+ return fls(master_pipes) - 1;
+}
+
+static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
+{
+ enum pipe master_pipe, next_master_pipe;
+
+ master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);
+
+ if ((master_pipes & BIT(master_pipe)) == 0)
+ return 0;
+
+ /* ignore our master pipe and everything below it */
+ master_pipes &= ~GENMASK(master_pipe, 0);
+ /* make sure a high bit is set for the ffs() */
+ master_pipes |= BIT(7);
+ /* lowest remaining bit should be the next master pipe */
+ next_master_pipe = ffs(master_pipes) - 1;
+
+ return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
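get_bigjoiner_master_pipe()/get_bigjoiner_slave_pipes() above reconstruct the per-pipe grouping from the two masks read out of hardware. A worked example with a hypothetical readout of two independent joiners A+B and C+D (not taken from this diff):

/*
 *	master_pipes = 0b0101	(A, C)
 *	slave_pipes  = 0b1010	(B, D)
 *
 * get_bigjoiner_master_pipe(PIPE_D, ...):
 *	D is a slave, so mask off D and everything above:
 *	0b0101 & ~GENMASK(7, 3) = 0b0101, fls() - 1 -> PIPE_C
 *
 * get_bigjoiner_slave_pipes(PIPE_C, ...):
 *	C is its own master; drop C and below from master_pipes:
 *	0b0101 & ~GENMASK(2, 0) = 0, then the BIT(7) sentinel makes
 *	ffs() report a "next master" of 7, so the slaves are
 *	slave_pipes & GENMASK(6, 2) = 0b1000 -> just PIPE_D
 */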
@@ -4133,6 +3973,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
enum transcoder cpu_transcoder;
+ u8 master_pipes, slave_pipes;
u8 enabled_transcoders = 0;
/*
@@ -4184,8 +4025,10 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
enabled_transcoders |= BIT(cpu_transcoder);
/* bigjoiner slave -> consider the master pipe's transcoder as well */
- if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
- cpu_transcoder = (enum transcoder) crtc->pipe - 1;
+ enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
+ if (slave_pipes & BIT(crtc->pipe)) {
+ cpu_transcoder = (enum transcoder)
+ get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
}
@@ -4310,6 +4153,24 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
+static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u8 master_pipes, slave_pipes;
+ enum pipe pipe = crtc->pipe;
+
+ enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);
+
+ if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
+ return;
+
+ crtc_state->bigjoiner = true;
+ crtc_state->bigjoiner_pipes =
+ BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
+ get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
+}
+
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -4336,8 +4197,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
goto out;
intel_dsc_get_config(pipe_config);
- if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
- intel_uncompressed_joiner_get_config(pipe_config);
+ intel_bigjoiner_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
@@ -4395,19 +4255,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
ilk_get_pfit_config(pipe_config);
}
- if (hsw_crtc_supports_ips(crtc)) {
- if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = intel_de_read(dev_priv,
- IPS_CTL) & IPS_ENABLE;
- else {
- /*
- * We cannot readout IPS state on broadwell, set to
- * true so we can set it to a defined state on first
- * commit.
- */
- pipe_config->ips_enabled = true;
- }
- }
+ hsw_ips_get_config(pipe_config);
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
@@ -4819,194 +4667,6 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
return mode;
}
-/**
- * intel_wm_need_update - Check whether watermarks need updating
- * @cur: current plane state
- * @new: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-static bool intel_wm_need_update(const struct intel_plane_state *cur,
- struct intel_plane_state *new)
-{
- /* Update watermarks on tiling or size changes. */
- if (new->uapi.visible != cur->uapi.visible)
- return true;
-
- if (!cur->hw.fb || !new->hw.fb)
- return false;
-
- if (cur->hw.fb->modifier != new->hw.fb->modifier ||
- cur->hw.rotation != new->hw.rotation ||
- drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
- drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
- drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
- drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
- return true;
-
- return false;
-}
-
-static bool needs_scaling(const struct intel_plane_state *state)
-{
- int src_w = drm_rect_width(&state->uapi.src) >> 16;
- int src_h = drm_rect_height(&state->uapi.src) >> 16;
- int dst_w = drm_rect_width(&state->uapi.dst);
- int dst_h = drm_rect_height(&state->uapi.dst);
-
- return (src_w != dst_w || src_h != dst_h);
-}
-
-static bool intel_plane_do_async_flip(struct intel_plane *plane,
- const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- if (!plane->async_flip)
- return false;
-
- if (!new_crtc_state->uapi.async_flip)
- return false;
-
- /*
- * In platforms after DISPLAY13, we might need to override
- * first async flip in order to change watermark levels
- * as part of optimization.
- * So for those, we are checking if this is a first async flip.
- * For platforms earlier than DISPLAY13 we always do async flip.
- */
- return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
-}
-
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *new_plane_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
- bool was_crtc_enabled = old_crtc_state->hw.active;
- bool is_crtc_enabled = new_crtc_state->hw.active;
- bool turn_off, turn_on, visible, was_visible;
- int ret;
-
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
- if (ret)
- return ret;
- }
-
- was_visible = old_plane_state->uapi.visible;
- visible = new_plane_state->uapi.visible;
-
- if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
- was_visible = false;
-
- /*
- * Visibility is calculated as if the crtc was on, but
- * after scaler setup everything depends on it being off
- * when the crtc isn't active.
- *
- * FIXME this is wrong for watermarks. Watermarks should also
- * be computed as if the pipe would be active. Perhaps move
- * per-plane wm computation to the .check_plane() hook, and
- * only combine the results from all planes in the current place?
- */
- if (!is_crtc_enabled) {
- intel_plane_set_invisible(new_crtc_state, new_plane_state);
- visible = false;
- }
-
- if (!was_visible && !visible)
- return 0;
-
- turn_off = was_visible && (!visible || mode_changed);
- turn_on = visible && (!was_visible || mode_changed);
-
- drm_dbg_atomic(&dev_priv->drm,
- "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- crtc->base.base.id, crtc->base.name,
- plane->base.base.id, plane->base.name,
- was_visible, visible,
- turn_off, turn_on, mode_changed);
-
- if (turn_on) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- new_crtc_state->update_wm_pre = true;
-
- /* must disable cxsr around plane enable/disable */
- if (plane->id != PLANE_CURSOR)
- new_crtc_state->disable_cxsr = true;
- } else if (turn_off) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- new_crtc_state->update_wm_post = true;
-
- /* must disable cxsr around plane enable/disable */
- if (plane->id != PLANE_CURSOR)
- new_crtc_state->disable_cxsr = true;
- } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- /* FIXME bollocks */
- new_crtc_state->update_wm_pre = true;
- new_crtc_state->update_wm_post = true;
- }
- }
-
- if (visible || was_visible)
- new_crtc_state->fb_bits |= plane->frontbuffer_bit;
-
- /*
- * ILK/SNB DVSACNTR/Sprite Enable
- * IVB SPR_CTL/Sprite Enable
- * "When in Self Refresh Big FIFO mode, a write to enable the
- * plane will be internally buffered and delayed while Big FIFO
- * mode is exiting."
- *
- * Which means that enabling the sprite can take an extra frame
- * when we start in big FIFO mode (LP1+). Thus we need to drop
- * down to LP0 and wait for vblank in order to make sure the
- * sprite gets enabled on the next vblank after the register write.
- * Doing otherwise would risk enabling the sprite one frame after
- * we've already signalled flip completion. We can resume LP1+
- * once the sprite has been enabled.
- *
- *
- * WaCxSRDisabledForSpriteScaling:ivb
- * IVB SPR_SCALE/Scaling Enable
- * "Low Power watermarks must be disabled for at least one
- * frame before enabling sprite scaling, and kept disabled
- * until sprite scaling is disabled."
- *
- * ILK/SNB DVSASCALE/Scaling Enable
- * "When in Self Refresh Big FIFO mode, scaling enable will be
- * masked off while Big FIFO mode is exiting."
- *
- * Despite the w/a only being listed for IVB we assume that
- * the ILK/SNB note has similar ramifications, hence we apply
- * the w/a on all three platforms.
- *
- * With experimental results seems this is needed also for primary
- * plane, not only sprite plane.
- */
- if (plane->id != PLANE_CURSOR &&
- (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)) &&
- (turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(new_plane_state))))
- new_crtc_state->disable_lp_wm = true;
-
- if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
- new_plane_state->do_async_flip = true;
-
- return 0;
-}
-
static bool encoders_cloneable(const struct intel_encoder *a,
const struct intel_encoder *b)
{
@@ -5266,7 +4926,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (mode_changed && crtc_state->hw.enable &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
- ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
+ ret = intel_dpll_crtc_compute_clock(crtc_state);
if (ret)
return ret;
}
@@ -5317,7 +4977,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
if (HAS_IPS(dev_priv)) {
- ret = hsw_compute_ips_config(crtc_state);
+ ret = hsw_ips_compute_config(state, crtc);
if (ret)
return ret;
}
@@ -5619,9 +5279,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
transcoder_name(pipe_config->master_transcoder),
pipe_config->sync_mode_slaves_mask);
- drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
- pipe_config->bigjoiner_slave ? "slave" :
- pipe_config->bigjoiner ? "master" : "no");
+ drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
enableddisabled(pipe_config->splitter.enable),
@@ -5818,35 +5479,42 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- const struct intel_crtc_state *master_crtc_state;
- struct intel_crtc *master_crtc;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
- master_crtc = intel_master_crtc(crtc_state);
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
- /* No need to copy state if the master state is unchanged */
- if (master_crtc_state)
- intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->uapi.degamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->uapi.gamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.ctm,
+ crtc_state->uapi.ctm);
}
static void
-intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
+
crtc_state->hw.enable = crtc_state->uapi.enable;
crtc_state->hw.active = crtc_state->uapi.active;
crtc_state->hw.mode = crtc_state->uapi.mode;
crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
- intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
return;
crtc_state->uapi.enable = crtc_state->hw.enable;
@@ -5857,7 +5525,6 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
- /* copy color blobs to uapi */
drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
crtc_state->hw.degamma_lut);
drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
@@ -5866,51 +5533,79 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->hw.ctm);
}
+static void
+copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
+ struct intel_crtc *slave_crtc)
+{
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
+ const struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+
+ drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
+ master_crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
+ master_crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&slave_crtc_state->hw.ctm,
+ master_crtc_state->hw.ctm);
+
+ slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
+}
+
static int
-copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
- const struct intel_crtc_state *from_crtc_state)
+copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *slave_crtc)
{
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
+ const struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
struct intel_crtc_state *saved_state;
- saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
+ saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
if (!saved_state)
return -ENOMEM;
- saved_state->uapi = crtc_state->uapi;
- saved_state->scaler_state = crtc_state->scaler_state;
- saved_state->shared_dpll = crtc_state->shared_dpll;
- saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
- saved_state->crc_enabled = crtc_state->crc_enabled;
+ /* preserve some things from the slave's original crtc state */
+ saved_state->uapi = slave_crtc_state->uapi;
+ saved_state->scaler_state = slave_crtc_state->scaler_state;
+ saved_state->shared_dpll = slave_crtc_state->shared_dpll;
+ saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
+ saved_state->crc_enabled = slave_crtc_state->crc_enabled;
- intel_crtc_free_hw_state(crtc_state);
- memcpy(crtc_state, saved_state, sizeof(*crtc_state));
+ intel_crtc_free_hw_state(slave_crtc_state);
+ memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
kfree(saved_state);
/* Re-init hw state */
- memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
- crtc_state->hw.enable = from_crtc_state->hw.enable;
- crtc_state->hw.active = from_crtc_state->hw.active;
- crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
- crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
+ memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
+ slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
+ slave_crtc_state->hw.active = master_crtc_state->hw.active;
+ slave_crtc_state->hw.mode = master_crtc_state->hw.mode;
+ slave_crtc_state->hw.pipe_mode = master_crtc_state->hw.pipe_mode;
+ slave_crtc_state->hw.adjusted_mode = master_crtc_state->hw.adjusted_mode;
+ slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
+
+ copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
/* Some fixups */
- crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
- crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
- crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
- crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
- crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
- crtc_state->bigjoiner_slave = true;
- crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
- crtc_state->has_audio = from_crtc_state->has_audio;
+ slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
+ slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
+ slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
+ slave_crtc_state->cpu_transcoder = master_crtc_state->cpu_transcoder;
+ slave_crtc_state->has_audio = master_crtc_state->has_audio;
return 0;
}
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
@@ -5940,7 +5635,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
- intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
+ intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
return 0;
}
@@ -6618,6 +6313,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.div0);
PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
@@ -6669,8 +6365,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
PIPE_CONF_CHECK_BOOL(bigjoiner);
- PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
- PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
+ PIPE_CONF_CHECK_X(bigjoiner_pipes);
PIPE_CONF_CHECK_I(dsc.compression_enable);
PIPE_CONF_CHECK_I(dsc.dsc_split);
@@ -7456,20 +7151,25 @@ static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- int ret;
+ struct intel_crtc *other;
- if (!crtc_state->bigjoiner)
- continue;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
+ crtc_state->bigjoiner_pipes) {
+ int ret;
- ret = intel_crtc_add_bigjoiner_planes(state, crtc,
- crtc_state->bigjoiner_linked_crtc);
- if (ret)
- return ret;
+ if (crtc == other)
+ continue;
+
+ ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -7571,71 +7271,123 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
return false;
}
+static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
+ u8 pipes)
+{
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ pipes & BIT(crtc->pipe) &&
+ intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+ struct intel_crtc *master_crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
- struct intel_crtc *slave_crtc, *master_crtc;
+ struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+ struct intel_crtc *slave_crtc;
+ u8 slave_pipes;
- /* slave being enabled, is master is still claiming this crtc? */
- if (old_crtc_state->bigjoiner_slave) {
- slave_crtc = crtc;
- master_crtc = old_crtc_state->bigjoiner_linked_crtc;
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
- if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
- goto claimed;
- }
+ /*
+ * TODO: encoder.compute_config() may be the best
+ * place to populate the bitmask for the master crtc.
+ * For now encoder.compute_config() just flags things
+ * as needing bigjoiner and we populate the bitmask
+ * here.
+ */
+ WARN_ON(master_crtc_state->bigjoiner_pipes);
- if (!new_crtc_state->bigjoiner)
+ if (!master_crtc_state->bigjoiner)
return 0;
- slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
- if (!slave_crtc) {
+ slave_pipes = BIT(master_crtc->pipe + 1);
+
+ if (slave_pipes & ~bigjoiner_pipes(i915)) {
drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Big joiner configuration requires "
- "CRTC + 1 to be used, doesn't exist\n",
- crtc->base.base.id, crtc->base.name);
+ "[CRTC:%d:%s] Cannot act as big joiner master "
+ "(need 0x%x as slave pipes, only 0x%x possible)\n",
+ master_crtc->base.base.id, master_crtc->base.name,
+ slave_pipes, bigjoiner_pipes(i915));
return -EINVAL;
}
- new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
- slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
- master_crtc = crtc;
- if (IS_ERR(slave_crtc_state))
- return PTR_ERR(slave_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, slave_pipes) {
+ struct intel_crtc_state *slave_crtc_state;
+ int ret;
- /* master being enabled, slave was already configured? */
- if (slave_crtc_state->uapi.enable)
- goto claimed;
+ slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
+ if (IS_ERR(slave_crtc_state))
+ return PTR_ERR(slave_crtc_state);
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Used as slave for big joiner\n",
- slave_crtc->base.base.id, slave_crtc->base.name);
+ /* master being enabled, slave was already configured? */
+ if (slave_crtc_state->uapi.enable) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
+ "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
+ return -EINVAL;
+ }
- return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
+ /*
+ * The state copy logic assumes the master crtc gets processed
+ * before the slave crtc during the main compute_config loop.
+ * This works because the crtcs are created in pipe order,
+ * and the hardware requires master pipe < slave pipe as well.
+ * Should that change we need to rethink the logic.
+ */
+ if (WARN_ON(drm_crtc_index(&master_crtc->base) >
+ drm_crtc_index(&slave_crtc->base)))
+ return -EINVAL;
-claimed:
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
- "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
- slave_crtc->base.base.id, slave_crtc->base.name,
- master_crtc->base.base.id, master_crtc->base.name);
- return -EINVAL;
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
+
+ master_crtc_state->bigjoiner_pipes =
+ BIT(master_crtc->pipe) | BIT(slave_crtc->pipe);
+ slave_crtc_state->bigjoiner_pipes =
+ BIT(master_crtc->pipe) | BIT(slave_crtc->pipe);
+
+ ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
- struct intel_crtc_state *master_crtc_state)
+ struct intel_crtc *master_crtc)
{
- struct intel_crtc_state *slave_crtc_state =
- intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+ struct intel_crtc *slave_crtc;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
- slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
- slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
- slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
- intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
+ slave_crtc_state->bigjoiner = false;
+ slave_crtc_state->bigjoiner_pipes = 0;
+
+ intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
+ }
+
+ master_crtc_state->bigjoiner = false;
+ master_crtc_state->bigjoiner_pipes = 0;
}
/**
@@ -7785,34 +7537,37 @@ static int intel_atomic_check_async(struct intel_atomic_state *state, struct int
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
+ u8 affected_pipes = 0;
+ u8 modeset_pipes = 0;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_crtc_state *linked_crtc_state;
- struct intel_crtc *linked_crtc;
- int ret;
+ affected_pipes |= crtc_state->bigjoiner_pipes;
+ if (intel_crtc_needs_modeset(crtc_state))
+ modeset_pipes |= crtc_state->bigjoiner_pipes;
+ }
- if (!crtc_state->bigjoiner)
- continue;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
- linked_crtc = crtc_state->bigjoiner_linked_crtc;
- linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
- if (IS_ERR(linked_crtc_state))
- return PTR_ERR(linked_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
+ int ret;
- if (!intel_crtc_needs_modeset(crtc_state))
- continue;
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- linked_crtc_state->uapi.mode_changed = true;
+ crtc_state->uapi.mode_changed = true;
- ret = drm_atomic_add_affected_connectors(&state->base,
- &linked_crtc->base);
+ ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, linked_crtc);
+ ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
@@ -7820,8 +7575,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
/* Kill old bigjoiner link, we may re-establish afterwards */
if (intel_crtc_needs_modeset(crtc_state) &&
- crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
- kill_bigjoiner_slave(state, crtc_state);
+ intel_crtc_is_bigjoiner_master(crtc_state))
+ kill_bigjoiner_slave(state, crtc);
}
return 0;
@@ -7846,6 +7601,10 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state, i) {
if (new_crtc_state->inherited != old_crtc_state->inherited)
new_crtc_state->uapi.mode_changed = true;
+
+ if (new_crtc_state->uapi.scaling_filter !=
+ old_crtc_state->uapi.scaling_filter)
+ new_crtc_state->uapi.mode_changed = true;
}
intel_vrr_check_modeset(state);
@@ -7861,30 +7620,30 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state)) {
- /* Light copy */
- intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
-
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ copy_bigjoiner_crtc_state_nomodeset(state, crtc);
+ else
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
continue;
}
- if (!new_crtc_state->uapi.enable) {
- if (!new_crtc_state->bigjoiner_slave) {
- intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
- any_ms = true;
- }
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
continue;
}
- ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
+ ret = intel_crtc_prepare_cleared_state(state, crtc);
if (ret)
goto fail;
+ if (!new_crtc_state->hw.enable)
+ continue;
+
ret = intel_modeset_pipe_config(state, new_crtc_state);
if (ret)
goto fail;
- ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
- new_crtc_state);
+ ret = intel_atomic_check_bigjoiner(state, crtc);
if (ret)
goto fail;
}
@@ -7938,10 +7697,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (new_crtc_state->bigjoiner) {
- struct intel_crtc_state *linked_crtc_state =
- intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
-
- if (intel_crtc_needs_modeset(linked_crtc_state)) {
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
}
@@ -8121,9 +7877,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_set_pipe_chicken(new_crtc_state);
}
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
@@ -8188,7 +7941,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
dev_priv->display->crtc_enable(state, crtc);
- if (new_crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
return;
/* vblanks work again, re-enable pipe CRC. */
@@ -8198,7 +7951,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
static void intel_update_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -8215,21 +7968,22 @@ static void intel_update_crtc(struct intel_atomic_state *state,
if (new_crtc_state->update_pipe)
intel_encoders_update_pipe(state, crtc);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ new_crtc_state->update_pipe)
+ icl_set_pipe_chicken(new_crtc_state);
}
intel_fbc_update(state, crtc);
- intel_update_planes_on_crtc(state, crtc);
+ intel_crtc_planes_update_noarm(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
commit_pipe_pre_planes(state, crtc);
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_arm_planes_on_crtc(state, crtc);
- else
- i9xx_arm_planes_on_crtc(state, crtc);
+ intel_crtc_planes_update_arm(state, crtc);
commit_pipe_post_planes(state, crtc);
@@ -8305,7 +8059,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
*/
if (!is_trans_port_sync_slave(old_crtc_state) &&
!intel_dp_mst_is_slave_trans(old_crtc_state) &&
- !old_crtc_state->bigjoiner_slave)
+ !intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
intel_old_crtc_state_disables(state, old_crtc_state,
@@ -8420,7 +8174,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
is_trans_port_sync_master(new_crtc_state) ||
- (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
+ intel_crtc_is_bigjoiner_master(new_crtc_state))
continue;
modeset_pipes &= ~BIT(pipe);
@@ -8947,10 +8701,8 @@ static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
struct intel_crtc *crtc;
u32 possible_crtcs = 0;
- for_each_intel_crtc(dev, crtc) {
- if (encoder->pipe_mask & BIT(crtc->pipe))
- possible_crtcs |= drm_crtc_mask(&crtc->base);
- }
+ for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
+ possible_crtcs |= drm_crtc_mask(&crtc->base);
return possible_crtcs;
}
@@ -9006,6 +8758,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D_XELPD);
+ intel_ddi_init(dev_priv, PORT_TC1);
} else if (IS_ALDERLAKE_P(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
@@ -10132,7 +9885,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
- !crtc_state->bigjoiner_slave)
+ !intel_crtc_is_bigjoiner_slave(crtc_state))
intel_crtc_disable_noatomic(crtc, ctx);
if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
@@ -10345,12 +10098,18 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
/* read out to slave crtc as well for bigjoiner */
if (crtc_state->bigjoiner) {
+ struct intel_crtc *slave_crtc;
+
/* encoder should be linked to the bigjoiner master */
- WARN_ON(crtc_state->bigjoiner_slave);
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
- crtc = crtc_state->bigjoiner_linked_crtc;
- crtc_state = to_intel_crtc_state(crtc->base.state);
- intel_encoder_get_config(encoder, crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state;
+
+ slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
+ intel_encoder_get_config(encoder, slave_crtc_state);
+ }
}
} else {
encoder->base.crtc = NULL;
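Throughout the state-copy helpers above, LUT and CTM blobs are moved with drm_property_replace_blob(), which owns the refcounting: it takes a reference on the new blob (if any), drops the one held through the destination pointer, and stores the new blob there. That makes the open-coded uapi-to-hw copies leak-free without explicit put calls:

	/* e.g. from intel_crtc_copy_uapi_to_hw_state_nomodeset() above */
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);	/* old hw blob unreferenced */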
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 457738aeee3e..11d6134c53c8 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -430,11 +430,11 @@ enum hpd_pin {
&(dev)->mode_config.crtc_list, \
base.head)
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+#define for_each_intel_crtc_in_pipe_mask(dev, intel_crtc, pipe_mask) \
list_for_each_entry(intel_crtc, \
&(dev)->mode_config.crtc_list, \
base.head) \
- for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
+ for_each_if((pipe_mask) & BIT(intel_crtc->pipe))
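The renamed iterator makes the key semantic change explicit: the old for_each_intel_crtc_mask() matched on drm_crtc_mask(), i.e. the crtc's index in the mode_config list, while the new for_each_intel_crtc_in_pipe_mask() matches on BIT(crtc->pipe). The two can differ once pipes are fused off, since crtc indices stay dense while pipe numbers do not. Typical usage, as seen in the hunks above:

	struct intel_crtc *slave_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
		/* runs once per slave pipe of this bigjoiner configuration */
	}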
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
@@ -555,6 +555,10 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
bool bigjoiner);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
+bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
+u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
+struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
@@ -632,9 +636,6 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index f4de004d470f..ffe6822d7414 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -16,6 +16,7 @@
#include "intel_dp_mst.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
+#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
@@ -78,7 +79,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (DISPLAY_VER(dev_priv) >= 9)
/* no global SR status; inspect per-plane WM */;
else if (HAS_PCH_SPLIT(dev_priv))
- sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
+ sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE;
else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv))
sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -124,9 +125,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_framebuffer *drm_fb;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
- fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
-
+ fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev);
+ if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
fbdev_fb->base.height,
@@ -474,8 +474,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
* reg for DC3CO debugging and validation,
* but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
*/
- seq_printf(m, "DC3CO count: %d\n",
- intel_de_read(dev_priv, DMC_DEBUG3));
+ seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ?
+ DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
} else {
dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
@@ -923,23 +923,23 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
yesno(crtc_state->uapi.active),
DRM_MODE_ARG(&crtc_state->uapi.mode));
- if (crtc_state->hw.enable) {
- seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->hw.active),
- DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+ seq_printf(m, "\thw: enable=%s, active=%s\n",
+ yesno(crtc_state->hw.enable), yesno(crtc_state->hw.active));
+ seq_printf(m, "\tadjusted_mode=" DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+ seq_printf(m, "\tpipe__mode=" DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->hw.pipe_mode));
- seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
- yesno(crtc_state->dither), crtc_state->pipe_bpp);
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
- intel_scaler_info(m, crtc);
- }
+ intel_scaler_info(m, crtc);
if (crtc_state->bigjoiner)
- seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
- crtc_state->bigjoiner_linked_crtc->base.base.id,
- crtc_state->bigjoiner_linked_crtc->base.name,
- crtc_state->bigjoiner_slave ? "slave" : "master");
+ seq_printf(m, "\tLinked to 0x%x pipes as a %s\n",
+ crtc_state->bigjoiner_pipes,
+ intel_crtc_is_bigjoiner_slave(crtc_state) ? "slave" : "master");
for_each_intel_encoder_mask(&dev_priv->drm, encoder,
crtc_state->uapi.encoder_mask)
@@ -1015,6 +1015,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0);
seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
pll->state.hw_state.mg_refclkin_ctl);
seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index d2102cc17bb4..9ebae7ac3235 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -16,6 +16,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
+#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 60e15226a8cb..b50d0e6efe21 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -26,7 +26,6 @@
#ifndef __INTEL_DISPLAY_TYPES_H__
#define __INTEL_DISPLAY_TYPES_H__
-#include <linux/async.h>
#include <linux/i2c.h>
#include <linux/pm_qos.h>
#include <linux/pwm.h>
@@ -38,7 +37,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_dsc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -145,25 +143,6 @@ struct intel_framebuffer {
struct i915_address_space *dpt_vm;
};
-struct intel_fbdev {
- struct drm_fb_helper helper;
- struct intel_framebuffer *fb;
- struct i915_vma *vma;
- unsigned long vma_flags;
- async_cookie_t cookie;
- int preferred_bpp;
-
- /* Whether or not fbdev hpd processing is temporarily suspended */
- bool hpd_suspended : 1;
- /* Set when a hotplug was received while HPD processing was
- * suspended
- */
- bool hpd_waiting : 1;
-
- /* Protects hpd_suspended */
- struct mutex hpd_lock;
-};
-
enum intel_hotplug_state {
INTEL_HOTPLUG_UNCHANGED,
INTEL_HOTPLUG_CHANGED,
@@ -1168,6 +1147,7 @@ struct intel_crtc_state {
/* bitmask of actually visible planes (enum plane_id) */
u8 active_planes;
+ u8 scaled_planes;
u8 nv12_planes;
u8 c8_planes;
@@ -1202,11 +1182,8 @@ struct intel_crtc_state {
/* enable pipe big joiner? */
bool bigjoiner;
- /* big joiner slave crtc? */
- bool bigjoiner_slave;
-
- /* linked crtc for bigjoiner, either slave or master */
- struct intel_crtc *bigjoiner_linked_crtc;
+ /* big joiner pipe bitmask */
+ u8 bigjoiner_pipes;
/* Display Stream compression state */
struct {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 146b83916005..1046e7fe310a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -886,9 +886,8 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_CLOCK_HIGH;
/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
- tmds_clock = target_clock;
- if (drm_mode_is_420_only(info, mode))
- tmds_clock /= 2;
+ tmds_clock = intel_hdmi_tmds_clock(target_clock, 8,
+ drm_mode_is_420_only(info, mode));
if (intel_dp->dfp.min_tmds_clock &&
tmds_clock < intel_dp->dfp.min_tmds_clock)
@@ -1139,21 +1138,12 @@ static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
intel_dp->dfp.ycbcr_444_to_420);
}
-static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state, int bpc)
-{
- int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
-
- if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
- clock /= 2;
-
- return clock;
-}
-
static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state, int bpc)
{
- int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc,
+ intel_dp_hdmi_ycbcr420(intel_dp, crtc_state));
if (intel_dp->dfp.min_tmds_clock &&
tmds_clock < intel_dp->dfp.min_tmds_clock)
@@ -3628,6 +3618,32 @@ update_status:
"Could not write test response to sink\n");
}
+static bool intel_dp_link_ok(struct intel_dp *intel_dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ bool uhbr = intel_dp->link_rate >= 1000000;
+ bool ok;
+
+ if (uhbr)
+ ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
+ intel_dp->lane_count);
+ else
+ ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+
+ if (ok)
+ return true;
+
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] %s link not ok, retraining\n",
+ encoder->base.base.id, encoder->base.name,
+ uhbr ? "128b/132b" : "8b/10b");
+
+ return false;
+}
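
intel_dp->link_rate is kept in units of 10 kbit/s per lane, so the >= 1000000 test
selects the UHBR rates, which are exactly the ones using 128b/132b channel coding.
For orientation (these are the standard DP rates, not new defines from this patch):

/* 8b/10b rates:    162000 (RBR), 270000 (HBR), 540000 (HBR2), 810000 (HBR3) */
/* 128b/132b rates: 1000000 (UHBR10), 1350000 (UHBR13.5), 2000000 (UHBR20) */
bool uhbr = intel_dp->link_rate >= 1000000;	/* 10 Gbit/s per lane and up */
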
+
static void
intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
@@ -3658,14 +3674,7 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
return false;
}
- if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
- drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s] channel EQ not ok, retraining\n",
- encoder->base.base.id, encoder->base.name);
- return false;
- }
-
- return true;
+ return intel_dp_link_ok(intel_dp, link_status);
}
/**
@@ -3779,8 +3788,8 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
intel_dp->lane_count))
return false;
- /* Retrain if Channel EQ or CR not ok */
- return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+ /* Retrain if link not ok */
+ return !intel_dp_link_ok(intel_dp, link_status);
}
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
@@ -3810,14 +3819,14 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp,
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
- u32 *crtc_mask)
+ u8 *pipe_mask)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
- *crtc_mask = 0;
+ *pipe_mask = 0;
if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
@@ -3851,12 +3860,12 @@ static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
!try_wait_for_completion(&conn_state->commit->hw_done))
continue;
- *crtc_mask |= drm_crtc_mask(&crtc->base);
+ *pipe_mask |= BIT(crtc->pipe);
}
drm_connector_list_iter_end(&conn_iter);
if (!intel_dp_needs_link_retrain(intel_dp))
- *crtc_mask = 0;
+ *pipe_mask = 0;
return ret;
}
@@ -3875,7 +3884,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc;
- u32 crtc_mask;
+ u8 pipe_mask;
int ret;
if (!intel_dp_is_connected(intel_dp))
@@ -3886,17 +3895,17 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
+ ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask);
if (ret)
return ret;
- if (crtc_mask == 0)
+ if (pipe_mask == 0)
return 0;
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
encoder->base.base.id, encoder->base.name);
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3907,7 +3916,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_crtc_pch_transcoder(crtc), false);
}
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3924,7 +3933,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
break;
}
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3942,14 +3951,14 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
- u32 *crtc_mask)
+ u8 *pipe_mask)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
- *crtc_mask = 0;
+ *pipe_mask = 0;
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
@@ -3980,7 +3989,7 @@ static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
!try_wait_for_completion(&conn_state->commit->hw_done))
continue;
- *crtc_mask |= drm_crtc_mask(&crtc->base);
+ *pipe_mask |= BIT(crtc->pipe);
}
drm_connector_list_iter_end(&conn_iter);
@@ -3993,7 +4002,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc;
- u32 crtc_mask;
+ u8 pipe_mask;
int ret;
ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
@@ -4001,17 +4010,17 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
+ ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
if (ret)
return ret;
- if (crtc_mask == 0)
+ if (pipe_mask == 0)
return 0;
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
encoder->base.base.id, encoder->base.name);
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9451f336f28f..5d98773efd1b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -712,7 +712,7 @@ static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_
return false;
}
-static void
+void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
@@ -996,6 +996,23 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}
+static int
+intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 sink_status;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm, "Failed to read sink status\n");
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
+}
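
The DP_INTRA_HOP_AUX_REPLY_INDICATION bit in DP_SINK_STATUS stays set while the DPRX
(or an LTTPR) is still in intra-hop AUX mode from 128b/132b training, so this helper
returns 1 while busy, 0 once clear, and a negative errno on AUX failure. Callers poll
it with a timeout, as later in this patch:

/* Wait up to 500 ms for intra-hop AUX mode to clear before (re)training. */
if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500))
	drm_dbg_kms(&i915->drm, "128b/132b intra-hop not clearing\n");
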
+
/**
* intel_dp_stop_link_train - stop link training
* @intel_dp: DP struct
@@ -1015,11 +1032,21 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
intel_dp->link_trained = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
+
+ if (intel_dp_is_uhbr(crtc_state) &&
+ wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] 128b/132b intra-hop not clearing\n",
+ encoder->base.base.id, encoder->base.name);
+ }
}
static bool
@@ -1083,8 +1110,6 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
bool ret = true;
int i;
- intel_dp_prepare_link_train(intel_dp, crtc_state);
-
for (i = lttpr_count - 1; i >= 0; i--) {
enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
@@ -1104,6 +1129,272 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
return ret;
}
+/*
+ * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
+ */
+static bool
+intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int delay_us;
+ int try, max_tries = 20;
+ unsigned long deadline;
+ bool timeout = false;
+
+ /*
+ * Reset signal levels. Start transmitting 128b/132b TPS1.
+ *
+ * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
+ * in DP_TRAINING_PATTERN_SET.
+ */
+ if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ DP_TRAINING_PATTERN_1)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS1\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
+ /* Read the initial TX FFE settings. */
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read TX FFE presets\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Update signal levels and training set as requested. */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to set initial TX FFE settings\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Start transmitting 128b/132b TPS2. */
+ if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ DP_TRAINING_PATTERN_2)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS2\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Time budget for the LANEx_EQ_DONE Sequence */
+ deadline = jiffies + msecs_to_jiffies_timeout(400);
+
+ for (try = 0; try < max_tries; try++) {
+ usleep_range(delay_us, 2 * delay_us);
+
+ /*
+ * The delay may get updated. The transmitter shall read the
+ * delay before link status during link training.
+ */
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Lane channel eq done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Lane channel eq timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ /* Update signal levels and training set as requested. */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to update TX FFE settings\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+ }
+
+ if (try == max_tries) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Max loop count reached\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ for (;;) {
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Interlane align done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Interlane align timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ usleep_range(2000, 3000);
+ }
+
+ return true;
+}
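
The EQ sequence above is bounded three ways: a per-iteration delay re-read from the
DPCD AUX_RD_INTERVAL, a 400 ms overall budget, and a hard cap of 20 TX FFE adjustment
passes. The deadline handling deliberately allows one final status read after expiry;
reduced to a sketch, with poll_condition() standing in for the actual link-status checks:

unsigned long deadline = jiffies + msecs_to_jiffies_timeout(400);
bool timeout = false;

for (;;) {
	if (poll_condition())			/* hypothetical success check */
		break;
	if (timeout)
		return false;			/* the post-deadline try also failed */
	if (time_after(jiffies, deadline))
		timeout = true;			/* allow one last try after the deadline */
}
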
+
+/*
+ * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
+ */
+static bool
+intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lttpr_count)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ unsigned long deadline;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_2_CDS) != 1) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS2 CDS\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Time budget for the LANEx_CDS_DONE Sequence */
+ deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);
+
+ for (;;) {
+ bool timeout = false;
+
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ usleep_range(2000, 3000);
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
+ drm_dp_128b132b_cds_interlane_align_done(link_status) &&
+ drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] CDS interlane align done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] CDS timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+ }
+
+ /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */
+ if (intel_dp->set_idle_link_train)
+ intel_dp->set_idle_link_train(intel_dp, crtc_state);
+
+ return true;
+}
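
The CDS time budget scales with the number of hops: 20 ms for the DPRX itself plus
20 ms per LTTPR. A worked example:

/* Two LTTPRs between source and sink: (2 + 1) * 20 = 60 ms CDS budget. */
deadline = jiffies + msecs_to_jiffies_timeout((2 + 1) * 20);
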
+
+/*
+ * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
+ */
+static bool
+intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lttpr_count)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ bool passed = false;
+
+ if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] 128b/132b intra-hop not clear\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
+ intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
+ passed = true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] 128b/132b Link Training %s at link rate = %d, lane count = %d\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ passed ? "passed" : "failed",
+ crtc_state->port_clock, crtc_state->lane_count);
+
+ return passed;
+}
+
/**
* intel_dp_start_link_train - start link training
* @intel_dp: DP struct
@@ -1117,6 +1408,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ bool passed;
/*
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
@@ -1127,6 +1419,13 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
/* Still continue with enabling the port and link training. */
lttpr_count = 0;
- if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
+ intel_dp_prepare_link_train(intel_dp, crtc_state);
+
+ if (intel_dp_is_uhbr(crtc_state))
+ passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
+ else
+ passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
+
+ if (!passed)
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index dbfb15705aaa..dc1556b46b85 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -29,6 +29,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void
+intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE]);
+
/* Get the TPSx symbol type of the value programmed to DP_TRAINING_PATTERN_SET */
static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 6b6eab507d30..e30e698aa684 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -99,6 +99,29 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_topology_state *topology_state;
+ u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
+ DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
+
+ topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
+ if (IS_ERR(topology_state)) {
+ drm_dbg_kms(&i915->drm, "slot update failed\n");
+ return PTR_ERR(topology_state);
+ }
+
+ drm_dp_mst_update_slots(topology_state, link_coding_cap);
+
+ return 0;
+}
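
drm_dp_mst_update_slots() records the link's channel coding in the topology state so
payload allocation knows how many time slots exist and where they start. Assuming the
drm helper behaves as its users in this series imply, the effect is roughly:

/* Sketch of the slot bookkeeping (not the literal helper body):
 * 8b/10b MST:    slot 0 carries the MTP header, payloads use slots 1..63.
 * 128b/132b MST: no MTP header slot, payloads use all slots 0..63. */
if (link_coding_cap == DP_CAP_ANSI_128B132B) {
	mst_state->total_avail_slots = 64;
	mst_state->start_slot = 0;
} else {
	mst_state->total_avail_slots = 63;
	mst_state->start_slot = 1;
}

The same 0-vs-1 start slot appears below in intel_mst_disable_dp() and
intel_mst_pre_enable_dp() when calling drm_dp_update_payload_part1().
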
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -155,6 +178,10 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
+ ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
+ if (ret)
+ return ret;
+
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
@@ -357,6 +384,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 0 : 1;
int ret;
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -366,7 +394,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
if (ret) {
drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
@@ -475,6 +503,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ int start_slot = intel_dp_is_uhbr(pipe_config) ? 0 : 1;
int ret;
bool first_mst_stream;
@@ -509,7 +538,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp->active_mst_links++;
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
/*
* Before Gen 12 this is not done as part of
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 1ce0c171f4fb..14f5ffe27d05 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -16,6 +16,10 @@
#include "intel_snps_phy.h"
#include "vlv_sideband.h"
+struct intel_dpll_funcs {
+ int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
+};
+
struct intel_limit {
struct {
int min, max;
@@ -1400,6 +1404,14 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
.crtc_compute_clock = i8xx_crtc_compute_clock,
};
+int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ return i915->dpll_funcs->crtc_compute_clock(crtc_state);
+}
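
Making intel_dpll_funcs local to intel_dpll.c turns the clock-computation hook into an
opaque vtable; code outside the file now goes through the single exported wrapper:

/* e.g. from the atomic check path, instead of poking i915->dpll_funcs directly: */
int ret = intel_dpll_crtc_compute_clock(crtc_state);

if (ret)
	return ret;
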
+
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 1af0ac43cca4..69b06a9e473e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -15,6 +15,7 @@ struct intel_crtc_state;
enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 6723c3de5a80..569903d47aea 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2748,6 +2748,9 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
+
+ if (i915->vbt.override_afc_startup)
+ pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
}
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
@@ -2949,6 +2952,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
+ if (dev_priv->vbt.override_afc_startup) {
+ u8 val = dev_priv->vbt.override_afc_startup_val;
+
+ pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
+ }
pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
@@ -3448,10 +3456,10 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
- hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
- DKL_PLL_DIV0_PROP_COEFF_MASK |
- DKL_PLL_DIV0_FBPREDIV_MASK |
- DKL_PLL_DIV0_FBDIV_INT_MASK);
+ val = DKL_PLL_DIV0_MASK;
+ if (dev_priv->vbt.override_afc_startup)
+ val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
+ hw_state->mg_pll_div0 &= val;
hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
@@ -3513,6 +3521,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
TGL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR1(id));
+ if (dev_priv->vbt.override_afc_startup) {
+ hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
+ hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
+ }
} else {
if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
@@ -3554,7 +3566,7 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
- i915_reg_t cfgcr0_reg, cfgcr1_reg;
+ i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
if (IS_ALDERLAKE_S(dev_priv)) {
cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
@@ -3568,6 +3580,7 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
} else if (DISPLAY_VER(dev_priv) >= 12) {
cfgcr0_reg = TGL_DPLL_CFGCR0(id);
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
+ div0_reg = TGL_DPLL0_DIV0(id);
} else {
if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
cfgcr0_reg = ICL_DPLL_CFGCR0(4);
@@ -3580,6 +3593,12 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
+ drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
+ !i915_mmio_reg_valid(div0_reg));
+ if (dev_priv->vbt.override_afc_startup &&
+ i915_mmio_reg_valid(div0_reg))
+ intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
+ hw_state->div0);
intel_de_posting_read(dev_priv, cfgcr1_reg);
}
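
intel_de_rmw(dev_priv, reg, clear, set) is a read-modify-write, so only the AFC
startup field of DIV0 is touched while the rest of the register is preserved.
Open-coded, the call above is roughly:

u32 val = intel_de_read(dev_priv, div0_reg);

val &= ~TGL_DPLL0_DIV0_AFC_STARTUP_MASK;	/* clear the AFC startup field */
val |= hw_state->div0;				/* OR in the VBT override value */
intel_de_write(dev_priv, div0_reg, val);
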
@@ -3667,13 +3686,11 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
val |= hw_state->mg_clktop2_hsclkctl;
intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
- val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
- val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
- DKL_PLL_DIV0_PROP_COEFF_MASK |
- DKL_PLL_DIV0_FBPREDIV_MASK |
- DKL_PLL_DIV0_FBDIV_INT_MASK);
- val |= hw_state->mg_pll_div0;
- intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
+ val = DKL_PLL_DIV0_MASK;
+ if (dev_priv->vbt.override_afc_startup)
+ val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
+ intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
+ hw_state->mg_pll_div0);
val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
@@ -3912,13 +3929,14 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
drm_dbg_kms(&dev_priv->drm,
- "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+ "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
"mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
"mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
"mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
"mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
"mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
hw_state->cfgcr0, hw_state->cfgcr1,
+ hw_state->div0,
hw_state->mg_refclkin_ctl,
hw_state->mg_clktop2_coreclkctl1,
hw_state->mg_clktop2_hsclkctl,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 91fe181462b2..ba2fdfce1579 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -208,6 +208,9 @@ struct intel_dpll_hw_state {
/* icl */
u32 cfgcr0;
+ /* tgl */
+ u32 div0;
+
/* bxt */
u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 8f674745e7e0..05dd7dba3a5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -3,11 +3,13 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gem/i915_gem_domain.h"
+#include "gt/gen8_ppgtt.h"
+
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
-#include "gt/gen8_ppgtt.h"
struct i915_dpt {
struct i915_address_space vm;
@@ -48,7 +50,7 @@ static void dpt_insert_page(struct i915_address_space *vm,
}
static void dpt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
@@ -64,8 +66,8 @@ static void dpt_insert_entries(struct i915_address_space *vm,
* not to allow the user to override access to a read only page.
*/
- i = vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ i = vma_res->start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
gen8_set_pte(&base[i++], pte_encode | addr);
}
@@ -76,35 +78,38 @@ static void dpt_clear_range(struct i915_address_space *vm,
static void dpt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
+ if (vma_res->bound_flags)
+ return;
+
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
+ if (vm->has_read_only && vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(obj))
+ if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+ vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
-static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+static void dpt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- vm->clear_range(vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
static void dpt_cleanup(struct i915_address_space *vm)
@@ -250,7 +255,11 @@ intel_dpt_create(struct intel_framebuffer *fb)
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);
- ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
+ if (!ret) {
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ i915_gem_object_unlock(dpt_obj);
+ }
if (ret) {
i915_gem_object_put(dpt_obj);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 83a69a4a4fea..b34a67309976 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,6 +4,8 @@
*
*/
+#include "gem/i915_gem_internal.h"
+
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index a3a906cb097e..eafef0a87fea 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -79,8 +79,8 @@ struct intel_dsi {
*/
enum mipi_dsi_pixel_format pixel_format;
- /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
- u32 video_mode_format;
+ /* NON_BURST_SYNC_PULSE, NON_BURST_SYNC_EVENTS, or BURST_MODE */
+ int video_mode;
/* eot for MIPI_EOT_DISABLE register */
u8 eotp_pkt;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index a85574c413e8..6b4a27372c82 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -44,6 +44,7 @@
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "vlv_dsi.h"
+#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
@@ -675,11 +676,11 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
drm_dbg_kms(&i915->drm, "Video mode format %s\n",
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+ intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
"non-burst with sync pulse" :
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+ intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
"non-burst with sync events" :
- intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+ intel_dsi->video_mode == BURST_MODE ?
"burst" : "<unknown>");
drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
intel_dsi->burst_mode_ratio);
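
The new video_mode field stores the VBT's video_transfer_mode byte as-is; the names
compared above follow the MIPI VBT encoding, which (assuming the usual i915 intel_dsi
definitions) is:

/* VBT video_transfer_mode values, stored unchanged in intel_dsi->video_mode: */
#define NON_BURST_SYNC_PULSE	0x1
#define NON_BURST_SYNC_EVENTS	0x2
#define BURST_MODE		0x3
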
@@ -739,7 +740,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
- intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+ intel_dsi->video_mode = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
@@ -770,7 +771,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
* Burst mode ratio = (target DDR frequency from VBT) / (non-burst DDR frequency),
* multiplied by 100 to preserve the remainder
*/
- if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ if (intel_dsi->video_mode == BURST_MODE) {
if (mipi_config->target_burst_mode_freq) {
u32 bitrate = intel_dsi_bitrate(intel_dsi);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 31c15e5fca95..a307b4993bcf 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -7,6 +7,7 @@
* DOC: display pinning helpers
*/
+#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
@@ -36,7 +37,11 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
+ if (!ret) {
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ i915_gem_object_unlock(obj);
+ }
if (ret) {
vma = ERR_PTR(ret);
goto err;
@@ -47,7 +52,7 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
goto err;
if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind(vma);
+ ret = i915_vma_unbind_unlocked(vma);
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 465dc4e97ea8..87f4af3fd523 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -605,7 +605,7 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
else if (DISPLAY_VER(i915) == 9)
skl_fbc_program_cfb_stride(fbc);
- if (i915->ggtt.num_fences)
+ if (to_gt(i915)->ggtt->num_fences)
snb_fbc_program_fence(fbc);
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
@@ -1125,7 +1125,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
if (DISPLAY_VER(i915) >= 11 &&
- (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+ (plane_state->view.color_plane[0].y +
+ (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index adc3a81be9f7..fd5bc7acf08d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -50,6 +50,23 @@
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer *fb;
+ struct i915_vma *vma;
+ unsigned long vma_flags;
+ async_cookie_t cookie;
+ int preferred_bpp;
+
+ /* Whether or not fbdev hpd processing is temporarily suspended */
+ bool hpd_suspended: 1;
+ /* Set when a hotplug was received while HPD processing was suspended */
+ bool hpd_waiting: 1;
+
+ /* Protects hpd_suspended */
+ struct mutex hpd_lock;
+};
+
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
return ifbdev->fb->frontbuffer;
@@ -180,7 +197,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
@@ -680,3 +697,11 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
intel_fbdev_invalidate(ifbdev);
}
+
+struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+{
+ if (!fbdev || !fbdev->helper.fb)
+ return NULL;
+
+ return to_intel_framebuffer(fbdev->helper.fb);
+}
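
With struct intel_fbdev now private to intel_fbdev.c, external users reach the
framebuffer only through this NULL-safe accessor, as the debugfs hunk earlier in this
patch does:

struct intel_framebuffer *fb = intel_fbdev_framebuffer(dev_priv->fbdev);

/* NULL when fbdev is absent or has no framebuffer yet. */
if (fb)
	seq_printf(m, "fbcon size: %d x %d\n", fb->base.width, fb->base.height);
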
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index de7c84250eb5..0e95e9472fa3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -10,6 +10,8 @@
struct drm_device;
struct drm_i915_private;
+struct intel_fbdev;
+struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_init(struct drm_device *dev);
@@ -19,6 +21,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
void intel_fbdev_output_poll_changed(struct drm_device *dev);
void intel_fbdev_restore_mode(struct drm_device *dev);
+struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
{
@@ -48,6 +51,10 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
+static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+{
+ return NULL;
+}
#endif
#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 3d6e22923601..4e4b43669b14 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -10,6 +10,11 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
+struct intel_fdi_funcs {
+ void (*fdi_link_train)(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+};
+
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 6ce8c10fe975..2fad03250661 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -98,11 +98,21 @@ static const struct gmbus_pin gmbus_pins_dg1[] = {
[GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
};
+static const struct gmbus_pin gmbus_pins_dg2[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+};
+
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG2)
+ return &gmbus_pins_dg2[pin];
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
return &gmbus_pins_dg1[pin];
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
return &gmbus_pins_icp[pin];
@@ -123,7 +133,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG2)
+ size = ARRAY_SIZE(gmbus_pins_dg2);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
size = ARRAY_SIZE(gmbus_pins_dg1);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
size = ARRAY_SIZE(gmbus_pins_icp);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 6c72f8587240..1aa5bdc7b0dc 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1869,7 +1869,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_OK;
}
-static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
+int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
{
/* YCBCR420 TMDS rate requirement is half the pixel clock */
if (ycbcr420_output)
@@ -1935,25 +1935,30 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- enum drm_mode_status status;
+ enum drm_mode_status status = MODE_OK;
+ int bpc;
+
+ /*
+ * Try all color depths since valid port clock range
+ * can have holes. Any mode that can be used with at
+ * least one color depth is accepted.
+ */
+ for (bpc = 12; bpc >= 8; bpc -= 2) {
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+
+ if (!intel_hdmi_source_bpc_possible(i915, bpc))
+ continue;
+
+ if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ continue;
+
+ status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
+ if (status == MODE_OK)
+ return MODE_OK;
+ }
- /* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output),
- true, has_hdmi_sink);
-
- /* if we can't do 8bpc we may still be able to do 12bpc */
- if (status != MODE_OK &&
- intel_hdmi_source_bpc_possible(i915, 12) &&
- intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output),
- true, has_hdmi_sink);
-
- /* if we can't do 8,12bpc we may still be able to do 10bpc */
- if (status != MODE_OK &&
- intel_hdmi_source_bpc_possible(i915, 10) &&
- intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output),
- true, has_hdmi_sink);
+ /* can never happen */
+ drm_WARN_ON(&i915->drm, status == MODE_OK);
return status;
}
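
intel_hdmi_tmds_clock() scales the pixel clock by bpc/8 and halves it for YCbCr 4:2:0
output, so the loop above probes the deepest color first and falls back. A worked
example for a 594000 kHz 4K mode (all values in kHz):

intel_hdmi_tmds_clock(594000, 12, false);	/* 891000 - above HDMI 2.0's 600 MHz TMDS limit */
intel_hdmi_tmds_clock(594000, 10, false);	/* 742500 - also too fast for HDMI 2.0 */
intel_hdmi_tmds_clock(594000,  8, false);	/* 594000 - fits, mode accepted at 8 bpc */
intel_hdmi_tmds_clock(594000,  8, true);	/* 297000 - 4:2:0 halves the TMDS rate */
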
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index b0804b862a89..93f65a917c36 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -46,6 +46,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
int bpc, bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output);
int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
int num_slices, int output_format, bool hdmi_all_bpp,
int hdmi_max_chunk_bytes);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 912b7003dcfa..8204126d17f9 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include "i915_drv.h"
+#include "i915_irq.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -213,12 +214,6 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
}
}
-static void intel_hpd_irq_setup(struct drm_i915_private *i915)
-{
- if (i915->display_irqs_enabled && i915->hotplug_funcs)
- i915->hotplug_funcs->hpd_irq_setup(i915);
-}
-
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index af9d30f56cc1..f31e8c3f8ce0 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -47,10 +47,11 @@
#define OPREGION_ASLE_EXT_OFFSET 0x1C00
#define OPREGION_SIGNATURE "IntelGraphicsMem"
-#define MBOX_ACPI (1<<0)
-#define MBOX_SWSCI (1<<1)
-#define MBOX_ASLE (1<<2)
-#define MBOX_ASLE_EXT (1<<4)
+#define MBOX_ACPI BIT(0) /* Mailbox #1 */
+#define MBOX_SWSCI BIT(1) /* Mailbox #2 (obsolete from v2.x) */
+#define MBOX_ASLE BIT(2) /* Mailbox #3 */
+#define MBOX_ASLE_EXT BIT(4) /* Mailbox #5 */
+#define MBOX_BACKLIGHT BIT(5) /* Mailbox #2 (valid from v3.x) */
struct opregion_header {
u8 signature[16];
@@ -245,14 +246,10 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
-static int swsci(struct drm_i915_private *dev_priv,
- u32 function, u32 parm, u32 *parm_out)
+static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
- struct opregion_swsci *swsci = dev_priv->opregion.swsci;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- u32 main_function, sub_function, scic;
- u16 swsci_val;
- u32 dslp;
+ struct opregion_swsci *swsci = i915->opregion.swsci;
+ u32 main_function, sub_function;
if (!swsci)
return -ENODEV;
@@ -264,15 +261,31 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
- if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+ if ((i915->opregion.swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
- if ((dev_priv->opregion.swsci_gbda_sub_functions &
+ if ((i915->opregion.swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
+ return 0;
+}
+
+static int swsci(struct drm_i915_private *dev_priv,
+ u32 function, u32 parm, u32 *parm_out)
+{
+ struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u32 scic, dslp;
+ u16 swsci_val;
+ int ret;
+
+ ret = check_swsci_function(dev_priv, function);
+ if (ret)
+ return ret;
+
/* Driver sleep timeout in ms. */
dslp = swsci->dslp;
if (!dslp) {
@@ -346,11 +359,17 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
u32 parm = 0;
u32 type = 0;
u32 port;
+ int ret;
/* don't care about old stuff for now */
if (!HAS_DDI(dev_priv))
return 0;
+ /* Avoid port out of bounds checks if SWSCI isn't there. */
+ ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE);
+ if (ret)
+ return ret;
+
if (intel_encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
@@ -363,6 +382,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
port++;
}
+ /*
+ * The port numbering and mapping here is bizarre. The now-obsolete
+ * swsci spec supports ports numbered [0..4]. Port E is handled as a
+ * special case, but port F and beyond are not. The functionality is
+ * supposed to be obsolete for new platforms. Just bail out if the port
+ * number is out of bounds after mapping.
+ */
+ if (port > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+ intel_encoder->base.base.id, intel_encoder->base.name,
+ port_name(intel_encoder->port), port);
+ return -EINVAL;
+ }
+
if (!enable)
parm |= 4 << 8;
@@ -899,9 +933,17 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
}
if (mboxes & MBOX_SWSCI) {
- drm_dbg(&dev_priv->drm, "SWSCI supported\n");
- opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- swsci_setup(dev_priv);
+ u8 major = opregion->header->over.major;
+
+ if (major >= 3) {
+ drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n");
+ } else {
+ if (major >= 2)
+ drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n");
+ drm_dbg(&dev_priv->drm, "SWSCI supported\n");
+ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+ swsci_setup(dev_priv);
+ }
}
if (mboxes & MBOX_ASLE) {
@@ -916,6 +958,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET;
}
+ if (mboxes & MBOX_BACKLIGHT) {
+ drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
+ }
+
if (intel_load_vbt_firmware(dev_priv) == 0)
goto out;
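
The refactor above splits the SWSCI sub-function validation out of swsci() into check_swsci_function(), so intel_opregion_notify_encoder() can probe for support before touching any port state. A minimal userspace sketch of that bitmask probe follows; the function-code encoding (low byte main function, next byte sub-function) and the error value are assumptions for illustration, not the real SWSCI layout.

#include <stdint.h>
#include <stdio.h>

/* Assumed encoding for illustration: low byte = main fn, next byte = sub fn. */
#define FUNC(main, sub)	((uint32_t)(main) | ((uint32_t)(sub) << 8))

static int check_function(uint16_t supported_sub_fns, uint32_t function)
{
	uint32_t sub = (function >> 8) & 0xff;

	if (sub >= 16)
		return -22;	/* -EINVAL: outside the advertised bitmask */

	/* Each supported sub-function is advertised as one bit. */
	return (supported_sub_fns & (1u << sub)) ? 0 : -22;
}

int main(void)
{
	uint16_t sub_fns = (1u << 0) | (1u << 3);	/* BIOS advertised 0 and 3 */

	printf("%d\n", check_function(sub_fns, FUNC(4, 3)));	/* 0 */
	printf("%d\n", check_function(sub_fns, FUNC(4, 5)));	/* -22 */
	return 0;
}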
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 5358f03b52db..76845d34ad0c 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -28,6 +28,7 @@
#include <drm/drm_fourcc.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 01ce1d72297f..d7b1de4cc205 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -46,17 +46,18 @@ static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
+ struct intel_memory_region *mem = i915->mm.stolen_region;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 base, size;
- if (plane_config->size == 0)
+ if (!mem || plane_config->size == 0)
return NULL;
base = round_down(plane_config->base,
I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
- I915_GTT_MIN_ALIGNMENT);
+ mem->min_page_size);
size -= base;
/*
@@ -94,7 +95,7 @@ initial_plane_vma(struct drm_i915_private *i915,
goto err_obj;
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err_obj;
@@ -165,8 +166,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@@ -203,11 +202,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
* pretend the BIOS never had it enabled.
*/
intel_plane_disable_noatomic(crtc, plane);
- if (crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- crtc_state->bigjoiner_linked_crtc;
- intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
- }
return;
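
initial_plane_vma() now rounds the end of the BIOS framebuffer up to the stolen region's min_page_size instead of the GTT minimum alignment. A standalone sketch of the same base/size arithmetic, using an assumed 64 KiB region minimum:

#include <stdint.h>
#include <stdio.h>

#define GTT_MIN_ALIGNMENT	4096u

/* Both helpers assume power-of-two alignments, as the kernel macros do. */
static uint32_t rnd_down(uint32_t v, uint32_t a) { return v & ~(a - 1); }
static uint32_t rnd_up(uint32_t v, uint32_t a) { return (v + a - 1) & ~(a - 1); }

int main(void)
{
	uint32_t min_page_size = 65536;	/* assumed stolen-region minimum */
	uint32_t fb_base = 0x12345, fb_size = 0x9000;	/* from BIOS fb config */

	uint32_t base = rnd_down(fb_base, GTT_MIN_ALIGNMENT);
	uint32_t size = rnd_up(fb_base + fb_size, min_page_size) - base;

	printf("base=0x%x size=0x%x\n", base, size);	/* base=0x12000 size=0xe000 */
	return 0;
}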
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index a1a663f362e7..2e0b092f4b6b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1063,31 +1063,28 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
intel_dp->psr.active = true;
}
-static void intel_psr_enable_source(struct intel_dp *intel_dp)
+static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
+{
+ switch (intel_dp->psr.pipe) {
+ case PIPE_A:
+ return LATENCY_REPORTING_REMOVED_PIPE_A;
+ case PIPE_B:
+ return LATENCY_REPORTING_REMOVED_PIPE_B;
+ case PIPE_C:
+ return LATENCY_REPORTING_REMOVED_PIPE_C;
+ default:
+ MISSING_CASE(intel_dp->psr.pipe);
+ return 0;
+ }
+}
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
- if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
- i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
- u32 chicken = intel_de_read(dev_priv, reg);
-
- chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
- PSR2_ADD_VERTICAL_LINE_COUNT;
- intel_de_write(dev_priv, reg, chicken);
- }
-
- /*
- * Wa_16014451276:adlp
- * All supported adlp panels have 1-based X granularity, this may
- * cause issues if non-supported panels are used.
- */
- if (IS_ALDERLAKE_P(dev_priv) &&
- intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
-
/*
* Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
* mask LPSP to avoid dependency on other drivers that might block
@@ -1126,18 +1123,47 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
- intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK,
- TRANS_SET_CONTEXT_LATENCY_VALUE(1));
+ if (intel_dp->psr.psr2_enabled) {
+ if (DISPLAY_VER(dev_priv) == 9)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ PSR2_VSC_ENABLE_PROG_HEADER |
+ PSR2_ADD_VERTICAL_LINE_COUNT);
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
- CLKGATE_DIS_MISC_DMASC_GATING_DIS);
+ /*
+ * Wa_16014451276:adlp
+ * All supported adlp panels have 1-based X granularity; this may
+ * cause issues if unsupported panels are used.
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ ADLP_1_BASED_X_GRANULARITY);
+
+ /* Wa_16011168373:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK,
+ TRANS_SET_CONTEXT_LATENCY_VALUE(1));
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS);
+
+ /* Wa_16013835468:tgl[b0+], dg1 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
+ IS_DG1(dev_priv)) {
+ u16 vtotal, vblank;
+
+ vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
+ crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
+ crtc_state->uapi.adjusted_mode.crtc_vblank_start;
+ if (vblank > vtotal)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
+ wa_16013835468_bit_get(intel_dp));
+ }
+ }
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1202,7 +1228,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
- intel_psr_enable_source(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
intel_dp->psr.paused = false;
@@ -1290,17 +1316,24 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
- intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK, 0);
-
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
- CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+ if (intel_dp->psr.psr2_enabled) {
+ /* Wa_16011168373:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK, 0);
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+
+ /* Wa_16013835468:tgl[b0+], dg1 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
+ IS_DG1(dev_priv))
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ wa_16013835468_bit_get(intel_dp), 0);
+ }
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
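
wa_16013835468_bit_get() centralizes the pipe-to-bit mapping so the enable and disable paths set and clear the same GEN8_CHICKEN_DCPR_1 bit. A compilable sketch of the pattern; the bit positions below are placeholders, not the real register layout:

#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

static uint32_t latency_reporting_removed_bit(enum pipe p)
{
	switch (p) {
	case PIPE_A: return 1u << 0;	/* placeholder bit positions */
	case PIPE_B: return 1u << 1;
	case PIPE_C: return 1u << 2;
	default:
		fprintf(stderr, "missing case: %d\n", p);
		return 0;	/* a zero mask turns the rmw into a no-op */
	}
}

int main(void)
{
	printf("0x%x\n", latency_reporting_removed_bit(PIPE_B));	/* 0x2 */
	return 0;
}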
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 8573a458811a..7e6245b97fed 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -32,10 +32,10 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
if (!intel_phy_is_snps(i915, phy))
continue;
- if (intel_de_wait_for_clear(i915, ICL_PHY_MISC(phy),
+ if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
drm_err(&i915->drm, "SNPS PHY %c failed to calibrate after 25ms.\n",
- phy);
+ phy_name(phy));
}
}
@@ -251,197 +251,6 @@ static const struct intel_mpllb_state * const dg2_dp_100_tables[] = {
};
/*
- * Basic DP link rates with 38.4 MHz reference clock.
- */
-
-static const struct intel_mpllb_state dg2_dp_rbr_38_4 = {
- .clock = 162000,
- .ref_control =
- REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
- .mpllb_cp =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
- .mpllb_div =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
- .mpllb_div2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 304),
- .mpllb_fracn1 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
- .mpllb_fracn2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 49152),
-};
-
-static const struct intel_mpllb_state dg2_dp_hbr1_38_4 = {
- .clock = 270000,
- .ref_control =
- REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
- .mpllb_cp =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
- .mpllb_div =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
- .mpllb_div2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 248),
- .mpllb_fracn1 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
- .mpllb_fracn2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40960),
-};
-
-static const struct intel_mpllb_state dg2_dp_hbr2_38_4 = {
- .clock = 540000,
- .ref_control =
- REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
- .mpllb_cp =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
- .mpllb_div =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
- .mpllb_div2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 248),
- .mpllb_fracn1 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
- .mpllb_fracn2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40960),
-};
-
-static const struct intel_mpllb_state dg2_dp_hbr3_38_4 = {
- .clock = 810000,
- .ref_control =
- REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
- .mpllb_cp =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
- .mpllb_div =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
- .mpllb_div2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 388),
- .mpllb_fracn1 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
- .mpllb_fracn2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 61440),
-};
-
-static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = {
- .clock = 1000000,
- .ref_control =
- REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
- .mpllb_cp =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
- .mpllb_div =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
- .mpllb_div2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 488),
- .mpllb_fracn1 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 3),
- .mpllb_fracn2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 27306),
-
- /*
- * SSC will be enabled, DP UHBR has a minimum SSC requirement.
- */
- .mpllb_sscen =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 76800),
- .mpllb_sscstep =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 129024),
-};
-
-static const struct intel_mpllb_state dg2_dp_uhbr13_38_4 = {
- .clock = 1350000,
- .ref_control =
- REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
- .mpllb_cp =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 56) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
- .mpllb_div =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3),
- .mpllb_div2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 670),
- .mpllb_fracn1 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
- .mpllb_fracn2 =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36864),
-
- /*
- * SSC will be enabled, DP UHBR has a minimum SSC requirement.
- */
- .mpllb_sscen =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
- REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 103680),
- .mpllb_sscstep =
- REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 174182),
-};
-
-static const struct intel_mpllb_state * const dg2_dp_38_4_tables[] = {
- &dg2_dp_rbr_38_4,
- &dg2_dp_hbr1_38_4,
- &dg2_dp_hbr2_38_4,
- &dg2_dp_hbr3_38_4,
- &dg2_dp_uhbr10_38_4,
- &dg2_dp_uhbr13_38_4,
- NULL,
-};
-
-/*
* eDP link rates with 100 MHz reference clock.
*/
@@ -749,22 +558,7 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
return dg2_edp_tables;
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- /*
- * FIXME: Initially we're just enabling the "combo" outputs on
- * port A-D. The MPLLB for those ports takes an input from the
- * "Display Filter PLL" which always has an output frequency
- * of 100 MHz, hence the use of the _100 tables below.
- *
- * Once we enable port TC1 it will either use the same 100 MHz
- * "Display Filter PLL" (when strapped to support a native
- * display connection) or different 38.4 MHz "Filter PLL" when
- * strapped to support a USB connection, so we'll need to check
- * that to determine which table to use.
- */
- if (0)
- return dg2_dp_38_4_tables;
- else
- return dg2_dp_100_tables;
+ return dg2_dp_100_tables;
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
return dg2_hdmi_tables;
}
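
The 38.4 MHz tables are dropped because the combo outputs always feed the MPLLB from the 100 MHz Display Filter PLL. The surviving tables keep the same shape: NULL-terminated arrays of state pointers, searched by link clock. A self-contained sketch of that lookup, with made-up entries:

#include <stddef.h>
#include <stdio.h>

struct mpllb_state { int clock; /* kHz; real entries carry register fields too */ };

static const struct mpllb_state rbr  = { .clock = 162000 };
static const struct mpllb_state hbr1 = { .clock = 270000 };
static const struct mpllb_state *const dp_tables[] = { &rbr, &hbr1, NULL };

static const struct mpllb_state *mpllb_lookup(int clock)
{
	for (int i = 0; dp_tables[i]; i++)
		if (dp_tables[i]->clock == clock)
			return dp_tables[i];
	return NULL;
}

int main(void)
{
	printf("%s\n", mpllb_lookup(270000) ? "found" : "not found");
	return 0;
}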
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index feead08ddf8f..fc037c027ea5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -693,6 +693,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
+ intel_wakeref_t tc_cold_wref;
+ enum intel_display_power_domain domain;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
@@ -704,12 +706,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
- if (active_links) {
- enum intel_display_power_domain domain;
- intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ tc_cold_wref = tc_cold_block(dig_port, &domain);
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
@@ -718,10 +719,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
&dig_port->tc_lock_power_domain);
-
- tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ } else {
+ /*
+ * TBT-alt is the default mode whenever the PHY ownership is not
+ * held (regardless of the sink's connected live state), so
+ * we'll just switch from it to disconnected mode here without
+ * logging a note.
+ */
+ if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+ icl_tc_phy_disconnect(dig_port);
}
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
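
The sanitize path now brackets the whole mode readout with tc_cold_block()/tc_cold_unblock() instead of doing so only on the active-links branch. A toy model of that acquire/operate/release shape, with a counter standing in for the wakeref:

#include <stdio.h>

static int blocked;	/* stand-in for the TC-cold wakeref */

static int tc_cold_block(void) { return ++blocked; }
static void tc_cold_unblock(int wref) { (void)wref; --blocked; }

static void sanitize(int active_links)
{
	int wref = tc_cold_block();

	/* mode readout happens here, valid on both paths */
	if (active_links)
		printf("keep mode, %d active link(s), blocked=%d\n",
		       active_links, blocked);
	else
		printf("disconnect PHY, blocked=%d\n", blocked);

	tc_cold_unblock(wref);
}

int main(void)
{
	sanitize(0);
	sanitize(1);
	return 0;
}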
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index a39d6cfea87a..b9397d9363c5 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -162,6 +162,14 @@ struct bdb_general_features {
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 dp_ssc_dongle_supported:1;
u8 rsvd11:2; /* finish byte */
+
+ /* bits 6 */
+ u8 tc_hpd_retry_timeout:7; /* 242 */
+ u8 rsvd12:1;
+
+ /* bits 7 */
+ u8 afc_startup_config:2; /* 249 */
+ u8 rsvd13:6;
} __packed;
/*
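
The new fields extend the packed bitfield struct that mirrors the VBT byte layout. A userspace sketch of decoding one such byte; note that C bitfield ordering is implementation-defined, so a real parser must match the compiler/ABI the kernel assumes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct feature_byte6 {
	uint8_t tc_hpd_retry_timeout:7;
	uint8_t rsvd:1;
} __attribute__((packed));

int main(void)
{
	uint8_t raw = 0x85;	/* reserved bit set, timeout = 5 */
	struct feature_byte6 f;

	memcpy(&f, &raw, sizeof(f));
	/* On a typical little-endian ABI this prints timeout=5 rsvd=1. */
	printf("timeout=%u rsvd=%u\n", f.tc_hpd_retry_timeout, f.rsvd);
	return 0;
}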
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3faea903b9ae..545eff5bf158 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -1107,18 +1107,6 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran
ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2;
}
-struct intel_crtc *
-intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc)
-{
- return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1);
-}
-
-static struct intel_crtc *
-intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc)
-{
- return intel_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1);
-}
-
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1126,7 +1114,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
u32 dss_ctl1_val = 0;
if (crtc_state->bigjoiner && !crtc_state->dsc.compression_enable) {
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
dss_ctl1_val |= UNCOMPRESSED_JOINER_SLAVE;
else
dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER;
@@ -1154,7 +1142,7 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->bigjoiner) {
dss_ctl1_val |= BIG_JOINER_ENABLE;
- if (!crtc_state->bigjoiner_slave)
+ if (!intel_crtc_is_bigjoiner_slave(crtc_state))
dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE;
}
intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
@@ -1174,25 +1162,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
}
}
-void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dss_ctl1;
-
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder));
- if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) {
- crtc_state->bigjoiner = true;
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
- drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
- } else if (dss_ctl1 & UNCOMPRESSED_JOINER_SLAVE) {
- crtc_state->bigjoiner = true;
- crtc_state->bigjoiner_slave = true;
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
- drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
- }
-}
-
void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1223,18 +1192,6 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
(dss_ctl1 & JOINER_ENABLE);
- if (dss_ctl1 & BIG_JOINER_ENABLE) {
- crtc_state->bigjoiner = true;
-
- if (!(dss_ctl1 & MASTER_BIG_JOINER_ENABLE)) {
- crtc_state->bigjoiner_slave = true;
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
- } else {
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
- }
- drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
- }
-
/* FIXME: add more state readout as needed */
/* PPS1 */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 4ec75f715986..8763f00fa7e2 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -18,7 +18,6 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
-void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 20141f33ed64..0d936f658b3f 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -44,6 +44,7 @@
#include "skl_scaler.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
+#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
/* return pixels in terms of txbyteclkhs */
@@ -1492,7 +1493,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
*/
if (is_vid_mode(intel_dsi) &&
- intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ intel_dsi->video_mode == BURST_MODE) {
intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
@@ -1568,12 +1569,33 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
- if (is_vid_mode(intel_dsi))
- /* Some panels might have resolution which is not a
+ if (is_vid_mode(intel_dsi)) {
+ u32 fmt = intel_dsi->video_frmt_cfg_bits | IP_TG_CONFIG;
+
+ /*
+ * Some panels might have resolution which is not a
* multiple of 64 like 1366 x 768. Enable RANDOM
- * resolution support for such panels by default */
- intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port),
- intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format | IP_TG_CONFIG | RANDOM_DPI_DISPLAY_RESOLUTION);
+ * resolution support for such panels by default.
+ */
+ fmt |= RANDOM_DPI_DISPLAY_RESOLUTION;
+
+ switch (intel_dsi->video_mode) {
+ default:
+ MISSING_CASE(intel_dsi->video_mode);
+ fallthrough;
+ case NON_BURST_SYNC_EVENTS:
+ fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS;
+ break;
+ case NON_BURST_SYNC_PULSE:
+ fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE;
+ break;
+ case BURST_MODE:
+ fmt |= VIDEO_MODE_BURST;
+ break;
+ }
+
+ intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt);
+ }
}
}
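
intel_dsi_prepare() now derives the VIDEO_MODE_* register field from intel_dsi->video_mode through an explicit switch with a logged fallback instead of storing the register encoding directly. A minimal sketch of that enum-to-field mapping; the enum values here are illustrative:

#include <stdint.h>
#include <stdio.h>

enum video_mode { NON_BURST_SYNC_PULSE, NON_BURST_SYNC_EVENTS, BURST_MODE };

static uint32_t video_mode_fmt_bits(enum video_mode m)
{
	switch (m) {
	default:	/* unknown modes degrade to sync events */
	case NON_BURST_SYNC_EVENTS:
		return 2u << 0;
	case NON_BURST_SYNC_PULSE:
		return 1u << 0;
	case BURST_MODE:
		return 3u << 0;
	}
}

int main(void)
{
	printf("0x%x\n", video_mode_fmt_bits(BURST_MODE));	/* 0x3 */
	return 0;
}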
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 1b81797dd02e..df880f44700a 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -32,6 +32,7 @@
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "vlv_dsi_pll.h"
+#include "vlv_dsi_pll_regs.h"
#include "vlv_sideband.h"
static const u16 lfsr_converts[] = {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h
new file mode 100644
index 000000000000..45590e14e54b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_PLL_REGS_H__
+#define __VLV_DSI_PLL_REGS_H__
+
+#include "vlv_dsi_regs.h"
+
+#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
+#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
+#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
+#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+
+#define BXT_MAX_VAR_OUTPUT_KHZ 39500
+
+#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090)
+#define BXT_MIPI1_DIV_SHIFT 26
+#define BXT_MIPI2_DIV_SHIFT 10
+#define BXT_MIPI_DIV_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
+ BXT_MIPI2_DIV_SHIFT)
+
+/* TX control divider to select actual TX clock output from (8x/var) */
+#define BXT_MIPI1_TX_ESCLK_SHIFT 26
+#define BXT_MIPI2_TX_ESCLK_SHIFT 10
+#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
+ BXT_MIPI2_TX_ESCLK_SHIFT)
+#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26)
+#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10)
+#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
+ BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
+#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
+ (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+/* RX upper control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
+#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
+#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_UPPER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21)
+#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5)
+#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+/* 8/3X divider to select the actual 8/3X clock output from 8x */
+#define BXT_MIPI1_8X_BY3_SHIFT 19
+#define BXT_MIPI2_8X_BY3_SHIFT 3
+#define BXT_MIPI_8X_BY3_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \
+ BXT_MIPI2_8X_BY3_SHIFT)
+#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19)
+#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3)
+#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
+ BXT_MIPI2_8X_BY3_DIVIDER_MASK)
+#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
+ (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+/* RX lower control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
+#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
+#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_LOWER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16)
+#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0)
+#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+
+#define RX_DIVIDER_BIT_1_2 0x3
+#define RX_DIVIDER_BIT_3_4 0xC
+
+#define BXT_DSI_PLL_CTL _MMIO(0x161000)
+#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
+#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define BXT_DSIC_16X_BY1 (0 << 10)
+#define BXT_DSIC_16X_BY2 (1 << 10)
+#define BXT_DSIC_16X_BY3 (2 << 10)
+#define BXT_DSIC_16X_BY4 (3 << 10)
+#define BXT_DSIC_16X_MASK (3 << 10)
+#define BXT_DSIA_16X_BY1 (0 << 8)
+#define BXT_DSIA_16X_BY2 (1 << 8)
+#define BXT_DSIA_16X_BY3 (2 << 8)
+#define BXT_DSIA_16X_BY4 (3 << 8)
+#define BXT_DSIA_16X_MASK (3 << 8)
+#define BXT_DSI_FREQ_SEL_SHIFT 8
+#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
+
+#define BXT_DSI_PLL_RATIO_MAX 0x7D
+#define BXT_DSI_PLL_RATIO_MIN 0x22
+#define GLK_DSI_PLL_RATIO_MAX 0x6F
+#define GLK_DSI_PLL_RATIO_MIN 0x22
+#define BXT_DSI_PLL_RATIO_MASK 0xFF
+#define BXT_REF_CLOCK_KHZ 19200
+
+#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
+#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
+#define BXT_DSI_PLL_LOCKED (1 << 30)
+
+#endif /* __VLV_DSI_PLL_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
new file mode 100644
index 000000000000..356e51515346
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -0,0 +1,480 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_REGS_H__
+#define __VLV_DSI_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE 0x60000
+
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
+#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+
+/* BXT MIPI mode configure */
+#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
+#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
+#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
+ _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
+
+#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
+#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
+#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
+ _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
+
+#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
+#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
+#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
+ _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
+
+#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
+#define STAP_SELECT (1 << 0)
+
+#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
+#define HS_IO_CTRL_SELECT (1 << 0)
+
+#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+
+ /* BXT port control */
+#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
+#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
+#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+
+#define DPI_ENABLE (1 << 31) /* A + C */
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_SHIFT 26
+#define DUAL_LINK_MODE_MASK (1 << 26)
+#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
+#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
+#define DITHERING_ENABLE (1 << 25) /* A + C */
+#define FLOPPED_HSTX (1 << 23)
+#define DE_INVERT (1 << 19) /* XXX */
+#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
+#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
+#define AFE_LATCHOUT (1 << 17)
+#define LP_OUTPUT_HOLD (1 << 16)
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define CSB_SHIFT 9
+#define CSB_MASK (3 << 9)
+#define CSB_20MHZ (0 << 9)
+#define CSB_10MHZ (1 << 9)
+#define CSB_40MHZ (2 << 9)
+#define BANDGAP_MASK (1 << 8)
+#define BANDGAP_PNW_CIRCUIT (0 << 8)
+#define BANDGAP_LNC_CIRCUIT (1 << 8)
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
+#define TEARING_EFFECT_SHIFT 2 /* A + C */
+#define TEARING_EFFECT_MASK (3 << 2)
+#define TEARING_EFFECT_OFF (0 << 2)
+#define TEARING_EFFECT_DSI (1 << 2)
+#define TEARING_EFFECT_GPIO (2 << 2)
+#define LANE_CONFIGURATION_SHIFT 0
+#define LANE_CONFIGURATION_MASK (3 << 0)
+#define LANE_CONFIGURATION_4LANE (0 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
+
+#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
+#define TEARING_EFFECT_DELAY_SHIFT 0
+#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
+#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
+#define ULPS_STATE_MASK (3 << 1)
+#define ULPS_STATE_ENTER (2 << 1)
+#define ULPS_STATE_EXIT (1 << 1)
+#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
+#define DEVICE_READY (1 << 0)
+
+#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
+#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
+#define TEARING_EFFECT (1 << 31)
+#define SPL_PKT_SENT_INTERRUPT (1 << 30)
+#define GEN_READ_DATA_AVAIL (1 << 29)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define RX_PROT_VIOLATION (1 << 26)
+#define RX_INVALID_TX_LENGTH (1 << 25)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define LP_RX_TIMEOUT (1 << 22)
+#define HS_TX_TIMEOUT (1 << 21)
+#define DPI_FIFO_UNDERRUN (1 << 20)
+#define LOW_CONTENTION (1 << 19)
+#define HIGH_CONTENTION (1 << 18)
+#define TXDSI_VC_ID_INVALID (1 << 17)
+#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
+#define TXCHECKSUM_ERROR (1 << 15)
+#define TXECC_MULTIBIT_ERROR (1 << 14)
+#define TXECC_SINGLE_BIT_ERROR (1 << 13)
+#define TXFALSE_CONTROL_ERROR (1 << 12)
+#define RXDSI_VC_ID_INVALID (1 << 11)
+#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
+#define RXCHECKSUM_ERROR (1 << 9)
+#define RXECC_MULTIBIT_ERROR (1 << 8)
+#define RXECC_SINGLE_BIT_ERROR (1 << 7)
+#define RXFALSE_CONTROL_ERROR (1 << 6)
+#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RXEOT_SYNC_ERROR (1 << 2)
+#define RXSOT_SYNC_ERROR (1 << 1)
+#define RXSOT_ERROR (1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
+#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
+#define CMD_MODE_NOT_SUPPORTED (0 << 13)
+#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
+#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
+#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
+#define VID_MODE_FORMAT_MASK (0xf << 7)
+#define VID_MODE_NOT_SUPPORTED (0 << 7)
+#define VID_MODE_FORMAT_RGB565 (1 << 7)
+#define VID_MODE_FORMAT_RGB666_PACKED (2 << 7)
+#define VID_MODE_FORMAT_RGB666 (3 << 7)
+#define VID_MODE_FORMAT_RGB888 (4 << 7)
+#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
+#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
+#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
+#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
+#define DATA_LANES_PRG_REG_SHIFT 0
+#define DATA_LANES_PRG_REG_MASK (7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
+#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
+#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
+#define TURN_AROUND_TIMEOUT_MASK 0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
+#define DEVICE_RESET_TIMER_MASK 0xffff
+
+#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
+#define VERTICAL_ADDRESS_SHIFT 16
+#define VERTICAL_ADDRESS_MASK (0xffff << 16)
+#define HORIZONTAL_ADDRESS_SHIFT 0
+#define HORIZONTAL_ADDRESS_MASK 0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
+#define DBI_FIFO_EMPTY_HALF (0 << 0)
+#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
+#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
+
+#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
+#define DPI_LP_MODE (1 << 6)
+#define BACKLIGHT_OFF (1 << 5)
+#define BACKLIGHT_ON (1 << 4)
+#define COLOR_MODE_OFF (1 << 3)
+#define COLOR_MODE_ON (1 << 2)
+#define TURN_ON (1 << 1)
+#define SHUTDOWN (1 << 0)
+
+#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
+#define COMMAND_BYTE_SHIFT 0
+#define COMMAND_BYTE_MASK (0x3f << 0)
+
+#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
+#define MASTER_INIT_TIMER_SHIFT 0
+#define MASTER_INIT_TIMER_MASK (0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
+#define MAX_RETURN_PKT_SIZE_SHIFT 0
+#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
+#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
+#define DISABLE_VIDEO_BTA (1 << 3)
+#define IP_TG_CONFIG (1 << 2)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
+#define VIDEO_MODE_BURST (3 << 0)
+
+#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
+#define BXT_DPHY_DEFEATURE_EN (1 << 8)
+#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
+#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
+#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
+#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
+#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
+#define CLOCKSTOP (1 << 1)
+#define EOT_DISABLE (1 << 0)
+
+#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
+#define LP_BYTECLK_SHIFT 0
+#define LP_BYTECLK_MASK (0xffff << 0)
+
+#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4)
+#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4)
+#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
+
+#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098)
+#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898)
+#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
+#define LONG_PACKET_WORD_COUNT_SHIFT 8
+#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
+#define SHORT_PACKET_PARAM_SHIFT 8
+#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
+#define VIRTUAL_CHANNEL_SHIFT 6
+#define VIRTUAL_CHANNEL_MASK (3 << 6)
+#define DATA_TYPE_SHIFT 0
+#define DATA_TYPE_MASK (0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_FULL (1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
+#define DBI_HS_LP_MODE_MASK (1 << 0)
+#define DBI_LP_MODE (1 << 0)
+#define DBI_HS_MODE (0 << 0)
+
+#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
+#define EXIT_ZERO_COUNT_SHIFT 24
+#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
+#define TRAIL_COUNT_SHIFT 16
+#define TRAIL_COUNT_MASK (0x1f << 16)
+#define CLK_ZERO_COUNT_SHIFT 8
+#define CLK_ZERO_COUNT_MASK (0xff << 8)
+#define PREPARE_COUNT_SHIFT 0
+#define PREPARE_COUNT_MASK (0x3f << 0)
+
+#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
+#define LP_HS_SSW_CNT_SHIFT 16
+#define LP_HS_SSW_CNT_MASK (0xffff << 16)
+#define HS_LP_PWR_SW_CNT_SHIFT 0
+#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
+#define STOP_STATE_STALL_COUNTER_SHIFT 0
+#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
+#define RX_CONTENTION_DETECTED (1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
+#define DBI_TYPEC_ENABLE (1 << 31)
+#define DBI_TYPEC_WIP (1 << 30)
+#define DBI_TYPEC_OPTION_SHIFT 28
+#define DBI_TYPEC_OPTION_MASK (3 << 28)
+#define DBI_TYPEC_FREQ_SHIFT 24
+#define DBI_TYPEC_FREQ_MASK (0xf << 24)
+#define DBI_TYPEC_OVERRIDE (1 << 8)
+#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
+#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
+#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
+#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
+#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
+#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
+#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
+#define READ_REQUEST_PRIORITY_SHIFT 3
+#define READ_REQUEST_PRIORITY_MASK (3 << 3)
+#define READ_REQUEST_PRIORITY_LOW (0 << 3)
+#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
+#define RGB_FLIP_TO_BGR (1 << 2)
+
+#define BXT_PIPE_SELECT_SHIFT 7
+#define BXT_PIPE_SELECT_MASK (7 << 7)
+#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
+#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */
+#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */
+#define GLK_MIPIIO_RESET_RELEASED (1 << 28)
+#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */
+#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */
+#define GLK_LP_WAKE (1 << 22)
+#define GLK_LP11_LOW_PWR_MODE (1 << 21)
+#define GLK_LP00_LOW_PWR_MODE (1 << 20)
+#define GLK_FIREWALL_ENABLE (1 << 16)
+#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10)
+#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10
+#define BXT_DSC_ENABLE (1 << 3)
+#define BXT_RGB_FLIP (1 << 2)
+#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
+#define GLK_MIPIIO_ENABLE (1 << 0)
+
+#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
+#define DATA_MEM_ADDRESS_SHIFT 5
+#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define DATA_VALID (1 << 0)
+
+#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
+#define DATA_LENGTH_SHIFT 0
+#define DATA_LENGTH_MASK (0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
+#define COMMAND_MEM_ADDRESS_SHIFT 5
+#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define AUTO_PWG_ENABLE (1 << 2)
+#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
+#define COMMAND_VALID (1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
+#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
+#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
+#define READ_DATA_VALID(n) (1 << (n))
+
+#endif /* __VLV_DSI_REGS_H__ */
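
Nearly every register in this header comes in a port A/port C pair selected through _MIPI_PORT(). A freestanding sketch of the same macro pattern, reusing the 0xb000/0xb800 offsets from above (the dev_priv->mipi_mmio_base term is omitted here):

#include <stdio.h>

enum port { PORT_A, PORT_C };

#define _MIPI_PORT(port, a, c)	(((port) == PORT_A) ? (a) : (c))

#define _MIPIA_DEVICE_READY	0xb000u
#define _MIPIC_DEVICE_READY	0xb800u
#define MIPI_DEVICE_READY(port) \
	_MIPI_PORT(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)

int main(void)
{
	printf("A: 0x%x  C: 0x%x\n",
	       MIPI_DEVICE_READY(PORT_A), MIPI_DEVICE_READY(PORT_C));
	return 0;
}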
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 8a248003dfae..ce91b23385cf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -4,6 +4,8 @@
* Copyright © 2016 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
#include "display/intel_frontbuffer.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 00327b750fbb..bc6d59df064d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,6 +67,7 @@
#include <linux/log2.h>
#include <linux/nospec.h>
+#include <drm/drm_cache.h>
#include <drm/drm_syncobj.h>
#include "gt/gen6_ppgtt.h"
@@ -79,6 +80,7 @@
#include "pxp/intel_pxp.h"
+#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -343,6 +345,20 @@ static int proto_context_register(struct drm_i915_file_private *fpriv,
return ret;
}
+static struct i915_address_space *
+i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+ struct i915_address_space *vm;
+
+ xa_lock(&file_priv->vm_xa);
+ vm = xa_load(&file_priv->vm_xa, id);
+ if (vm)
+ kref_get(&vm->ref);
+ xa_unlock(&file_priv->vm_xa);
+
+ return vm;
+}
+
static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
const struct drm_i915_gem_context_param *args)
@@ -571,10 +587,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
struct intel_engine_cs **siblings = NULL;
intel_engine_mask_t prev_mask;
- /* FIXME: This is NIY for execlists */
- if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
- return -ENODEV;
-
if (get_user(slot, &ext->engine_index))
return -EFAULT;
@@ -584,6 +596,13 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
if (get_user(num_siblings, &ext->num_siblings))
return -EFAULT;
+ if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
+ num_siblings != 1) {
+ drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
+ num_siblings);
+ return -EINVAL;
+ }
+
if (slot >= set->num_engines) {
drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
slot, set->num_engines);
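
i915_gem_vm_lookup() takes the VM reference while vm_xa is still locked, closing the window where the entry could be freed between xa_load() and kref_get(). A userspace analogue of the same lookup-then-reference pattern, with a mutex-protected table and a plain refcount:

#include <pthread.h>
#include <stdio.h>

struct vm { int refcount; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vm the_vm = { .refcount = 1 };
static struct vm *table[4] = { [2] = &the_vm };

static struct vm *vm_lookup(unsigned int id)
{
	struct vm *vm = NULL;

	pthread_mutex_lock(&table_lock);
	if (id < 4 && table[id]) {
		vm = table[id];
		vm->refcount++;	/* kref_get() equivalent, taken under the lock */
	}
	pthread_mutex_unlock(&table_lock);
	return vm;
}

int main(void)
{
	struct vm *vm = vm_lookup(2);

	printf("%s, refcount=%d\n", vm ? "found" : "missing",
	       vm ? vm->refcount : 0);
	return 0;
}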
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index babfecb17ad1..e5b0f66ea1fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -174,7 +174,7 @@ i915_gem_context_get_eb_vm(struct i915_gem_context *ctx)
vm = ctx->vm;
if (!vm)
- vm = &ctx->i915->ggtt.vm;
+ vm = &to_gt(ctx->i915)->ggtt->vm;
vm = i915_vm_get(vm);
return vm;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 9402d4bf4ffc..c6eb023d3d86 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -3,12 +3,15 @@
* Copyright © 2020 Intel Corporation
*/
+#include <drm/drm_fourcc.h>
+
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.h b/drivers/gpu/drm/i915/gem/i915_gem_create.h
new file mode 100644
index 000000000000..9536aa906001
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CREATE_H__
+#define __I915_GEM_CREATE_H__
+
+struct drm_file;
+struct drm_device;
+struct drm_mode_create_dumb;
+
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+#endif /* __I915_GEM_CREATE_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 14fdb0796c52..13917231ae81 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -11,6 +11,7 @@
#include <asm/smp.h>
+#include "gem/i915_gem_dmabuf.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h
new file mode 100644
index 000000000000..6e0405d47ce1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_GEM_DMABUF_H__
+#define __I915_GEM_DMABUF_H__
+
+struct drm_gem_object;
+struct drm_device;
+struct dma_buf;
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
+
+#endif /* __I915_GEM_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 26532c07d467..3e5d6057b3ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -9,12 +9,13 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
+#include "i915_gem_domain.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
-#include "i915_gem_object.h"
-#include "i915_vma.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+#include "i915_gem_object.h"
+#include "i915_vma.h"
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.h b/drivers/gpu/drm/i915/gem/i915_gem_domain.h
new file mode 100644
index 000000000000..9622df962bfc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_GEM_DOMAIN_H__
+#define __I915_GEM_DOMAIN_H__
+
+struct drm_i915_gem_object;
+enum i915_cache_level;
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
+#endif /* __I915_GEM_DOMAIN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 54cae821513b..d42f437149c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,13 +25,13 @@
#include "i915_cmd_parser.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_evict.h"
#include "i915_gem_ioctls.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "i915_vma_snapshot.h"
struct eb_vma {
struct i915_vma *vma;
@@ -443,7 +443,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
else
pin_flags = entry->offset & PIN_OFFSET_MASK;
- pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
+ pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED | PIN_VALIDATE;
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
@@ -461,17 +461,15 @@ eb_pin_vma(struct i915_execbuffer *eb,
entry->pad_to_size,
entry->alignment,
eb_pin_flags(entry, ev->flags) |
- PIN_USER | PIN_NOEVICT);
+ PIN_USER | PIN_NOEVICT | PIN_VALIDATE);
if (unlikely(err))
return err;
}
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
- if (unlikely(err)) {
- i915_vma_unpin(vma);
+ if (unlikely(err))
return err;
- }
if (vma->fence)
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -487,13 +485,9 @@ eb_pin_vma(struct i915_execbuffer *eb,
static inline void
eb_unreserve_vma(struct eb_vma *ev)
{
- if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
- return;
-
if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
__i915_vma_unpin_fence(ev->vma);
- __i915_vma_unpin(ev->vma);
ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
@@ -675,10 +669,8 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
- if (unlikely(err)) {
- i915_vma_unpin(vma);
+ if (unlikely(err))
return err;
- }
if (vma->fence)
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -690,85 +682,95 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
return 0;
}
-static int eb_reserve(struct i915_execbuffer *eb)
+static bool eb_unbind(struct i915_execbuffer *eb, bool force)
{
const unsigned int count = eb->buffer_count;
- unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
+ unsigned int i;
struct list_head last;
+ bool unpinned = false;
+
+ /* Resort *all* the objects into priority order */
+ INIT_LIST_HEAD(&eb->unbound);
+ INIT_LIST_HEAD(&last);
+
+ for (i = 0; i < count; i++) {
+ struct eb_vma *ev = &eb->vma[i];
+ unsigned int flags = ev->flags;
+
+ if (!force && flags & EXEC_OBJECT_PINNED &&
+ flags & __EXEC_OBJECT_HAS_PIN)
+ continue;
+
+ unpinned = true;
+ eb_unreserve_vma(ev);
+
+ if (flags & EXEC_OBJECT_PINNED)
+ /* Pinned objects must keep their slot */
+ list_add(&ev->bind_link, &eb->unbound);
+ else if (flags & __EXEC_OBJECT_NEEDS_MAP)
+ /* Map requires the lowest 256MiB (aperture) */
+ list_add_tail(&ev->bind_link, &eb->unbound);
+ else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+ /* Prioritise 4GiB region for restricted bo */
+ list_add(&ev->bind_link, &last);
+ else
+ list_add_tail(&ev->bind_link, &last);
+ }
+
+ list_splice_tail(&last, &eb->unbound);
+ return unpinned;
+}
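[Editor's sketch] The bucketing above is a common list idiom: list_add() front-inserts entries that must bind first, list_add_tail() appends ordinary ones, and a secondary list is spliced behind everything else. A reduced, self-contained sketch (three priority classes instead of four):

#include <linux/list.h>

struct item {
	struct list_head link;
	int prio;	/* 2 = must keep its slot, 1 = ordinary, 0 = bind last */
};

static void sort_into(struct list_head *unbound, struct item *items, int count)
{
	LIST_HEAD(last);
	int i;

	INIT_LIST_HEAD(unbound);
	for (i = 0; i < count; i++) {
		struct item *it = &items[i];

		if (it->prio == 2)
			list_add(&it->link, unbound);		/* front: binds first */
		else if (it->prio == 1)
			list_add_tail(&it->link, unbound);
		else
			list_add_tail(&it->link, &last);
	}
	list_splice_tail(&last, unbound);	/* least constrained entries bind last */
}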
+
+static int eb_reserve(struct i915_execbuffer *eb)
+{
struct eb_vma *ev;
- unsigned int i, pass;
+ unsigned int pass;
int err = 0;
+ bool unpinned;
/*
* Attempt to pin all of the buffers into the GTT.
- * This is done in 3 phases:
+ * This is done in 2 phases:
*
- * 1a. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 1b. Increment pin count for already bound objects.
- * 2. Bind new objects.
- * 3. Decrement pin count.
+ * 1. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 2. Bind new objects.
*
* This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
+ *
+ * Defragmenting is skipped if all objects are pinned at a fixed location.
*/
- pass = 0;
- do {
- list_for_each_entry(ev, &eb->unbound, bind_link) {
- err = eb_reserve_vma(eb, ev, pin_flags);
- if (err)
- break;
- }
- if (err != -ENOSPC)
- return err;
+ for (pass = 0; pass <= 2; pass++) {
+ int pin_flags = PIN_USER | PIN_VALIDATE;
- /* Resort *all* the objects into priority order */
- INIT_LIST_HEAD(&eb->unbound);
- INIT_LIST_HEAD(&last);
- for (i = 0; i < count; i++) {
- unsigned int flags;
+ if (pass == 0)
+ pin_flags |= PIN_NONBLOCK;
- ev = &eb->vma[i];
- flags = ev->flags;
- if (flags & EXEC_OBJECT_PINNED &&
- flags & __EXEC_OBJECT_HAS_PIN)
- continue;
+ if (pass >= 1)
+ unpinned = eb_unbind(eb, pass == 2);
- eb_unreserve_vma(ev);
-
- if (flags & EXEC_OBJECT_PINNED)
- /* Pinned must have their slot */
- list_add(&ev->bind_link, &eb->unbound);
- else if (flags & __EXEC_OBJECT_NEEDS_MAP)
- /* Map require the lowest 256MiB (aperture) */
- list_add_tail(&ev->bind_link, &eb->unbound);
- else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- /* Prioritise 4GiB region for restricted bo */
- list_add(&ev->bind_link, &last);
- else
- list_add_tail(&ev->bind_link, &last);
- }
- list_splice_tail(&last, &eb->unbound);
-
- switch (pass++) {
- case 0:
- break;
-
- case 1:
- /* Too fragmented, unbind everything and retry */
- mutex_lock(&eb->context->vm->mutex);
- err = i915_gem_evict_vm(eb->context->vm);
- mutex_unlock(&eb->context->vm->mutex);
+ if (pass == 2) {
+ err = mutex_lock_interruptible(&eb->context->vm->mutex);
+ if (!err) {
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+ mutex_unlock(&eb->context->vm->mutex);
+ }
if (err)
return err;
- break;
+ }
- default:
- return -ENOSPC;
+ list_for_each_entry(ev, &eb->unbound, bind_link) {
+ err = eb_reserve_vma(eb, ev, pin_flags);
+ if (err)
+ break;
}
- pin_flags = PIN_USER;
- } while (1);
+ if (err != -ENOSPC)
+ break;
+ }
+
+ return err;
}
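[Editor's sketch] Control flow of the new eb_reserve() in isolation: pass 0 tries non-blocking pins, pass 1 unbinds mismatched objects, pass 2 additionally evicts the whole VM. A compilable sketch with hypothetical stand-ins for eb_unbind(), i915_gem_evict_vm() and the reserve loop:

#include <linux/errno.h>
#include <linux/types.h>

static bool unbind_mismatched(bool force) { return force; }	/* hypothetical */
static int evict_everything(void) { return 0; }			/* hypothetical */
static int try_bind_all(bool nonblock) { return nonblock ? -ENOSPC : 0; }

static int reserve_with_escalation(void)
{
	int pass, err = 0;

	for (pass = 0; pass <= 2; pass++) {
		if (pass >= 1)
			unbind_mismatched(pass == 2);	/* pass 2: even pinned slots */
		if (pass == 2) {
			err = evict_everything();	/* last resort before final try */
			if (err)
				return err;
		}
		err = try_bind_all(pass == 0);		/* only pass 0 is non-blocking */
		if (err != -ENOSPC)
			break;	/* success, or an error more eviction cannot fix */
	}
	return err;
}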
static int eb_select_context(struct i915_execbuffer *eb)
@@ -1097,7 +1099,7 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
struct drm_i915_private *i915 =
container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
- return &i915->ggtt;
+ return to_gt(i915)->ggtt;
}
static void reloc_cache_unmap(struct reloc_cache *cache)
@@ -1216,10 +1218,11 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
return vaddr;
}
-static void *reloc_iomap(struct drm_i915_gem_object *obj,
+static void *reloc_iomap(struct i915_vma *batch,
struct i915_execbuffer *eb,
unsigned long page)
{
+ struct drm_i915_gem_object *obj = batch->obj;
struct reloc_cache *cache = &eb->reloc_cache;
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
unsigned long offset;
@@ -1229,7 +1232,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
- struct i915_vma *vma;
+ struct i915_vma *vma = ERR_PTR(-ENODEV);
int err;
if (i915_gem_object_is_tiled(obj))
@@ -1242,10 +1245,23 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (err)
return ERR_PTR(err);
- vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
+ /*
+ * i915_gem_object_ggtt_pin_ww may attempt to remove the batch
+ * VMA from the object list because we no longer pin.
+ *
+ * Only attempt to pin the batch buffer to ggtt if the current batch
+ * is not inside ggtt, or the batch buffer is not misplaced.
+ */
+ if (!i915_is_ggtt(batch->vm)) {
+ vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ } else if (i915_vma_is_map_and_fenceable(batch)) {
+ __i915_vma_pin(batch);
+ vma = batch;
+ }
+
if (vma == ERR_PTR(-EDEADLK))
return vma;
@@ -1283,7 +1299,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
return vaddr;
}
-static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+static void *reloc_vaddr(struct i915_vma *vma,
struct i915_execbuffer *eb,
unsigned long page)
{
@@ -1295,9 +1311,9 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
} else {
vaddr = NULL;
if ((cache->vaddr & KMAP) == 0)
- vaddr = reloc_iomap(obj, eb, page);
+ vaddr = reloc_iomap(vma, eb, page);
if (!vaddr)
- vaddr = reloc_kmap(obj, cache, page);
+ vaddr = reloc_kmap(vma->obj, cache, page);
}
return vaddr;
@@ -1338,7 +1354,7 @@ relocate_entry(struct i915_vma *vma,
void *vaddr;
repeat:
- vaddr = reloc_vaddr(vma->obj, eb,
+ vaddr = reloc_vaddr(vma, eb,
offset >> PAGE_SHIFT);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -1413,7 +1429,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
mutex_lock(&vma->vm->mutex);
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
- PIN_GLOBAL, NULL);
+ PIN_GLOBAL, NULL, NULL);
mutex_unlock(&vma->vm->mutex);
reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
if (err)
@@ -1943,7 +1959,6 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
unsigned int i = count, j;
- struct i915_vma_snapshot *vsnap;
while (i--) {
struct eb_vma *ev = &eb->vma[i];
@@ -1953,11 +1968,6 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
if (!(flags & EXEC_OBJECT_CAPTURE))
continue;
- vsnap = i915_vma_snapshot_alloc(GFP_KERNEL);
- if (!vsnap)
- continue;
-
- i915_vma_snapshot_init(vsnap, vma, "user");
for_each_batch_create_order(eb, j) {
struct i915_capture_list *capture;
@@ -1966,10 +1976,9 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
continue;
capture->next = eb->capture_lists[j];
- capture->vma_snapshot = i915_vma_snapshot_get(vsnap);
+ capture->vma_res = i915_vma_resource_get(vma->resource);
eb->capture_lists[j] = capture;
}
- i915_vma_snapshot_put(vsnap);
}
}
@@ -2200,7 +2209,7 @@ shadow_batch_pin(struct i915_execbuffer *eb,
if (IS_ERR(vma))
return vma;
- err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
+ err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags | PIN_VALIDATE);
if (err)
return ERR_PTR(err);
@@ -2214,7 +2223,7 @@ static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i9
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (eb->batch_flags & I915_DISPATCH_SECURE)
- return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);
+ return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, PIN_VALIDATE);
return NULL;
}
@@ -2265,13 +2274,12 @@ static int eb_parse(struct i915_execbuffer *eb)
err = i915_gem_object_lock(pool->obj, &eb->ww);
if (err)
- goto err;
+ return err;
shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
- if (IS_ERR(shadow)) {
- err = PTR_ERR(shadow);
- goto err;
- }
+ if (IS_ERR(shadow))
+ return PTR_ERR(shadow);
+
intel_gt_buffer_pool_mark_used(pool);
i915_gem_object_set_readonly(shadow->obj);
shadow->private = pool;
@@ -2283,25 +2291,21 @@ static int eb_parse(struct i915_execbuffer *eb)
shadow = shadow_batch_pin(eb, pool->obj,
&eb->gt->ggtt->vm,
PIN_GLOBAL);
- if (IS_ERR(shadow)) {
- err = PTR_ERR(shadow);
- shadow = trampoline;
- goto err_shadow;
- }
+ if (IS_ERR(shadow))
+ return PTR_ERR(shadow);
+
shadow->private = pool;
eb->batch_flags |= I915_DISPATCH_SECURE;
}
batch = eb_dispatch_secure(eb, shadow);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_trampoline;
- }
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
if (err)
- goto err_trampoline;
+ return err;
err = intel_engine_cmd_parser(eb->context->engine,
eb->batches[0]->vma,
@@ -2309,7 +2313,7 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->batch_len[0],
shadow, trampoline);
if (err)
- goto err_unpin_batch;
+ return err;
eb->batches[0] = &eb->vma[eb->buffer_count++];
eb->batches[0]->vma = i915_vma_get(shadow);
@@ -2328,17 +2332,6 @@ secure_batch:
eb->batches[0]->vma = i915_vma_get(batch);
}
return 0;
-
-err_unpin_batch:
- if (batch)
- i915_vma_unpin(batch);
-err_trampoline:
- if (trampoline)
- i915_vma_unpin(trampoline);
-err_shadow:
- i915_vma_unpin(shadow);
-err:
- return err;
}
static int eb_request_submit(struct i915_execbuffer *eb,
@@ -3277,9 +3270,8 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
* _onstack interface.
*/
if (eb->batches[i]->vma)
- i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot,
- eb->batches[i]->vma,
- "batch");
+ eb->requests[i]->batch_res =
+ i915_vma_resource_get(eb->batches[i]->vma->resource);
if (eb->batch_pool) {
GEM_BUG_ON(intel_context_is_parallel(eb->context));
intel_gt_buffer_pool_mark_active(eb->batch_pool,
@@ -3464,8 +3456,6 @@ err_request:
err_vma:
eb_release_vmas(&eb, true);
- if (eb.trampoline)
- i915_vma_unpin(eb.trampoline);
WARN_ON(err == -EDEADLK);
i915_gem_ww_ctx_fini(&eb.ww);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index c5150a1ee3d2..c698f95af15f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "i915_gem.h"
+#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.h b/drivers/gpu/drm/i915/gem/i915_gem_internal.h
new file mode 100644
index 000000000000..6664e06112fc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_GEM_INTERNAL_H__
+#define __I915_GEM_INTERNAL_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct drm_i915_gem_object_ops;
+struct drm_i915_private;
+
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ phys_addr_t size);
+struct drm_i915_gem_object *
+__i915_gem_object_create_internal(struct drm_i915_private *i915,
+ const struct drm_i915_gem_object_ops *ops,
+ phys_addr_t size);
+
+#endif /* __I915_GEM_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 1478c02a82cb..efe69d6b86f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -9,10 +9,13 @@
#include <linux/pfn_t.h>
#include <linux/sizes.h>
+#include <drm/drm_cache.h>
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
+#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -295,7 +298,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_runtime_pm *rpm = &i915->runtime_pm;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
intel_wakeref_t wakeref;
@@ -358,8 +361,21 @@ retry:
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
- /* The entire mappable GGTT is pinned? Unexpected! */
- GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
+ /*
+ * The entire mappable GGTT is pinned? Unexpected!
+ * Try to evict the object we locked too, as normally we skip it
+ * due to lack of short term pinning inside execbuf.
+ */
+ if (vma == ERR_PTR(-ENOSPC)) {
+ ret = mutex_lock_interruptible(&ggtt->vm.mutex);
+ if (!ret) {
+ ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+ mutex_unlock(&ggtt->vm.mutex);
+ }
+ if (ret)
+ goto err_reset;
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
+ }
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
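[Editor's sketch] The fault path now treats -ENOSPC from the pin as recoverable: evict the GGTT under its mutex, then retry the pin exactly once. The shape in isolation (hypothetical helpers; locking reduced to a single call):

#include <linux/errno.h>

static int pin_once(int attempt) { return attempt ? 0 : -ENOSPC; }	/* hypothetical */
static int evict_vm_locked(void) { return 0; }				/* hypothetical */

static int pin_with_evict_fallback(void)
{
	int ret = pin_once(0);

	if (ret == -ENOSPC) {
		ret = evict_vm_locked();	/* make room: evict the whole GGTT */
		if (ret)
			return ret;
		ret = pin_once(1);		/* single retry after eviction */
	}
	return ret;
}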
@@ -388,16 +404,16 @@ retry:
assert_rpm_wakelock_held(rpm);
/* Mark as being mmapped into userspace for later revocation */
- mutex_lock(&i915->ggtt.vm.mutex);
+ mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
- list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
- mutex_unlock(&i915->ggtt.vm.mutex);
+ list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
+ mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
/* Track the mmo associated with the fenced vma */
vma->mmo = mmo;
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
- intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
+ intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
if (write) {
@@ -512,7 +528,7 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
* wakeref.
*/
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- mutex_lock(&i915->ggtt.vm.mutex);
+ mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
if (!obj->userfault_count)
goto out;
@@ -530,7 +546,7 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
wmb();
out:
- mutex_unlock(&i915->ggtt.vm.mutex);
+ mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
@@ -736,13 +752,14 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
u32 handle,
u64 *offset)
{
+ struct drm_i915_private *i915 = to_i915(dev);
enum i915_mmap_type mmap_type;
if (HAS_LMEM(to_i915(dev)))
mmap_type = I915_MMAP_TYPE_FIXED;
else if (pat_enabled())
mmap_type = I915_MMAP_TYPE_WC;
- else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+ else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return -ENODEV;
else
mmap_type = I915_MMAP_TYPE_GTT;
@@ -790,7 +807,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
switch (args->flags) {
case I915_MMAP_OFFSET_GTT:
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return -ENODEV;
type = I915_MMAP_TYPE_GTT;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d87b508b59b1..2d593d573ef1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -24,11 +24,16 @@
#include <linux/sched/mm.h>
+#include <drm/drm_cache.h>
+
#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"
+
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
@@ -280,6 +285,12 @@ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
+ /* Verify that the vma is unbound under the vm mutex. */
+ mutex_lock(&vma->vm->mutex);
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ __i915_vma_unbind(vma);
+ mutex_unlock(&vma->vm->mutex);
+
__i915_vma_put(vma);
spin_lock(&obj->vma.lock);
@@ -756,6 +767,18 @@ i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
return dma_fence_get(i915_gem_to_ttm(obj)->moving);
}
+void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence)
+{
+ struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
+
+ if (*moving == fence)
+ return;
+
+ dma_fence_put(*moving);
+ *moving = dma_fence_get(fence);
+}
+
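[Editor's sketch] Both dma_fence_get() and dma_fence_put() tolerate NULL, and the early return above covers self-assignment, so the put-before-get ordering is safe. Restated as a generic slot-replacement helper (replace_fence() is an illustrative name):

#include <linux/dma-fence.h>

static void replace_fence(struct dma_fence **slot, struct dma_fence *fence)
{
	if (*slot == fence)	/* self-assignment: avoid put/get on the same fence */
		return;

	dma_fence_put(*slot);		/* NULL-safe */
	*slot = dma_fence_get(fence);	/* NULL-safe; returns its argument */
}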
/**
* i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
* @obj: The object whose moving fence to wait for.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index f66d46882ea7..02c37fe4a535 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -459,7 +459,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
@@ -524,6 +523,9 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
struct dma_fence *
i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence);
+
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0dd107dcecc2..0098a32490f0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -15,6 +15,7 @@
#include "i915_active.h"
#include "i915_selftest.h"
+#include "i915_vma_resource.h"
struct drm_i915_gem_object;
struct intel_fronbuffer;
@@ -57,10 +58,26 @@ struct drm_i915_gem_object_ops {
void (*put_pages)(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int (*truncate)(struct drm_i915_gem_object *obj);
- void (*writeback)(struct drm_i915_gem_object *obj);
- int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
- bool no_gpu_wait,
- bool should_writeback);
+ /**
+ * shrink - Perform further backend-specific actions to facilitate
+ * shrinking.
+ * @obj: The gem object
+ * @flags: Extra flags to control shrinking behaviour in the backend
+ *
+ * Possible values for @flags:
+ *
+ * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
+ * backing pages, if supported.
+ *
+ * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
+ * idle. Active objects can be considered later. The TTM backend for
+ * example might have async migrations going on, which don't use any
+ * i915_vma to track the active GTT binding, and hence having an unbound
+ * object might not be enough.
+ */
+#define I915_GEM_OBJECT_SHRINK_WRITEBACK BIT(0)
+#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT BIT(1)
+ int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);
int (*pread)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *arg);
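[Editor's sketch] A backend wires those two bits into whatever waiting and writeback machinery it has; the shmem and TTM conversions appear further down in this patch. A minimal hedged sketch of an op honouring both flags (all my_* names hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

struct drm_i915_gem_object;	/* opaque for this sketch */

static bool my_is_busy(struct drm_i915_gem_object *obj) { return false; }
static int my_writeback(struct drm_i915_gem_object *obj) { return 0; }

static int my_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	/* NO_GPU_WAIT: never stall on an active object, let the shrinker move on */
	if ((flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT) && my_is_busy(obj))
		return -EBUSY;

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		return my_writeback(obj);	/* push backing pages to swap */

	return 0;
}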
@@ -551,31 +568,7 @@ struct drm_i915_gem_object {
struct sg_table *pages;
void *mapping;
- struct i915_page_sizes {
- /**
- * The sg mask of the pages sg_table. i.e the mask of
- * of the lengths for each sg entry.
- */
- unsigned int phys;
-
- /**
- * The gtt page sizes we are allowed to use given the
- * sg mask and the supported page sizes. This will
- * express the smallest unit we can use for the whole
- * object, as well as the larger sizes we may be able
- * to use opportunistically.
- */
- unsigned int sg;
-
- /**
- * The actual gtt page size usage. Since we can have
- * multiple vma associated with this object we need to
- * prevent any trampling of state, hence a copy of this
- * struct also lives in each vma, therefore the gtt
- * value here should only be read/write through the vma.
- */
- unsigned int gtt;
- } page_sizes;
+ struct i915_page_sizes page_sizes;
I915_SELFTEST_DECLARE(unsigned int page_mask);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index a50f884973bc..183b861620b8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -4,6 +4,8 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
@@ -169,16 +171,6 @@ int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
return 0;
}
-/* Try to discard unwanted pages */
-void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
-{
- assert_object_held_shared(obj);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
- if (obj->ops->writeback)
- obj->ops->writeback(obj);
-}
-
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index ac56124760e1..00359ec9d58b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -10,6 +10,7 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#if defined(CONFIG_X86)
@@ -23,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
- intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
+ intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
flush_workqueue(i915->wq);
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index cc9fe258fba7..4efa821f3cb1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -5,8 +5,11 @@
*/
#include <linux/pagevec.h>
+#include <linux/shmem_fs.h>
#include <linux/swap.h>
+#include <drm/drm_cache.h>
+
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
@@ -331,6 +334,21 @@ shmem_writeback(struct drm_i915_gem_object *obj)
__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}
+static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
+{
+ switch (obj->mm.madv) {
+ case I915_MADV_DONTNEED:
+ return i915_gem_object_truncate(obj);
+ case __I915_MADV_PURGED:
+ return 0;
+ }
+
+ if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
+ shmem_writeback(obj);
+
+ return 0;
+}
+
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -503,7 +521,7 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.get_pages = shmem_get_pages,
.put_pages = shmem_put_pages,
.truncate = shmem_truncate,
- .writeback = shmem_writeback,
+ .shrink = shmem_shrink,
.pwrite = shmem_pwrite,
.pread = shmem_pread,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index cc927e49d21f..6a6ff98a8746 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -57,21 +57,17 @@ static int drop_pages(struct drm_i915_gem_object *obj,
static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
- if (obj->ops->shrinker_release_pages)
- return obj->ops->shrinker_release_pages(obj,
- !(flags & I915_SHRINK_ACTIVE),
- flags & I915_SHRINK_WRITEBACK);
-
- switch (obj->mm.madv) {
- case I915_MADV_DONTNEED:
- i915_gem_object_truncate(obj);
- return 0;
- case __I915_MADV_PURGED:
- return 0;
- }
+ if (obj->ops->shrink) {
+ unsigned int shrink_flags = 0;
+
+ if (!(flags & I915_SHRINK_ACTIVE))
+ shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;
- if (flags & I915_SHRINK_WRITEBACK)
- i915_gem_object_writeback(obj);
+ if (flags & I915_SHRINK_WRITEBACK)
+ shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;
+
+ return obj->ops->shrink(obj, shrink_flags);
+ }
return 0;
}
@@ -401,9 +397,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
I915_SHRINK_VMAPS);
/* We also want to clear any cached iomaps as they wrap vmap */
- mutex_lock(&i915->ggtt.vm.mutex);
+ mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
list_for_each_entry_safe(vma, next,
- &i915->ggtt.vm.bound_list, vm_link) {
+ &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
@@ -418,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->ggtt.vm.mutex);
+ mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 4dfed34191c6..b9c3196b91ca 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -16,6 +16,7 @@
#include "i915_gem_stolen.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
+#include "intel_mchbar_regs.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -72,7 +73,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
static int i915_adjust_stolen(struct drm_i915_private *i915,
struct resource *dsm)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct resource *r;
@@ -583,6 +584,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages =
i915_pages_create_for_stolen(obj->base.dev,
obj->stolen->start,
@@ -590,7 +592,7 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
- dbg_poison(&to_i915(obj->base.dev)->ggtt,
+ dbg_poison(to_gt(i915)->ggtt,
sg_dma_address(pages->sgl),
sg_dma_len(pages->sgl),
POISON_INUSE);
@@ -603,9 +605,10 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
/* Should only be called from i915_gem_object_release_stolen() */
- dbg_poison(&to_i915(obj->base.dev)->ggtt,
+ dbg_poison(to_gt(i915)->ggtt,
sg_dma_address(pages->sgl),
sg_dma_len(pages->sgl),
POISON_FREE);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 75501db71041..af85d0c28168 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -9,6 +9,7 @@
#include <drm/drm_file.h>
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 0e0e4805161a..d6adda5bf96b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -183,7 +183,8 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma, *vn;
LIST_HEAD(unbind);
int ret = 0;
@@ -338,7 +339,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err;
- if (!dev_priv->ggtt.num_fences)
+ if (!to_gt(dev_priv)->ggtt->num_fences)
return -EOPNOTSUPP;
obj = i915_gem_object_lookup(file, args->handle);
@@ -364,9 +365,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
else
- args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -421,7 +422,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err = -ENOENT;
- if (!dev_priv->ggtt.num_fences)
+ if (!to_gt(dev_priv)->ggtt->num_fences)
return -EOPNOTSUPP;
rcu_read_lock();
@@ -437,10 +438,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index de3fe79b665a..8419096d4056 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/shmem_fs.h>
+
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -424,16 +426,14 @@ int i915_ttm_purge(struct drm_i915_gem_object *obj)
return 0;
}
-static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
- bool no_wait_gpu,
- bool should_writeback)
+static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct i915_ttm_tt *i915_tt =
container_of(bo->ttm, typeof(*i915_tt), ttm);
struct ttm_operation_ctx ctx = {
.interruptible = true,
- .no_wait_gpu = no_wait_gpu,
+ .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
};
struct ttm_placement place = {};
int ret;
@@ -467,7 +467,7 @@ static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
return ret;
}
- if (should_writeback)
+ if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
__shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
return 0;
@@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
- bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
- bo->priority = I915_TTM_PRIO_NO_PAGES;
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
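[Editor's sketch] The clamping is gone: the LRU priority now follows object state directly. Reduced to a pure decision function (names illustrative, values matching the order purge < no-pages < has-pages):

#include <linux/types.h>

enum { PRIO_PURGE, PRIO_NO_PAGES, PRIO_HAS_PAGES };

static int ttm_prio_for(bool willneed, bool has_pages)
{
	if (!willneed)
		return PRIO_PURGE;	/* purgeable objects are evicted first */
	return has_pages ? PRIO_HAS_PAGES : PRIO_NO_PAGES;
}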
@@ -977,7 +975,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
.truncate = i915_ttm_truncate,
- .shrinker_release_pages = i915_ttm_shrinker_release_pages,
+ .shrink = i915_ttm_shrink,
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index e130c820ae4e..1ebe6e4086a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -142,7 +142,16 @@ int i915_ttm_move_notify(struct ttm_buffer_object *bo)
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
int ret;
- ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ /*
+ * Note: The async unbinding here will actually transform the
+ * blocking wait for unbind into a wait before finally submitting
+ * evict / migration blit and thus stall the migration timeline
+ * which may not be good for overall throughput. We should make
+ * sure we await the unbind fences *after* the migration blit
+ * instead of *before* as we currently do.
+ */
+ ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_ASYNC);
if (ret)
return ret;
@@ -531,7 +540,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
return ret;
}
- migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, bo->ttm,
+ migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, ttm,
dst_rsgt, true, &deps);
i915_deps_fini(&deps);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 11f0aa65f8a3..8424ee8c5eb8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -8,9 +8,10 @@
#include "i915_selftest.h"
-#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
@@ -370,9 +371,9 @@ static int igt_check_page_sizes(struct i915_vma *vma)
err = -EINVAL;
}
- if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
+ if (!HAS_PAGE_SIZES(i915, vma->resource->page_sizes_gtt)) {
pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
- vma->page_sizes.gtt & ~supported, supported);
+ vma->resource->page_sizes_gtt & ~supported, supported);
err = -EINVAL;
}
@@ -403,15 +404,9 @@ static int igt_check_page_sizes(struct i915_vma *vma)
if (i915_gem_object_is_lmem(obj) &&
IS_ALIGNED(vma->node.start, SZ_2M) &&
vma->page_sizes.sg & SZ_2M &&
- vma->page_sizes.gtt < SZ_2M) {
+ vma->resource->page_sizes_gtt < SZ_2M) {
pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
- vma->page_sizes.sg, vma->page_sizes.gtt);
- err = -EINVAL;
- }
-
- if (obj->mm.page_sizes.gtt) {
- pr_err("obj->page_sizes.gtt(%u) should never be set\n",
- obj->mm.page_sizes.gtt);
+ vma->page_sizes.sg, vma->resource->page_sizes_gtt);
err = -EINVAL;
}
@@ -547,9 +542,9 @@ static int igt_mock_memory_region_huge_pages(void *arg)
goto out_unpin;
}
- if (vma->page_sizes.gtt != page_size) {
+ if (vma->resource->page_sizes_gtt != page_size) {
pr_err("%s page_sizes.gtt=%u, expected=%u\n",
- __func__, vma->page_sizes.gtt,
+ __func__, vma->resource->page_sizes_gtt,
page_size);
err = -EINVAL;
goto out_unpin;
@@ -630,9 +625,9 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
err = igt_check_page_sizes(vma);
- if (vma->page_sizes.gtt != page_size) {
+ if (vma->resource->page_sizes_gtt != page_size) {
pr_err("page_sizes.gtt=%u, expected %u\n",
- vma->page_sizes.gtt, page_size);
+ vma->resource->page_sizes_gtt, page_size);
err = -EINVAL;
}
@@ -647,7 +642,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
* pages.
*/
for (offset = 4096; offset < page_size; offset += 4096) {
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err)
goto out_unpin;
@@ -657,9 +652,10 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
err = igt_check_page_sizes(vma);
- if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
+ if (vma->resource->page_sizes_gtt != I915_GTT_PAGE_SIZE_4K) {
pr_err("page_sizes.gtt=%u, expected %llu\n",
- vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
+ vma->resource->page_sizes_gtt,
+ I915_GTT_PAGE_SIZE_4K);
err = -EINVAL;
}
@@ -805,9 +801,9 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
}
}
- if (vma->page_sizes.gtt != expected_gtt) {
+ if (vma->resource->page_sizes_gtt != expected_gtt) {
pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
- vma->page_sizes.gtt, expected_gtt,
+ vma->resource->page_sizes_gtt, expected_gtt,
obj->base.size, yesno(!!single));
err = -EINVAL;
break;
@@ -961,10 +957,10 @@ static int igt_mock_ppgtt_64K(void *arg)
}
}
- if (vma->page_sizes.gtt != expected_gtt) {
+ if (vma->resource->page_sizes_gtt != expected_gtt) {
pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
- vma->page_sizes.gtt, expected_gtt, i,
- yesno(!!single));
+ vma->resource->page_sizes_gtt,
+ expected_gtt, i, yesno(!!single));
err = -EINVAL;
goto out_vma_unpin;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 0be86ffb7c19..8f28e46e8ee5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -319,7 +319,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
int err;
if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err)
return err;
}
@@ -544,7 +544,7 @@ static bool has_bit17_swizzle(int sw)
static bool bad_swizzling(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
return true;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7d327ffd0464..bd60d42238fb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -6,6 +6,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
@@ -1375,7 +1376,7 @@ static int igt_ctx_readonly(void *arg)
goto out_file;
}
- vm = ctx->vm ?: &i915->ggtt.alias->vm;
+ vm = ctx->vm ?: &to_gt(i915)->ggtt->alias->vm;
if (!vm || !vm->has_read_only) {
err = 0;
goto out_file;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index ecb691c81d1e..d534141b2cf7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -4,8 +4,13 @@
*/
#include "gt/intel_migrate.h"
+#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_ttm_move.h"
+#include "i915_deps.h"
+
+#include "selftests/igt_spinner.h"
+
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
bool fill)
{
@@ -101,7 +106,8 @@ static int igt_same_create_migrate(void *arg)
}
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ struct i915_vma *vma)
{
int err;
@@ -109,6 +115,24 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
if (err)
return err;
+ if (vma) {
+ err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
+ 0UL | PIN_OFFSET_FIXED |
+ PIN_USER);
+ if (err) {
+ if (err != -EINTR && err != -ERESTARTSYS &&
+ err != -EDEADLK)
+ pr_err("Failed to pin vma.\n");
+ return err;
+ }
+
+ i915_vma_unpin(vma);
+ }
+
+ /*
+ * Migration will implicitly unbind (asynchronously) any bound
+ * vmas.
+ */
if (i915_gem_object_is_lmem(obj)) {
err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
if (err) {
@@ -149,11 +173,15 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
return err;
}
-static int igt_lmem_pages_migrate(void *arg)
+static int __igt_lmem_pages_migrate(struct intel_gt *gt,
+ struct i915_address_space *vm,
+ struct i915_deps *deps,
+ struct igt_spinner *spin,
+ struct dma_fence *spin_fence)
{
- struct intel_gt *gt = arg;
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
+ struct i915_vma *vma = NULL;
struct i915_gem_ww_ctx ww;
struct i915_request *rq;
int err;
@@ -165,6 +193,14 @@ static int igt_lmem_pages_migrate(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ if (vm) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+ }
+
/* Initial GPU fill, sync, CPU initialization. */
for_i915_gem_ww(&ww, err, true) {
err = i915_gem_object_lock(obj, &ww);
@@ -175,25 +211,23 @@ static int igt_lmem_pages_migrate(void *arg)
if (err)
continue;
- err = intel_migrate_clear(&gt->migrate, &ww, NULL,
+ err = intel_migrate_clear(&gt->migrate, &ww, deps,
obj->mm.pages->sgl, obj->cache_level,
i915_gem_object_is_lmem(obj),
0xdeadbeaf, &rq);
if (rq) {
dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq);
}
if (err)
continue;
- err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE,
- 5 * HZ);
- if (err)
- continue;
-
- err = igt_fill_check_buffer(obj, true);
- if (err)
- continue;
+ if (!vma) {
+ err = igt_fill_check_buffer(obj, true);
+ if (err)
+ continue;
+ }
}
if (err)
goto out_put;
@@ -204,7 +238,7 @@ static int igt_lmem_pages_migrate(void *arg)
*/
for (i = 1; i <= 5; ++i) {
for_i915_gem_ww(&ww, err, true)
- err = lmem_pages_migrate_one(&ww, obj);
+ err = lmem_pages_migrate_one(&ww, obj, vma);
if (err)
goto out_put;
}
@@ -213,12 +247,27 @@ static int igt_lmem_pages_migrate(void *arg)
if (err)
goto out_put;
+ if (spin) {
+ if (dma_fence_is_signaled(spin_fence)) {
+ pr_err("Spinner was terminated by hangcheck.\n");
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ igt_spinner_end(spin);
+ }
+
/* Finally sync migration and check content. */
err = i915_gem_object_wait_migration(obj, true);
if (err)
goto out_unlock;
- err = igt_fill_check_buffer(obj, false);
+ if (vma) {
+ err = i915_vma_wait_for_bind(vma);
+ if (err)
+ goto out_unlock;
+ } else {
+ err = igt_fill_check_buffer(obj, false);
+ }
out_unlock:
i915_gem_object_unlock(obj);
@@ -231,6 +280,7 @@ out_put:
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
int fail_gpu, fail_alloc, ret;
+ struct intel_gt *gt = arg;
for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
@@ -238,7 +288,118 @@ static int igt_lmem_pages_failsafe_migrate(void *arg)
fail_gpu, fail_alloc);
i915_ttm_migrate_set_failure_modes(fail_gpu,
fail_alloc);
- ret = igt_lmem_pages_migrate(arg);
+ ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL);
+ if (ret)
+ goto out_err;
+ }
+ }
+
+out_err:
+ i915_ttm_migrate_set_failure_modes(false, false);
+ return ret;
+}
+
+/*
+ * This subtest tests that unbinding at migration is indeed performed
+ * async. We launch a spinner and a number of migrations that depend on
+ * the spinner having terminated. Before each migration we bind a
+ * vma, which should then be async unbound by the migration operation.
+ * If we are able to schedule migrations without blocking while the
+ * spinner is still running, those unbinds are indeed async and non-
+ * blocking.
+ *
+ * Note that each async bind operation is awaiting the previous migration
+ * due to the moving fence resulting from the migration.
+ */
+static int igt_async_migrate(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_ppgtt *ppgtt;
+ struct igt_spinner spin;
+ int err;
+
+ ppgtt = i915_ppgtt_create(gt, 0);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (igt_spinner_init(&spin, gt)) {
+ err = -ENOMEM;
+ goto out_spin;
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true
+ };
+ struct dma_fence *spin_fence;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_deps deps;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_ce;
+ }
+
+ /*
+ * Use MI_NOOP, making the spinner non-preemptible. If there
+ * is a code path where we fail async operation due to the
+ * running spinner, we will block and fail to end the
+ * spinner resulting in a deadlock. But with a non-
+ * preemptible spinner, hangcheck will terminate the spinner
+ * for us, and we will later detect that and fail the test.
+ */
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ i915_deps_init(&deps, GFP_KERNEL);
+ err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
+ spin_fence = dma_fence_get(&rq->fence);
+ i915_request_add(rq);
+ if (err)
+ goto out_ce;
+
+ err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
+ spin_fence);
+ i915_deps_fini(&deps);
+ dma_fence_put(spin_fence);
+ if (err)
+ goto out_ce;
+ }
+
+out_ce:
+ igt_spinner_fini(&spin);
+out_spin:
+ i915_vm_put(&ppgtt->vm);
+
+ return err;
+}
+
+/*
+ * Setting ASYNC_FAIL_ALLOC to 2 will simulate memory allocation failure while
+ * arming the migration error check, and will block async migration. This
+ * will cause us to deadlock; hangcheck will then terminate the spinner,
+ * causing the test to fail.
+ */
+#define ASYNC_FAIL_ALLOC 1
+static int igt_lmem_async_migrate(void *arg)
+{
+ int fail_gpu, fail_alloc, ret;
+ struct intel_gt *gt = arg;
+
+ for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
+ for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
+ pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
+ fail_gpu, fail_alloc);
+ i915_ttm_migrate_set_failure_modes(fail_gpu,
+ fail_alloc);
+ ret = igt_async_migrate(gt);
if (ret)
goto out_err;
}
@@ -256,6 +417,7 @@ int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_lmem_create_migrate),
SUBTEST(igt_same_create_migrate),
SUBTEST(igt_lmem_pages_failsafe_migrate),
+ SUBTEST(igt_lmem_async_migrate),
};
if (!HAS_LMEM(i915))
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index c6291429b00c..8ae1a1530bd8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,11 +6,13 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gem/i915_gem_region.h"
+
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -166,7 +168,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
out:
+ i915_gem_object_lock(obj, NULL);
__i915_vma_put(vma);
+ i915_gem_object_unlock(obj);
return err;
}
@@ -261,7 +265,9 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (err)
return err;
+ i915_gem_object_lock(obj, NULL);
__i915_vma_put(vma);
+ i915_gem_object_unlock(obj);
if (igt_timeout(end_time,
"%s: timed out after tiling=%d stride=%d\n",
@@ -307,7 +313,7 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return 0;
/* We want to check the page mapping and fencing of a large object
@@ -320,7 +326,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -366,10 +372,10 @@ static int igt_partial_tiling(void *arg)
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
- tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
}
@@ -440,7 +446,7 @@ static int igt_smoke_tiling(void *arg)
IGT_TIMEOUT(end);
int err;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return 0;
/*
@@ -457,7 +463,7 @@ static int igt_smoke_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -486,10 +492,10 @@ static int igt_smoke_tiling(void *arg)
break;
case I915_TILING_X:
- tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
}
@@ -856,6 +862,7 @@ static int wc_check(struct drm_i915_gem_object *obj)
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
bool no_map;
if (obj->ops->mmap_offset)
@@ -864,7 +871,7 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
return false;
if (type == I915_MMAP_TYPE_GTT &&
- !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+ !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return false;
i915_gem_object_lock(obj, NULL);
@@ -1351,7 +1358,9 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
* for other objects. Ergo we have to revoke the previous mmap PTE
* access as it no longer points to the same object.
*/
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ i915_gem_object_unlock(obj);
if (err) {
pr_err("Failed to unbind object!\n");
goto out_unmap;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index 740ee8086a27..fe0a890775e2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -43,7 +43,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915,
nreal * PAGE_SIZE,
- i915->ggtt.vm.total + PAGE_SIZE);
+ to_gt(i915)->ggtt->vm.total + PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index b35c1219c852..3c55e77b0f1b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -7,6 +7,7 @@
#include "igt_gem_utils.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index c0a8ef368044..6d6082b5f31f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -4,6 +4,7 @@
* Copyright © 2016 Intel Corporation
*/
+#include "i915_file_private.h"
#include "mock_context.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gtt.h"
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 8471758b3ef4..871fe7bda0e0 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -5,6 +5,8 @@
#include <linux/log2.h>
+#include "gem/i915_gem_internal.h"
+
#include "gen6_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
@@ -106,17 +108,17 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
}
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory * const pd = ppgtt->pd;
- unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
+ unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
unsigned int act_pt = first_entry / GEN6_PTES;
unsigned int act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter = sgt_dma(vma);
+ struct sgt_dma iter = sgt_dma(vma_res);
gen6_pte_t *vaddr;
GEM_BUG_ON(!pd->entry[act_pt]);
@@ -142,7 +144,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
} while (1);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
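[Editor's sketch] For orientation, the insert path walks the object's DMA scatterlist while writing PTEs; with this patch the offsets and page sizes come from the vma_res instead of the vma. A standalone sketch of such a walk, reduced to counting DMA pages (count_dma_pages() is hypothetical; the scatterlist API is the standard one):

#include <linux/scatterlist.h>

static unsigned long count_dma_pages(struct sg_table *st)
{
	struct scatterlist *sg;
	unsigned long pages = 0;
	unsigned int i;

	for_each_sgtable_dma_sg(st, sg, i)
		pages += sg_dma_len(sg) >> PAGE_SHIFT;	/* DMA length, not CPU length */

	return pages;
}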
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
@@ -273,13 +275,13 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static void pd_vma_bind(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct gen6_ppgtt *ppgtt = vma->private;
- u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
+ struct gen6_ppgtt *ppgtt = vma_res->private;
+ u32 ggtt_offset = vma_res->start / I915_GTT_PAGE_SIZE;
ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10;
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
@@ -287,9 +289,10 @@ static void pd_vma_bind(struct i915_address_space *vm,
gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
}
-static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
+static void pd_vma_unbind(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- struct gen6_ppgtt *ppgtt = vma->private;
+ struct gen6_ppgtt *ppgtt = vma_res->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
unsigned int pde;
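The recurring theme in the PPGTT/GGTT hunks is that the bind, unbind and insert callbacks now take a struct i915_vma_resource snapshot instead of the live struct i915_vma, so the VMA can be torn down while an asynchronous unbind is still in flight. A hedged standalone sketch of the split (field names follow the diff; the types here are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Immutable snapshot captured at bind time; PTE writers consume
     * this rather than the live VMA. */
    struct vma_resource {
            uint64_t start;       /* GTT offset of the allocated node */
            uint64_t node_size;   /* size of the node in bytes */
            bool     readonly;    /* captured from the backing object */
    };

    static void write_ptes(const struct vma_resource *res)
    {
            uint64_t off;

            for (off = 0; off < res->node_size; off += 4096)
                    (void)(res->start + off);   /* encode one PTE per page */
    }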
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index b012c50f7ce7..c43e724afa9f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -453,20 +453,21 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
return idx;
}
-static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
struct sgt_dma *iter,
enum i915_cache_level cache_level,
u32 flags)
{
const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
unsigned int rem = sg_dma_len(iter->sg);
- u64 start = vma->node.start;
+ u64 start = vma_res->start;
- GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
do {
struct i915_page_directory * const pdp =
- gen8_pdp_for_page_address(vma->vm, start);
+ gen8_pdp_for_page_address(vm, start);
struct i915_page_directory * const pd =
i915_pd_entry(pdp, __gen8_pte_index(start, 2));
gen8_pte_t encode = pte_encode;
@@ -475,7 +476,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
gen8_pte_t *vaddr;
u16 index;
- if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
rem >= I915_GTT_PAGE_SIZE_2M &&
!__gen8_pte_index(start, 0)) {
@@ -492,7 +493,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
page_size = I915_GTT_PAGE_SIZE;
if (!index &&
- vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
@@ -541,9 +542,9 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
*/
if (maybe_64K != -1 &&
(index == I915_PDES ||
- (i915_vm_has_scratch_64K(vma->vm) &&
- !iter->sg && IS_ALIGNED(vma->node.start +
- vma->node.size,
+ (i915_vm_has_scratch_64K(vm) &&
+ !iter->sg && IS_ALIGNED(vma_res->start +
+ vma_res->node_size,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
@@ -559,10 +560,10 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
* instead - which we detect as missing results during
* selftests.
*/
- if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+ if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
u16 i;
- encode = vma->vm->scratch[0]->encode;
+ encode = vm->scratch[0]->encode;
vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
@@ -572,22 +573,22 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
}
}
- vma->page_sizes.gtt |= page_size;
+ vma_res->page_sizes_gtt |= page_size;
} while (iter->sg && sg_dma_len(iter->sg));
}
static void gen8_ppgtt_insert(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
+ struct sgt_dma iter = sgt_dma(vma_res);
- if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+ if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
} else {
- u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
+ u64 idx = vma_res->start >> GEN8_PTE_SHIFT;
do {
struct i915_page_directory * const pdp =
@@ -597,7 +598,7 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
cache_level, flags);
} while (idx);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
}
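gen8_ppgtt_insert_huge() keys the page size off the alignment of the current DMA address, the bytes remaining in the scatterlist segment, and the GTT offset, falling back from 2M to 64K to 4K. A simplified standalone version of that decision (the real function also handles the 64K scratch quirk shown above):

    #include <stdint.h>

    #define SZ_4K  (1ull << 12)
    #define SZ_64K (1ull << 16)
    #define SZ_2M  (1ull << 21)

    static uint64_t pick_page_size(uint64_t dma, uint64_t rem, uint64_t gtt)
    {
            if (!(dma % SZ_2M) && rem >= SZ_2M && !(gtt % SZ_2M))
                    return SZ_2M;
            if (!(dma % SZ_64K) && rem >= SZ_64K && !(gtt % SZ_64K))
                    return SZ_64K;
            return SZ_4K;
    }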
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ba083d800a08..5d0ec7c49b6a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -79,7 +79,8 @@ static int intel_context_active_acquire(struct intel_context *ce)
__i915_active_acquire(&ce->active);
- if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine))
+ if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
+ intel_context_is_parallel(ce))
return 0;
/* Preallocate tracking nodes */
@@ -563,7 +564,6 @@ void intel_context_bind_parent_child(struct intel_context *parent,
 * Caller's responsibility to validate that this function is used
 * correctly, but we use GEM_BUG_ON here to ensure that they do.

*/
- GEM_BUG_ON(!intel_engine_uses_guc(parent->engine));
GEM_BUG_ON(intel_context_is_pinned(parent));
GEM_BUG_ON(intel_context_is_child(parent));
GEM_BUG_ON(intel_context_is_pinned(child));
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index e86d8255feec..ece16c2b5b8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -9,6 +9,7 @@
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "intel_sseu.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 0e353d8c2bc8..be4b1e65442f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -182,6 +182,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define ICL_HWS_CSB_WRITE_INDEX 0x2f
+#define INTEL_HWS_CSB_WRITE_INDEX(__i915) \
+ (GRAPHICS_VER(__i915) >= 11 ? ICL_HWS_CSB_WRITE_INDEX : I915_HWS_CSB_WRITE_INDEX)
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
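INTEL_HWS_CSB_WRITE_INDEX() supersedes the intel_hws_csb_write_index() helper (the execlists setup below is converted to it), keeping the version split in the header. The shape of the macro in isolation, using the two dword indices defined above:

    /* Gen11+ moved the CSB write pointer to a different status-page dword. */
    #define CSB_WRITE_INDEX(graphics_ver) \
            ((graphics_ver) >= 11 ? 0x2f : 0x1f)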
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 84ad09fb9b8b..e53008b4dd05 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -6,6 +6,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gt/intel_gt_regs.h"
#include "i915_cmd_parser.h"
@@ -1229,17 +1230,6 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
- case I915_CACHE_L3_LLC: return " L3+LLC";
- case I915_CACHE_WT: return " WT";
- default: return "";
- }
-}
-
static u32
read_subslice_reg(const struct intel_engine_cs *engine,
int slice, int subslice, i915_reg_t reg)
@@ -1710,18 +1700,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
- struct i915_vma_snapshot *vsnap = &rq->batch_snapshot;
+ struct i915_vma_resource *vma_res = rq->batch_res;
void *ring;
int size;
- if (!i915_vma_snapshot_present(vsnap))
- vsnap = NULL;
-
drm_printf(m,
"[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
rq->head, rq->postfix, rq->tail,
- vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u,
- vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u);
+ vma_res ? upper_32_bits(vma_res->start) : ~0u,
+ vma_res ? lower_32_bits(vma_res->start) : ~0u);
size = rq->tail - rq->head;
if (rq->tail < rq->head)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index e9fec6214073..0bf8b45c9319 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -70,6 +70,12 @@
#define RING_NOPID(base) _MMIO((base) + 0x94)
#define RING_HWSTAM(base) _MMIO((base) + 0x98)
#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
+#define ASYNC_FLIP_PERF_DISABLE REG_BIT(14)
+#define MI_FLUSH_ENABLE REG_BIT(12)
+#define TGL_NESTED_BB_EN REG_BIT(12)
+#define MODE_IDLE REG_BIT(9)
+#define STOP_RING REG_BIT(8)
+#define VS_TIMER_DISPATCH REG_BIT(6)
#define RING_IMR(base) _MMIO((base) + 0xa8)
#define RING_EIR(base) _MMIO((base) + 0xb0)
#define RING_EMR(base) _MMIO((base) + 0xb4)
@@ -211,8 +217,25 @@
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
+#define GEN11_VCS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x88c)
+#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(base) _MMIO((base) + 0x890)
+#define GEN11_VCS_SFC_USAGE_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x201c)
+#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(base) _MMIO((base) + 0x2018)
+#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0)
+#define GEN11_VECS_SFC_USAGE(base) _MMIO((base) + 0x2014)
+#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+#define GEN12_HCP_SFC_LOCK_STATUS(base) _MMIO((base) + 0x2914)
+#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
+#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
+
#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10)
#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
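The MI_MODE bits and the GEN11/GEN12 SFC lock registers migrate into this engine-register header; newly written bits use REG_BIT() while the moved GEN11 ones keep their (1 << n) spelling. Roughly what REG_BIT() provides, as a standalone stand-in (the kernel's version, from i915_reg_defs.h, additionally rejects out-of-range bit numbers at compile time):

    #include <stdint.h>

    #define REG_BIT(n)      ((uint32_t)(1u << (n)))

    #define STOP_RING       REG_BIT(8)
    #define MODE_IDLE       REG_BIT(9)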
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 49e26d23ce1f..961d795220a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2601,6 +2601,43 @@ static void execlists_context_cancel_request(struct intel_context *ce,
current->comm);
}
+static struct intel_context *
+execlists_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width)
+{
+ struct intel_context *parent = NULL, *ce, *err;
+ int i;
+
+ GEM_BUG_ON(num_siblings != 1);
+
+ for (i = 0; i < width; ++i) {
+ ce = intel_context_create(engines[i]);
+ if (IS_ERR(ce)) {
+ err = ce;
+ goto unwind;
+ }
+
+ if (i == 0)
+ parent = ce;
+ else
+ intel_context_bind_parent_child(parent, ce);
+ }
+
+ parent->parallel.fence_context = dma_fence_context_alloc(1);
+
+ intel_context_set_nopreempt(parent);
+ for_each_child(parent, ce)
+ intel_context_set_nopreempt(ce);
+
+ return parent;
+
+unwind:
+ if (parent)
+ intel_context_put(parent);
+ return err;
+}
+
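execlists_create_parallel() gives the execlists backend the parent/child context contract previously exclusive to GuC submission (hence the GEM_BUG_ON dropped from intel_context_bind_parent_child() above). Its error path releases every context built so far through the parent alone; the unwind idiom, reduced to a standalone sketch:

    #include <stdlib.h>

    struct ctx { struct ctx *child; };

    static struct ctx *create_chain(int width)
    {
            struct ctx *parent = NULL, *prev = NULL;
            int i;

            for (i = 0; i < width; i++) {
                    struct ctx *ce = calloc(1, sizeof(*ce));
                    if (!ce)
                            goto unwind;
                    if (!parent)
                            parent = ce;
                    else
                            prev->child = ce;
                    prev = ce;
            }
            return parent;

    unwind:
            while (parent) {   /* releasing the parent tears down the chain */
                    struct ctx *next = parent->child;
                    free(parent);
                    parent = next;
            }
            return NULL;
    }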
static const struct intel_context_ops execlists_context_ops = {
.flags = COPS_HAS_INFLIGHT,
@@ -2619,6 +2656,7 @@ static const struct intel_context_ops execlists_context_ops = {
.reset = lrc_reset,
.destroy = lrc_destroy,
+ .create_parallel = execlists_create_parallel,
.create_virtual = execlists_create_virtual,
};
@@ -3465,7 +3503,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
execlists->csb_write =
- &engine->status_page.addr[intel_hws_csb_write_index(i915)];
+ &engine->status_page.addr[INTEL_HWS_CSB_WRITE_INDEX(i915)];
if (GRAPHICS_VER(i915) < 11)
execlists->csb_size = GEN8_CSB_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index d2922f64d1c8..8850d4e0f9cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -87,7 +87,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
* beyond the end of the batch buffer, across the page boundary,
* and beyond the end of the GTT if we do not provide a guard.
*/
- ret = ggtt_init_hw(&i915->ggtt);
+ ret = ggtt_init_hw(to_gt(i915)->ggtt);
if (ret)
return ret;
@@ -130,22 +130,51 @@ void i915_ggtt_suspend_vm(struct i915_address_space *vm)
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+retry:
+ i915_gem_drain_freed_objects(vm->i915);
+
mutex_lock(&vm->mutex);
/* Skip rewriting PTE on VMA unbind. */
open = atomic_xchg(&vm->open, 0);
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- i915_vma_wait_for_bind(vma);
- if (i915_vma_is_pinned(vma))
+ if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
+		/* Unlikely to race when the GPU is idle, so no need to worry about the slow path. */
+ if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
+ /*
+			 * No dead objects should appear here; the GPU should be
+			 * completely idle and userspace suspended.
+ */
+ i915_gem_object_get(obj);
+
+ atomic_set(&vm->open, open);
+ mutex_unlock(&vm->mutex);
+
+ i915_gem_object_lock(obj, NULL);
+ open = i915_vma_unbind(vma);
+ i915_gem_object_unlock(obj);
+
+ GEM_WARN_ON(open);
+
+ i915_gem_object_put(obj);
+ goto retry;
+ }
+
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- __i915_vma_evict(vma);
+ i915_vma_wait_for_bind(vma);
+
+ __i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node);
}
+
+ i915_gem_object_unlock(obj);
}
vm->clear_range(vm, 0, vm->total);
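i915_ggtt_suspend_vm() now has to respect the per-object lock: under vm->mutex it may only trylock, so on contention it pins the object, backs all the way out, takes the object lock sleeping, unbinds, and restarts the walk from scratch. The locking shape, reduced to a standalone pthreads sketch:

    #include <pthread.h>

    static pthread_mutex_t vm_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

    static void suspend_walk(void)
    {
    retry:
            pthread_mutex_lock(&vm_lock);
            if (pthread_mutex_trylock(&obj_lock)) {
                    /* Lock order forbids sleeping here: back out,
                     * take the inner lock sleeping, then restart. */
                    pthread_mutex_unlock(&vm_lock);
                    pthread_mutex_lock(&obj_lock);
                    /* ... unbind work needing only obj_lock ... */
                    pthread_mutex_unlock(&obj_lock);
                    goto retry;
            }
            /* ... walk one entry with both locks held ... */
            pthread_mutex_unlock(&obj_lock);
            pthread_mutex_unlock(&vm_lock);
    }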
@@ -236,7 +265,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
@@ -253,10 +282,10 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma->node.start / I915_GTT_PAGE_SIZE;
- end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, iter, vma->pages)
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
gen8_set_pte(gte++, pte_encode | addr);
GEM_BUG_ON(gte > end);
@@ -293,7 +322,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
* through the GMADR mapped BAR (i915->mm.gtt->gtt).
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
@@ -304,10 +333,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr;
gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma->node.start / I915_GTT_PAGE_SIZE;
- end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, iter, vma->pages)
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
iowrite32(vm->pte_encode(addr, level, flags), gte++);
GEM_BUG_ON(gte > end);
@@ -390,7 +419,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
struct insert_entries {
struct i915_address_space *vm;
- struct i915_vma *vma;
+ struct i915_vma_resource *vma_res;
enum i915_cache_level level;
u32 flags;
};
@@ -399,18 +428,18 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
}
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level,
u32 flags)
{
- struct insert_entries arg = { vm, vma, level, flags };
+ struct insert_entries arg = { vm, vma_res, level, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
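Because stop_machine() callbacks take a single void *, the BXT VT-d path marshals its argument list through struct insert_entries, which now carries the vma_res pointer instead of the vma. The idiom in isolation:

    struct insert_args { int a, b; };

    static int cb(void *data)
    {
            struct insert_args *arg = data;

            return arg->a + arg->b;
    }

    static int run(int a, int b)
    {
            struct insert_args arg = { a, b };

            return cb(&arg);   /* the driver routes this through stop_machine() */
    }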
@@ -449,14 +478,14 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 unused)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+ intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
flags);
}
@@ -468,30 +497,32 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
static void ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
+ if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
return;
+ vma_res->bound_flags |= flags;
+
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (i915_gem_object_is_readonly(obj))
+ if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(obj))
+ if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vm->insert_entries(vm, vma, cache_level, pte_flags);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
-static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+static void ggtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- vm->clear_range(vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -505,7 +536,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
size = ggtt->vm.total - GUC_GGTT_TOP;
- ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
PIN_NOEVICT);
if (ret)
@@ -624,7 +655,7 @@ err:
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -632,25 +663,27 @@ static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
+ if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND)
ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
- stash, vma, cache_level, flags);
+ stash, vma_res, cache_level, flags);
if (flags & I915_VMA_GLOBAL_BIND)
- vm->insert_entries(vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+
+ vma_res->bound_flags |= flags;
}
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma *vma)
+ struct i915_vma_resource *vma_res)
{
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- vm->clear_range(vm, vma->node.start, vma->size);
+ if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
- if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
- ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
+ if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
+ ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
}
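With the live VMA gone from these callbacks, the bound-domain bookkeeping moves into the resource itself: bind records I915_VMA_GLOBAL_BIND / I915_VMA_LOCAL_BIND in vma_res->bound_flags and unbind tests them, instead of querying i915_vma_is_bound(). A simplified standalone version of that bookkeeping:

    #define BIND_GLOBAL  (1u << 0)
    #define BIND_LOCAL   (1u << 1)

    struct res { unsigned int bound_flags; };

    static void bind(struct res *r, unsigned int flags)
    {
            /* ... write PTEs for the requested domains ... */
            r->bound_flags |= flags;
    }

    static void unbind(struct res *r)
    {
            if (r->bound_flags & BIND_GLOBAL)
                    ;   /* clear the global GTT range */
            if (r->bound_flags & BIND_LOCAL)
                    ;   /* tear down the aliasing ppgtt range */
            r->bound_flags = 0;
    }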
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
@@ -723,14 +756,14 @@ int i915_init_ggtt(struct drm_i915_private *i915)
{
int ret;
- ret = init_ggtt(&i915->ggtt);
+ ret = init_ggtt(to_gt(i915)->ggtt);
if (ret)
return ret;
if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
- ret = init_aliasing_ppgtt(&i915->ggtt);
+ ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
if (ret)
- cleanup_init_ggtt(&i915->ggtt);
+ cleanup_init_ggtt(to_gt(i915)->ggtt);
}
return 0;
@@ -743,11 +776,21 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
atomic_set(&ggtt->vm.open, 0);
flush_workqueue(ggtt->vm.i915->wq);
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
mutex_lock(&ggtt->vm.mutex);
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ bool trylock;
+
+ trylock = i915_gem_object_trylock(obj, NULL);
+ WARN_ON(!trylock);
+
WARN_ON(__i915_vma_unbind(vma));
+ if (trylock)
+ i915_gem_object_unlock(obj);
+ }
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
@@ -773,7 +816,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
*/
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
fini_aliasing_ppgtt(ggtt);
@@ -788,7 +831,7 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
*/
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
dma_resv_fini(&ggtt->vm._resv);
@@ -1209,7 +1252,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
int ret;
- ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915));
+ ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
if (ret)
return ret;
@@ -1281,7 +1324,7 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- vma->ops->bind_vma(vm, NULL, vma,
+ vma->ops->bind_vma(vm, NULL, vma->resource,
obj ? obj->cache_level : 0,
was_bound);
if (obj) { /* only used during resume => exclusive access */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 95c9145232f5..76880fb8fc19 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -9,6 +9,7 @@
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt_regs.h"
+#include "intel_mchbar_regs.h"
/**
* DOC: fence register handling
@@ -427,7 +428,6 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
err = mutex_lock_interruptible(&vma->vm->mutex);
@@ -730,8 +730,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- i915->ggtt.bit_6_swizzle_x = swizzle_x;
- i915->ggtt.bit_6_swizzle_y = swizzle_y;
+ to_gt(i915)->ggtt->bit_6_swizzle_x = swizzle_x;
+ to_gt(i915)->ggtt->bit_6_swizzle_y = swizzle_y;
}
/*
@@ -898,7 +898,7 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
if (GRAPHICS_VER(i915) < 5 ||
- i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 57763aede976..e8403fa53909 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,17 +3,20 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>
-#include "intel_gt_debugfs.h"
-
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
+#include "pxp/intel_pxp.h"
+
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
@@ -25,7 +28,6 @@
#include "intel_rps.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-#include "pxp/intel_pxp.h"
void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -89,9 +91,11 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
return 0;
}
-void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
+int intel_gt_assign_ggtt(struct intel_gt *gt)
{
- gt->ggtt = ggtt;
+ gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
+
+ return gt->ggtt ? 0 : -ENOMEM;
}
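intel_gt_assign_ggtt() replaces intel_gt_init_hw_early(): rather than pointing the GT at a GGTT embedded in drm_i915_private, it allocates one with drmm_kzalloc(), so the allocation is released automatically with the drm_device (which is why <drm/drm_managed.h> is now included above). A userspace analog of managed allocation, assuming nothing about the DRM internals (devm_zalloc here is a hypothetical name):

    #include <stdlib.h>

    struct dev { void *managed[8]; int n; };

    static void *devm_zalloc(struct dev *d, size_t sz)   /* hypothetical */
    {
            void *p = calloc(1, sz);

            if (!p)
                    return NULL;
            if (d->n == 8) {   /* registry full: fail cleanly */
                    free(p);
                    return NULL;
            }
            d->managed[d->n++] = p;   /* freed with the device */
            return p;
    }

    static void dev_release(struct dev *d)
    {
            while (d->n)
                    free(d->managed[--d->n]);
    }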
static const struct intel_mmio_range icl_l3bank_steering_table[] = {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index a913fb6ffec3..2dad46c3eff2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -36,7 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 9db3dcbd917f..cadfd85785b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -3,6 +3,7 @@
* Copyright © 2014-2018 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 4e448c13a64c..37765919fe32 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -15,6 +15,7 @@
#include "intel_gt_pm_debugfs.h"
#include "intel_gt_regs.h"
#include "intel_llc.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 16d98ebee687..18d158d77aba 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -8,779 +8,166 @@
#include "i915_reg_defs.h"
-#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define ILK_GRDOM_FULL (0 << 1)
-#define ILK_GRDOM_RENDER (1 << 1)
-#define ILK_GRDOM_MEDIA (3 << 1)
-#define ILK_GRDOM_MASK (3 << 1)
-#define ILK_GRDOM_RESET_ENABLE (1 << 0)
-
-#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
-#define GEN6_MBC_SNPCR_SHIFT 21
-#define GEN6_MBC_SNPCR_MASK (3 << 21)
-#define GEN6_MBC_SNPCR_MAX (0 << 21)
-#define GEN6_MBC_SNPCR_MED (1 << 21)
-#define GEN6_MBC_SNPCR_LOW (2 << 21)
-#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
-
-#define VLV_G3DCTL _MMIO(0x9024)
-#define VLV_GSCKGCTL _MMIO(0x9028)
-
-#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN REG_BIT(30)
-
-#define GEN6_MBCTL _MMIO(0x0907c)
-#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
-#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
-#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
-#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
-#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
-
-#define GEN6_GDRST _MMIO(0x941c)
-#define GEN6_GRDOM_FULL (1 << 0)
-#define GEN6_GRDOM_RENDER (1 << 1)
-#define GEN6_GRDOM_MEDIA (1 << 2)
-#define GEN6_GRDOM_BLT (1 << 3)
-#define GEN6_GRDOM_VECS (1 << 4)
-#define GEN9_GRDOM_GUC (1 << 5)
-#define GEN8_GRDOM_MEDIA2 (1 << 7)
-/* GEN11 changed all bit defs except for FULL & RENDER */
-#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
-#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
-#define GEN11_GRDOM_BLT (1 << 2)
-#define GEN11_GRDOM_GUC (1 << 3)
-#define GEN11_GRDOM_MEDIA (1 << 5)
-#define GEN11_GRDOM_MEDIA2 (1 << 6)
-#define GEN11_GRDOM_MEDIA3 (1 << 7)
-#define GEN11_GRDOM_MEDIA4 (1 << 8)
-#define GEN11_GRDOM_MEDIA5 (1 << 9)
-#define GEN11_GRDOM_MEDIA6 (1 << 10)
-#define GEN11_GRDOM_MEDIA7 (1 << 11)
-#define GEN11_GRDOM_MEDIA8 (1 << 12)
-#define GEN11_GRDOM_VECS (1 << 13)
-#define GEN11_GRDOM_VECS2 (1 << 14)
-#define GEN11_GRDOM_VECS3 (1 << 15)
-#define GEN11_GRDOM_VECS4 (1 << 16)
-#define GEN11_GRDOM_SFC0 (1 << 17)
-#define GEN11_GRDOM_SFC1 (1 << 18)
-#define GEN11_GRDOM_SFC2 (1 << 19)
-#define GEN11_GRDOM_SFC3 (1 << 20)
-#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
-#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
-
-#define GEN11_VCS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x88C)
-#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0)
-#define GEN11_VCS_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x890)
-#define GEN11_VCS_SFC_USAGE_BIT (1 << 0)
-#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1)
-
-#define GEN11_VECS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x201C)
-#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0)
-#define GEN11_VECS_SFC_LOCK_ACK(engine) _MMIO((engine)->mmio_base + 0x2018)
-#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0)
-#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
-#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
-
-#define GEN12_HCP_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x2910)
-#define GEN12_HCP_SFC_FORCED_LOCK_BIT REG_BIT(0)
-#define GEN12_HCP_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x2914)
-#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
-#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
-
-#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
-
-#define WAIT_FOR_RC6_EXIT _MMIO(0x20CC)
-/* HSW only */
-#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2
-#define HSW_SELECTIVE_READ_ADDRESSING_MASK (0x3 << HSW_SLECTIVE_READ_ADDRESSING_SHIFT)
-#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4
-#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT)
-/* HSW+ */
-#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0)
-#define HSW_RCS_CONTEXT_ENABLE (1 << 7)
-#define HSW_RCS_INHIBIT (1 << 8)
-/* Gen8 */
-#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
-#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
-#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
-#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
-#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6)
-#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9
-#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT)
-#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11
-#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT)
-#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
-
-#define GAM_ECOCHK _MMIO(0x4090)
-#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
-#define ECOCHK_SNB_BIT (1 << 10)
-#define ECOCHK_DIS_TLB (1 << 8)
-#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
-#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
-#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
-#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
-#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
-#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
-#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
-#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
-
-#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
-
-#define GAC_ECO_BITS _MMIO(0x14090)
-#define ECOBITS_SNB_BIT (1 << 13)
-#define ECOBITS_PPGTT_CACHE64B (3 << 8)
-#define ECOBITS_PPGTT_CACHE4B (0 << 8)
-
-#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
-#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
-#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
-
-#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
-#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
-#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
-#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
-
-#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
-#define RENDER_MOD_CTRL _MMIO(0xcf2c)
-#define COMP_MOD_CTRL _MMIO(0xcf30)
-#define VDBX_MOD_CTRL _MMIO(0xcf34)
-#define VEBX_MOD_CTRL _MMIO(0xcf38)
-#define FORCE_MISS_FTLB REG_BIT(3)
-
-#define GAB_CTL _MMIO(0x24000)
-#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
-
-#define HSW_MI_PREDICATE_RESULT_2 _MMIO(0x2214)
-
-/*
- * Registers used only by the command parser
- */
-#define BCS_SWCTRL _MMIO(0x22200)
-#define BCS_SRC_Y REG_BIT(0)
-#define BCS_DST_Y REG_BIT(1)
-
-#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
-#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
-#define HS_INVOCATION_COUNT _MMIO(0x2300)
-#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
-#define DS_INVOCATION_COUNT _MMIO(0x2308)
-#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
-#define IA_VERTICES_COUNT _MMIO(0x2310)
-#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
-#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
-#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
-#define VS_INVOCATION_COUNT _MMIO(0x2320)
-#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
-#define GS_INVOCATION_COUNT _MMIO(0x2328)
-#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
-#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
-#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
-#define CL_INVOCATION_COUNT _MMIO(0x2338)
-#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
-#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
-#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
-#define PS_INVOCATION_COUNT _MMIO(0x2348)
-#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
-#define PS_DEPTH_COUNT _MMIO(0x2350)
-#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)
-
-/* There are the 4 64-bit counter registers, one for each stream output */
-#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
-#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)
-
-#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
-#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
-
-#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
-#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
-#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
-#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
-#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243C)
-#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)
-
-#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
-#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
-#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
-
-#define GEN12_SQCM _MMIO(0x8724)
-#define EN_32B_ACCESS REG_BIT(30)
-
-/*
- * Flexible, Aggregate EU Counter Registers.
- * Note: these aren't contiguous
- */
-#define EU_PERF_CNTL0 _MMIO(0xe458)
-#define EU_PERF_CNTL1 _MMIO(0xe558)
-#define EU_PERF_CNTL2 _MMIO(0xe658)
-#define EU_PERF_CNTL3 _MMIO(0xe758)
-#define EU_PERF_CNTL4 _MMIO(0xe45c)
-#define EU_PERF_CNTL5 _MMIO(0xe55c)
-#define EU_PERF_CNTL6 _MMIO(0xe65c)
-
-#define RT_CTRL _MMIO(0xe530)
-#define DIS_NULL_QUERY REG_BIT(10)
-
/* RPM unit config (Gen8+) */
-#define RPM_CONFIG0 _MMIO(0x0D00)
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
-#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
-#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
-
-#define RPM_CONFIG1 _MMIO(0x0D04)
-#define GEN10_GT_NOA_ENABLE (1 << 9)
-
-/* GPM unit config (Gen9+) */
-#define CTC_MODE _MMIO(0xA26C)
-#define CTC_SOURCE_PARAMETER_MASK 1
-#define CTC_SOURCE_CRYSTAL_CLOCK 0
-#define CTC_SOURCE_DIVIDE_LOGIC 1
-#define CTC_SHIFT_PARAMETER_SHIFT 1
-#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+#define RPM_CONFIG0 _MMIO(0xd00)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
+
+#define RPM_CONFIG1 _MMIO(0xd04)
+#define GEN10_GT_NOA_ENABLE (1 << 9)
/* RCP unit config (Gen8+) */
-#define RCP_CONFIG _MMIO(0x0D08)
-
-#define MICRO_BP0_0 _MMIO(0x9800)
-#define MICRO_BP0_2 _MMIO(0x9804)
-#define MICRO_BP0_1 _MMIO(0x9808)
-
-#define MICRO_BP1_0 _MMIO(0x980C)
-#define MICRO_BP1_2 _MMIO(0x9810)
-#define MICRO_BP1_1 _MMIO(0x9814)
-
-#define MICRO_BP2_0 _MMIO(0x9818)
-#define MICRO_BP2_2 _MMIO(0x981C)
-#define MICRO_BP2_1 _MMIO(0x9820)
-
-#define MICRO_BP3_0 _MMIO(0x9824)
-#define MICRO_BP3_2 _MMIO(0x9828)
-#define MICRO_BP3_1 _MMIO(0x982C)
-
-#define MICRO_BP_TRIGGER _MMIO(0x9830)
-#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834)
-#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
-#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
-
-#define GAMTARBMODE _MMIO(0x04a08)
-#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
-#define ARB_MODE_SWIZZLE_BDW (1 << 1)
-#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
-
-#define _RING_FAULT_REG_RCS 0x4094
-#define _RING_FAULT_REG_VCS 0x4194
-#define _RING_FAULT_REG_BCS 0x4294
-#define _RING_FAULT_REG_VECS 0x4394
-#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \
- _RING_FAULT_REG_RCS, \
- _RING_FAULT_REG_VCS, \
- _RING_FAULT_REG_VECS, \
- _RING_FAULT_REG_BCS))
-#define GEN8_RING_FAULT_REG _MMIO(0x4094)
-#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
-#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define RING_FAULT_GTTSEL_MASK (1 << 11)
-#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
-#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define RING_FAULT_VALID (1 << 0)
-#define DONE_REG _MMIO(0x40b0)
-#define GEN12_GAM_DONE _MMIO(0xcf68)
-#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
-#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
-#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
-#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
-#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
-#define GEN12_VD0_AUX_NV _MMIO(0x4218)
-#define GEN12_VD1_AUX_NV _MMIO(0x4228)
-#define GEN12_VD2_AUX_NV _MMIO(0x4298)
-#define GEN12_VD3_AUX_NV _MMIO(0x42A8)
-#define GEN12_VE0_AUX_NV _MMIO(0x4238)
-#define GEN12_VE1_AUX_NV _MMIO(0x42B8)
-#define AUX_INV REG_BIT(0)
-#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
-#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
-
-#define MISC_STATUS0 _MMIO(0xA500)
-#define MISC_STATUS1 _MMIO(0xA504)
-
-#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
-
-#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
-
-#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
-#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
-#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
-
-#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
-#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
-#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
-#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+#define RCP_CONFIG _MMIO(0xd08)
+
+#define RC6_LOCATION _MMIO(0xd40)
+#define RC6_CTX_IN_DRAM (1 << 0)
+#define RC6_CTX_BASE _MMIO(0xd48)
+#define RC6_CTX_BASE_MASK 0xFFFFFFF0
+
+#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0xd50 + (n) * 4)
+#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0xd70 + (n) * 4)
+#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0xd84)
+#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0xd88)
+
+#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
+#define SF_MCR_SELECTOR _MMIO(0xfd8)
+#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
+#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
+#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
+#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
+#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
+#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
+#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
+#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
+#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
+
+#define IPEIR_I965 _MMIO(0x2064)
+#define IPEHR_I965 _MMIO(0x2068)
-#define GEN8_RTCR _MMIO(0x4260)
-#define GEN8_M1TCR _MMIO(0x4264)
-#define GEN8_M2TCR _MMIO(0x4268)
-#define GEN8_BTCR _MMIO(0x426c)
-#define GEN8_VTCR _MMIO(0x4270)
-
-#define IPEIR_I965 _MMIO(0x2064)
-#define IPEHR_I965 _MMIO(0x2068)
-#define GEN7_SC_INSTDONE _MMIO(0x7100)
-#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
-#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
-#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
-#define GEN7_ROW_INSTDONE _MMIO(0xe164)
-#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c)
-#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
-#define SF_MCR_SELECTOR _MMIO(0xfd8)
-#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
-#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
-#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
-#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
-#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
-#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
-#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
-#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
-#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
-#define INSTPS _MMIO(0x2070) /* 965+ only */
-#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
-#define ACTHD_I965 _MMIO(0x2074)
-#define HWS_PGA _MMIO(0x2080)
-#define HWS_ADDRESS_MASK 0xfffff000
-#define HWS_START_ADDRESS_SHIFT 4
-#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
-#define PWRCTX_EN (1 << 0)
-#define GEN2_INSTDONE _MMIO(0x2090)
-#define NOPID _MMIO(0x2094)
-#define HWSTAM _MMIO(0x2098)
-
-#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
-#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
-
-#define ERROR_GEN6 _MMIO(0x40a0)
-
-#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
-#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
-#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
-#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
-#define FAULT_VA_HIGH_BITS (0xf << 0)
-#define FAULT_GTT_SEL (1 << 4)
-
-#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
-#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
-#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
-#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
-
-#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+#define INSTPS _MMIO(0x2070) /* 965+ only */
+#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
+#define ACTHD_I965 _MMIO(0x2074)
+#define HWS_PGA _MMIO(0x2080)
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRESS_SHIFT 4
+
+#define _3D_CHICKEN _MMIO(0x2084)
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+
+#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
+#define PWRCTX_EN (1 << 0)
+
+#define FF_SLICE_CHICKEN _MMIO(0x2088)
+#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
*/
-#define _3D_CHICKEN _MMIO(0x2084)
-#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
-#define _3D_CHICKEN2 _MMIO(0x208c)
-
-#define FF_SLICE_CHICKEN _MMIO(0x2088)
-#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
-
+#define _3D_CHICKEN2 _MMIO(0x208c)
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
* particular danger of not doing so is not specified.
*/
-# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
-#define _3D_CHICKEN3 _MMIO(0x2090)
-#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
-#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
-#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
-#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
-#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
-#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
-
-#define MI_MODE _MMIO(0x209c)
-# define VS_TIMER_DISPATCH (1 << 6)
-# define MI_FLUSH_ENABLE (1 << 12)
-# define TGL_NESTED_BB_EN (1 << 12)
-# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
-# define MODE_IDLE (1 << 9)
-# define STOP_RING (1 << 8)
-
-#define GEN6_GT_MODE _MMIO(0x20d0)
-#define GEN7_GT_MODE _MMIO(0x7008)
-#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
-#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
-#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
-#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
-#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
-#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
-#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
-#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+#define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+
+#define _3D_CHICKEN3 _MMIO(0x2090)
+#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
+#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
+
+#define GEN2_INSTDONE _MMIO(0x2090)
+#define NOPID _MMIO(0x2094)
+#define HWSTAM _MMIO(0x2098)
+
+#define WAIT_FOR_RC6_EXIT _MMIO(0x20cc)
+/* HSW only */
+#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2
+#define HSW_SELECTIVE_READ_ADDRESSING_MASK		(0x3 << HSW_SELECTIVE_READ_ADDRESSING_SHIFT)
+#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT)
+/* HSW+ */
+#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0)
+#define HSW_RCS_CONTEXT_ENABLE (1 << 7)
+#define HSW_RCS_INHIBIT (1 << 8)
+/* Gen8 */
+#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT		4
+#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK		(0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
+#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6)
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
+
+#define GEN6_GT_MODE _MMIO(0x20d0)
+#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
+#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
+#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
+#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
+#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
-#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
-#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
-#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20d4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
-#define SCCGCTL94DC _MMIO(0x94dc)
-#define CG3DDISURB REG_BIT(14)
+#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
-#define MLTICTXCTL _MMIO(0xb170)
-#define TDONRENDER REG_BIT(2)
+#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
-#define L3SQCREG1_CCS0 _MMIO(0xb200)
-#define FLUSHALLNONCOH REG_BIT(5)
+#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
+#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20ec)
+#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0)
/* WaClearTdlStateAckDirtyBits */
-#define GEN8_STATE_ACK _MMIO(0x20F0)
-#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
-#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
-#define GEN9_STATE_ACK_TDL0 (1 << 12)
-#define GEN9_STATE_ACK_TDL1 (1 << 13)
-#define GEN9_STATE_ACK_TDL2 (1 << 14)
-#define GEN9_STATE_ACK_TDL3 (1 << 15)
-#define GEN9_SUBSLICE_TDL_ACK_BITS \
+#define GEN8_STATE_ACK _MMIO(0x20f0)
+#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20f8)
+#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
+#define GEN9_STATE_ACK_TDL0 (1 << 12)
+#define GEN9_STATE_ACK_TDL1 (1 << 13)
+#define GEN9_STATE_ACK_TDL2 (1 << 14)
+#define GEN9_STATE_ACK_TDL3 (1 << 15)
+#define GEN9_SUBSLICE_TDL_ACK_BITS \
(GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
-#define GFX_MODE _MMIO(0x2520)
-
-#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
-#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
-#define CM0_IZ_OPT_DISABLE (1 << 6)
-#define CM0_ZR_OPT_DISABLE (1 << 5)
-#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
-#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
-#define CM0_COLOR_EVICT_DISABLE (1 << 3)
-#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
-#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
-#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
-#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
-#define GFX_FLSH_CNTL_EN (1 << 0)
-
-#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1 << 0)
-#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
-#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
+#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define CM0_IZ_OPT_DISABLE (1 << 6)
+#define CM0_ZR_OPT_DISABLE (1 << 5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
-#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
-#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
-
-#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
-#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
-#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
-
-/* Fuse readout registers for GT */
-#define HSW_PAVP_FUSE1 _MMIO(0x911C)
-#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
-#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16)
-#define HSW_F1_EU_DIS_10EUS 0
-#define HSW_F1_EU_DIS_8EUS 1
-#define HSW_F1_EU_DIS_6EUS 2
-
-#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
-#define CHV_FGT_DISABLE_SS0 (1 << 10)
-#define CHV_FGT_DISABLE_SS1 (1 << 11)
-#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
-#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
-#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
-#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
-#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
-#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
-#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
-#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
-
-#define GEN8_FUSE2 _MMIO(0x9120)
-#define GEN8_F2_SS_DIS_SHIFT 21
-#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
-#define GEN8_F2_S_ENA_SHIFT 25
-#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
-
-#define GEN9_F2_SS_DIS_SHIFT 20
-#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
-
-#define GEN10_F2_S_ENA_SHIFT 22
-#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
-#define GEN10_F2_SS_DIS_SHIFT 18
-#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
-
-#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
-#define GEN10_L3BANK_PAIR_COUNT 4
-#define GEN10_L3BANK_MASK 0x0F
-/* on Xe_HP the same fuses indicates mslices instead of L3 banks */
-#define GEN12_MAX_MSLICES 4
-#define GEN12_MEML3_EN_MASK 0x0F
-
-#define GEN8_EU_DISABLE0 _MMIO(0x9134)
-#define GEN8_EU_DIS0_S0_MASK 0xffffff
-#define GEN8_EU_DIS0_S1_SHIFT 24
-#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
-
-#define GEN8_EU_DISABLE1 _MMIO(0x9138)
-#define GEN8_EU_DIS1_S1_MASK 0xffff
-#define GEN8_EU_DIS1_S2_SHIFT 16
-#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
-
-#define GEN8_EU_DISABLE2 _MMIO(0x913c)
-#define GEN8_EU_DIS2_S2_MASK 0xff
-
-#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
-
-#define GEN10_EU_DISABLE3 _MMIO(0x9140)
-#define GEN10_EU_DIS_SS_MASK 0xff
-
-#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
-#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
-#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
-
-#define GEN11_EU_DISABLE _MMIO(0x9134)
-#define GEN11_EU_DIS_MASK 0xFF
-
-#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
-#define GEN11_GT_S_ENA_MASK 0xFF
-
-#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
-
-#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913C)
-#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
-
-#define XEHP_EU_ENABLE _MMIO(0x9134)
-#define XEHP_EU_ENA_MASK 0xFF
-
-#define CRSTANDVID _MMIO(0x11100)
-#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
-#define PXVFREQ_PX_MASK 0x7f000000
-#define PXVFREQ_PX_SHIFT 24
-#define VIDFREQ_BASE _MMIO(0x11110)
-#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
-#define VIDFREQ2 _MMIO(0x11114)
-#define VIDFREQ3 _MMIO(0x11118)
-#define VIDFREQ4 _MMIO(0x1111c)
-#define VIDFREQ_P0_MASK 0x1f000000
-#define VIDFREQ_P0_SHIFT 24
-#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
-#define VIDFREQ_P0_CSCLK_SHIFT 20
-#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
-#define VIDFREQ_P0_CRCLK_SHIFT 16
-#define VIDFREQ_P1_MASK 0x00001f00
-#define VIDFREQ_P1_SHIFT 8
-#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
-#define VIDFREQ_P1_CSCLK_SHIFT 4
-#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
-#define INTTOEXT_BASE_ILK _MMIO(0x11300)
-#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
-#define INTTOEXT_MAP3_SHIFT 24
-#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
-#define INTTOEXT_MAP2_SHIFT 16
-#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
-#define INTTOEXT_MAP1_SHIFT 8
-#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
-#define INTTOEXT_MAP0_SHIFT 0
-#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
-#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
-#define MEMCTL_CMD_MASK 0xe000
-#define MEMCTL_CMD_SHIFT 13
-#define MEMCTL_CMD_RCLK_OFF 0
-#define MEMCTL_CMD_RCLK_ON 1
-#define MEMCTL_CMD_CHFREQ 2
-#define MEMCTL_CMD_CHVID 3
-#define MEMCTL_CMD_VMMOFF 4
-#define MEMCTL_CMD_VMMON 5
-#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
- when command complete */
-#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
-#define MEMCTL_FREQ_SHIFT 8
-#define MEMCTL_SFCAVM (1 << 7)
-#define MEMCTL_TGT_VID_MASK 0x007f
-#define MEMIHYST _MMIO(0x1117c)
-#define MEMINTREN _MMIO(0x11180) /* 16 bits */
-#define MEMINT_RSEXIT_EN (1 << 8)
-#define MEMINT_CX_SUPR_EN (1 << 7)
-#define MEMINT_CONT_BUSY_EN (1 << 6)
-#define MEMINT_AVG_BUSY_EN (1 << 5)
-#define MEMINT_EVAL_CHG_EN (1 << 4)
-#define MEMINT_MON_IDLE_EN (1 << 3)
-#define MEMINT_UP_EVAL_EN (1 << 2)
-#define MEMINT_DOWN_EVAL_EN (1 << 1)
-#define MEMINT_SW_CMD_EN (1 << 0)
-#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
-#define MEM_RSEXIT_MASK 0xc000
-#define MEM_RSEXIT_SHIFT 14
-#define MEM_CONT_BUSY_MASK 0x3000
-#define MEM_CONT_BUSY_SHIFT 12
-#define MEM_AVG_BUSY_MASK 0x0c00
-#define MEM_AVG_BUSY_SHIFT 10
-#define MEM_EVAL_CHG_MASK 0x0300
-#define MEM_EVAL_BUSY_SHIFT 8
-#define MEM_MON_IDLE_MASK 0x00c0
-#define MEM_MON_IDLE_SHIFT 6
-#define MEM_UP_EVAL_MASK 0x0030
-#define MEM_UP_EVAL_SHIFT 4
-#define MEM_DOWN_EVAL_MASK 0x000c
-#define MEM_DOWN_EVAL_SHIFT 2
-#define MEM_SW_CMD_MASK 0x0003
-#define MEM_INT_STEER_GFX 0
-#define MEM_INT_STEER_CMR 1
-#define MEM_INT_STEER_SMI 2
-#define MEM_INT_STEER_SCI 3
-#define MEMINTRSTS _MMIO(0x11184)
-#define MEMINT_RSEXIT (1 << 7)
-#define MEMINT_CONT_BUSY (1 << 6)
-#define MEMINT_AVG_BUSY (1 << 5)
-#define MEMINT_EVAL_CHG (1 << 4)
-#define MEMINT_MON_IDLE (1 << 3)
-#define MEMINT_UP_EVAL (1 << 2)
-#define MEMINT_DOWN_EVAL (1 << 1)
-#define MEMINT_SW_CMD (1 << 0)
-#define MEMMODECTL _MMIO(0x11190)
-#define MEMMODE_BOOST_EN (1 << 31)
-#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
-#define MEMMODE_BOOST_FREQ_SHIFT 24
-#define MEMMODE_IDLE_MODE_MASK 0x00030000
-#define MEMMODE_IDLE_MODE_SHIFT 16
-#define MEMMODE_IDLE_MODE_EVAL 0
-#define MEMMODE_IDLE_MODE_CONT 1
-#define MEMMODE_HWIDLE_EN (1 << 15)
-#define MEMMODE_SWMODE_EN (1 << 14)
-#define MEMMODE_RCLK_GATE (1 << 13)
-#define MEMMODE_HW_UPDATE (1 << 12)
-#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
-#define MEMMODE_FSTART_SHIFT 8
-#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
-#define MEMMODE_FMAX_SHIFT 4
-#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
-#define RCBMAXAVG _MMIO(0x1119c)
-#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
-#define SWMEMCMD_RENDER_OFF (0 << 13)
-#define SWMEMCMD_RENDER_ON (1 << 13)
-#define SWMEMCMD_SWFREQ (2 << 13)
-#define SWMEMCMD_TARVID (3 << 13)
-#define SWMEMCMD_VRM_OFF (4 << 13)
-#define SWMEMCMD_VRM_ON (5 << 13)
-#define CMDSTS (1 << 12)
-#define SFCAVM (1 << 11)
-#define SWFREQ_MASK 0x0380 /* P0-7 */
-#define SWFREQ_SHIFT 7
-#define TARVID_MASK 0x001f
-#define MEMSTAT_CTG _MMIO(0x111a0)
-#define RCBMINAVG _MMIO(0x111a0)
-#define RCUPEI _MMIO(0x111b0)
-#define RCDNEI _MMIO(0x111b4)
-#define RSTDBYCTL _MMIO(0x111b8)
-#define RS1EN (1 << 31)
-#define RS2EN (1 << 30)
-#define RS3EN (1 << 29)
-#define D3RS3EN (1 << 28) /* Display D3 implies RS3 */
-#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
-#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
-#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
-#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
-#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
-#define RSX_STATUS_MASK (7 << 20)
-#define RSX_STATUS_ON (0 << 20)
-#define RSX_STATUS_RC1 (1 << 20)
-#define RSX_STATUS_RC1E (2 << 20)
-#define RSX_STATUS_RS1 (3 << 20)
-#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
-#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
-#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
-#define RSX_STATUS_RSVD2 (7 << 20)
-#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
-#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
-#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
-#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
-#define RS1CONTSAV_MASK (3 << 14)
-#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
-#define RS1CONTSAV_RSVD (1 << 14)
-#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
-#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
-#define NORMSLEXLAT_MASK (3 << 12)
-#define SLOW_RS123 (0 << 12)
-#define SLOW_RS23 (1 << 12)
-#define SLOW_RS3 (2 << 12)
-#define NORMAL_RS123 (3 << 12)
-#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
-#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
-#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
-#define RS_CSTATE_MASK (3 << 4)
-#define RS_CSTATE_C367_RS1 (0 << 4)
-#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
-#define RS_CSTATE_RSVD (2 << 4)
-#define RS_CSTATE_C367_RS2 (3 << 4)
-#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
-#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
-#define VIDCTL _MMIO(0x111c0)
-#define VIDSTS _MMIO(0x111c8)
-#define VIDSTART _MMIO(0x111cc) /* 8 bits */
-#define MEMSTAT_ILK _MMIO(0x111f8)
-#define MEMSTAT_VID_MASK 0x7f00
-#define MEMSTAT_VID_SHIFT 8
-#define MEMSTAT_PSTATE_MASK 0x00f8
-#define MEMSTAT_PSTATE_SHIFT 3
-#define MEMSTAT_MON_ACTV (1 << 2)
-#define MEMSTAT_SRC_CTL_MASK 0x0003
-#define MEMSTAT_SRC_CTL_CORE 0
-#define MEMSTAT_SRC_CTL_TRB 1
-#define MEMSTAT_SRC_CTL_THM 2
-#define MEMSTAT_SRC_CTL_STDBY 3
-#define RCPREVBSYTUPAVG _MMIO(0x113b8)
-#define RCPREVBSYTDNAVG _MMIO(0x113bc)
-#define PMMISC _MMIO(0x11214)
-#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
-#define SDEW _MMIO(0x1124c)
-#define CSIEW0 _MMIO(0x11250)
-#define CSIEW1 _MMIO(0x11254)
-#define CSIEW2 _MMIO(0x11258)
-#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
-#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
-#define MCHAFE _MMIO(0x112c0)
-#define CSIEC _MMIO(0x112e0)
-#define DMIEC _MMIO(0x112e4)
-#define DDREC _MMIO(0x112e8)
-#define PEG0EC _MMIO(0x112ec)
-#define PEG1EC _MMIO(0x112f0)
-#define GFXEC _MMIO(0x112f4)
-#define RPPREVBSYTUPAVG _MMIO(0x113b8)
-#define RPPREVBSYTDNAVG _MMIO(0x113bc)
-#define ECR _MMIO(0x11600)
-#define ECR_GPFE (1 << 31)
-#define ECR_IMONE (1 << 30)
-#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
-#define OGW0 _MMIO(0x11608)
-#define OGW1 _MMIO(0x1160c)
-#define EG0 _MMIO(0x11610)
-#define EG1 _MMIO(0x11614)
-#define EG2 _MMIO(0x11618)
-#define EG3 _MMIO(0x1161c)
-#define EG4 _MMIO(0x11620)
-#define EG5 _MMIO(0x11624)
-#define EG6 _MMIO(0x11628)
-#define EG7 _MMIO(0x1162c)
-#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
-#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
-#define LCFUSE02 _MMIO(0x116c0)
-#define LCFUSE_HIV_MASK 0x000000ff
-
-#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
-#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
/*
* Logical Context regs
@@ -798,427 +185,541 @@
* - GT1 size just indicates how much of render context
* doesn't need saving on GT1
*/
-#define CXT_SIZE _MMIO(0x21a0)
-#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
-#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
-#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
-#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
-#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
-#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
- GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
- GEN6_CXT_PIPELINE_SIZE(cxt_reg))
-#define GEN7_CXT_SIZE _MMIO(0x21a8)
-#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
-#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
-#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
-#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
-#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
-#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
-#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
- GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-
-enum {
- INTEL_ADVANCED_CONTEXT = 0,
- INTEL_LEGACY_32B_CONTEXT,
- INTEL_ADVANCED_AD_CONTEXT,
- INTEL_LEGACY_64B_CONTEXT
-};
-
-enum {
- FAULT_AND_HANG = 0,
- FAULT_AND_HALT, /* Debug only */
- FAULT_AND_STREAM,
- FAULT_AND_CONTINUE /* Unsupported */
-};
-
-#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12)
-#define GEN8_CTX_VALID (1 << 0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
-#define GEN8_CTX_FORCE_RESTORE (1 << 2)
-#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
-#define GEN8_CTX_PRIVILEGE (1 << 8)
-#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
-
-#define GEN8_CTX_ID_SHIFT 32
-#define GEN8_CTX_ID_WIDTH 21
-#define GEN11_SW_CTX_ID_SHIFT 37
-#define GEN11_SW_CTX_ID_WIDTH 11
-#define GEN11_ENGINE_CLASS_SHIFT 61
-#define GEN11_ENGINE_CLASS_WIDTH 3
-#define GEN11_ENGINE_INSTANCE_SHIFT 48
-#define GEN11_ENGINE_INSTANCE_WIDTH 6
-
-#define XEHP_SW_CTX_ID_SHIFT 39
-#define XEHP_SW_CTX_ID_WIDTH 16
-#define XEHP_SW_COUNTER_SHIFT 58
-#define XEHP_SW_COUNTER_WIDTH 6
-
-#define UNSLCGCTL9440 _MMIO(0x9440)
-#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
-#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
-#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
-#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
-
-#define UNSLCGCTL9444 _MMIO(0x9444)
-#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
-#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
-#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
-#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
-#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
-#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
-#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
-#define LTCDD_CLKGATE_DIS REG_BIT(10)
-
-#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
-#define SARBUNIT_CLKGATE_DIS (1 << 5)
-#define RCCUNIT_CLKGATE_DIS (1 << 7)
-#define MSCUNIT_CLKGATE_DIS (1 << 10)
-#define NODEDSS_CLKGATE_DIS REG_BIT(12)
-#define L3_CLKGATE_DIS REG_BIT(16)
-#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
-
-#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
-#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
-#define GWUNIT_CLKGATE_DIS REG_BIT(16)
-
-#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
-#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
-
-#define SSMCGCTL9530 _MMIO(0x9530)
-#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
-
-#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
-#define VFUNIT_CLKGATE_DIS REG_BIT(20)
-#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
-#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
-#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
-#define HSUNIT_CLKGATE_DIS REG_BIT(8)
-#define VSUNIT_CLKGATE_DIS REG_BIT(3)
-
-#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
-#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
-#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
-
-#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
-#define CGPSF_CLKGATE_DIS (1 << 3)
-
-#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
-#define GEN11_CSME (31)
-#define GEN11_GUNIT (28)
-#define GEN11_GUC (25)
-#define GEN11_WDPERF (20)
-#define GEN11_KCR (19)
-#define GEN11_GTPM (16)
-#define GEN11_BCS (15)
-#define GEN11_RCS0 (0)
-
-#define GEN11_GT_INTR_DW1 _MMIO(0x19001c)
-#define GEN11_VECS(x) (31 - (x))
-#define GEN11_VCS(x) (x)
-
-#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
-
-#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
-#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
-#define GEN11_INTR_DATA_VALID (1 << 31)
-#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
-#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
-#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
-/* irq instances for OTHER_CLASS */
-#define OTHER_GUC_INSTANCE 0
-#define OTHER_GTPM_INSTANCE 1
-#define OTHER_KCR_INSTANCE 4
-
-#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
-
-#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
-#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
-
-#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
-
-#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
-#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
-#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
-#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
-#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040)
-#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044)
-
-#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
-#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
-#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
-#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
-#define GEN12_VCS4_VCS5_INTR_MASK _MMIO(0x1900b0)
-#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4)
-#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
-#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4)
-#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
-#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
-#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
-#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
-
-#define ENGINE1_MASK REG_GENMASK(31, 16)
-#define ENGINE0_MASK REG_GENMASK(15, 0)
-
-#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
-#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
-
-#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
-#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
-#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
+#define CXT_SIZE _MMIO(0x21a0)
+#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE _MMIO(0x21a8)
+#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
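These size fields are plain shift-and-mask extractions; a minimal standalone sketch of the GEN6 decode under that reading, using an invented register value:

#include <stdio.h>

#define GEN6_CXT_RING_SIZE(r)     (((r) >> 18) & 0x3f)
#define GEN6_CXT_EXTENDED_SIZE(r) (((r) >> 6) & 0x3f)
#define GEN6_CXT_PIPELINE_SIZE(r) (((r) >> 0) & 0x3f)
#define GEN6_CXT_TOTAL_SIZE(r)    (GEN6_CXT_RING_SIZE(r) + \
                                   GEN6_CXT_EXTENDED_SIZE(r) + \
                                   GEN6_CXT_PIPELINE_SIZE(r))

int main(void)
{
    unsigned int cxt_size = 0x00442281; /* hypothetical CXT_SIZE readout */

    printf("ring=%u extended=%u pipeline=%u total=%u\n",
           GEN6_CXT_RING_SIZE(cxt_size),
           GEN6_CXT_EXTENDED_SIZE(cxt_size),
           GEN6_CXT_PIPELINE_SIZE(cxt_size),
           GEN6_CXT_TOTAL_SIZE(cxt_size));
    return 0;
}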
+#define HSW_MI_PREDICATE_RESULT_2 _MMIO(0x2214)
+
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
+#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+
+#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
+#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
+
+#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
+#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
+
+#define HS_INVOCATION_COUNT _MMIO(0x2300)
+#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
+#define DS_INVOCATION_COUNT _MMIO(0x2308)
+#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
+#define IA_VERTICES_COUNT _MMIO(0x2310)
+#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
+#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
+#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
+#define VS_INVOCATION_COUNT _MMIO(0x2320)
+#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
+#define GS_INVOCATION_COUNT _MMIO(0x2328)
+#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
+#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
+#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
+#define CL_INVOCATION_COUNT _MMIO(0x2338)
+#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
+#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
+#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
+#define PS_INVOCATION_COUNT _MMIO(0x2348)
+#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
+#define PS_DEPTH_COUNT _MMIO(0x2350)
+#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)
+#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
+#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
+#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
+#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
+#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243c)
+#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)
+#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
+#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
+#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
+
+#define GFX_MODE _MMIO(0x2520)
+
+#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
+#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
+#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
+#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+
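A minimal standalone check of the two-bit GPGPU preemption-level encoding above (bit 0 is the separate 3D object-level control, bits 2:1 carry the GPGPU level):

#include <assert.h>

#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))

int main(void)
{
    assert(GEN9_PREEMPT_GPGPU_LEVEL(0, 0) == 0x0); /* mid-thread */
    assert(GEN9_PREEMPT_GPGPU_LEVEL(0, 1) == 0x2); /* thread group */
    assert(GEN9_PREEMPT_GPGPU_LEVEL(1, 0) == 0x4); /* command level */
    assert(GEN9_PREEMPT_GPGPU_LEVEL(1, 1) == 0x6); /* field mask */
    return 0;
}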
+#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+
+#define RENDER_HWS_PGA_GEN7 _MMIO(0x4080)
+
+#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
+#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
-#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
-#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
-#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
-#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+#define GAM_ECOCHK _MMIO(0x4090)
+#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define ECOCHK_SNB_BIT (1 << 10)
+#define ECOCHK_DIS_TLB (1 << 8)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
+#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
+#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
+#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
+#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+
+#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define _RING_FAULT_REG_RCS 0x4094
+#define _RING_FAULT_REG_VCS 0x4194
+#define _RING_FAULT_REG_BCS 0x4294
+#define _RING_FAULT_REG_VECS 0x4394
+#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \
+ _RING_FAULT_REG_RCS, \
+ _RING_FAULT_REG_VCS, \
+ _RING_FAULT_REG_VECS, \
+ _RING_FAULT_REG_BCS))
+
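A standalone sketch of the indexing RING_FAULT_REG() relies on, assuming _PICK() is the kernel's compound-literal lookup and that engine classes number render=0, video=1, video-enhance=2, copy=3 (which is why the VECS entry precedes the BCS one in the argument list):

#include <stdio.h>

typedef unsigned int u32;

/* Equivalent of the kernel's _PICK(): index into a compound-literal array. */
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])

int main(void)
{
    /* render=0, video=1, video-enhance=2, copy=3 */
    for (u32 engine_class = 0; engine_class < 4; engine_class++)
        printf("class %u -> fault reg 0x%x\n", engine_class,
               _PICK(engine_class, 0x4094, 0x4194, 0x4394, 0x4294));
    return 0;
}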
+#define ERROR_GEN6 _MMIO(0x40a0)
+
+#define DONE_REG _MMIO(0x40b0)
+#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
+#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
+#define BSD_HWS_PGA_GEN7 _MMIO(0x4180)
+#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
+#define GEN12_VD0_AUX_NV _MMIO(0x4218)
+#define GEN12_VD1_AUX_NV _MMIO(0x4228)
+
+#define GEN8_RTCR _MMIO(0x4260)
+#define GEN8_M1TCR _MMIO(0x4264)
+#define GEN8_M2TCR _MMIO(0x4268)
+#define GEN8_BTCR _MMIO(0x426c)
+#define GEN8_VTCR _MMIO(0x4270)
+
+#define GEN12_VD2_AUX_NV _MMIO(0x4298)
+#define GEN12_VD3_AUX_NV _MMIO(0x42a8)
+#define GEN12_VE0_AUX_NV _MMIO(0x4238)
+
+#define BLT_HWS_PGA_GEN7 _MMIO(0x4280)
+
+#define GEN12_VE1_AUX_NV _MMIO(0x42b8)
+#define AUX_INV REG_BIT(0)
+#define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380)
+
+#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+
+#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+
+#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
+
+#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
+#define XEHPSDV_CCS_BASE_SHIFT 8
+
+#define GAMTARBMODE _MMIO(0x4a08)
+#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define ARB_MODE_SWIZZLE_BDW (1 << 1)
+
+#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
-#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC)
-#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0)
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
-#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
-#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
-#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
-#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
-#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
-#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
+#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
-/* GEN7 chicken */
-#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
- #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10)
- #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+#define GEN11_GACB_PERF_CTRL _MMIO(0x4b80)
+#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
+#define GEN11_HASH_CTRL_BIT0 (1 << 0)
+#define GEN11_HASH_CTRL_BIT4 (1 << 12)
-#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
- #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
- #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
- #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
- #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+/* gamt regs */
+#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
-#define GEN8_L3CNTLREG _MMIO(0x7034)
- #define GEN8_ERRDETBCTRL (1 << 9)
+#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
+#define MMCD_PCLA (1 << 31)
+#define MMCD_HOTSPOT_EN (1 << 27)
-#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
-#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
-#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
-#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
-#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+/* There are four 64-bit counter registers, one for each stream output */
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
+#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)
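Reading one of these counters takes two 32-bit accesses; a hedged sketch of a tear-safe read, with mmio_read32() as a hypothetical stand-in for whatever accessor the driver actually uses:

#include <stdint.h>

/* Hypothetical 32-bit MMIO accessor; stands in for the driver's own. */
extern uint32_t mmio_read32(uint32_t reg);

/*
 * Read a 64-bit counter exposed as a low/high 32-bit pair. The high
 * half is re-read until it is stable across the low-half read, so a
 * carry between the two accesses cannot be observed as a torn value.
 */
uint64_t read_counter64(uint32_t reg_ldw, uint32_t reg_udw)
{
    uint32_t hi, lo, tmp;

    do {
        hi = mmio_read32(reg_udw);
        lo = mmio_read32(reg_ldw);
        tmp = mmio_read32(reg_udw);
    } while (hi != tmp);

    return ((uint64_t)hi << 32) | lo;
}

/* e.g. stream n: read_counter64(0x5200 + n * 8, 0x5200 + n * 8 + 4) */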
-#define HIZ_CHICKEN _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
-# define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
+#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
-#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
-#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
+#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
+#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
-#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
-#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+#define VFLSKPD _MMIO(0x62a8)
+#define DIS_OVER_FETCH_CACHE REG_BIT(1)
+#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
-#define GEN7_SARCHKMD _MMIO(0xB000)
-#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
-#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
+#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
+#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
+#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
-#define GEN7_L3SQCREG1 _MMIO(0xB010)
-#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
+#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c)
-#define GEN8_L3SQCREG1 _MMIO(0xB100)
-/*
- * Note that on CHV the following has an off-by-one error wrt. BSpec.
- * Using the formula in BSpec leads to a hang, while the formula here works
- * fine and matches the formulas for all other platforms. A BSpec change
- * request has been filed to clarify this.
- */
-#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
-#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
-#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
+#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
+#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
-#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
-#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
-#define GEN7_L3AGDIS (1 << 19)
-#define GEN7_L3CNTLREG2 _MMIO(0xB020)
-#define GEN7_L3CNTLREG3 _MMIO(0xB024)
+#define GEN7_GT_MODE _MMIO(0x7008)
+#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
+#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
-#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030)
-#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
-#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xB114)
-#define GEN11_I2M_WRITE_DISABLE (1 << 28)
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
+#define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10)
+#define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
-#define GEN7_L3SQCREG4 _MMIO(0xb034)
-#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+#define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+#define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+#define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+#define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
-#define GEN11_SCRATCH2 _MMIO(0xb140)
-#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+#define HIZ_CHICKEN _MMIO(0x7018)
+#define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
+#define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
+#define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
-#define GEN8_L3SQCREG4 _MMIO(0xb118)
-#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
-#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
-#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
-#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
+#define GEN8_L3CNTLREG _MMIO(0x7034)
+#define GEN8_ERRDETBCTRL (1 << 9)
-#define GEN11_L3SQCREG5 _MMIO(0xb158)
-#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
-
-#define XEHP_L3SCQREG7 _MMIO(0xb188)
-#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+#define GEN7_SC_INSTDONE _MMIO(0x7100)
+#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
+#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
-#define ICL_HDC_MODE _MMIO(0xE5F4)
-#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
-#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
-#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
-#define HDC_FORCE_NON_COHERENT (1 << 4)
-#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
-
-#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0)
-#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
-
-#define SARB_CHICKEN1 _MMIO(0xe90c)
-#define COMP_CKN_IN REG_GENMASK(30, 29)
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
+#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
+#define HDC_FORCE_NON_COHERENT (1 << 4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
+#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
+#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
+#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+
/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
-#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
-#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
+#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+
+#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+
+#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
+#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
+ ((slice) % 3) * 0x4)
+#define GEN9_PGCTL_SLICE_ACK (1 << 0)
+#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
+#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
+
+#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
+#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
+#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
+#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
+#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
+#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
+#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
+#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
+#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
+#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
+#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+
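The GEN10 variants pack three slices per 0x34-byte block with a 4-byte stride inside a block; a standalone check of the addresses that arithmetic produces:

#include <stdio.h>

#define GEN10_SLICE_PGCTL_ACK(slice) \
    (0x804c + ((slice) / 3) * 0x34 + ((slice) % 3) * 0x4)

int main(void)
{
    for (int slice = 0; slice < 6; slice++)
        printf("slice %d -> 0x%x\n", slice, GEN10_SLICE_PGCTL_ACK(slice));
    return 0;
}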
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
+#define GEN12_SQCM _MMIO(0x8724)
+#define EN_32B_ACCESS REG_BIT(30)
+
+#define HSW_IDICR _MMIO(0x9008)
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+
+#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3 << 21)
+#define GEN6_MBC_SNPCR_MAX (0 << 21)
+#define GEN6_MBC_SNPCR_MED (1 << 21)
+#define GEN6_MBC_SNPCR_LOW (2 << 21)
+#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
+
+#define VLV_G3DCTL _MMIO(0x9024)
+#define VLV_GSCKGCTL _MMIO(0x9028)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
-#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
-#define HSW_SCRATCH1 _MMIO(0xb038)
-#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
-
-#define BDW_SCRATCH1 _MMIO(0xb11c)
-#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
-
-#define VFLSKPD _MMIO(0x62a8)
-#define DIS_OVER_FETCH_CACHE REG_BIT(1)
-#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
-
-#define FF_MODE2 _MMIO(0x6604)
-#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
-#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
-#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
-#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
-
-#define RC6_LOCATION _MMIO(0xD40)
-#define RC6_CTX_IN_DRAM (1 << 0)
-#define RC6_CTX_BASE _MMIO(0xD48)
-#define RC6_CTX_BASE_MASK 0xFFFFFFF0
-#define FORCEWAKE _MMIO(0xA18C)
-#define FORCEWAKE_VLV _MMIO(0x1300b0)
-#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
-#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8)
-#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc)
-#define FORCEWAKE_ACK_HSW _MMIO(0x130044)
-#define FORCEWAKE_ACK _MMIO(0x130090)
-#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090)
-#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
-#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
-#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
+#define FBC_LLC_READ_CTRL _MMIO(0x9044)
+#define FBC_LLC_FULLY_OPEN REG_BIT(30)
-#define VLV_GTLC_PW_STATUS _MMIO(0x130094)
-#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
-#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
-#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
-#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
-#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
-#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
-#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
-#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
-#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
-#define FORCEWAKE_GT_GEN9 _MMIO(0xa188)
-#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
-#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4)
-#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4)
-#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
-#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044)
-#define FORCEWAKE_KERNEL BIT(0)
-#define FORCEWAKE_USER BIT(1)
-#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
-#define FORCEWAKE_MT_ACK _MMIO(0x130040)
-#define ECOBUS _MMIO(0xa180)
-#define FORCEWAKE_MT_ENABLE (1 << 5)
-#define VLV_SPAREG2H _MMIO(0xA194)
-#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
-#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
-#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
+#define GEN6_MBCTL _MMIO(0x907c)
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
-#define GTFIFODBG _MMIO(0x120000)
-#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
-#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
-#define GT_FIFO_SBDROPERR (1 << 6)
-#define GT_FIFO_BLOBDROPERR (1 << 5)
-#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
-#define GT_FIFO_DROPERR (1 << 3)
-#define GT_FIFO_OVFERR (1 << 2)
-#define GT_FIFO_IAWRERR (1 << 1)
-#define GT_FIFO_IARDERR (1 << 0)
-
-#define GTFIFOCTL _MMIO(0x120008)
-#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
-#define GT_FIFO_NUM_RESERVED_ENTRIES 20
-#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
-#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
-
-#define HSW_IDICR _MMIO(0x9008)
-#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+/* Fuse readout registers for GT */
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+/* on Xe_HP the same fuses indicate mslices instead of L3 banks */
+#define GEN12_MAX_MSLICES 4
+#define GEN12_MEML3_EN_MASK 0x0F
+
+#define HSW_PAVP_FUSE1 _MMIO(0x911c)
+#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
+#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16)
+#define HSW_F1_EU_DIS_10EUS 0
+#define HSW_F1_EU_DIS_8EUS 1
+#define HSW_F1_EU_DIS_6EUS 2
+
+#define GEN8_FUSE2 _MMIO(0x9120)
+#define GEN8_F2_SS_DIS_SHIFT 21
+#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
+#define GEN8_F2_S_ENA_SHIFT 25
+#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
+#define GEN9_F2_SS_DIS_SHIFT 20
+#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+#define GEN10_F2_S_ENA_SHIFT 22
+#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
+#define GEN10_F2_SS_DIS_SHIFT 18
+#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0 _MMIO(0x9134)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
+#define GEN11_EU_DISABLE _MMIO(0x9134)
+#define GEN8_EU_DIS0_S0_MASK 0xffffff
+#define GEN8_EU_DIS0_S1_SHIFT 24
+#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
+#define GEN11_EU_DIS_MASK 0xFF
+#define XEHP_EU_ENABLE _MMIO(0x9134)
+#define XEHP_EU_ENA_MASK 0xFF
+
+#define GEN8_EU_DISABLE1 _MMIO(0x9138)
+#define GEN8_EU_DIS1_S1_MASK 0xffff
+#define GEN8_EU_DIS1_S2_SHIFT 16
+#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
+
+#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
+#define GEN11_GT_S_ENA_MASK 0xFF
+
+#define GEN8_EU_DISABLE2 _MMIO(0x913c)
+#define GEN8_EU_DIS2_S2_MASK 0xff
+
+#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913c)
+#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913c)
+
+#define GEN10_EU_DISABLE3 _MMIO(0x9140)
+#define GEN10_EU_DIS_SS_MASK 0xff
+#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
+#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
+#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
+#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
+
+#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
#define GEN6_UCGCTL1 _MMIO(0x9400)
-# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
-# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
-# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
+#define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
+#define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
+#define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+#define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 _MMIO(0x9404)
-# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
-# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
-# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
-# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
-# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
-# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+#define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
+#define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
+#define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+#define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define GEN6_UCGCTL3 _MMIO(0x9408)
-# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
+#define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
-#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
-#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
+
+#define GEN6_GDRST _MMIO(0x941c)
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+#define GEN6_GRDOM_VECS (1 << 4)
+#define GEN9_GRDOM_GUC (1 << 5)
+#define GEN8_GRDOM_MEDIA2 (1 << 7)
+/* GEN11 changed all bit defs except for FULL & RENDER */
+#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
+#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
+#define GEN11_GRDOM_BLT (1 << 2)
+#define GEN11_GRDOM_GUC (1 << 3)
+#define GEN11_GRDOM_MEDIA (1 << 5)
+#define GEN11_GRDOM_MEDIA2 (1 << 6)
+#define GEN11_GRDOM_MEDIA3 (1 << 7)
+#define GEN11_GRDOM_MEDIA4 (1 << 8)
+#define GEN11_GRDOM_MEDIA5 (1 << 9)
+#define GEN11_GRDOM_MEDIA6 (1 << 10)
+#define GEN11_GRDOM_MEDIA7 (1 << 11)
+#define GEN11_GRDOM_MEDIA8 (1 << 12)
+#define GEN11_GRDOM_VECS (1 << 13)
+#define GEN11_GRDOM_VECS2 (1 << 14)
+#define GEN11_GRDOM_VECS3 (1 << 15)
+#define GEN11_GRDOM_VECS4 (1 << 16)
+#define GEN11_GRDOM_SFC0 (1 << 17)
+#define GEN11_GRDOM_SFC1 (1 << 18)
+#define GEN11_GRDOM_SFC2 (1 << 19)
+#define GEN11_GRDOM_SFC3 (1 << 20)
+#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
+
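Since each SFC is shared by a pair of VCS engines, the VCS mapping halves the instance number while VECS maps one-to-one; a standalone check of the resulting bit placement:

#include <stdio.h>

#define GEN11_GRDOM_SFC0                   (1 << 17)
#define GEN11_VCS_SFC_RESET_BIT(instance)  (GEN11_GRDOM_SFC0 << ((instance) >> 1))
#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))

int main(void)
{
    for (int vcs = 0; vcs < 8; vcs++)
        printf("vcs%d -> 0x%08x\n", vcs, GEN11_VCS_SFC_RESET_BIT(vcs));
    for (int vecs = 0; vecs < 4; vecs++)
        printf("vecs%d -> 0x%08x\n", vecs, GEN11_VECS_SFC_RESET_BIT(vecs));
    return 0;
}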
#define GEN6_RSTCTL _MMIO(0x9420)
+#define GEN7_MISCCPCTL _MMIO(0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
+#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
+#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
+
#define GEN8_UCGCTL6 _MMIO(0x9430)
#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
-#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
#define UNSLCGCTL9430 _MMIO(0x9430)
#define MSQDUNIT_CLKGATE_DIS REG_BIT(3)
-#define GEN6_GFXPAUSE _MMIO(0xA000)
-#define GEN6_RPNSWREQ _MMIO(0xA008)
+#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
+#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
+#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLCGCTL9440 _MMIO(0x9440)
+#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
+#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
+#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
+#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
+
+#define UNSLCGCTL9444 _MMIO(0x9444)
+#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
+#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
+#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
+#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
+#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
+#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
+#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
+#define LTCDD_CLKGATE_DIS REG_BIT(10)
+
+#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
+#define SARBUNIT_CLKGATE_DIS (1 << 5)
+#define RCCUNIT_CLKGATE_DIS (1 << 7)
+#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define NODEDSS_CLKGATE_DIS REG_BIT(12)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
+
+#define SCCGCTL94DC _MMIO(0x94dc)
+#define CG3DDISURB REG_BIT(14)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
+#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
+#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
+#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
+#define GWUNIT_CLKGATE_DIS REG_BIT(16)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
+#define SSMCGCTL9530 _MMIO(0x9530)
+#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
+
+#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550)
+#define DFR_DISABLE (1 << 9)
+
+#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
+#define CGPSF_CLKGATE_DIS (1 << 3)
+
+#define MICRO_BP0_0 _MMIO(0x9800)
+#define MICRO_BP0_2 _MMIO(0x9804)
+#define MICRO_BP0_1 _MMIO(0x9808)
+#define MICRO_BP1_0 _MMIO(0x980c)
+#define MICRO_BP1_2 _MMIO(0x9810)
+#define MICRO_BP1_1 _MMIO(0x9814)
+#define MICRO_BP2_0 _MMIO(0x9818)
+#define MICRO_BP2_2 _MMIO(0x981c)
+#define MICRO_BP2_1 _MMIO(0x9820)
+#define MICRO_BP3_0 _MMIO(0x9824)
+#define MICRO_BP3_2 _MMIO(0x9828)
+#define MICRO_BP3_1 _MMIO(0x982c)
+#define MICRO_BP_TRIGGER _MMIO(0x9830)
+#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834)
+#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
+#define MICRO_BP_FIRED_ARMED _MMIO(0x983c)
+
+#define GEN6_GFXPAUSE _MMIO(0xa000)
+#define GEN6_RPNSWREQ _MMIO(0xa008)
#define GEN6_TURBO_DISABLE (1 << 31)
#define GEN6_FREQUENCY(x) ((x) << 25)
#define HSW_FREQUENCY(x) ((x) << 24)
@@ -1228,8 +729,7 @@ enum {
#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
#define GEN9_IGNORE_SLICE_RATIO (0 << 0)
-#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
-#define GEN6_RC_CONTROL _MMIO(0xA090)
+#define GEN6_RC_VIDEO_FREQ _MMIO(0xa00c)
#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17)
#define GEN6_RC_CTL_RC6_ENABLE (1 << 18)
@@ -1239,16 +739,16 @@ enum {
#define GEN7_RC_CTL_TO_MODE (1 << 28)
#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27)
#define GEN6_RC_CTL_HW_ENABLE (1 << 31)
-#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
-#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
-#define GEN6_RPSTAT1 _MMIO(0xA01C)
+#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xa010)
+#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xa014)
+#define GEN6_RPSTAT1 _MMIO(0xa01c)
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
#define GEN9_CAGF_SHIFT 23
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
-#define GEN6_RP_CONTROL _MMIO(0xA024)
+#define GEN6_RP_CONTROL _MMIO(0xa024)
#define GEN6_RP_MEDIA_TURBO (1 << 11)
#define GEN6_RP_MEDIA_MODE_MASK (3 << 9)
#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9)
@@ -1265,193 +765,295 @@ enum {
#define GEN6_RPSWCTL_SHIFT 9
#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT)
#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT)
-#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
-#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
-#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
+#define GEN6_RP_UP_THRESHOLD _MMIO(0xa02c)
+#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xa030)
+#define GEN6_RP_CUR_UP_EI _MMIO(0xa050)
#define GEN6_RP_EI_MASK 0xffffff
#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK
-#define GEN6_RP_CUR_UP _MMIO(0xA054)
+#define GEN6_RP_CUR_UP _MMIO(0xa054)
#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK
-#define GEN6_RP_PREV_UP _MMIO(0xA058)
-#define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C)
+#define GEN6_RP_PREV_UP _MMIO(0xa058)
+#define GEN6_RP_CUR_DOWN_EI _MMIO(0xa05c)
#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK
-#define GEN6_RP_CUR_DOWN _MMIO(0xA060)
-#define GEN6_RP_PREV_DOWN _MMIO(0xA064)
-#define GEN6_RP_UP_EI _MMIO(0xA068)
-#define GEN6_RP_DOWN_EI _MMIO(0xA06C)
-#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xA070)
-#define GEN6_RPDEUHWTC _MMIO(0xA080)
-#define GEN6_RPDEUC _MMIO(0xA084)
-#define GEN6_RPDEUCSW _MMIO(0xA088)
-#define GEN6_RC_STATE _MMIO(0xA094)
+#define GEN6_RP_CUR_DOWN _MMIO(0xa060)
+#define GEN6_RP_PREV_DOWN _MMIO(0xa064)
+#define GEN6_RP_UP_EI _MMIO(0xa068)
+#define GEN6_RP_DOWN_EI _MMIO(0xa06c)
+#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xa070)
+#define GEN6_RPDEUHWTC _MMIO(0xa080)
+#define GEN6_RPDEUC _MMIO(0xa084)
+#define GEN6_RPDEUCSW _MMIO(0xa088)
+#define GEN6_RC_CONTROL _MMIO(0xa090)
+#define GEN6_RC_STATE _MMIO(0xa094)
#define RC_SW_TARGET_STATE_SHIFT 16
#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT)
-#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
-#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
-#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
-#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xA0A0)
-#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8)
-#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC)
-#define GEN6_RC_SLEEP _MMIO(0xA0B0)
-#define GEN6_RCUBMABDTMR _MMIO(0xA0B0)
-#define GEN6_RC1e_THRESHOLD _MMIO(0xA0B4)
-#define GEN6_RC6_THRESHOLD _MMIO(0xA0B8)
-#define GEN6_RC6p_THRESHOLD _MMIO(0xA0BC)
-#define VLV_RCEDATA _MMIO(0xA0BC)
-#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
-#define GEN6_PMINTRMSK _MMIO(0xA168)
+#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xa098)
+#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xa09c)
+#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xa0a0)
+#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xa0a0)
+#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xa0a8)
+#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xa0ac)
+#define GEN6_RC_SLEEP _MMIO(0xa0b0)
+#define GEN6_RCUBMABDTMR _MMIO(0xa0b0)
+#define GEN6_RC1e_THRESHOLD _MMIO(0xa0b4)
+#define GEN6_RC6_THRESHOLD _MMIO(0xa0b8)
+#define GEN6_RC6p_THRESHOLD _MMIO(0xa0bc)
+#define VLV_RCEDATA _MMIO(0xa0bc)
+#define GEN6_RC6pp_THRESHOLD _MMIO(0xa0c0)
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xa0c4)
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xa0c8)
+
+#define GEN6_PMINTRMSK _MMIO(0xa168)
#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31)
#define ARAT_EXPIRED_INTRMSK (1 << 9)
-#define GEN8_MISC_CTRL0 _MMIO(0xA180)
-#define VLV_PWRDWNUPCTL _MMIO(0xA294)
-#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
-#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
-#define GEN9_PG_ENABLE _MMIO(0xA210)
+
+#define GEN8_MISC_CTRL0 _MMIO(0xa180)
+
+#define ECOBUS _MMIO(0xa180)
+#define FORCEWAKE_MT_ENABLE (1 << 5)
+
+#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
+#define FORCEWAKE_GT_GEN9 _MMIO(0xa188)
+#define FORCEWAKE _MMIO(0xa18c)
+
+#define VLV_SPAREG2H _MMIO(0xa194)
+
+#define GEN9_PG_ENABLE _MMIO(0xa210)
#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n))
#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n))
-#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
-#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
-#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
-#define GEN6_PMISR _MMIO(0x44020)
-#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
-#define GEN6_PMIIR _MMIO(0x44028)
-#define GEN6_PMIER _MMIO(0x4402C)
-#define GEN6_PM_MBOX_EVENT (1 << 25)
-#define GEN6_PM_THERMAL_EVENT (1 << 24)
+#define GEN8_PUSHBUS_CONTROL _MMIO(0xa248)
+#define GEN8_PUSHBUS_ENABLE _MMIO(0xa250)
+#define GEN8_PUSHBUS_SHIFT _MMIO(0xa25c)
-/*
- * For Gen11 these are in the upper word of the GPM_WGBOXPERF
- * registers. Shifting is handled on accessing the imr and ier.
- */
-#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
-#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
-#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
-#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
-#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
-#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \
- GEN6_PM_RP_UP_THRESHOLD | \
- GEN6_PM_RP_DOWN_EI_EXPIRED | \
- GEN6_PM_RP_DOWN_THRESHOLD | \
- GEN6_PM_RP_DOWN_TIMEOUT)
+/* GPM unit config (Gen9+) */
+#define CTC_MODE _MMIO(0xa26c)
+#define CTC_SOURCE_PARAMETER_MASK 1
+#define CTC_SOURCE_CRYSTAL_CLOCK 0
+#define CTC_SOURCE_DIVIDE_LOGIC 1
+#define CTC_SHIFT_PARAMETER_SHIFT 1
+#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
-#define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4)
-#define GEN7_GT_SCRATCH_REG_NUM 8
+#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
+#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
-#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
-#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
-#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
+#define VLV_PWRDWNUPCTL _MMIO(0xa294)
-#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
-#define VLV_COUNTER_CONTROL _MMIO(0x138104)
-#define VLV_COUNT_RANGE_HIGH (1 << 15)
-#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
-#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
-#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
-#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
-#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
-#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
-#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C)
+#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xa2a0)
+#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
+#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
-#define GEN6_GT_GFX_RC6p _MMIO(0x13810C)
-#define GEN6_GT_GFX_RC6pp _MMIO(0x138110)
-#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
-#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
-
-#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
-#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
-#define GEN6_RCn_MASK 7
-#define GEN6_RC0 0
-#define GEN6_RC3 2
-#define GEN6_RC6 3
-#define GEN6_RC7 4
-
-#define GEN8_GT_SLICE_INFO _MMIO(0x138064)
-#define GEN8_LSLICESTAT_MASK 0x7
-
-#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
-#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
-#define CHV_SS_PG_ENABLE (1 << 1)
-#define CHV_EU08_PG_ENABLE (1 << 9)
-#define CHV_EU19_PG_ENABLE (1 << 17)
-#define CHV_EU210_PG_ENABLE (1 << 25)
-
-#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
-#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
-#define CHV_EU311_PG_ENABLE (1 << 1)
-
-#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
-#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
- ((slice) % 3) * 0x4)
-#define GEN9_PGCTL_SLICE_ACK (1 << 0)
-#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
-#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
-
-#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
-#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
- ((slice) % 3) * 0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
-#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
- ((slice) % 3) * 0x8)
-#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
-#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
-#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
-#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
-#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
-#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
-#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
-#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+#define MISC_STATUS0 _MMIO(0xa500)
+#define MISC_STATUS1 _MMIO(0xa504)
-#define GEN7_MISCCPCTL _MMIO(0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
-#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
-#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
-#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
+#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
+#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
+
+#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
+#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
+#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
+#define CHV_SS_PG_ENABLE (1 << 1)
+#define CHV_EU08_PG_ENABLE (1 << 9)
+#define CHV_EU19_PG_ENABLE (1 << 17)
+#define CHV_EU210_PG_ENABLE (1 << 25)
+#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
+#define CHV_EU311_PG_ENABLE (1 << 1)
-#define GEN8_GARBCNTL _MMIO(0xB004)
+#define GEN7_SARCHKMD _MMIO(0xb000)
+#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+
+#define GEN8_GARBCNTL _MMIO(0xb004)
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22)
#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0)
#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0)
-#define GEN11_GLBLINVL _MMIO(0xB404)
-#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
-#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
+#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
+#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
-#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550)
-#define DFR_DISABLE (1 << 9)
+#define GEN7_L3SQCREG1 _MMIO(0xb010)
+#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
-#define GEN11_GACB_PERF_CTRL _MMIO(0x4B80)
-#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
-#define GEN11_HASH_CTRL_BIT0 (1 << 0)
-#define GEN11_HASH_CTRL_BIT4 (1 << 12)
+#define GEN7_L3CNTLREG1 _MMIO(0xb01c)
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
+#define GEN7_L3AGDIS (1 << 19)
+#define GEN7_L3CNTLREG2 _MMIO(0xb020)
+
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
+#define GEN9_LNCFCMOCS_REG_COUNT 32
+
+#define GEN7_L3CNTLREG3 _MMIO(0xb024)
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xb030)
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+
+#define GEN7_L3SQCREG4 _MMIO(0xb034)
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+
+#define HSW_SCRATCH1 _MMIO(0xb038)
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
+
+#define GEN7_L3LOG(slice, i) _MMIO(0xb070 + (slice) * 0x200 + (i) * 4)
+#define GEN7_L3LOG_SIZE 0x80
-#define GEN11_LSN_UNSLCVC _MMIO(0xB43C)
+#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
+#define PMFLUSHDONE_LNICRSDROP (1 << 20)
+#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
+#define PMFLUSHDONE_LNEBLK (1 << 22)
+
+#define XEHP_L3NODEARBCFG _MMIO(0xb0b4)
+#define XEHP_LNESPARE REG_BIT(19)
+
+#define GEN8_L3SQCREG1 _MMIO(0xb100)
+/*
+ * Note that on CHV the following has an off-by-one error wrt. BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
+#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
+
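The credit fields store half the credit count (hence the >> 1); a minimal standalone sketch of composing a value under that convention, starting from an invented readout:

#include <stdio.h>

#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x)    (((x) >> 1) << 14)
#define L3_PRIO_CREDITS_MASK       ((0x1f << 19) | (0x1f << 14))

int main(void)
{
    unsigned int val = 0x12345678;      /* hypothetical register readout */

    val &= ~L3_PRIO_CREDITS_MASK;       /* clear both credit fields */
    val |= L3_GENERAL_PRIO_CREDITS(62); /* 62 credits -> field value 31 */
    val |= L3_HIGH_PRIO_CREDITS(2);     /* 2 credits -> field value 1 */
    printf("composed value = 0x%08x\n", val);
    return 0;
}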
+#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xb114)
+#define GEN11_I2M_WRITE_DISABLE (1 << 28)
+
+#define GEN8_L3SQCREG4 _MMIO(0xb118)
+#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
+#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
+#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
+#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
+
+#define GEN9_SCRATCH1 _MMIO(0xb11c)
+#define EVICTION_PERF_FIX_ENABLE REG_BIT(8)
+
+#define BDW_SCRATCH1 _MMIO(0xb11c)
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
+
+#define GEN11_SCRATCH2 _MMIO(0xb140)
+#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+
+#define GEN11_L3SQCREG5 _MMIO(0xb158)
+#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
+
+#define MLTICTXCTL _MMIO(0xb170)
+#define TDONRENDER REG_BIT(2)
+
+#define XEHP_L3SCQREG7 _MMIO(0xb188)
+#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+
+#define L3SQCREG1_CCS0 _MMIO(0xb200)
+#define FLUSHALLNONCOH REG_BIT(5)
+
+#define GEN11_GLBLINVL _MMIO(0xb404)
+#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
+#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
+
+#define GEN11_LSN_UNSLCVC _MMIO(0xb43c)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
-#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
-#define ENABLE_SMALLPL REG_BIT(15)
-#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
+#define __GEN9_RCS0_MOCS0 0xc800
+#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS0_MOCS0 0xc900
+#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS1_MOCS0 0xca00
+#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
+#define __GEN9_VECS0_MOCS0 0xcb00
+#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
+#define __GEN9_BCS0_MOCS0 0xcc00
+#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
+
+#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
+#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
+#define FAULT_VA_HIGH_BITS (0xf << 0)
+#define FAULT_GTT_SEL (1 << 4)
+
+#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
+#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
+#define RING_FAULT_GTTSEL_MASK (1 << 11)
+#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
+#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
+#define RING_FAULT_VALID (1 << 0)
+
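Decoding a fault value is again shift-and-mask with the extractors above; a minimal standalone sketch using an invented readout:

#include <stdio.h>

#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_SRCID(x)          (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x)     (((x) >> 1) & 0x3)
#define RING_FAULT_VALID             (1 << 0)

int main(void)
{
    unsigned int fault = 0x00003a2d; /* hypothetical fault readout */

    if (fault & RING_FAULT_VALID)
        printf("engine %u, srcid %u, type %u\n",
               GEN8_RING_FAULT_ENGINE_ID(fault),
               RING_FAULT_SRCID(fault),
               RING_FAULT_FAULT_TYPE(fault));
    return 0;
}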
+#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+
+#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
+#define RENDER_MOD_CTRL _MMIO(0xcf2c)
+#define COMP_MOD_CTRL _MMIO(0xcf30)
+#define VDBX_MOD_CTRL _MMIO(0xcf34)
+#define VEBX_MOD_CTRL _MMIO(0xcf38)
+#define FORCE_MISS_FTLB REG_BIT(3)
+
+#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
+#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
+#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
+#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
+
+#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
+#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
+#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
-#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
-#define GEN7_L3LOG_SIZE 0x80
+#define GEN12_GAM_DONE _MMIO(0xcf68)
-#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
-#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
+#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10)
#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
-#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
-#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
-#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
+#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
+#define GEN7_ROW_INSTDONE _MMIO(0xe164)
+
+#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
+#define GEN8_ST_PO_DISABLE (1 << 13)
+
+#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
+#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
+
+#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
+#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
+
+#define GEN10_SAMPLER_MODE _MMIO(0xe18c)
+#define ENABLE_SMALLPL REG_BIT(15)
+#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
+
+#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
+#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8)
+#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
+
+#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
+
+#define EU_PERF_CNTL0 _MMIO(0xe458)
+#define EU_PERF_CNTL4 _MMIO(0xe45c)
+
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
+#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+
+#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
+#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
-#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
+#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
#define FLOW_CONTROL_ENABLE REG_BIT(15)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
@@ -1466,6 +1068,20 @@ enum {
#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define RT_CTRL _MMIO(0xe530)
+#define DIS_NULL_QUERY REG_BIT(10)
+
+#define EU_PERF_CNTL1 _MMIO(0xe558)
+#define EU_PERF_CNTL5 _MMIO(0xe55c)
+
+#define GEN12_HDC_CHICKEN0 _MMIO(0xe5f0)
+#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
+#define ICL_HDC_MODE _MMIO(0xe5f4)
+
+#define EU_PERF_CNTL2 _MMIO(0xe658)
+#define EU_PERF_CNTL6 _MMIO(0xe65c)
+#define EU_PERF_CNTL3 _MMIO(0xe758)
+
#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8)
#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4)
@@ -1475,80 +1091,436 @@ enum {
#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32)
#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32)
-#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
-#define DOP_CLOCK_GATING_DISABLE (1 << 0)
-#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
-#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+#define SARB_CHICKEN1 _MMIO(0xe90c)
+#define COMP_CKN_IN REG_GENMASK(30, 29)
-#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
-#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
-#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
-#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
-#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
-#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
-#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
+#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
+#define DOP_CLOCK_GATING_DISABLE (1 << 0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
+#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
-#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
-#define GEN8_ST_PO_DISABLE (1 << 13)
+#define __GEN11_VCS2_MOCS0 0x10000
+#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
+
+#define CRSTANDVID _MMIO(0x11100)
+#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE _MMIO(0x11110)
+#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 _MMIO(0x11114)
+#define VIDFREQ3 _MMIO(0x11118)
+#define VIDFREQ4 _MMIO(0x1111c)
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
+ when command complete */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1 << 7)
+#define MEMCTL_TGT_VID_MASK 0x007f
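[Editor's note: the MEMCTL_CMD_STS handshake above (software writes 1 to trigger a command, hardware clears the bit on completion) would be driven roughly as follows. A hedged sketch with `uncore` and `fstart` as placeholders:

    u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
    if (rgvswctl & MEMCTL_CMD_STS)
        return;    /* previous command still in flight */

    /* Request a frequency change, then set STS to kick the command. */
    rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
               (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
    intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
    intel_uncore_write16(uncore, MEMSWCTL, rgvswctl | MEMCTL_CMD_STS);
]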
+#define MEMIHYST _MMIO(0x1117c)
+#define MEMINTREN _MMIO(0x11180) /* 16 bits */
+#define MEMINT_RSEXIT_EN (1 << 8)
+#define MEMINT_CX_SUPR_EN (1 << 7)
+#define MEMINT_CONT_BUSY_EN (1 << 6)
+#define MEMINT_AVG_BUSY_EN (1 << 5)
+#define MEMINT_EVAL_CHG_EN (1 << 4)
+#define MEMINT_MON_IDLE_EN (1 << 3)
+#define MEMINT_UP_EVAL_EN (1 << 2)
+#define MEMINT_DOWN_EVAL_EN (1 << 1)
+#define MEMINT_SW_CMD_EN (1 << 0)
+#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS _MMIO(0x11184)
+#define MEMINT_RSEXIT (1 << 7)
+#define MEMINT_CONT_BUSY (1 << 6)
+#define MEMINT_AVG_BUSY (1 << 5)
+#define MEMINT_EVAL_CHG (1 << 4)
+#define MEMINT_MON_IDLE (1 << 3)
+#define MEMINT_UP_EVAL (1 << 2)
+#define MEMINT_DOWN_EVAL (1 << 1)
+#define MEMINT_SW_CMD (1 << 0)
+#define MEMMODECTL _MMIO(0x11190)
+#define MEMMODE_BOOST_EN (1 << 31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1 << 15)
+#define MEMMODE_SWMODE_EN (1 << 14)
+#define MEMMODE_RCLK_GATE (1 << 13)
+#define MEMMODE_HW_UPDATE (1 << 12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG _MMIO(0x1119c)
+#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1 << 12)
+#define SFCAVM (1 << 11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG _MMIO(0x111a0)
+#define RCBMINAVG _MMIO(0x111a0)
+#define RCUPEI _MMIO(0x111b0)
+#define RCDNEI _MMIO(0x111b4)
+#define RSTDBYCTL _MMIO(0x111b8)
+#define RS1EN (1 << 31)
+#define RS2EN (1 << 30)
+#define RS3EN (1 << 29)
+#define D3RS3EN (1 << 28) /* Display D3 implies RS3 */

+#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
+#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7 << 20)
+#define RSX_STATUS_ON (0 << 20)
+#define RSX_STATUS_RC1 (1 << 20)
+#define RSX_STATUS_RC1E (2 << 20)
+#define RSX_STATUS_RS1 (3 << 20)
+#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7 << 20)
+#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
+#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3 << 14)
+#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1 << 14)
+#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3 << 12)
+#define SLOW_RS123 (0 << 12)
+#define SLOW_RS23 (1 << 12)
+#define SLOW_RS3 (2 << 12)
+#define NORMAL_RS123 (3 << 12)
+#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
+#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3 << 4)
+#define RS_CSTATE_C367_RS1 (0 << 4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define RS_CSTATE_RSVD (2 << 4)
+#define RS_CSTATE_C367_RS2 (3 << 4)
+#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
+#define VIDCTL _MMIO(0x111c0)
+#define VIDSTS _MMIO(0x111c8)
+#define VIDSTART _MMIO(0x111cc) /* 8 bits */
+#define MEMSTAT_ILK _MMIO(0x111f8)
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1 << 2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define PMMISC _MMIO(0x11214)
+#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
+#define SDEW _MMIO(0x1124c)
+#define CSIEW0 _MMIO(0x11250)
+#define CSIEW1 _MMIO(0x11254)
+#define CSIEW2 _MMIO(0x11258)
+#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
+#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
+#define MCHAFE _MMIO(0x112c0)
+#define CSIEC _MMIO(0x112e0)
+#define DMIEC _MMIO(0x112e4)
+#define DDREC _MMIO(0x112e8)
+#define PEG0EC _MMIO(0x112ec)
+#define PEG1EC _MMIO(0x112f0)
+#define GFXEC _MMIO(0x112f4)
+#define INTTOEXT_BASE_ILK _MMIO(0x11300)
+#define RPPREVBSYTUPAVG _MMIO(0x113b8)
+#define RCPREVBSYTUPAVG _MMIO(0x113b8)
+#define RCPREVBSYTDNAVG _MMIO(0x113bc)
+#define RPPREVBSYTDNAVG _MMIO(0x113bc)
+#define ECR _MMIO(0x11600)
+#define ECR_GPFE (1 << 31)
+#define ECR_IMONE (1 << 30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 _MMIO(0x11608)
+#define OGW1 _MMIO(0x1160c)
+#define EG0 _MMIO(0x11610)
+#define EG1 _MMIO(0x11614)
+#define EG2 _MMIO(0x11618)
+#define EG3 _MMIO(0x1161c)
+#define EG4 _MMIO(0x11620)
+#define EG5 _MMIO(0x11624)
+#define EG6 _MMIO(0x11628)
+#define EG7 _MMIO(0x1162c)
+#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
+#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
+#define LCFUSE02 _MMIO(0x116c0)
+#define LCFUSE_HIV_MASK 0x000000ff
+
+#define GAC_ECO_BITS _MMIO(0x14090)
+#define ECOBITS_SNB_BIT (1 << 13)
+#define ECOBITS_PPGTT_CACHE64B (3 << 8)
+#define ECOBITS_PPGTT_CACHE4B (0 << 8)
+
+#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_DISABLE_SS0 (1 << 10)
+#define CHV_FGT_DISABLE_SS1 (1 << 11)
+#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
+#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
+#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
+#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
+#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+
+#define BCS_SWCTRL _MMIO(0x22200)
+#define BCS_SRC_Y REG_BIT(0)
+#define BCS_DST_Y REG_BIT(1)
+
+#define GAB_CTL _MMIO(0x24000)
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
-#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
-#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
-#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
-#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
-#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
+#define GEN6_PMISR _MMIO(0x44020)
+#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
+#define GEN6_PMIIR _MMIO(0x44028)
+#define GEN6_PMIER _MMIO(0x4402c)
+#define GEN6_PM_MBOX_EVENT (1 << 25)
+#define GEN6_PM_THERMAL_EVENT (1 << 24)
+/*
+ * For Gen11 these are in the upper word of the GPM_WGBOXPERF
+ * registers. Shifting is handled on accessing the imr and ier.
+ */
+#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
+#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
+#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \
+ GEN6_PM_RP_UP_THRESHOLD | \
+ GEN6_PM_RP_DOWN_EI_EXPIRED | \
+ GEN6_PM_RP_DOWN_THRESHOLD | \
+ GEN6_PM_RP_DOWN_TIMEOUT)
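[Editor's note: per the comment above, on Gen11 these same event bits occupy the upper 16 bits of the GPM_WGBOXPERF interrupt registers, so a mask built from GEN6_PM_RPS_EVENTS would be shifted before reaching the hardware. A hedged sketch:

    u32 mask = GEN6_PM_RPS_EVENTS;
    if (GRAPHICS_VER(i915) >= 11)
        mask <<= 16;    /* shifting handled at the imr/ier accessors */
]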
-#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
-#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
-#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8)
-#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4)
-#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
+#define GEN7_GT_SCRATCH(i) _MMIO(0x4f100 + (i) * 4)
+#define GEN7_GT_SCRATCH_REG_NUM 8
+
+#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
+#define GFX_FLSH_CNTL_EN (1 << 0)
+
+#define GTFIFODBG _MMIO(0x120000)
+#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
+#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
+#define GT_FIFO_SBDROPERR (1 << 6)
+#define GT_FIFO_BLOBDROPERR (1 << 5)
+#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
+#define GT_FIFO_DROPERR (1 << 3)
+#define GT_FIFO_OVFERR (1 << 2)
+#define GT_FIFO_IAWRERR (1 << 1)
+#define GT_FIFO_IARDERR (1 << 0)
+
+#define GTFIFOCTL _MMIO(0x120008)
+#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
+#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
+
+#define FORCEWAKE_MT_ACK _MMIO(0x130040)
+#define FORCEWAKE_ACK_HSW _MMIO(0x130044)
+#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044)
+#define FORCEWAKE_KERNEL BIT(0)
+#define FORCEWAKE_USER BIT(1)
+#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
+#define FORCEWAKE_ACK _MMIO(0x130090)
+#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090)
+#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
+#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
+#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
+#define VLV_GTLC_PW_STATUS _MMIO(0x130094)
+#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
+#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
+#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
+#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
+#define FORCEWAKE_VLV _MMIO(0x1300b0)
+#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
+#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8)
+#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc)
+
+#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+
+#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
+#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
+#define GEN8_GT_SLICE_INFO _MMIO(0x138064)
+#define GEN8_LSLICESTAT_MASK 0x7
-/* MOCS (Memory Object Control State) registers */
-#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
-#define GEN9_LNCFCMOCS_REG_COUNT 32
-
-#define __GEN9_RCS0_MOCS0 0xc800
-#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
-#define __GEN9_VCS0_MOCS0 0xc900
-#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
-#define __GEN9_VCS1_MOCS0 0xca00
-#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
-#define __GEN9_VECS0_MOCS0 0xcb00
-#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
-#define __GEN9_BCS0_MOCS0 0xcc00
-#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
-#define __GEN11_VCS2_MOCS0 0x10000
-#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
-
-#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
-#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
-
-#define GEN9_SCRATCH1 _MMIO(0xb11c)
-#define EVICTION_PERF_FIX_ENABLE REG_BIT(8)
-
-#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
-#define PMFLUSHDONE_LNICRSDROP (1 << 20)
-#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
-#define PMFLUSHDONE_LNEBLK (1 << 22)
-
-#define XEHP_L3NODEARBCFG _MMIO(0xb0b4)
-#define XEHP_LNESPARE REG_BIT(19)
-
-#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
+#define VLV_COUNTER_CONTROL _MMIO(0x138104)
+#define VLV_COUNT_RANGE_HIGH (1 << 15)
+#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
+#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
+#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
+#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
+#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
+#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
+#define VLV_GT_MEDIA_RC6 _MMIO(0x13810c)
-/* gamt regs */
-#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
+#define GEN6_GT_GFX_RC6p _MMIO(0x13810c)
+#define GEN6_GT_GFX_RC6pp _MMIO(0x138110)
+#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
+#define VLV_MEDIA_C0_COUNT _MMIO(0x13811c)
+
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
+#define GEN11_CSME (31)
+#define GEN11_GUNIT (28)
+#define GEN11_GUC (25)
+#define GEN11_WDPERF (20)
+#define GEN11_KCR (19)
+#define GEN11_GTPM (16)
+#define GEN11_BCS (15)
+#define GEN11_RCS0 (0)
+#define GEN11_VECS(x) (31 - (x))
+#define GEN11_VCS(x) (x)
+
+#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
+#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
+#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
+#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040)
+#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044)
+
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
+#define GEN11_INTR_DATA_VALID (1 << 31)
+#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
+#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
+#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
+/* irq instances for OTHER_CLASS */
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GTPM_INSTANCE 1
+#define OTHER_KCR_INSTANCE 4
+
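[Editor's note: an interrupt identity dword could be decoded with the extractors above; a sketch only, with `uncore` and `bank` as placeholders:

    u32 ident = intel_uncore_read(uncore, GEN11_INTR_IDENTITY_REG(bank));

    if (ident & GEN11_INTR_DATA_VALID) {
        u32 class = GEN11_INTR_ENGINE_CLASS(ident);
        u32 instance = GEN11_INTR_ENGINE_INSTANCE(ident);
        u32 intr = GEN11_INTR_ENGINE_INTR(ident);
    }
]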
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
+
+#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
+#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
+#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
+#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
+#define GEN12_VCS4_VCS5_INTR_MASK _MMIO(0x1900b0)
+#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4)
+#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
+#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4)
+#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
+#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
+#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
+#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
+
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
+
+enum {
+ INTEL_ADVANCED_CONTEXT = 0,
+ INTEL_LEGACY_32B_CONTEXT,
+ INTEL_ADVANCED_AD_CONTEXT,
+ INTEL_LEGACY_64B_CONTEXT
+};
-#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
-#define MMCD_PCLA (1 << 31)
-#define MMCD_HOTSPOT_EN (1 << 27)
+enum {
+ FAULT_AND_HANG = 0,
+ FAULT_AND_HALT, /* Debug only */
+ FAULT_AND_STREAM,
+ FAULT_AND_CONTINUE /* Unsupported */
+};
-#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C)
-#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
+#define GEN11_SW_CTX_ID_SHIFT 37
+#define GEN11_SW_CTX_ID_WIDTH 11
+#define GEN11_ENGINE_CLASS_SHIFT 61
+#define GEN11_ENGINE_CLASS_WIDTH 3
+#define GEN11_ENGINE_INSTANCE_SHIFT 48
+#define GEN11_ENGINE_INSTANCE_WIDTH 6
+#define XEHP_SW_CTX_ID_SHIFT 39
+#define XEHP_SW_CTX_ID_WIDTH 16
+#define XEHP_SW_COUNTER_SHIFT 58
+#define XEHP_SW_COUNTER_WIDTH 6
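[Editor's note: taken together, the shift/width pairs above describe how a Gen11 context descriptor is composed. A rough sketch under stated assumptions (`lrca`, `ctx_id` and `engine` are placeholders, not the driver's exact helper):

    u64 desc = GEN8_CTX_VALID |
               INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;

    desc |= lrca & CTX_GTT_ADDRESS_MASK;    /* page-aligned context image */
    desc |= (u64)ctx_id << GEN11_SW_CTX_ID_SHIFT;
    desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
    desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
]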
#endif /* __INTEL_GT_REGS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index b5d3cde08aac..49a8fb63e6e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -10,6 +10,7 @@
#include <drm/drm_cache.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
@@ -162,6 +163,9 @@ static void __i915_vm_release(struct work_struct *work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, release_work);
+ /* Synchronize async unbinds. */
+ i915_vma_resource_bind_dep_sync_all(vm);
+
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -190,6 +194,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
if (!kref_read(&vm->resv_ref))
kref_init(&vm->resv_ref);
+ vm->pending_unbind = RB_ROOT_CACHED;
INIT_WORK(&vm->release_work, __i915_vm_release);
atomic_set(&vm->open, 1);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 177b42b935a1..8073438b67c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -27,6 +27,7 @@
#include "gt/intel_reset.h"
#include "i915_selftest.h"
+#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -200,7 +201,7 @@ struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
void (*bind_vma)(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
/*
@@ -208,7 +209,8 @@ struct i915_vma_ops {
* setting the valid PTE entries to a reserved scratch page.
*/
void (*unbind_vma)(struct i915_address_space *vm,
- struct i915_vma *vma);
+ struct i915_vma_resource *vma_res);
+
};
struct i915_address_space {
@@ -263,6 +265,9 @@ struct i915_address_space {
/* Flags used when creating page-table objects for this vm */
unsigned long lmem_pt_obj_flags;
+ /* Interval tree for pending unbind vma resources */
+ struct rb_root_cached pending_unbind;
+
struct drm_i915_gem_object *
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *
@@ -285,7 +290,7 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
@@ -600,11 +605,11 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma *vma);
+ struct i915_vma_resource *vma_res);
void gtt_write_workarounds(struct intel_gt *gt);
@@ -627,8 +632,8 @@ __vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
-} sgt_dma(struct i915_vma *vma) {
- struct scatterlist *sg = vma->pages->sgl;
+} sgt_dma(struct i915_vma_resource *vma_res) {
+ struct scatterlist *sg = vma_res->bi.pages->sgl;
dma_addr_t addr = sg_dma_address(sg);
return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 335c65758d6f..40e2e28ee6c7 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -10,6 +10,7 @@
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_llc.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
struct ia_constants {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e46bf1429f2c..004e1216e654 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -8,6 +8,8 @@
#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
+#include "i915_reg.h"
+#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
@@ -1067,6 +1069,10 @@ lrc_pin(struct intel_context *ce,
void lrc_unpin(struct intel_context *ce)
{
+ if (unlikely(ce->parallel.last_rq)) {
+ i915_request_put(ce->parallel.last_rq);
+ ce->parallel.last_rq = NULL;
+ }
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
ce->engine);
}
@@ -1162,6 +1168,29 @@ gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
return cs;
}
+/*
+ * On DG2, an RCS restore hang is detected during context restore of a
+ * preempted context in GPGPU mode. This is extremely timing dependent.
+ * To address this, the sw workaround batch buffer (wabb) below is
+ * implemented for DG2 A steppings.
+ */
+static u32 *
+dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG);
+ *cs++ = 0x21;
+
+ *cs++ = MI_LOAD_REGISTER_REG;
+ *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+ *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT1);
+
+ *cs++ = MI_LOAD_REGISTER_REG;
+ *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+ *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT2);
+
+ return cs;
+}
+
static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
@@ -1169,6 +1198,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
cs = gen12_emit_cmd_buf_wa(ce, cs);
cs = gen12_emit_restore_scratch(ce, cs);
+ /* Wa_22011450934:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(ce->engine->i915, G11, STEP_A0, STEP_B0))
+ cs = dg2_emit_rcs_hang_wabb(ce, cs);
+
/* Wa_16013000631:dg2 */
if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_G11(ce->engine->i915))
@@ -1688,6 +1722,17 @@ static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
#endif
}
+static u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
void lrc_update_runtime(struct intel_context *ce)
{
u32 old;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 7f697845c4cf..0b76f096b559 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -8,12 +8,12 @@
#include <linux/types.h>
-#include "intel_context.h"
-#include "intel_lrc_reg.h"
-
struct drm_i915_gem_object;
+struct i915_gem_ww_ctx;
+struct intel_context;
struct intel_engine_cs;
struct intel_ring;
+struct kref;
/* At the start of the context image is its per-process HWS page */
#define LRC_PPHWSP_PN (0)
@@ -68,15 +68,5 @@ void lrc_check_regs(const struct intel_context *ce,
const char *when);
void lrc_update_runtime(struct intel_context *ce);
-static inline u32 lrc_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 083b3090c69c..48e6e2f87700 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -179,32 +179,34 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
u32 pte_flags;
- if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ if (!vma_res->allocated) {
+ vm->allocate_va_range(vm, stash, vma_res->start,
+ vma_res->vma_size);
+ vma_res->allocated = true;
}
/* Applicable to VLV, and gen8+ */
pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
+ if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(vma->obj))
+ if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vm->insert_entries(vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
wmb();
}
-void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
{
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
- vm->clear_range(vm, vma->node.start, vma->size);
+ if (vma_res->allocated)
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
static unsigned long pd_count(u64 size, int shift)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index cb5a67c98f30..a04e0cf4a94b 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -12,11 +12,12 @@
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
struct drm_i915_private *i915 = mem->i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
unsigned long n;
int ret;
@@ -132,7 +133,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
if (!i915->params.fake_lmem_start)
return ERR_PTR(-ENODEV);
- GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
+ GEM_BUG_ON(i915_ggtt_has_aperture(to_gt(i915)->ggtt));
/* Your mappable aperture belongs to me now! */
mappable_end = pci_resource_len(pdev, 2);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index b575cd6e0b7a..5121e6dc2fa5 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
+
#include "i915_drv.h"
#include "intel_renderstate.h"
#include "intel_context.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 59beb69ff6f2..82713264b96c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -14,6 +14,7 @@
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
@@ -22,6 +23,7 @@
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
+#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_reset.h"
@@ -347,25 +349,25 @@ static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
MISSING_CASE(engine->class);
fallthrough;
case VIDEO_DECODE_CLASS:
- sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
- sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
- sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
break;
case VIDEO_ENHANCEMENT_CLASS:
- sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
- sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine);
+ sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
- sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine);
+ sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
@@ -412,7 +414,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine,
* forced lock on the VE engine that shares the same SFC.
*/
if (!(intel_uncore_read_fw(uncore,
- GEN12_HCP_SFC_LOCK_STATUS(engine)) &
+ GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
GEN12_HCP_SFC_USAGE_BIT))
return 0;
@@ -602,6 +604,15 @@ static int gen8_reset_engines(struct intel_gt *gt,
*/
}
+ /*
+ * Wa_22011100796:dg2: whenever a full soft reset is required,
+ * reset all individual engines first, and then do a full soft reset.
+ *
+ * This is best effort, so ignore any error from the initial reset.
+ */
+ if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
+ gen11_reset_engines(gt, gt->info.engine_mask, 0);
+
if (GRAPHICS_VER(gt->i915) >= 11)
ret = gen11_reset_engines(gt, engine_mask, retry);
else
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 723055340c9b..40ffcb94e379 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 8362eb09092e..6d7ec3bf1f32 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -3,6 +3,10 @@
* Copyright © 2008-2021 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
+#include "gem/i915_gem_internal.h"
+
#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0cb299cdfc8f..fd95449ed46d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -13,6 +13,7 @@
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_regs.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
#include "vlv_sideband.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 438bbc7b8147..b9640212d659 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -3,9 +3,12 @@
* Copyright © 2016-2018 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_cache.h>
+
+#include "gem/i915_gem_internal.h"
#include "i915_active.h"
+#include "i915_drv.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 59ad23118875..26038066e90b 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -237,7 +237,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
- wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+ wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
wa_masked_en(wal, GEN8_ROW_CHICKEN,
@@ -1510,6 +1510,12 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
+
+ /* Wa_18018781329:dg2 */
+ wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
static void
@@ -2040,7 +2046,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ if (IS_DG2(i915)) {
+ /* Wa_14015227452:dg2 */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14013392000:dg2_g11 */
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
@@ -2048,15 +2059,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
- IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
wa_masked_en(wal, GEN9_ROW_CHICKEN4,
GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
- IS_DG2_G11(engine->i915)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(i915)) {
/*
* Wa_22012826095:dg2
* Wa_22013059131:dg2
@@ -2071,14 +2082,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
/* Wa_1308578152:dg2_g10 when first gslice is fused off */
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) &&
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
needs_wa_1308578152(engine)) {
wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
GEN12_REPLAY_MODE_GRANULARITY);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
- IS_DG2_G11(engine->i915)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
/* Wa_22013037850:dg2 */
wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
DISABLE_128B_EVICTION_COMMAND_UDW);
@@ -2095,7 +2106,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
/*
* Wa_1608949956:dg2_g10
* Wa_14010198302:dg2_g10
@@ -2114,7 +2125,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, false);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
/* Wa_22010430635:dg2 */
wa_masked_en(wal,
GEN9_ROW_CHICKEN4,
@@ -2124,8 +2135,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
- IS_DG2_G11(engine->i915)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_G11(i915)) {
/* Wa_22012654132:dg2 */
wa_add(wal, GEN10_CACHE_MODE_SS, 0,
_MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
@@ -2134,8 +2145,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
/* Wa_14013202645:dg2 */
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
- IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
@@ -2463,7 +2474,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
wa_masked_en(wal,
- MI_MODE,
+ RING_MI_MODE(RENDER_RING_BASE),
ASYNC_FLIP_PERF_DISABLE);
if (GRAPHICS_VER(i915) == 6) {
@@ -2522,7 +2533,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- wa_add(wal, MI_MODE,
+ wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index e10da897e07a..72d5faab8f9a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -5,6 +5,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 4a20ba63446c..83ff4c2e57c5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -6,6 +6,7 @@
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "i915_gem_evict.h"
#include "intel_gt.h"
@@ -1383,7 +1384,7 @@ static int evict_vma(void *data)
complete(&arg->completion);
mutex_lock(&vm->mutex);
- err = i915_gem_evict_for_node(vm, &evict, 0);
+ err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
mutex_unlock(&vm->mutex);
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 618c905daa19..21c29d315cc0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -5,6 +5,8 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
+
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index fa4293d2944f..c9c4f391c5cc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -5,6 +5,8 @@
#include <linux/sort.h>
+#include "gem/i915_gem_internal.h"
+
#include "selftests/i915_random.h"
static const unsigned int sizes[] = {
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 8a873f6bda7f..37c38bdd5f47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -19,7 +19,7 @@ __igt_reset_stolen(struct intel_gt *gt,
intel_engine_mask_t mask,
const char *msg)
{
- struct i915_ggtt *ggtt = &gt->i915->ggtt;
+ struct i915_ggtt *ggtt = gt->ggtt;
const struct resource *dsm = &gt->i915->dsm;
resource_size_t num_pages, page;
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index e1e5dd5f7638..6a69ac0184ad 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -6,6 +6,8 @@
#include <linux/pm_qos.h>
#include <linux/sort.h>
+#include "gem/i915_gem_internal.h"
+
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 0287c2573c51..67a9aab801dd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -3,6 +3,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index fe5d7d261797..7afdadc7656f 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -7,9 +7,9 @@
#define _ABI_GUC_ACTIONS_ABI_H
/**
- * DOC: HOST2GUC_REGISTER_CTB
+ * DOC: HOST2GUC_SELF_CFG
*
- * This message is used as part of the `CTB based communication`_ setup.
+ * This message is used by the Host KMD to set up the `GuC Self Config KLVs`_.
*
* This message must be sent as `MMIO HXG Message`_.
*
@@ -22,20 +22,18 @@
* | +-------+--------------------------------------------------------------+
* | | 27:16 | DATA0 = MBZ |
* | +-------+--------------------------------------------------------------+
- * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_REGISTER_CTB` = 0x4505 |
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_SELF_CFG` = 0x0508 |
* +---+-------+--------------------------------------------------------------+
- * | 1 | 31:12 | RESERVED = MBZ |
+ * | 1 | 31:16 | **KLV_KEY** - KLV key, see `GuC Self Config KLVs`_ |
* | +-------+--------------------------------------------------------------+
- * | | 11:8 | **TYPE** - type for the `CT Buffer`_ |
+ * | | 15:0 | **KLV_LEN** - KLV length |
* | | | |
- * | | | - _`GUC_CTB_TYPE_HOST2GUC` = 0 |
- * | | | - _`GUC_CTB_TYPE_GUC2HOST` = 1 |
- * | +-------+--------------------------------------------------------------+
- * | | 7:0 | **SIZE** - size of the `CT Buffer`_ in 4K units minus 1 |
+ * | | | - 32 bit KLV = 1 |
+ * | | | - 64 bit KLV = 2 |
* +---+-------+--------------------------------------------------------------+
- * | 2 | 31:0 | **DESC_ADDR** - GGTT address of the `CTB Descriptor`_ |
+ * | 2 | 31:0 | **VALUE32** - Bits 31-0 of the KLV value |
* +---+-------+--------------------------------------------------------------+
- * | 3 | 31:0 | **BUFF_ADDF** - GGTT address of the `CT Buffer`_ |
+ * | 3 | 31:0 | **VALUE64** - Bits 63-32 of the KLV value (**KLV_LEN** = 2) |
* +---+-------+--------------------------------------------------------------+
*
* +---+-------+--------------------------------------------------------------+
@@ -45,28 +43,25 @@
* | +-------+--------------------------------------------------------------+
* | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
* | +-------+--------------------------------------------------------------+
- * | | 27:0 | DATA0 = MBZ |
+ * | | 27:0 | DATA0 = **NUM** - 1 if KLV was parsed, 0 if not recognized |
* +---+-------+--------------------------------------------------------------+
*/
-#define GUC_ACTION_HOST2GUC_REGISTER_CTB 0x4505
+#define GUC_ACTION_HOST2GUC_SELF_CFG 0x0508
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_MBZ (0xfffff << 12)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE (0xf << 8)
-#define GUC_CTB_TYPE_HOST2GUC 0u
-#define GUC_CTB_TYPE_GUC2HOST 1u
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE (0xff << 0)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR GUC_HXG_REQUEST_MSG_n_DATAn
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
-#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
-#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_0_NUM GUC_HXG_RESPONSE_MSG_0_DATA0
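[Editor's note: a request for this action would be packed per the table above. A hedged sketch, assuming the generic GUC_HXG_* field definitions from the HXG message ABI, FIELD_PREP() from <linux/bitfield.h>, and placeholder `key`/`len`/`value`:

    u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
        FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
        FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
        FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
                   GUC_ACTION_HOST2GUC_SELF_CFG),
        FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
        FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
        FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
                   lower_32_bits(value)),
        FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
                   upper_32_bits(value)),
    };
]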
/**
- * DOC: HOST2GUC_DEREGISTER_CTB
+ * DOC: HOST2GUC_CONTROL_CTB
*
- * This message is used as part of the `CTB based communication`_ teardown.
+ * This H2G action allows the VF Host to enable or disable the H2G and G2H `CT Buffer`_.
*
* This message must be sent as `MMIO HXG Message`_.
*
@@ -79,15 +74,12 @@
* | +-------+--------------------------------------------------------------+
* | | 27:16 | DATA0 = MBZ |
* | +-------+--------------------------------------------------------------+
- * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_DEREGISTER_CTB` = 0x4506 |
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_CONTROL_CTB` = 0x4509 |
* +---+-------+--------------------------------------------------------------+
- * | 1 | 31:12 | RESERVED = MBZ |
- * | +-------+--------------------------------------------------------------+
- * | | 11:8 | **TYPE** - type of the `CT Buffer`_ |
+ * | 1 | 31:0 | **CONTROL** - control `CTB based communication`_ |
* | | | |
- * | | | see `GUC_ACTION_HOST2GUC_REGISTER_CTB`_ |
- * | +-------+--------------------------------------------------------------+
- * | | 7:0 | RESERVED = MBZ |
+ * | | | - _`GUC_CTB_CONTROL_DISABLE` = 0 |
+ * | | | - _`GUC_CTB_CONTROL_ENABLE` = 1 |
* +---+-------+--------------------------------------------------------------+
*
* +---+-------+--------------------------------------------------------------+
@@ -100,16 +92,16 @@
* | | 27:0 | DATA0 = MBZ |
* +---+-------+--------------------------------------------------------------+
*/
-#define GUC_ACTION_HOST2GUC_DEREGISTER_CTB 0x4506
+#define GUC_ACTION_HOST2GUC_CONTROL_CTB 0x4509
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ (0xfffff << 12)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE (0xf << 8)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ2 (0xff << 0)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL GUC_HXG_REQUEST_MSG_n_DATAn
+#define GUC_CTB_CONTROL_DISABLE 0u
+#define GUC_CTB_CONTROL_ENABLE 1u
-#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
-#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
/* legacy definitions */
@@ -143,8 +135,12 @@ enum intel_guc_action {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
- INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+ INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
+ INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
+ INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
+ INTEL_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index 488b6061ee89..c20658ee85a5 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -11,4 +11,27 @@ enum intel_guc_response_status {
INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
};
+enum intel_guc_load_status {
+ INTEL_GUC_LOAD_STATUS_DEFAULT = 0x00,
+ INTEL_GUC_LOAD_STATUS_START = 0x01,
+ INTEL_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH = 0x02,
+ INTEL_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH = 0x03,
+ INTEL_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE = 0x04,
+ INTEL_GUC_LOAD_STATUS_GDT_DONE = 0x10,
+ INTEL_GUC_LOAD_STATUS_IDT_DONE = 0x20,
+ INTEL_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
+ INTEL_GUC_LOAD_STATUS_GUCINT_DONE = 0x40,
+ INTEL_GUC_LOAD_STATUS_DPC_READY = 0x50,
+ INTEL_GUC_LOAD_STATUS_DPC_ERROR = 0x60,
+ INTEL_GUC_LOAD_STATUS_EXCEPTION = 0x70,
+ INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID = 0x71,
+ INTEL_GUC_LOAD_STATUS_PXP_TEARDOWN_CTRL_ENABLED = 0x72,
+ INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
+ INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
+ INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
+
+ INTEL_GUC_LOAD_STATUS_READY = 0xF0,
+};
+
#endif /* _ABI_GUC_ERRORS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
new file mode 100644
index 000000000000..f0814a57c191
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_KLVS_ABI_H
+#define _ABI_GUC_KLVS_ABI_H
+
+/**
+ * DOC: GuC KLV
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | **KEY** - KLV key identifier |
+ * | | | - `GuC Self Config KLVs`_ |
+ * | | | |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **LEN** - length of VALUE (in 32bit dwords) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VALUE** - actual value of the KLV (format depends on KEY) |
+ * +---+-------+ |
+ * |...| | |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_KLV_LEN_MIN 1u
+#define GUC_KLV_0_KEY (0xffff << 16)
+#define GUC_KLV_0_LEN (0xffff << 0)
+#define GUC_KLV_n_VALUE (0xffffffff << 0)
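[Editor's note: given those masks, a KLV header dword would be packed as below; a minimal sketch (`blob` is a placeholder pointer into a KLV stream, `ctb_size` a placeholder value, FIELD_PREP() from <linux/bitfield.h>):

    /* Emit one KLV: key in bits 31:16, value length in bits 15:0. */
    *blob++ = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY) |
              FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_SELF_CFG_H2G_CTB_SIZE_LEN);
    *blob++ = ctb_size;    /* 32-bit VALUE follows the header */
]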
+
+/**
+ * DOC: GuC Self Config KLVs
+ *
+ * `GuC KLV`_ keys available for use with HOST2GUC_SELF_CFG_.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_ADDR` : 0x0902
+ * Refers to 64 bit Global Gfx address of H2G `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR` : 0x0903
+ * Refers to 64 bit Global Gfx address of H2G `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_SIZE` : 0x0904
+ * Refers to size of H2G `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_ADDR` : 0x0905
+ * Refers to 64 bit Global Gfx address of G2H `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR` : 0x0906
+ * Refers to 64 bit Global Gfx address of G2H `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_SIZE` : 0x0907
+ * Refers to size of G2H `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ */
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY 0x0902
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY 0x0903
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY 0x0904
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_LEN 1u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY 0x0905
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY 0x0906
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
+
+#endif /* _ABI_GUC_KLVS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5bab32fef120..447a976c9f25 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -184,6 +184,9 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
}
+
+ intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}
void intel_guc_init_late(struct intel_guc *guc)
@@ -224,32 +227,48 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
u32 flags;
#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
- #define UNIT SZ_1M
- #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+ #define LOG_UNIT SZ_1M
+ #define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
#else
- #define UNIT SZ_4K
- #define FLAG 0
+ #define LOG_UNIT SZ_4K
+ #define LOG_FLAG 0
+ #endif
+
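+ /* As for the log buffers above, encode the capture buffer size in 1M units when possible, else in 4K units */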
+ #if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
+ #define CAPTURE_UNIT SZ_1M
+ #define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
+ #else
+ #define CAPTURE_UNIT SZ_4K
+ #define CAPTURE_FLAG 0
#endif
BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
+ BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+ BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
+ BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
+ BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
+ (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
- FLAG |
- ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
+ CAPTURE_FLAG |
+ LOG_FLAG |
+ ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+ ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
+ ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << GUC_LOG_CAPTURE_SHIFT) |
(offset << GUC_LOG_BUF_ADDR_SHIFT);
- #undef UNIT
- #undef FLAG
+ #undef LOG_UNIT
+ #undef LOG_FLAG
+ #undef CAPTURE_UNIT
+ #undef CAPTURE_FLAG
return flags;
}
@@ -262,6 +281,26 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc)
return flags;
}
+static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 flags = 0;
+
+ /* Wa_22012773006:gen11,gen12 < XeHP */
+ if (GRAPHICS_VER(gt->i915) >= 11 &&
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ flags |= GUC_WA_POLLCS;
+
+ return flags;
+}
+
+static u32 guc_ctl_devid(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
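+ /* Pack the PCI device ID into the upper 16 bits and the revision ID into the lower 16 */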
+ return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
+}
+
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -278,6 +317,8 @@ static void guc_init_params(struct intel_guc *guc)
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
+ params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
+ params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
@@ -515,9 +556,10 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
/* Make sure to handle only enabled messages */
msg = payload[0] & guc->msg_enabled_mask;
- if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
- INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
- intel_guc_log_handle_flush_event(&guc->log);
+ if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
+ drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n");
+ if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
+ drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n");
return 0;
}
@@ -551,7 +593,7 @@ int intel_guc_suspend(struct intel_guc *guc)
{
int ret;
u32 action[] = {
- INTEL_GUC_ACTION_RESET_CLIENT,
+ INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
};
if (!intel_guc_is_ready(guc))
@@ -715,6 +757,56 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
return 0;
}
+static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+ u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
+ };
+ int ret;
+
+ GEM_BUG_ON(len > 2);
+ GEM_BUG_ON(len == 1 && upper_32_bits(value));
+
+ /* Self config must go over MMIO */
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
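+ /* Reply data holds the number of updated KLVs; anything but exactly one is a failure */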
+ if (unlikely(ret < 0))
+ return ret;
+ if (unlikely(ret > 1))
+ return -EPROTO;
+ if (unlikely(!ret))
+ return -ENOKEY;
+
+ return 0;
+}
+
+static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int err = __guc_action_self_cfg(guc, key, len, value);
+
+ if (unlikely(err))
+ i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
+ ERR_PTR(err), key, value);
+ return err;
+}
+
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
+{
+ return __guc_self_cfg(guc, key, 1, value);
+}
+
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
+{
+ return __guc_self_cfg(guc, key, 2, value);
+}
+
/**
* intel_guc_load_status - dump information about GuC load status
* @guc: the GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 3aabe164c329..9d779de16613 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -119,6 +119,15 @@ struct intel_guc {
* function as it might be in an atomic context (no sleeping)
*/
struct work_struct destroyed_worker;
+ /**
+ * @reset_fail_worker: worker to trigger a GT reset after an
+ * engine reset fails
+ */
+ struct work_struct reset_fail_worker;
+ /**
+ * @reset_fail_mask: mask of engines that failed to reset
+ */
+ intel_engine_mask_t reset_fail_mask;
} submission_state;
/**
@@ -141,6 +150,13 @@ struct intel_guc {
struct __guc_ads_blob *ads_blob;
/** @ads_regset_size: size of the save/restore regsets in the ADS */
u32 ads_regset_size;
+ /**
+ * @ads_regset_count: number of save/restore registers in the ADS for
+ * each engine
+ */
+ u32 ads_regset_count[I915_NUM_ENGINES];
+ /** @ads_regset: save/restore regsets in the ADS */
+ struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
/** @ads_engine_usage_size: size of engine usage in the ADS */
@@ -333,6 +349,8 @@ int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
struct i915_vma **out_vma, void **out_vaddr);
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
@@ -409,6 +427,8 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len);
+int intel_guc_error_capture_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len);
void intel_guc_find_hung_context(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 162b89198567..7e41175618f5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -42,6 +42,10 @@
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | capture lists |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | private data |
* +---------------------------------------+
* | padding |
@@ -67,6 +71,12 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
+static u32 guc_ads_capture_size(struct intel_guc *guc)
+{
+ /* FIXME: Allocate a proper capture list */
+ return PAGE_ALIGN(PAGE_SIZE);
+}
+
static u32 guc_ads_private_data_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->fw.private_data_size);
@@ -87,7 +97,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
-static u32 guc_ads_private_data_offset(struct intel_guc *guc)
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
{
u32 offset;
@@ -97,6 +107,16 @@ static u32 guc_ads_private_data_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
+static u32 guc_ads_private_data_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_capture_offset(guc) +
+ guc_ads_capture_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
static u32 guc_ads_blob_size(struct intel_guc *guc)
{
return guc_ads_private_data_offset(guc) +
@@ -188,14 +208,18 @@ static void guc_mapping_table_init(struct intel_gt *gt,
/*
* The save/restore register list must be pre-calculated to a temporary
- * buffer of driver defined size before it can be generated in place
- * inside the ADS.
+ * buffer before it can be copied into the ADS.
*/
-#define MAX_MMIO_REGS 128 /* Arbitrary size, increase as needed */
struct temp_regset {
+ /*
+ * ptr to the section of the storage for the engine currently being
+ * worked on
+ */
struct guc_mmio_reg *registers;
- u32 used;
- u32 size;
+ /* ptr to the base of the allocated storage for all engines */
+ struct guc_mmio_reg *storage;
+ u32 storage_used;
+ u32 storage_max;
};
static int guc_mmio_reg_cmp(const void *a, const void *b)
@@ -206,18 +230,44 @@ static int guc_mmio_reg_cmp(const void *a, const void *b)
return (int)ra->offset - (int)rb->offset;
}
-static void guc_mmio_reg_add(struct temp_regset *regset,
- u32 offset, u32 flags)
+static struct guc_mmio_reg * __must_check
+__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
{
- u32 count = regset->used;
+ u32 pos = regset->storage_used;
+ struct guc_mmio_reg *slot;
+
+ if (pos >= regset->storage_max) {
+ size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
+ struct guc_mmio_reg *r = krealloc(regset->storage,
+ size, GFP_KERNEL);
+ if (!r) {
+ WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
+ -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
+
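+ /* krealloc() may have moved the storage, so re-base the per-engine registers pointer */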
+ regset->registers = r + (regset->registers - regset->storage);
+ regset->storage = r;
+ regset->storage_max = size / sizeof(*slot);
+ }
+
+ slot = &regset->storage[pos];
+ regset->storage_used++;
+ *slot = *reg;
+
+ return slot;
+}
+
+static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
+ u32 offset, u32 flags)
+{
+ u32 count = regset->storage_used - (regset->registers - regset->storage);
struct guc_mmio_reg reg = {
.offset = offset,
.flags = flags,
};
struct guc_mmio_reg *slot;
- GEM_BUG_ON(count >= regset->size);
-
/*
* The mmio list is built using separate lists within the driver.
* It's possible that at some point we may attempt to add the same
@@ -226,11 +276,11 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
*/
if (bsearch(&reg, regset->registers, count,
sizeof(reg), guc_mmio_reg_cmp))
- return;
+ return 0;
- slot = &regset->registers[count];
- regset->used++;
- *slot = reg;
+ slot = __mmio_reg_add(regset, &reg);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
while (slot-- > regset->registers) {
GEM_BUG_ON(slot[0].offset == slot[1].offset);
@@ -239,6 +289,8 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
swap(slot[1], slot[0]);
}
+
+ return 0;
}
#define GUC_MMIO_REG_ADD(regset, reg, masked) \
@@ -246,62 +298,71 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
i915_mmio_reg_offset((reg)), \
(masked) ? GUC_REGSET_MASKED : 0)
-static void guc_mmio_regset_init(struct temp_regset *regset,
- struct intel_engine_cs *engine)
+static int guc_mmio_regset_init(struct temp_regset *regset,
+ struct intel_engine_cs *engine)
{
const u32 base = engine->mmio_base;
struct i915_wa_list *wal = &engine->wa_list;
struct i915_wa *wa;
unsigned int i;
+ int ret = 0;
- regset->used = 0;
+ /*
+ * Each engine's register list starts where the previous one ended,
+ * i.e. at the current end of the shared storage
+ */
+ regset->registers = regset->storage + regset->storage_used;
- GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
- GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
- GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+ ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
+ ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
+ ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+ ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
- GUC_MMIO_REG_ADD(regset,
- RING_FORCE_TO_NONPRIV(base, i),
- false);
+ ret |= GUC_MMIO_REG_ADD(regset,
+ RING_FORCE_TO_NONPRIV(base, i),
+ false);
/* add in local MOCS registers */
for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
- GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+ ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+
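+ /* Any failed add above returned -ENOMEM; collapse the OR-ed results into a single error */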
+ return ret ? -1 : 0;
}
-static int guc_mmio_reg_state_query(struct intel_guc *guc)
+static long guc_mmio_reg_state_create(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- struct temp_regset temp_set;
- u32 total;
+ struct temp_regset temp_set = {};
+ long total = 0;
+ long ret;
- /*
- * Need to actually build the list in order to filter out
- * duplicates and other such data dependent constructions.
- */
- temp_set.size = MAX_MMIO_REGS;
- temp_set.registers = kmalloc_array(temp_set.size,
- sizeof(*temp_set.registers),
- GFP_KERNEL);
- if (!temp_set.registers)
- return -ENOMEM;
-
- total = 0;
for_each_engine(engine, gt, id) {
- guc_mmio_regset_init(&temp_set, engine);
- total += temp_set.used;
+ u32 used = temp_set.storage_used;
+
+ ret = guc_mmio_regset_init(&temp_set, engine);
+ if (ret < 0)
+ goto fail_regset_init;
+
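+ /* Record this engine's register count so the later copy into the ADS can be partitioned */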
+ guc->ads_regset_count[id] = temp_set.storage_used - used;
+ total += guc->ads_regset_count[id];
}
- kfree(temp_set.registers);
+ guc->ads_regset = temp_set.storage;
+
+ drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
+ (temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);
return total * sizeof(struct guc_mmio_reg);
+
+fail_regset_init:
+ kfree(temp_set.storage);
+ return ret;
}
static void guc_mmio_reg_state_init(struct intel_guc *guc,
@@ -309,40 +370,38 @@ static void guc_mmio_reg_state_init(struct intel_guc *guc,
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
+ struct guc_mmio_reg *ads_registers;
enum intel_engine_id id;
- struct temp_regset temp_set;
- struct guc_mmio_reg_set *ads_reg_set;
u32 addr_ggtt, offset;
- u8 guc_class;
offset = guc_ads_regset_offset(guc);
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
- temp_set.registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);
- temp_set.size = guc->ads_regset_size / sizeof(temp_set.registers[0]);
+ ads_registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);
+
+ memcpy(ads_registers, guc->ads_regset, guc->ads_regset_size);
for_each_engine(engine, gt, id) {
+ u32 count = guc->ads_regset_count[id];
+ struct guc_mmio_reg_set *ads_reg_set;
+ u8 guc_class;
+
/* Class index is checked in class converter */
GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);
guc_class = engine_class_to_guc_class(engine->class);
ads_reg_set = &blob->ads.reg_state_list[guc_class][engine->instance];
- guc_mmio_regset_init(&temp_set, engine);
- if (!temp_set.used) {
+ if (!count) {
ads_reg_set->address = 0;
ads_reg_set->count = 0;
continue;
}
ads_reg_set->address = addr_ggtt;
- ads_reg_set->count = temp_set.used;
+ ads_reg_set->count = count;
- temp_set.size -= temp_set.used;
- temp_set.registers += temp_set.used;
- addr_ggtt += temp_set.used * sizeof(struct guc_mmio_reg);
+ addr_ggtt += count * sizeof(struct guc_mmio_reg);
}
-
- GEM_BUG_ON(temp_set.size);
}
static void fill_engine_enable_masks(struct intel_gt *gt,
@@ -501,6 +560,26 @@ static void guc_init_golden_context(struct intel_guc *guc)
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}
+static void guc_capture_list_init(struct intel_guc *guc, struct __guc_ads_blob *blob)
+{
+ int i, j;
+ u32 addr_ggtt, offset;
+
+ offset = guc_ads_capture_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ /* FIXME: Populate a proper capture list */
+
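+ /* For now, every class, instance and global list points at the same placeholder page */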
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
+ for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
+ blob->ads.capture_instance[i][j] = addr_ggtt;
+ blob->ads.capture_class[i][j] = addr_ggtt;
+ }
+
+ blob->ads.capture_global[i] = addr_ggtt;
+ }
+}
+
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -534,6 +613,9 @@ static void __guc_ads_init(struct intel_guc *guc)
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ /* Capture list for hang debug */
+ guc_capture_list_init(guc, blob);
+
/* ADS */
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
@@ -561,8 +643,11 @@ int intel_guc_ads_create(struct intel_guc *guc)
GEM_BUG_ON(guc->ads_vma);
- /* Need to calculate the reg state size dynamically: */
- ret = guc_mmio_reg_state_query(guc);
+ /*
+ * Create the reg state buffer dynamically in system memory; it is
+ * copied into the final ads blob on gt init/reset
+ */
+ ret = guc_mmio_reg_state_create(guc);
if (ret < 0)
return ret;
guc->ads_regset_size = ret;
@@ -602,6 +687,7 @@ void intel_guc_ads_destroy(struct intel_guc *guc)
{
i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
guc->ads_blob = NULL;
+ kfree(guc->ads_regset);
}
static void guc_ads_private_data_reset(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index aa6dd6415202..2f7fc87a78e1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -112,18 +112,6 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
init_waitqueue_head(&ct->wq);
}
-static inline const char *guc_ct_buffer_type_to_str(u32 type)
-{
- switch (type) {
- case GUC_CTB_TYPE_HOST2GUC:
- return "SEND";
- case GUC_CTB_TYPE_GUC2HOST:
- return "RECV";
- default:
- return "<invalid>";
- }
-}
-
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
{
memset(desc, 0, sizeof(*desc));
@@ -156,71 +144,65 @@ static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
guc_ct_buffer_reset(ctb);
}
-static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
- u32 desc_addr, u32 buff_addr, u32 size)
+static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
{
- u32 request[HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN] = {
+ u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
- FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_REGISTER_CTB),
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE, size / SZ_4K - 1) |
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type),
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
- FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
+ FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
};
int ret;
- GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
- GEM_BUG_ON(size % SZ_4K);
+ GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);
- /* CT registration must go over MMIO */
+ /* CT control must go over MMIO */
ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
return ret > 0 ? -EPROTO : ret;
}
-static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
- u32 desc_addr, u32 buff_addr, u32 size)
+static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
{
int err;
- err = i915_inject_probe_error(guc_to_gt(ct_to_guc(ct))->i915, -ENXIO);
+ err = guc_action_control_ctb(ct_to_guc(ct), enable ?
+ GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
if (unlikely(err))
- return err;
+ CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
+ enabledisable(enable), ERR_PTR(err));
- err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
- desc_addr, buff_addr, size);
- if (unlikely(err))
- CT_ERROR(ct, "Failed to register %s buffer (%pe)\n",
- guc_ct_buffer_type_to_str(type), ERR_PTR(err));
return err;
}
-static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
+static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
+ u32 desc_addr, u32 buff_addr, u32 size)
{
- u32 request[HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN] = {
- FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
- FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
- FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
- };
- int ret;
-
- GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
-
- /* CT deregistration must go over MMIO */
- ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ int err;
- return ret > 0 ? -EPROTO : ret;
-}
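+ /* Register the CTB by passing its descriptor address, buffer address and size as self-config KLVs */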
+ err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
+ desc_addr);
+ if (unlikely(err))
+ goto failed;
-static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
-{
- int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
+ err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
+ buff_addr);
+ if (unlikely(err))
+ goto failed;
+ err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
+ size);
if (unlikely(err))
- CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n",
- guc_ct_buffer_type_to_str(type), ERR_PTR(err));
+failed:
+ CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
+ send ? "SEND" : "RECV", ERR_PTR(err));
+
return err;
}
@@ -308,7 +290,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- u32 base, desc, cmds;
+ u32 base, desc, cmds, size;
void *blob;
int err;
@@ -333,27 +315,27 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
*/
desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
- err = ct_register_buffer(ct, GUC_CTB_TYPE_GUC2HOST,
- desc, cmds, ct->ctbs.recv.size * 4);
-
+ size = ct->ctbs.recv.size * 4;
+ err = ct_register_buffer(ct, false, desc, cmds, size);
if (unlikely(err))
goto err_out;
desc = base + ptrdiff(ct->ctbs.send.desc, blob);
cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
- err = ct_register_buffer(ct, GUC_CTB_TYPE_HOST2GUC,
- desc, cmds, ct->ctbs.send.size * 4);
+ size = ct->ctbs.send.size * 4;
+ err = ct_register_buffer(ct, true, desc, cmds, size);
+ if (unlikely(err))
+ goto err_out;
+ err = ct_control_enable(ct, true);
if (unlikely(err))
- goto err_deregister;
+ goto err_out;
ct->enabled = true;
ct->stall_time = KTIME_MAX;
return 0;
-err_deregister:
- ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
err_out:
CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
return err;
@@ -372,8 +354,7 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
ct->enabled = false;
if (intel_guc_is_fw_running(guc)) {
- ct_deregister_buffer(ct, GUC_CTB_TYPE_HOST2GUC);
- ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
+ ct_control_enable(ct, false);
}
}
@@ -662,6 +643,7 @@ static int ct_send(struct intel_guc_ct *ct,
struct ct_request request;
unsigned long flags;
unsigned int sleep_period_ms = 1;
+ bool send_again;
u32 fence;
int err;
@@ -671,6 +653,9 @@ static int ct_send(struct intel_guc_ct *ct,
GEM_BUG_ON(!response_buf && response_buf_size);
might_sleep();
+resend:
+ send_again = false;
+
/*
* We use a lazy spin wait loop here as we believe that if the CT
* buffers are sized correctly the flow control condition should be
@@ -725,6 +710,13 @@ retry:
goto unlink;
}
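+ /* A NO_RESPONSE_RETRY reply asks us to resend the request from scratch */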
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+ CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
+ FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
+ send_again = true;
+ goto unlink;
+ }
+
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
err = -EIO;
goto unlink;
@@ -747,6 +739,9 @@ unlink:
list_del(&request.link);
spin_unlock_irqrestore(&ct->requests.lock, flags);
+ if (unlikely(send_again))
+ goto resend;
+
return err;
}
@@ -789,7 +784,7 @@ static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
{
struct ct_incoming_msg *msg;
- msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
+ msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
if (msg)
msg->size = num_dwords;
return msg;
@@ -918,6 +913,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
+ FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
@@ -990,9 +986,27 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r
case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
ret = intel_guc_context_reset_process_msg(guc, payload, len);
break;
+ case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ ret = intel_guc_error_capture_process_msg(guc, payload, len);
+ if (unlikely(ret))
+ CT_ERROR(ct, "error capture notification failed %x %*ph\n",
+ action, 4 * len, payload);
+ break;
case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
ret = intel_guc_engine_failure_process_msg(guc, payload, len);
break;
+ case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
+ intel_guc_log_handle_flush_event(&guc->log);
+ ret = 0;
+ break;
+ case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+ CT_ERROR(ct, "Received GuC crash dump notification!\n");
+ ret = 0;
+ break;
+ case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
+ CT_ERROR(ct, "Received GuC exception notification!\n");
+ ret = 0;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1098,6 +1112,7 @@ static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
break;
case GUC_HXG_TYPE_RESPONSE_SUCCESS:
case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
err = ct_handle_response(ct, msg);
break;
default:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index dcb51b53b495..a0372735cddb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -16,13 +16,15 @@
static void guc_prepare_xfer(struct intel_uncore *uncore)
{
- u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
- GUC_ENABLE_READ_CACHE_LOGIC |
- GUC_ENABLE_MIA_CACHING |
+ u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
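+ /* The SRAM init and MIA caching flags are only set on platforms before XeHP (12.50) */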
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_MIA_CACHING;
+
/* Must program this register before loading the ucode with DMA */
intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
@@ -91,11 +93,10 @@ static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
{
u32 val = intel_uncore_read(uncore, GUC_STATUS);
- u32 uk_val = val & GS_UKERNEL_MASK;
+ u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, val);
*status = val;
- return (uk_val == GS_UKERNEL_READY) ||
- ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+ return uk_val == INTEL_GUC_LOAD_STATUS_READY;
}
static int guc_wait_ucode(struct intel_uncore *uncore)
@@ -106,17 +107,26 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
/*
* Wait for the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
- * Measurements indicate this should take no more than 20ms, so a
+ * Measurements indicate this should take no more than 20ms
+ * (assuming the GT clock is at maximum frequency). So, a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver may decide to reset the GuC and
* attempt the ucode load again if this happens.)
+ *
+ * FIXME: There is a known (but exceedingly unlikely) race condition
+ * where the asynchronous frequency management code could reduce
+ * the GT clock while a GuC reload is in progress (during a full
+ * GT reset). A fix is in progress but there are complex locking
+ * issues to be resolved. In the meantime, bump the timeout to
+ * 200ms. Even at the slowest clock, this should be sufficient. And
+ * in the working case, a larger timeout makes no difference.
*/
- ret = wait_for(guc_ready(uncore, &status), 100);
+ ret = wait_for(guc_ready(uncore, &status), 200);
if (ret) {
struct drm_device *drm = &uncore->i915->drm;
- drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_dbg(drm, "GuC load failed: status: Reset = %d, "
+ drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_info(drm, "GuC load failed: status: Reset = %d, "
"BootROM = 0x%02X, UKernel = 0x%02X, "
"MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -126,13 +136,13 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_dbg(drm, "GuC firmware signature verification failed\n");
+ drm_info(drm, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
- if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
- drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
- intel_uncore_read(uncore, SOFT_SCRATCH(13)));
+ if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) {
+ drm_info(drm, "GuC firmware exception. EIP: %#x\n",
+ intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 7072e30e99f4..6a4612a852e2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -16,6 +16,7 @@
#include "abi/guc_errors_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_communication_ctb_abi.h"
+#include "abi/guc_klvs_abi.h"
#include "abi/guc_messages_abi.h"
/* Payload length only i.e. don't include G2H header length */
@@ -84,19 +85,24 @@
#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
#define GUC_CTL_LOG_PARAMS 0
-#define GUC_LOG_VALID (1 << 0)
-#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
-#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
+#define GUC_LOG_VALID BIT(0)
+#define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1)
+#define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2)
+#define GUC_LOG_LOG_ALLOC_UNITS BIT(3)
#define GUC_LOG_CRASH_SHIFT 4
#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DEBUG_SHIFT 6
#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT)
+#define GUC_LOG_CAPTURE_SHIFT 10
+#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_WA 1
+#define GUC_WA_POLLCS BIT(18)
+
#define GUC_CTL_FEATURE 2
-#define GUC_CTL_DISABLE_SCHEDULER (1 << 14)
#define GUC_CTL_ENABLE_SLPC BIT(2)
+#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
#define GUC_LOG_VERBOSITY_SHIFT 0
@@ -116,6 +122,8 @@
#define GUC_ADS_ADDR_SHIFT 1
#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
+#define GUC_CTL_DEVID 5
+
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
/* Generic GT SysInfo data types */
@@ -263,7 +271,10 @@ struct guc_mmio_reg {
u32 offset;
u32 value;
u32 flags;
-#define GUC_REGSET_MASKED (1 << 0)
+ u32 mask;
+#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
+#define GUC_REGSET_RESTORE_ONLY BIT(3)
} __packed;
/* GuC register sets */
@@ -280,6 +291,12 @@ struct guc_gt_system_info {
u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
} __packed;
+enum {
+ GUC_CAPTURE_LIST_INDEX_PF = 0,
+ GUC_CAPTURE_LIST_INDEX_VF = 1,
+ GUC_CAPTURE_LIST_INDEX_MAX = 2,
+};
+
/* GuC Additional Data Struct */
struct guc_ads {
struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
@@ -291,7 +308,11 @@ struct guc_ads {
u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
u32 private_data;
- u32 reserved[15];
+ u32 reserved2;
+ u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
+ u32 reserved[14];
} __packed;
/* Engine usage stats */
@@ -312,6 +333,7 @@ struct guc_engine_usage {
enum guc_log_buffer_type {
GUC_DEBUG_LOG_BUFFER,
GUC_CRASH_DUMP_LOG_BUFFER,
+ GUC_CAPTURE_LOG_BUFFER,
GUC_MAX_LOG_BUFFER
};
@@ -342,6 +364,7 @@ struct guc_log_buffer_state {
u32 write_ptr;
u32 size;
u32 sampled_write_ptr;
+ u32 wrap_offset;
union {
struct {
u32 flush_to_file:1;
@@ -382,7 +405,7 @@ struct guc_shared_ctx_data {
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum intel_guc_recv_message {
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
- INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
+ INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
};
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 7b0b43e87244..b53f61f3101f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -56,20 +56,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static void guc_log_enable_flush_events(struct intel_guc_log *log)
-{
- intel_guc_enable_msg(log_to_guc(log),
- INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
- INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
-}
-
-static void guc_log_disable_flush_events(struct intel_guc_log *log)
-{
- intel_guc_disable_msg(log_to_guc(log),
- INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
- INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
-}
-
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -202,6 +188,8 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
return DEBUG_BUFFER_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
return CRASH_BUFFER_SIZE;
+ case GUC_CAPTURE_LOG_BUFFER:
+ return CAPTURE_BUFFER_SIZE;
default:
MISSING_CASE(type);
}
@@ -464,14 +452,19 @@ int intel_guc_log_create(struct intel_guc_log *log)
* +-------------------------------+ 32B
* | Debug state header |
* +-------------------------------+ 64B
+ * | Capture state header |
+ * +-------------------------------+ 96B
* | |
* +===============================+ PAGE_SIZE (4KB)
* | Crash Dump logs |
* +===============================+ + CRASH_SIZE
* | Debug logs |
* +===============================+ + DEBUG_SIZE
+ * | Capture logs |
+ * +===============================+ + CAPTURE_SIZE
*/
- guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE;
+ guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
+ CAPTURE_BUFFER_SIZE;
vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
@@ -593,8 +586,6 @@ int intel_guc_log_relay_start(struct intel_guc_log *log)
if (log->relay.started)
return -EEXIST;
- guc_log_enable_flush_events(log);
-
/*
* When GuC is logging without us relaying to userspace, we're ignoring
* the flush notification. This means that we need to unconditionally
@@ -641,7 +632,6 @@ static void guc_log_relay_stop(struct intel_guc_log *log)
if (!log->relay.started)
return;
- guc_log_disable_flush_events(log);
intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
@@ -662,7 +652,8 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
- queue_work(system_highpri_wq, &log->relay.flush_work);
+ if (log->relay.started)
+ queue_work(system_highpri_wq, &log->relay.flush_work);
}
static const char *
@@ -673,6 +664,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
return "DEBUG";
case GUC_CRASH_DUMP_LOG_BUFFER:
return "CRASH";
+ case GUC_CAPTURE_LOG_BUFFER:
+ return "CAPTURE";
default:
MISSING_CASE(type);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index fe6ab7550a14..d7e1b6471fed 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -18,12 +18,15 @@ struct intel_guc;
#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_2M
#define DEBUG_BUFFER_SIZE SZ_16M
+#define CAPTURE_BUFFER_SIZE SZ_4M
#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
#define CRASH_BUFFER_SIZE SZ_1M
#define DEBUG_BUFFER_SIZE SZ_2M
+#define CAPTURE_BUFFER_SIZE SZ_1M
#else
#define CRASH_BUFFER_SIZE SZ_8K
#define DEBUG_BUFFER_SIZE SZ_64K
+#define CAPTURE_BUFFER_SIZE SZ_16K
#endif
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 85846c5570c5..66027a42cda9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -22,10 +22,6 @@
#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT)
-#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT)
@@ -98,6 +94,9 @@
#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
+#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
+#define GUC_IS_PRIVILEGED (1<<29)
+
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ddbea939b1dc..b3d28b003b73 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -3,9 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
+#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 04b8321fc758..b3a429a92c0d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1430,7 +1430,8 @@ submission_disabled(struct intel_guc *guc)
struct i915_sched_engine * const sched_engine = guc->sched_engine;
return unlikely(!sched_engine ||
- !__tasklet_is_enabled(&sched_engine->tasklet));
+ !__tasklet_is_enabled(&sched_engine->tasklet) ||
+ intel_gt_is_wedged(guc_to_gt(guc)));
}
static void disable_submission(struct intel_guc *guc)
@@ -1475,8 +1476,6 @@ static void guc_flush_destroyed_contexts(struct intel_guc *guc);
void intel_guc_submission_reset_prepare(struct intel_guc *guc)
{
- int i;
-
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
@@ -1493,21 +1492,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
-
- /*
- * Handle any outstanding G2Hs before reset. Call IRQ handler directly
- * each pass as interrupt have been disabled. We always scrub for
- * outstanding G2H as it is possible for outstanding_submission_g2h to
- * be incremented after the context state update.
- */
- for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) {
- intel_guc_to_host_event_handler(guc);
-#define wait_for_reset(guc, wait_var) \
- intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20))
- do {
- wait_for_reset(guc, &guc->outstanding_submission_g2h);
- } while (!list_empty(&guc->ct.requests.incoming));
- }
+ flush_work(&guc->ct.requests.worker);
scrub_guc_desc_for_outstanding_g2h(guc);
}
@@ -1612,7 +1597,6 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
unsigned long flags;
u32 head;
int i, number_children = ce->parallel.number_children;
- bool skip = false;
struct intel_context *parent = ce;
GEM_BUG_ON(intel_context_is_child(ce));
@@ -1623,23 +1607,10 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
* GuC will implicitly mark the context as non-schedulable when it sends
* the reset notification. Make sure our state reflects this change. The
* context will be marked enabled on resubmission.
- *
- * XXX: If the context is reset as a result of the request cancellation
- * this G2H is received after the schedule disable complete G2H which is
- * wrong as this creates a race between the request cancellation code
- * re-submitting the context and this G2H handler. This is a bug in the
- * GuC but can be worked around in the meantime but converting this to a
- * NOP if a pending enable is in flight as this indicates that a request
- * cancellation has occurred.
*/
spin_lock_irqsave(&ce->guc_state.lock, flags);
- if (likely(!context_pending_enable(ce)))
- clr_context_enabled(ce);
- else
- skip = true;
+ clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (unlikely(skip))
- goto out_put;
/*
* For each context in the relationship find the hanging request
@@ -1671,7 +1642,6 @@ next_context:
}
__unwind_incomplete_requests(parent);
-out_put:
intel_context_put(parent);
}
@@ -1806,7 +1776,7 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
- test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) {
+ intel_gt_is_wedged(guc_to_gt(guc)))) {
return;
}
@@ -1825,6 +1795,7 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
}
static void destroyed_worker_func(struct work_struct *w);
+static void reset_fail_worker_func(struct work_struct *w);
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
@@ -1855,6 +1826,8 @@ int intel_guc_submission_init(struct intel_guc *guc)
INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
INIT_WORK(&guc->submission_state.destroyed_worker,
destroyed_worker_func);
+ INIT_WORK(&guc->submission_state.reset_fail_worker,
+ reset_fail_worker_func);
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
@@ -2611,12 +2584,6 @@ static void guc_context_cancel_request(struct intel_context *ce,
true);
}
- /*
- * XXX: Racey if context is reset, see comment in
- * __guc_reset_context().
- */
- flush_work(&ce_to_guc(ce)->ct.requests.worker);
-
guc_context_unblock(block_context);
intel_context_put(ce);
}
@@ -3330,8 +3297,6 @@ static void guc_parent_context_unpin(struct intel_context *ce)
GEM_BUG_ON(!intel_context_is_parent(ce));
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
- if (ce->parallel.last_rq)
- i915_request_put(ce->parallel.last_rq);
unpin_guc_id(guc, ce);
lrc_unpin(ce);
}
@@ -4053,14 +4018,14 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- /*
- * XXX: Racey if request cancellation has occurred, see comment in
- * __guc_reset_context().
- */
- if (likely(!intel_context_is_banned(ce) &&
- !context_blocked(ce))) {
+ if (likely(!intel_context_is_banned(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
+ } else {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Invalid GuC engine reset notificaion for 0x%04X on %s: banned = %d, blocked = %d",
+ ce->guc_id.id, ce->engine->name, intel_context_is_banned(ce),
+ context_blocked(ce));
}
}
@@ -4099,6 +4064,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
return 0;
}
+int intel_guc_error_capture_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len)
+{
+ int status;
+
+ if (unlikely(len != 1)) {
+ drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ status = msg[0];
+ drm_info(&guc_to_gt(guc)->i915->drm, "Got error capture: status = %d", status);
+
+ /* FIXME: Do something with the capture */
+
+ return 0;
+}
+
static struct intel_engine_cs *
guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
{
@@ -4111,6 +4094,26 @@ guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
return gt->engine_class[engine_class][instance];
}
+static void reset_fail_worker_func(struct work_struct *w)
+{
+ struct intel_guc *guc = container_of(w, struct intel_guc,
+ submission_state.reset_fail_worker);
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_engine_mask_t reset_fail_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ reset_fail_mask = guc->submission_state.reset_fail_mask;
+ guc->submission_state.reset_fail_mask = 0;
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (likely(reset_fail_mask))
+ intel_gt_handle_error(gt, reset_fail_mask,
+ I915_ERROR_CAPTURE,
+ "GuC failed to reset engine mask=0x%x\n",
+ reset_fail_mask);
+}
+
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
@@ -4118,6 +4121,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
struct intel_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
+ unsigned long flags;
if (unlikely(len != 3)) {
drm_err(&gt->i915->drm, "Invalid length %u", len);
@@ -4142,10 +4146,15 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
guc_class, instance, engine->name, reason);
- intel_gt_handle_error(gt, engine->mask,
- I915_ERROR_CAPTURE,
- "GuC failed to reset %s (reason=0x%08x)\n",
- engine->name, reason);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ guc->submission_state.reset_fail_mask |= engine->mask;
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ /*
+ * A GT reset flushes this worker queue (G2H handler), so we must use
+ * another worker to trigger a GT reset.
+ */
+ queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
return 0;
}
@@ -4514,27 +4523,31 @@ static inline bool skip_handshake(struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
}
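+/* Dwords always emitted after the skippable section: a 4-dword seqno write plus MI_USER_INTERRUPT and MI_NOOP */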
+#define NON_SKIP_LEN 6
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
+ __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+ __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
GEM_BUG_ON(!intel_context_is_parent(ce));
if (unlikely(skip_handshake(rq))) {
/*
* NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
- * the -6 comes from the length of the emits below.
+ * the NON_SKIP_LEN comes from the length of the emits below.
*/
memset(cs, 0, sizeof(u32) *
- (ce->engine->emit_fini_breadcrumb_dw - 6));
- cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+ cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
} else {
cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
}
/* Emit fini breadcrumb */
+ before_fini_breadcrumb_user_interrupt_cs = cs;
cs = gen8_emit_ggtt_write(cs,
rq->fence.seqno,
i915_request_active_timeline(rq)->hwsp_offset,
@@ -4544,6 +4557,12 @@ emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
+ /* Ensure our math for skip + emit is correct */
+ GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+ cs);
+ GEM_BUG_ON(start_fini_breadcrumb_cs +
+ ce->engine->emit_fini_breadcrumb_dw != cs);
+
rq->tail = intel_ring_offset(rq, cs);
return cs;
@@ -4586,22 +4605,25 @@ emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
+ __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+ __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
GEM_BUG_ON(!intel_context_is_child(ce));
if (unlikely(skip_handshake(rq))) {
/*
* NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
- * the -6 comes from the length of the emits below.
+ * the NON_SKIP_LEN comes from the length of the emits below.
*/
memset(cs, 0, sizeof(u32) *
- (ce->engine->emit_fini_breadcrumb_dw - 6));
- cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+ cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
} else {
cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
}
/* Emit fini breadcrumb */
+ before_fini_breadcrumb_user_interrupt_cs = cs;
cs = gen8_emit_ggtt_write(cs,
rq->fence.seqno,
i915_request_active_timeline(rq)->hwsp_offset,
@@ -4611,11 +4633,19 @@ emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
+ /* Ensure our math for skip + emit is correct */
+ GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+ cs);
+ GEM_BUG_ON(start_fini_breadcrumb_cs +
+ ce->engine->emit_fini_breadcrumb_dw != cs);
+
rq->tail = intel_ring_offset(rq, cs);
return cs;
}
+#undef NON_SKIP_LEN
+
static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned long flags)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d10b227ac4aa..556829de9c17 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -124,6 +124,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ drm_info(&gt->i915->drm, "HuC authenticated\n");
return 0;
fail:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 09ed29df67bc..da199aa6989f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -432,6 +432,15 @@ static int __uc_check_hw(struct intel_uc *uc)
return 0;
}
+static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ drm_info(&i915->drm, "%s firmware %s version %u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->path,
+ fw->major_ver_found, fw->minor_ver_found);
+}
+
static int __uc_init_hw(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
@@ -442,6 +451,11 @@ static int __uc_init_hw(struct intel_uc *uc)
GEM_BUG_ON(!intel_uc_supports_guc(uc));
GEM_BUG_ON(!intel_uc_wants_guc(uc));
+ print_fw_ver(uc, &guc->fw);
+
+ if (intel_uc_uses_huc(uc))
+ print_fw_ver(uc, &huc->fw);
+
if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
@@ -507,24 +521,11 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
- drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
- intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
- guc->fw.major_ver_found, guc->fw.minor_ver_found,
- "submission",
+ drm_info(&i915->drm, "GuC submission %s\n",
enableddisabled(intel_uc_uses_guc_submission(uc)));
-
- drm_info(&i915->drm, "GuC SLPC: %s\n",
+ drm_info(&i915->drm, "GuC SLPC %s\n",
enableddisabled(intel_uc_uses_guc_slpc(uc)));
- if (intel_uc_uses_huc(uc)) {
- drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
- intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
- huc->fw.path,
- huc->fw.major_ver_found, huc->fw.minor_ver_found,
- "authenticated",
- yesno(intel_huc_is_authenticated(huc)));
- }
-
return 0;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index dd588ecf048a..c88113044494 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -5,6 +5,8 @@
#include <linux/bitfield.h>
#include <linux/firmware.h>
+
+#include <drm/drm_cache.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
@@ -50,21 +52,21 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* firmware as TGL.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0)) \
- fw_def(DG1, 0, guc_def(dg1, 62, 0, 0)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0)) \
- fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0)) \
- fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0))
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3)) \
+ fw_def(DG1, 0, guc_def(dg1, 69, 0, 3)) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 69, 0, 3)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 69, 0, 3)) \
+ fw_def(JASPERLAKE, 0, guc_def(ehl, 69, 0, 3)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 69, 0, 3)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 69, 0, 3)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 69, 0, 3)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 69, 0, 3)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 69, 0, 3)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 69, 0, 3)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 69, 0, 3)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 69, 0, 3)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 69, 0, 3))
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
@@ -449,20 +451,19 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
- struct i915_vma *dummy = &uc_fw->dummy;
+ struct i915_vma_resource *dummy = &uc_fw->dummy;
u32 pte_flags = 0;
- dummy->node.start = uc_fw_ggtt_offset(uc_fw);
- dummy->node.size = obj->base.size;
- dummy->pages = obj->mm.pages;
- dummy->vm = &ggtt->vm;
+ dummy->start = uc_fw_ggtt_offset(uc_fw);
+ dummy->node_size = obj->base.size;
+ dummy->bi.pages = obj->mm.pages;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);
+ GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
/* uc_fw->obj cache domains were not controlled across suspend */
if (i915_gem_object_has_struct_page(obj))
- drm_clflush_sg(dummy->pages);
+ drm_clflush_sg(dummy->bi.pages);
if (i915_gem_object_is_lmem(obj))
pte_flags |= PTE_LM;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index d9d1dc0b4cbb..3229018877d3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -85,7 +85,7 @@ struct intel_uc_fw {
* threaded as it done during driver load (inherently single threaded)
* or during a GT reset (mutex guarantees single threaded).
*/
- struct i915_vma dummy;
+ struct i915_vma_resource dummy;
struct i915_vma *rsa_data;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index d3327b802b76..a115894d5896 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -157,7 +157,7 @@ static int intel_guc_steal_guc_ids(void *arg)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine = intel_selftest_find_any_engine(gt);
sv = guc->submission_state.num_guc_ids;
- guc->submission_state.num_guc_ids = 4096;
+ guc->submission_state.num_guc_ids = 512;
/* Create spinner to block requests in below loop */
ce[context_index] = intel_context_create(engine);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 6b3dedd321bb..557f3314291a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -64,7 +64,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
mutex_lock(&gt->ggtt->vm.mutex);
mmio_hw_access_pre(gt);
- ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
+ ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index d02a48a5335a..c95c25d2addb 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -31,6 +31,11 @@
#include <linux/dma-buf.h>
#include <linux/vfio.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+
+#include "gem/i915_gem_dmabuf.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
@@ -85,7 +90,7 @@ static int vgpu_gem_get_pages(
kfree(st);
return ret;
}
- gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index c8dcda6d4f0d..66d354c4195b 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -163,7 +163,7 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
&write_pointer, 4);
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 28a94c3dc991..d4082f4b9be1 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1150,7 +1150,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-/**
+/*
 * Check whether we can use a 2M page
* @vgpu: target vgpu
* @entry: target pfn's gtt entry
@@ -2195,7 +2195,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
}
/**
- * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
* @vgpu: a vGPU
* @off: register offset
* @p_data: data will be returned to guest
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index c2ae79092b14..520a7e1942f3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -40,8 +40,10 @@
#include "i915_reg.h"
#include "gvt.h"
#include "i915_pvinfo.h"
+#include "intel_mchbar_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
+#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
/* XXX FIXME i915 has changed PP_XXX definition */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 20b82fb036f8..e8d6c76e9234 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -46,6 +46,8 @@
#include <linux/nospec.h>
+#include <drm/drm_edid.h>
+
#include "i915_drv.h"
#include "gvt.h"
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index aea4c30645ff..5f6e41636655 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_cache.h>
+
#include "gt/intel_engine.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4fe1ce1433fe..946bbe57bfe5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -49,6 +49,7 @@
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
+#include "intel_mchbar_regs.h"
#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -137,6 +138,17 @@ static const char *stringify_vma_type(const struct i915_vma *vma)
return "ppgtt";
}
+static const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
+ default: return "";
+ }
+}
+
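With the helper now local to debugfs, the object-description code below can annotate each line with its cache mode. An illustrative call (a sketch; using obj->cache_level as the lookup key is our assumption):

/* Sketch: tag an object dump with its cache mode. */
seq_printf(m, "%s", i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level));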
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -171,7 +183,8 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
stringify_vma_type(vma),
vma->node.start, vma->node.size,
- stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
+ stringify_page_sizes(vma->resource->page_sizes_gtt,
+ NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
switch (vma->ggtt_view.type) {
case I915_GGTT_VIEW_NORMAL:
@@ -391,9 +404,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
+ swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
+ swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 4b56d4d83e75..1c67ff735f18 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -62,6 +62,8 @@
#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_create.h"
+#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
@@ -71,6 +73,7 @@
#include "pxp/intel_pxp_pm.h"
+#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drv.h"
@@ -574,6 +577,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
i915_perf_init(dev_priv);
+ ret = intel_gt_assign_ggtt(to_gt(dev_priv));
+ if (ret)
+ goto err_perf;
+
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
goto err_perf;
@@ -590,8 +597,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- intel_gt_init_hw_early(to_gt(dev_priv), &dev_priv->ggtt);
-
ret = intel_gt_probe_lmem(to_gt(dev_priv));
if (ret)
goto err_mem_regions;
@@ -1149,7 +1154,7 @@ static int i915_drm_suspend(struct drm_device *dev)
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);
- i915_ggtt_suspend(&dev_priv->ggtt);
+ i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
i915_save_display(dev_priv);
@@ -1273,7 +1278,7 @@ static int i915_drm_resume(struct drm_device *dev)
if (ret)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
- i915_ggtt_resume(&dev_priv->ggtt);
+ i915_ggtt_resume(to_gt(dev_priv)->ggtt);
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
@@ -1821,6 +1826,21 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
+/*
+ * Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ * - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 6
+#define DRIVER_PATCHLEVEL 0
+
static const struct drm_driver i915_drm_driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 9ef8db4aa0a6..9d11de65daaf 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -12,6 +12,11 @@ struct pci_dev;
struct pci_device_id;
struct drm_i915_private;
+#define DRIVER_NAME "i915"
+#define DRIVER_DESC "Intel Graphics"
+#define DRIVER_DATE "20201103"
+#define DRIVER_TIMESTAMP 1604406085
+
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f7bdb973c880..f600d1cb01b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,38 +31,17 @@
#define _I915_DRV_H_
#include <uapi/drm/i915_drm.h>
-#include <uapi/drm/drm_fourcc.h>
#include <asm/hypervisor.h>
-#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <linux/backlight.h>
-#include <linux/hash.h>
#include <linux/intel-iommu.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/perf_event.h>
#include <linux/pm_qos.h>
-#include <linux/dma-resv.h>
-#include <linux/shmem_fs.h>
-#include <linux/stackdepot.h>
-#include <linux/xarray.h>
-
-#include <drm/drm_gem.h>
-#include <drm/drm_auth.h>
-#include <drm/drm_cache.h>
-#include <drm/drm_util.h>
-#include <drm/drm_dsc.h>
-#include <drm/drm_atomic.h>
+
#include <drm/drm_connector.h>
-#include <drm/i915_mei_hdcp_interface.h>
#include <drm/ttm/ttm_device.h>
-#include "i915_params.h"
-#include "i915_utils.h"
-
#include "display/intel_bios.h"
#include "display/intel_cdclk.h"
#include "display/intel_display.h"
@@ -77,9 +56,9 @@
#include "display/intel_opregion.h"
#include "gem/i915_gem_context_types.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
-#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
@@ -87,6 +66,12 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
+#include "i915_gem.h"
+#include "i915_gpu_error.h"
+#include "i915_params.h"
+#include "i915_perf_types.h"
+#include "i915_scheduler.h"
+#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
@@ -94,28 +79,32 @@
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
-#include "intel_wakeref.h"
#include "intel_wopcm.h"
-#include "i915_gem.h"
-#include "i915_gem_gtt.h"
-#include "i915_gpu_error.h"
-#include "i915_perf_types.h"
-#include "i915_request.h"
-#include "i915_scheduler.h"
-#include "gt/intel_timeline.h"
-#include "i915_vma.h"
-
-
-/* General customization:
- */
-
-#define DRIVER_NAME "i915"
-#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20201103"
-#define DRIVER_TIMESTAMP 1604406085
-
+struct dpll;
+struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_audio_funcs;
+struct intel_cdclk_config;
+struct intel_cdclk_funcs;
+struct intel_cdclk_state;
+struct intel_cdclk_vals;
+struct intel_color_funcs;
+struct intel_connector;
+struct intel_crtc;
+struct intel_dp;
+struct intel_dpll_funcs;
+struct intel_encoder;
+struct intel_fbdev;
+struct intel_fdi_funcs;
+struct intel_hotplug_funcs;
+struct intel_initial_plane_config;
+struct intel_limit;
+struct intel_overlay;
+struct intel_overlay_error_state;
+struct vlv_s0ix_state;
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
@@ -166,117 +155,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-struct drm_i915_private;
-
-struct drm_i915_file_private {
- struct drm_i915_private *dev_priv;
-
- union {
- struct drm_file *file;
- struct rcu_head rcu;
- };
-
- /** @proto_context_lock: Guards all struct i915_gem_proto_context
- * operations
- *
- * This not only guards @proto_context_xa, but is always held
- * whenever we manipulate any struct i915_gem_proto_context,
- * including finalizing it on first actual use of the GEM context.
- *
- * See i915_gem_proto_context.
- */
- struct mutex proto_context_lock;
-
- /** @proto_context_xa: xarray of struct i915_gem_proto_context
- *
- * Historically, the context uAPI allowed for two methods of
- * setting context parameters: SET_CONTEXT_PARAM and
- * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called
- * at any time while the later happens as part of
- * GEM_CONTEXT_CREATE. Everything settable via one was settable
- * via the other. While some params are fairly simple and setting
- * them on a live context is harmless such as the context priority,
- * others are far trickier such as the VM or the set of engines.
- * In order to swap out the VM, for instance, we have to delay
- * until all current in-flight work is complete, swap in the new
- * VM, and then continue. This leads to a plethora of potential
- * race conditions we'd really rather avoid.
- *
- * We have since disallowed setting these more complex parameters
- * on active contexts. This works by delaying the creation of the
- * actual context until after the client is done configuring it
- * with SET_CONTEXT_PARAM. From the perspective of the client, it
- * has the same u32 context ID the whole time. From the
- * perspective of i915, however, it's a struct i915_gem_proto_context
- * right up until the point where we attempt to do something which
- * the proto-context can't handle. Then the struct i915_gem_context
- * gets created.
- *
- * This is accomplished via a little xarray dance. When
- * GEM_CONTEXT_CREATE is called, we create a struct
- * i915_gem_proto_context, reserve a slot in @context_xa but leave
- * it NULL, and place the proto-context in the corresponding slot
- * in @proto_context_xa. Then, in i915_gem_context_lookup(), we
- * first check @context_xa. If it's there, we return the struct
- * i915_gem_context and we're done. If it's not, we look in
- * @proto_context_xa and, if we find it there, we create the actual
- * context and kill the proto-context.
- *
- * In order for this dance to work properly, everything which ever
- * touches a struct i915_gem_proto_context is guarded by
- * @proto_context_lock, including context creation. Yes, this
- * means context creation now takes a giant global lock but it
- * can't really be helped and that should never be on any driver's
- * fast-path anyway.
- */
- struct xarray proto_context_xa;
-
- /** @context_xa: xarray of fully created i915_gem_context
- *
- * Write access to this xarray is guarded by @proto_context_lock.
- * Otherwise, writers may race with finalize_create_context_locked().
- *
- * See @proto_context_xa.
- */
- struct xarray context_xa;
- struct xarray vm_xa;
-
- unsigned int bsd_engine;
-
-/*
- * Every context ban increments per client ban score. Also
- * hangs in short succession increments ban score. If ban threshold
- * is reached, client is considered banned and submitting more work
- * will fail. This is a stop gap measure to limit the badly behaving
- * clients access to gpu. Note that unbannable contexts never increment
- * the client ban score.
- */
-#define I915_CLIENT_SCORE_HANG_FAST 1
-#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
-#define I915_CLIENT_SCORE_CONTEXT_BAN 3
-#define I915_CLIENT_SCORE_BANNED 9
- /** ban_score: Accumulated score of all ctx bans and fast hangs. */
- atomic_t ban_score;
- unsigned long hang_timestamp;
-};
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: Add Power Management
- * 1.3: Add vblank support
- * 1.4: Fix cmdbuffer path, add heap destroy
- * 1.5: Add vblank pipe configuration
- * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
- * - Support vertical blank on secondary display pipe
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 6
-#define DRIVER_PATCHLEVEL 0
-
-struct intel_overlay;
-struct intel_overlay_error_state;
-
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -286,23 +164,6 @@ struct sdvo_device_mapping {
u8 ddc_pin;
};
-struct intel_connector;
-struct intel_encoder;
-struct intel_atomic_state;
-struct intel_cdclk_config;
-struct intel_cdclk_funcs;
-struct intel_cdclk_state;
-struct intel_cdclk_vals;
-struct intel_initial_plane_config;
-struct intel_crtc;
-struct intel_limit;
-struct dpll;
-
-/* functions used internal in intel_pm.c */
-struct drm_i915_clock_gating_funcs {
- void (*init_clock_gating)(struct drm_i915_private *dev_priv);
-};
-
/* functions used for watermark calcs for display. */
struct drm_i915_wm_disp_funcs {
/* update_wm is for legacy wm management */
@@ -320,38 +181,6 @@ struct drm_i915_wm_disp_funcs {
int (*compute_global_watermarks)(struct intel_atomic_state *state);
};
-struct intel_color_funcs {
- int (*color_check)(struct intel_crtc_state *crtc_state);
- /*
- * Program double buffered color management registers during
- * vblank evasion. The registers should then latch during the
- * next vblank start, alongside any other double buffered registers
- * involved with the same commit.
- */
- void (*color_commit)(const struct intel_crtc_state *crtc_state);
- /*
- * Load LUTs (and other single buffered color management
- * registers). Will (hopefully) be called during the vblank
- * following the latching of any double buffered registers
- * involved with the same commit.
- */
- void (*load_luts)(const struct intel_crtc_state *crtc_state);
- void (*read_luts)(struct intel_crtc_state *crtc_state);
-};
-
-struct intel_hotplug_funcs {
- void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
-};
-
-struct intel_fdi_funcs {
- void (*fdi_link_train)(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
-};
-
-struct intel_dpll_funcs {
- int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
-};
-
struct drm_i915_display_funcs {
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -385,7 +214,6 @@ enum drrs_support_type {
SEAMLESS_DRRS_SUPPORT = 2
};
-struct intel_dp;
struct i915_drrs {
struct mutex mutex;
struct delayed_work work;
@@ -403,8 +231,6 @@ struct i915_drrs {
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
-struct intel_fbdev;
-
struct intel_gmbus {
struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
@@ -423,8 +249,6 @@ struct i915_suspend_saved_registers {
u16 saveGCDGMBUS;
};
-struct vlv_s0ix_state;
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -533,6 +357,9 @@ struct intel_vbt_data {
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
enum drm_panel_orientation orientation;
+ bool override_afc_startup;
+ u8 override_afc_startup_val;
+
enum drrs_support_type drrs_type;
struct {
@@ -613,7 +440,6 @@ struct i915_selftest_stash {
};
/* intel_audio.c private */
-struct intel_audio_funcs;
struct intel_audio_private {
/* Display internal audio functions */
const struct intel_audio_funcs *funcs;
@@ -815,8 +641,6 @@ struct drm_i915_private {
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
- struct i915_ggtt ggtt; /* VM representing the global address space */
-
struct i915_gem_mm mm;
/* Kernel Modesetting */
@@ -1263,6 +1087,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
+#define IS_DG2_G12(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_ADLS_RPLS(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
#define IS_ADLP_N(dev_priv) \
@@ -1379,16 +1205,17 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
/*
- * DG2 hardware steppings are a bit unusual. The hardware design was forked
- * to create two variants (G10 and G11) which have distinct workaround sets.
- * The G11 fork of the DG2 design resets the GT stepping back to "A0" for its
- * first iteration, even though it's more similar to a G10 B0 stepping in terms
- * of functionality and workarounds. However the display stepping does not
- * reset in the same manner --- a specific stepping like "B0" has a consistent
- * meaning regardless of whether it belongs to a G10 or G11 DG2.
+ * DG2 hardware steppings are a bit unusual. The hardware design was forked to
+ * create three variants (G10, G11, and G12) which each have distinct
+ * workaround sets. The G11 and G12 forks of the DG2 design reset the GT
+ * stepping back to "A0" for their first iterations, even though they're more
+ * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
+ * functionality and workarounds. However the display stepping does not reset
+ * in the same manner --- a specific stepping like "B0" has a consistent
+ * meaning regardless of whether it belongs to a G10, G11, or G12 DG2.
*
* TLDR: All GT workarounds and stepping-specific logic must be applied in
- * relation to a specific subplatform (G10 or G11), whereas display workarounds
+ * relation to a specific subplatform (G10/G11/G12), whereas display workarounds
* and stepping-specific logic will be applied with a general DG2-wide stepping
* number.
*/
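Concretely, per the comment above, a GT workaround must pair the stepping range with a subplatform check, while a display workaround may key off the DG2-wide display stepping alone. An illustrative sketch (the two apply_* helpers are placeholders, not i915 functions):

/* GT workaround: subplatform-qualified stepping. */
if (IS_DG2_G11(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
        apply_g11_gt_workaround(i915);          /* placeholder */

/* Display workaround: DG2-wide display stepping. */
if (IS_DG2(i915) && IS_DISPLAY_STEP(i915, STEP_B0, STEP_C0))
        apply_dg2_display_workaround(i915);     /* placeholder */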
@@ -1558,6 +1385,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
+#define HAS_GUC_DEPRIVILEGE(dev_priv) \
+ (INTEL_INFO(dev_priv)->has_guc_deprivilege)
+
static inline bool run_as_guest(void)
{
return !hypervisor_is_type(X86_HYPER_NATIVE);
@@ -1654,79 +1484,29 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
+#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-int i915_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-static inline u32 i915_reset_count(struct i915_gpu_error *error)
-{
- return atomic_read(&error->reset_count);
-}
-
-static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
- const struct intel_engine_cs *engine)
-{
- return atomic_read(&error->reset_engine_count[engine->uabi_class]);
-}
-
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-void i915_gem_suspend(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
-void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-
-struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
-
-struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
-
-static inline struct i915_address_space *
-i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
-{
- struct i915_address_space *vm;
-
- xa_lock(&file_priv->vm_xa);
- vm = xa_load(&file_priv->vm_xa, id);
- if (vm)
- kref_get(&vm->ref);
- xa_unlock(&file_priv->vm_xa);
-
- return vm;
-}
-
-/* i915_gem_internal.c */
-struct drm_i915_gem_object *
-i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
- phys_addr_t size);
-struct drm_i915_gem_object *
-__i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
- const struct drm_i915_gem_object_ops *ops,
- phys_addr_t size);
-
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
i915_gem_object_is_tiled(obj);
}
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
@@ -1734,14 +1514,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
-static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
-{
- if (GRAPHICS_VER(i915) >= 11)
- return ICL_HWS_CSB_WRITE_INDEX;
- else
- return I915_HWS_CSB_WRITE_INDEX;
-}
-
static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj, bool always_coherent)
diff --git a/drivers/gpu/drm/i915/i915_file_private.h b/drivers/gpu/drm/i915/i915_file_private.h
new file mode 100644
index 000000000000..fb16cc431b2a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_file_private.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_FILE_PRIVATE_H__
+#define __I915_FILE_PRIVATE_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+struct drm_i915_private;
+struct drm_file;
+
+struct drm_i915_file_private {
+ struct drm_i915_private *dev_priv;
+
+ union {
+ struct drm_file *file;
+ struct rcu_head rcu;
+ };
+
+ /** @proto_context_lock: Guards all struct i915_gem_proto_context
+ * operations
+ *
+ * This not only guards @proto_context_xa, but is always held
+ * whenever we manipulate any struct i915_gem_proto_context,
+ * including finalizing it on first actual use of the GEM context.
+ *
+ * See i915_gem_proto_context.
+ */
+ struct mutex proto_context_lock;
+
+ /** @proto_context_xa: xarray of struct i915_gem_proto_context
+ *
+ * Historically, the context uAPI allowed for two methods of
+ * setting context parameters: SET_CONTEXT_PARAM and
+ * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called
+ * at any time while the latter happens as part of
+ * GEM_CONTEXT_CREATE. Everything settable via one was settable
+ * via the other. While some params are fairly simple and setting
+ * them on a live context is harmless such as the context priority,
+ * others are far trickier such as the VM or the set of engines.
+ * In order to swap out the VM, for instance, we have to delay
+ * until all current in-flight work is complete, swap in the new
+ * VM, and then continue. This leads to a plethora of potential
+ * race conditions we'd really rather avoid.
+ *
+ * We have since disallowed setting these more complex parameters
+ * on active contexts. This works by delaying the creation of the
+ * actual context until after the client is done configuring it
+ * with SET_CONTEXT_PARAM. From the perspective of the client, it
+ * has the same u32 context ID the whole time. From the
+ * perspective of i915, however, it's a struct i915_gem_proto_context
+ * right up until the point where we attempt to do something which
+ * the proto-context can't handle. Then the struct i915_gem_context
+ * gets created.
+ *
+ * This is accomplished via a little xarray dance. When
+ * GEM_CONTEXT_CREATE is called, we create a struct
+ * i915_gem_proto_context, reserve a slot in @context_xa but leave
+ * it NULL, and place the proto-context in the corresponding slot
+ * in @proto_context_xa. Then, in i915_gem_context_lookup(), we
+ * first check @context_xa. If it's there, we return the struct
+ * i915_gem_context and we're done. If it's not, we look in
+ * @proto_context_xa and, if we find it there, we create the actual
+ * context and kill the proto-context.
+ *
+ * In order for this dance to work properly, everything which ever
+ * touches a struct i915_gem_proto_context is guarded by
+ * @proto_context_lock, including context creation. Yes, this
+ * means context creation now takes a giant global lock but it
+ * can't really be helped and that should never be on any driver's
+ * fast-path anyway.
+ */
+ struct xarray proto_context_xa;
+
+ /** @context_xa: xarray of fully created i915_gem_context
+ *
+ * Write access to this xarray is guarded by @proto_context_lock.
+ * Otherwise, writers may race with finalize_create_context_locked().
+ *
+ * See @proto_context_xa.
+ */
+ struct xarray context_xa;
+ struct xarray vm_xa;
+
+ unsigned int bsd_engine;
+
+/*
+ * Every context ban increments the per-client ban score. Hangs in
+ * short succession also increment the score. If the ban threshold is
+ * reached, the client is considered banned and submitting more work
+ * will fail. This is a stopgap measure to limit a badly behaving
+ * client's access to the GPU. Note that unbannable contexts never
+ * increment the client ban score.
+ */
+#define I915_CLIENT_SCORE_HANG_FAST 1
+#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN 3
+#define I915_CLIENT_SCORE_BANNED 9
+ /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+ atomic_t ban_score;
+ unsigned long hang_timestamp;
+};
+
+#endif /* __I915_FILE_PRIVATE_H__ */
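The xarray dance documented in @proto_context_xa above, reduced to a sketch; sketch_finalize() stands in for the real finalize_create_context_locked() mentioned in the @context_xa comment, and error handling is elided:

static struct i915_gem_context *
sketch_context_lookup(struct drm_i915_file_private *fpriv, u32 id)
{
        struct i915_gem_context *ctx;

        ctx = xa_load(&fpriv->context_xa, id);  /* fast path: finalized */
        if (ctx)
                return ctx;

        mutex_lock(&fpriv->proto_context_lock);
        ctx = xa_load(&fpriv->context_xa, id);  /* re-check under the lock */
        if (!ctx)
                ctx = sketch_finalize(fpriv, id); /* creates the context,
                                                   * kills the proto-context */
        mutex_unlock(&fpriv->proto_context_lock);

        return ctx;
}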
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5ef959a9f594..2e10187cd0a0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -25,7 +25,6 @@
*
*/
-#include <drm/drm_vma_manager.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
@@ -37,6 +36,9 @@
#include <linux/dma-buf.h>
#include <linux/mman.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_vma_manager.h>
+
#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
@@ -44,6 +46,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
@@ -52,9 +55,9 @@
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-
#include "intel_pm.h"
static int
@@ -89,7 +92,8 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma;
u64 pinned;
@@ -119,6 +123,8 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ assert_object_held(obj);
+
if (list_empty(&obj->vma.list))
return 0;
@@ -156,10 +162,16 @@ try_again:
spin_unlock(&obj->vma.lock);
if (vma) {
+ bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
ret = -EBUSY;
- if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma)) {
- if (flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK) {
+ if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
+ assert_object_held(vma->obj);
+ ret = i915_vma_unbind_async(vma, vm_trylock);
+ }
+
+ if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))) {
+ if (vm_trylock) {
if (mutex_trylock(&vma->vm->mutex)) {
ret = __i915_vma_unbind(vma);
mutex_unlock(&vma->vm->mutex);
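The control flow this hunk introduces, condensed: try the new async unbind first, and only fall back to the synchronous path while the vma still reports -EBUSY and the caller permits it (a sketch; the real code additionally handles the vm->mutex trylock variants):

ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ASYNC)
        ret = i915_vma_unbind_async(vma, vm_trylock);

if (ret == -EBUSY &&
    (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || !i915_vma_is_active(vma)))
        ret = __i915_vma_unbind(vma);   /* sync fallback, under vm->mutex */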
@@ -290,7 +302,7 @@ static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
bool write)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
struct i915_gem_ww_ctx ww;
int ret;
@@ -351,7 +363,7 @@ static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
struct i915_vma *vma)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
i915_gem_object_unpin_pages(obj);
if (drm_mm_node_allocated(node)) {
@@ -367,7 +379,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
void __user *user_data;
@@ -523,7 +535,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
@@ -824,7 +836,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
*/
list_for_each_entry_safe(obj, on,
- &i915->ggtt.userfault_list, userfault_link)
+ &to_gt(i915)->ggtt->userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
/*
@@ -832,8 +844,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
+ struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];
/*
* Ideally we want to assert that the fence register is not
@@ -874,7 +886,7 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
int ret;
@@ -1124,7 +1136,7 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
- i915_ggtt_resume(&dev_priv->ggtt);
+ i915_ggtt_resume(to_gt(dev_priv)->ggtt);
intel_init_clock_gating(dev_priv);
}
@@ -1147,7 +1159,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
+ intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(to_gt(dev_priv));
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 24eee0c2055f..f025ee4fa526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,6 +38,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
+static bool dying_vma(struct i915_vma *vma)
+{
+ return !kref_read(&vma->obj->base.refcount);
+}
+
static int ggtt_flush(struct intel_gt *gt)
{
/*
@@ -50,8 +55,37 @@ static int ggtt_flush(struct intel_gt *gt)
return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
+static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
+{
+ /*
+ * We add the extra refcount so the object doesn't drop to zero until
+ * after ungrab_vma(); this way, trylock is always paired with unlock.
+ */
+ if (i915_gem_object_get_rcu(vma->obj)) {
+ if (!i915_gem_object_trylock(vma->obj, ww)) {
+ i915_gem_object_put(vma->obj);
+ return false;
+ }
+ } else {
+ /* Dead objects don't need pins */
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ }
+
+ return true;
+}
+
+static void ungrab_vma(struct i915_vma *vma)
+{
+ if (dying_vma(vma))
+ return;
+
+ i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
+}
+
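A sketch of the intended pairing (illustrative, not from the patch): every successful grab_vma() is matched by exactly one ungrab_vma() once the eviction decision for that vma is made, whether it was unbound or merely scanned:

static int sketch_evict_one(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
        int err;

        if (!grab_vma(vma, ww))
                return -ENOSPC;         /* object lock contended: skip */

        err = __i915_vma_unbind(vma);   /* caller holds vm->mutex */
        ungrab_vma(vma);                /* drops the trylock + extra ref */
        return err;
}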
static bool
mark_free(struct drm_mm_scan *scan,
+ struct i915_gem_ww_ctx *ww,
struct i915_vma *vma,
unsigned int flags,
struct list_head *unwind)
@@ -59,6 +93,9 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
+ if (!grab_vma(vma, ww))
+ return false;
+
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@@ -77,6 +114,7 @@ static bool defer_evict(struct i915_vma *vma)
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @vm: address space to evict from
+ * @ww: An optional struct i915_gem_ww_ctx.
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @color: color for the desired space
@@ -99,6 +137,7 @@ static bool defer_evict(struct i915_vma *vma)
*/
int
i915_gem_evict_something(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
u64 min_size, u64 alignment,
unsigned long color,
u64 start, u64 end,
@@ -171,7 +210,7 @@ search_again:
continue;
}
- if (mark_free(&scan, vma, flags, &eviction_list))
+ if (mark_free(&scan, ww, vma, flags, &eviction_list))
goto found;
}
@@ -179,6 +218,7 @@ search_again:
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);
+ ungrab_vma(vma);
}
/*
@@ -223,10 +263,12 @@ found:
* of any of our objects, thus corrupting the list).
*/
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
- if (drm_mm_scan_remove_block(&scan, &vma->node))
+ if (drm_mm_scan_remove_block(&scan, &vma->node)) {
__i915_vma_pin(vma);
- else
+ } else {
list_del(&vma->evict_link);
+ ungrab_vma(vma);
+ }
}
/* Unbinding will emit any required flushes */
@@ -235,16 +277,20 @@ found:
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
+ ungrab_vma(vma);
}
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
/* If we find any non-objects (!vma), we cannot evict them */
- if (vma->node.color != I915_COLOR_UNEVICTABLE)
+ if (vma->node.color != I915_COLOR_UNEVICTABLE &&
+ grab_vma(vma, ww)) {
ret = __i915_vma_unbind(vma);
- else
- ret = -ENOSPC; /* XXX search failed, try again? */
+ ungrab_vma(vma);
+ } else {
+ ret = -ENOSPC;
+ }
}
return ret;
@@ -253,6 +299,7 @@ found:
/**
* i915_gem_evict_for_node - Evict vmas to make room for binding a new one
* @vm: address space to evict from
+ * @ww: An optional struct i915_gem_ww_ctx.
* @target: range (and color) to evict for
* @flags: additional flags to control the eviction algorithm
*
@@ -262,6 +309,7 @@ found:
* memory in e.g. the shrinker.
*/
int i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *target,
unsigned int flags)
{
@@ -334,6 +382,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
+ if (!grab_vma(vma, ww)) {
+ ret = -ENOSPC;
+ break;
+ }
+
/*
* Never show fear in the face of dragons!
*
@@ -351,6 +404,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
+
+ ungrab_vma(vma);
}
return ret;
@@ -359,6 +414,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
* @vm: Address space to cleanse
+ * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
+ * will be able to evict vmas locked by the ww as well.
*
* This function evicts all vmas from a vm.
*
@@ -368,7 +425,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
-int i915_gem_evict_vm(struct i915_address_space *vm)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
{
int ret = 0;
@@ -389,24 +446,52 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
do {
struct i915_vma *vma, *vn;
LIST_HEAD(eviction_list);
+ LIST_HEAD(locked_eviction_list);
list_for_each_entry(vma, &vm->bound_list, vm_link) {
if (i915_vma_is_pinned(vma))
continue;
+ /*
+ * If we already own the lock, trylock fails. In case
+ * the resv is shared among multiple objects, we still
+ * need the object ref.
+ */
+ if (dying_vma(vma) ||
+ (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
+ __i915_vma_pin(vma);
+ list_add(&vma->evict_link, &locked_eviction_list);
+ continue;
+ }
+
+ if (!i915_gem_object_trylock(vma->obj, ww))
+ continue;
+
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
- if (list_empty(&eviction_list))
+ if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
break;
ret = 0;
+ /* Unbind locked objects first, before unlocking the eviction_list */
+ list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
+ __i915_vma_unpin(vma);
+
+ if (ret == 0)
+ ret = __i915_vma_unbind(vma);
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+
list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
ret = __i915_vma_unbind(vma);
if (ret != -EINTR) /* "Get me out of here!" */
ret = 0;
+
+ i915_gem_object_unlock(vma->obj);
}
} while (ret == 0);
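The loop above now sorts candidates into two lists, and the split determines unlock responsibility:

/* Sketch of the split in i915_gem_evict_vm():
 *  - locked_eviction_list: vmas that are dying, or whose reservation is
 *    already held through @ww; unbind these first and never unlock them
 *    here, since this path does not own the lock.
 *  - eviction_list: vmas this function trylocked itself; unbind, then
 *    unlock each one.
 */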
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h
index d4478b6ad11b..e593c530f9bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.h
+++ b/drivers/gpu/drm/i915/i915_gem_evict.h
@@ -10,15 +10,19 @@
struct drm_mm_node;
struct i915_address_space;
+struct i915_gem_ww_ctx;
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
u64 min_size, u64 alignment,
unsigned long color,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
unsigned int flags);
-int i915_gem_evict_vm(struct i915_address_space *vm);
+int i915_gem_evict_vm(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww);
#endif /* __I915_GEM_EVICT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8a7f0d92b56f..329ff75b80b9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
/* XXX This does not prevent more requests being submitted! */
if (unlikely(ggtt->do_idle_maps))
@@ -71,6 +71,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/**
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
+ * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
@@ -94,6 +95,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
* asked to wait for eviction and interrupted.
*/
int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color,
unsigned int flags)
@@ -104,7 +106,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -118,7 +120,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
if (flags & PIN_NOEVICT)
return -ENOSPC;
- err = i915_gem_evict_for_node(vm, node, flags);
+ err = i915_gem_evict_for_node(vm, ww, node, flags);
if (err == 0)
err = drm_mm_reserve_node(&vm->mm, node);
@@ -153,6 +155,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
/**
* i915_gem_gtt_insert - insert a node into an address_space (GTT)
* @vm: the &struct i915_address_space
+ * @ww: An optional struct i915_gem_ww_ctx.
* @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
@@ -185,6 +188,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
* asked to wait for eviction and interrupted.
*/
int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags)
@@ -202,7 +206,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -270,7 +274,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
*/
offset = random_offset(start, end,
size, alignment ?: I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
+ err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
if (err != -ENOSPC)
return err;
@@ -278,7 +282,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
return -ENOSPC;
/* Randomly selected placement is pinned, do a search */
- err = i915_gem_evict_something(vm, size, alignment, color,
+ err = i915_gem_evict_something(vm, ww, size, alignment, color,
start, end, flags);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c9b0ee5e1d23..8c2f57eb5dda 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -16,6 +16,7 @@
struct drm_i915_gem_object;
struct i915_address_space;
+struct i915_gem_ww_ctx;
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -23,11 +24,13 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color,
unsigned int flags);
int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags);
@@ -41,6 +44,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_HIGH BIT_ULL(5)
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
+#define PIN_VALIDATE BIT_ULL(8) /* validate placement only, no need to call unpin() */
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index dbe49fd87283..c12a0adefda5 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -33,7 +33,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
- value = i915->ggtt.num_fences;
+ value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
value = !!i915->overlay;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 102c7bb747f8..1d042551619e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -34,6 +34,7 @@
#include <linux/utsname.h>
#include <linux/zlib.h>
+#include <drm/drm_cache.h>
#include <drm/drm_print.h>
#include "display/intel_dmc.h"
@@ -46,11 +47,11 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
-#include "i915_vma_snapshot.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
@@ -1015,8 +1016,10 @@ void __i915_gpu_coredump_free(struct kref *error_ref)
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
- const struct i915_vma_snapshot *vsnap,
- struct i915_vma_compress *compress)
+ const struct i915_vma_resource *vma_res,
+ struct i915_vma_compress *compress,
+ const char *name)
+
{
struct i915_ggtt *ggtt = gt->ggtt;
const u64 slot = ggtt->error_capture.start;
@@ -1026,7 +1029,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
might_sleep();
- if (!vsnap || !vsnap->pages || !compress)
+ if (!vma_res || !vma_res->bi.pages || !compress)
return NULL;
dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
@@ -1039,12 +1042,12 @@ i915_vma_coredump_create(const struct intel_gt *gt,
}
INIT_LIST_HEAD(&dst->page_list);
- strcpy(dst->name, vsnap->name);
+ strcpy(dst->name, name);
dst->next = NULL;
- dst->gtt_offset = vsnap->gtt_offset;
- dst->gtt_size = vsnap->gtt_size;
- dst->gtt_page_sizes = vsnap->page_sizes;
+ dst->gtt_offset = vma_res->start;
+ dst->gtt_size = vma_res->node_size;
+ dst->gtt_page_sizes = vma_res->page_sizes_gtt;
dst->unused = 0;
ret = -EINVAL;
@@ -1052,7 +1055,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
void __iomem *s;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vsnap->pages) {
+ for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
mutex_lock(&ggtt->error_mutex);
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
I915_CACHE_NONE, 0);
@@ -1070,11 +1073,11 @@ i915_vma_coredump_create(const struct intel_gt *gt,
if (ret)
break;
}
- } else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) {
- struct intel_memory_region *mem = vsnap->mr;
+ } else if (vma_res->bi.lmem) {
+ struct intel_memory_region *mem = vma_res->mr;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vsnap->pages) {
+ for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
void __iomem *s;
s = io_mapping_map_wc(&mem->iomap,
@@ -1090,7 +1093,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
} else {
struct page *page;
- for_each_sgt_page(page, iter, vsnap->pages) {
+ for_each_sgt_page(page, iter, vma_res->bi.pages) {
void *s;
drm_clflush_pages(&page, 1);
@@ -1326,33 +1329,32 @@ static bool record_context(struct i915_gem_context_coredump *e,
struct intel_engine_capture_vma {
struct intel_engine_capture_vma *next;
- struct i915_vma_snapshot *vsnap;
+ struct i915_vma_resource *vma_res;
char name[16];
bool lockdep_cookie;
};
static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
- struct i915_vma_snapshot *vsnap,
- gfp_t gfp)
+ struct i915_vma_resource *vma_res,
+ gfp_t gfp, const char *name)
{
struct intel_engine_capture_vma *c;
- if (!i915_vma_snapshot_present(vsnap))
+ if (!vma_res)
return next;
c = kmalloc(sizeof(*c), gfp);
if (!c)
return next;
- if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) {
+ if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
kfree(c);
return next;
}
- strcpy(c->name, vsnap->name);
- c->vsnap = vsnap;
- i915_vma_snapshot_get(vsnap);
+ strcpy(c->name, name);
+ c->vma_res = i915_vma_resource_get(vma_res);
c->next = next;
return c;
@@ -1364,8 +1366,6 @@ capture_vma(struct intel_engine_capture_vma *next,
const char *name,
gfp_t gfp)
{
- struct i915_vma_snapshot *vsnap;
-
if (!vma)
return next;
@@ -1374,19 +1374,10 @@ capture_vma(struct intel_engine_capture_vma *next,
* to a struct i915_vma_snapshot at command submission time.
* Not here.
*/
- GEM_WARN_ON(!i915_vma_is_pinned(vma));
- if (!i915_vma_is_pinned(vma))
- return next;
-
- vsnap = i915_vma_snapshot_alloc(gfp);
- if (!vsnap)
+ if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
return next;
- i915_vma_snapshot_init(vsnap, vma, name);
- next = capture_vma_snapshot(next, vsnap, gfp);
-
- /* FIXME: Replace on async unbind. */
- i915_vma_snapshot_put(vsnap);
+ next = capture_vma_snapshot(next, vma->resource, gfp, name);
return next;
}
@@ -1399,7 +1390,8 @@ capture_user(struct intel_engine_capture_vma *capture,
struct i915_capture_list *c;
for (c = rq->capture_list; c; c = c->next)
- capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp);
+ capture = capture_vma_snapshot(capture, c->vma_res, gfp,
+ "user");
return capture;
}
@@ -1417,16 +1409,19 @@ static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
const char *name, struct i915_vma_compress *compress)
{
- struct i915_vma_coredump *ret;
- struct i915_vma_snapshot tmp;
+ struct i915_vma_coredump *ret = NULL;
+ struct i915_vma_resource *vma_res;
+ bool lockdep_cookie;
if (!vma)
return NULL;
- GEM_WARN_ON(!i915_vma_is_pinned(vma));
- i915_vma_snapshot_init_onstack(&tmp, vma, name);
- ret = i915_vma_coredump_create(gt, &tmp, compress);
- i915_vma_snapshot_put_onstack(&tmp);
+ vma_res = vma->resource;
+
+ if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
+ ret = i915_vma_coredump_create(gt, vma_res, compress, name);
+ i915_vma_resource_unhold(vma_res, lockdep_cookie);
+ }
return ret;
}
@@ -1473,7 +1468,7 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
* as the simplest method to avoid being overwritten
* by userspace.
*/
- vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp);
+ vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
vma = capture_user(vma, rq, gfp);
vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
vma = capture_vma(vma, rq->context->state, "HW context", gfp);
@@ -1494,14 +1489,14 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
while (capture) {
struct intel_engine_capture_vma *this = capture;
- struct i915_vma_snapshot *vsnap = this->vsnap;
+ struct i915_vma_resource *vma_res = this->vma_res;
add_vma(ee,
- i915_vma_coredump_create(engine->gt,
- vsnap, compress));
+ i915_vma_coredump_create(engine->gt, vma_res,
+ compress, this->name));
- i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie);
- i915_vma_snapshot_put(vsnap);
+ i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
+ i915_vma_resource_put(vma_res);
capture = this->next;
kfree(this);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5aedf5129814..903d838e2e63 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -210,6 +210,17 @@ struct drm_i915_error_state_buf {
int err;
};
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_count);
+}
+
+static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
+ const struct intel_engine_cs *engine)
+{
+ return atomic_read(&error->reset_engine_count[engine->uabi_class]);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c05eb09d8a66..73cebc6aa650 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,6 +34,7 @@
#include <drm/drm_drv.h>
+#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
@@ -49,6 +50,7 @@
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"
@@ -178,6 +180,7 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
+ [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
@@ -4347,6 +4350,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
return ret;
}
+struct intel_hotplug_funcs {
+ void (*hpd_irq_setup)(struct drm_i915_private *i915);
+};
+
#define HPD_FUNCS(platform) \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
.hpd_irq_setup = platform##_hpd_irq_setup, \
@@ -4361,6 +4368,12 @@ HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS
+void intel_hpd_irq_setup(struct drm_i915_private *i915)
+{
+ if (i915->display_irqs_enabled && i915->hotplug_funcs)
+ i915->hotplug_funcs->hpd_irq_setup(i915);
+}
+
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
@@ -4413,7 +4426,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->hotplug_funcs = &i915_hpd_funcs;
} else {
- if (HAS_PCH_DG1(dev_priv))
+ if (HAS_PCH_DG2(dev_priv))
+ dev_priv->hotplug_funcs = &icp_hpd_funcs;
+ else if (HAS_PCH_DG1(dev_priv))
dev_priv->hotplug_funcs = &dg1_hpd_funcs;
else if (DISPLAY_VER(dev_priv) >= 11)
dev_priv->hotplug_funcs = &gen11_hpd_funcs;
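
The change above makes the hotplug vtable definition private to i915_irq.c and funnels callers through intel_hpd_irq_setup(), which quietly does nothing on platforms that registered no ops. The HPD_FUNCS() macro stamps out one const ops struct per platform from a naming convention. A self-contained sketch of the macro-generated vtable plus the NULL-guarded dispatcher (simplified: the kernel's guard also checks display_irqs_enabled):

#include <stdio.h>

struct device;

struct hotplug_funcs {
        void (*hpd_irq_setup)(struct device *dev);
};

static void icp_hpd_irq_setup(struct device *dev)   { (void)dev; puts("icp hpd setup"); }
static void gen11_hpd_irq_setup(struct device *dev) { (void)dev; puts("gen11 hpd setup"); }

/* One macro invocation stamps out a const ops struct per platform. */
#define HPD_FUNCS(platform) \
        static const struct hotplug_funcs platform##_hpd_funcs = { \
                .hpd_irq_setup = platform##_hpd_irq_setup, \
        }

HPD_FUNCS(icp);
HPD_FUNCS(gen11);
#undef HPD_FUNCS

struct device {
        const struct hotplug_funcs *hotplug_funcs;
};

/* Exported wrapper: hides the vtable and tolerates platforms without HPD. */
static void hpd_irq_setup(struct device *dev)
{
        if (dev->hotplug_funcs)
                dev->hotplug_funcs->hpd_irq_setup(dev);
}

int main(void)
{
        struct device icp = { .hotplug_funcs = &icp_hpd_funcs };
        struct device gen11 = { .hotplug_funcs = &gen11_hpd_funcs };
        struct device legacy = { 0 };           /* no hotplug on this platform */

        hpd_irq_setup(&icp);
        hpd_irq_setup(&gen11);
        hpd_irq_setup(&legacy);                 /* silently skipped */
        return 0;
}
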
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 0eb90d271fa7..82639d9d7e82 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -37,6 +37,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void intel_hpd_irq_setup(struct drm_i915_private *i915);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
u32 mask,
u32 bits);
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
index 84f12598d145..def7302ef7fe 100644
--- a/drivers/gpu/drm/i915/i915_mitigations.c
+++ b/drivers/gpu/drm/i915/i915_mitigations.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/string.h>
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 8451822637f0..65acd7bf75d0 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_active.h"
+#include "i915_driver.h"
#include "i915_params.h"
#include "i915_pci.h"
#include "i915_perf.h"
@@ -16,6 +17,7 @@
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_vma.h"
+#include "i915_vma_resource.h"
static int i915_check_nomodeset(void)
{
@@ -61,6 +63,8 @@ static const struct {
.exit = i915_scheduler_module_exit },
{ .init = i915_vma_module_init,
.exit = i915_vma_module_exit },
+ { .init = i915_vma_resource_module_init,
+ .exit = i915_vma_resource_module_exit },
{ .init = i915_mock_selftests },
{ .init = i915_pmu_init,
.exit = i915_pmu_exit },
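
i915_module.c keeps its subcomponents in a table of paired init/exit hooks, so adding i915_vma_resource support is a one-row change. The supporting idiom: run the table in order, stop at the first failure, and unwind only what was initialised, in reverse. A runnable sketch with invented subsystem names:

#include <stdio.h>

static int  subsys_a_init(void) { puts("A init"); return 0; }
static void subsys_a_exit(void) { puts("A exit"); }
static int  subsys_b_init(void) { puts("B init failed"); return -1; }
static void subsys_b_exit(void) { puts("B exit"); }

/* Paired constructors/destructors; .exit may be NULL for one-shot inits. */
static const struct {
        int  (*init)(void);
        void (*exit)(void);
} init_funcs[] = {
        { subsys_a_init, subsys_a_exit },
        { subsys_b_init, subsys_b_exit },       /* simulated failure */
};

int main(void)
{
        const int n = sizeof(init_funcs) / sizeof(init_funcs[0]);
        int i, err = 0;

        for (i = 0; i < n; i++) {
                err = init_funcs[i].init();
                if (err)
                        break;
        }
        if (err) {
                /* Unwind only what was initialised, in reverse order. */
                while (i--)
                        if (init_funcs[i].exit)
                                init_funcs[i].exit();
        }
        return err ? 1 : 0;
}
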
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 189d3bb8955a..8246cbe9b01d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -22,6 +22,7 @@
*
*/
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
@@ -1046,6 +1047,7 @@ static const struct intel_device_info dg2_info = {
.graphics.rel = 55,
.media.rel = 55,
PLATFORM(INTEL_DG2),
+ .has_guc_deprivilege = 1,
.has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
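
Platform capabilities such as the new has_guc_deprivilege bit live as one-bit flags in a const per-platform device-info struct, typically read through a HAS_*() accessor macro; the accessor name below is an assumption, since the hunk shows only the flag. Sketch:

#include <stdio.h>

/* One-bit capability flags in a const per-platform info struct. */
struct device_info {
        unsigned int has_guc_deprivilege:1;
        unsigned int has_64k_pages:1;
};

static const struct device_info dg2_info = {
        .has_guc_deprivilege = 1,
        .has_64k_pages = 1,
};

/* Accessor macro; the i915 equivalent's exact name is assumed here. */
#define HAS_GUC_DEPRIVILEGE(info) ((info)->has_guc_deprivilege)

int main(void)
{
        printf("guc deprivilege: %u\n",
               (unsigned int)HAS_GUC_DEPRIVILEGE(&dg2_info));
        return 0;
}
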
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 590efdb9de8d..00fb40029f43 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -196,6 +196,7 @@
#include <linux/uuid.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
@@ -205,9 +206,11 @@
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
+#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
@@ -1633,8 +1636,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
- atomic64_read(&stream->perf->noa_programming_delay));
+ intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
+ atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -2117,7 +2120,7 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- i915_reg_t flex_regs[] = {
+ static const i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
EU_PERF_CNTL1,
EU_PERF_CNTL2,
@@ -3545,7 +3548,7 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+ return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
2ULL << exponent);
}
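
Qualifying the flex_regs lookup table static const, as the hunk above does, lets it live in .rodata once instead of being rebuilt on the stack each call; it also documents that the function never mutates it. An illustration (register offsets are only examples):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t reg; } reg_t;
#define REG(x) { .reg = (x) }

static uint32_t sum_offsets(void)
{
        /*
         * 'static const' places the table in .rodata once; without it the
         * initialiser may be rebuilt on the stack on every call.
         */
        static const reg_t flex_regs[] = {
                REG(0xe458), REG(0xe558), REG(0xe658),
        };
        uint32_t sum = 0;

        for (size_t i = 0; i < sizeof(flex_regs) / sizeof(flex_regs[0]); i++)
                sum += flex_regs[i].reg;
        return sum;
}

int main(void)
{
        printf("%#x\n", sum_offsets());
        return 0;
}
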
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 87c92314ee26..2b8a3086ed35 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -115,9 +115,6 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
-#define VLV_MIPI_BASE VLV_DISPLAY_BASE
-#define BXT_MIPI_BASE 0x60000
-
#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display_mmio_offset)
/*
@@ -1830,117 +1827,8 @@
_PALETTE_B, _CHV_PALETTE_C) + \
(i) * 4)
-/* MCH MMIO space */
-
-/*
- * MCHBAR mirror.
- *
- * This mirrors the MCHBAR MMIO space whose location is determined by
- * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
- * every way. It is not accessible from the CP register read instructions.
- *
- * Starting from Haswell, you can't write registers using the MCHBAR mirror,
- * just read.
- */
-#define MCHBAR_MIRROR_BASE 0x10000
-
-#define MCHBAR_MIRROR_BASE_SNB 0x140000
-
-#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34)
-#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
-#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
-#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
-#define G4X_STOLEN_RESERVED_ENABLE (1 << 0)
-
-/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
-#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
-
-/* 915-945 and GM965 MCH register controlling DRAM channel access */
-#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200)
-#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
-#define DCC_ADDRESSING_MODE_MASK (3 << 0)
-#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
-#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
-#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204)
-#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
-
-/* Pineview MCH register contains DDR3 setting */
-#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8)
-#define CSHRDDR3CTL_DDR3 (1 << 2)
-
-/* 965 MCH register controlling DRAM channel configuration */
-#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206)
-#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606)
-
-/* snb MCH registers for reading the DRAM channel configuration */
-#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
-#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008)
-#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
-#define MAD_DIMM_ECC_MASK (0x3 << 24)
-#define MAD_DIMM_ECC_OFF (0x0 << 24)
-#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
-#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
-#define MAD_DIMM_ECC_ON (0x3 << 24)
-#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
-#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
-#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
-#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
-#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
-#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
-#define MAD_DIMM_A_SELECT (0x1 << 16)
-/* DIMM sizes are in multiples of 256mb. */
-#define MAD_DIMM_B_SIZE_SHIFT 8
-#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
-#define MAD_DIMM_A_SIZE_SHIFT 0
-#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
-
-/* snb MCH registers for priority tuning */
-#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
-#define MCH_SSKPD_WM0_MASK 0x3f
-#define MCH_SSKPD_WM0_VAL 0xc
-
-/* Clocking configuration register */
-#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
-#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
-#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */
-#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
-#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
-#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
-#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
-#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
-#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
-#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
-#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */
-#define CLKCFG_FSB_MASK (7 << 0)
-#define CLKCFG_MEM_533 (1 << 4)
-#define CLKCFG_MEM_667 (2 << 4)
-#define CLKCFG_MEM_800 (3 << 4)
-#define CLKCFG_MEM_MASK (7 << 4)
-
-#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38)
-#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
-
-#define TSC1 _MMIO(0x11001)
-#define TSE (1 << 0)
-#define TR1 _MMIO(0x11006)
-#define TSFS _MMIO(0x11020)
-#define TSFS_SLOPE_MASK 0x0000ff00
-#define TSFS_SLOPE_SHIFT 8
-#define TSFS_INTR_MASK 0x000000ff
-
-#define CSIPLL0 _MMIO(0x12c10)
-#define DDRMPLL1 _MMIO(0X12c20)
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
-#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
-#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
-#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
-#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
-#define RP0_CAP_MASK REG_GENMASK(7, 0)
-#define RP1_CAP_MASK REG_GENMASK(15, 8)
-#define RPN_CAP_MASK REG_GENMASK(23, 16)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
@@ -4291,49 +4179,32 @@
#define _WM0_PIPEC_IVB 0x45200
#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \
_WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
-#define WM0_PIPE_PLANE_MASK (0xffff << 16)
-#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0xff << 8)
-#define WM0_PIPE_SPRITE_SHIFT 8
-#define WM0_PIPE_CURSOR_MASK (0xff)
+#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
+#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
+#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
+#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
+#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
#define WM1_LP_ILK _MMIO(0x45108)
-#define WM1_LP_SR_EN (1 << 31)
-#define WM1_LP_LATENCY_SHIFT 24
-#define WM1_LP_LATENCY_MASK (0x7f << 24)
-#define WM1_LP_FBC_MASK (0xf << 20)
-#define WM1_LP_FBC_SHIFT 20
-#define WM1_LP_FBC_SHIFT_BDW 19
-#define WM1_LP_SR_MASK (0x7ff << 8)
-#define WM1_LP_SR_SHIFT 8
-#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM2_LP_EN (1 << 31)
#define WM3_LP_ILK _MMIO(0x45110)
-#define WM3_LP_EN (1 << 31)
+#define WM_LP_ENABLE REG_BIT(31)
+#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
+#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
+#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
+#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
+#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
+#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
+#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
+#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
+#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
#define WM1S_LP_ILK _MMIO(0x45120)
#define WM2S_LP_IVB _MMIO(0x45124)
#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM1S_LP_EN (1 << 31)
-
-#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
- (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
- ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
-
-/* Memory latency timer register */
-#define MLTR_ILK _MMIO(0x11222)
-#define MLTR_WM1_SHIFT 0
-#define MLTR_WM2_SHIFT 8
-/* the unit of memory self-refresh latency time is 0.5us */
-#define ILK_SRLT_MASK 0x3f
-
-
-/* the address where we get all kinds of latency value */
-#define SSKPD _MMIO(0x5d10)
-#define SSKPD_WM_MASK 0x3f
-#define SSKPD_WM0_SHIFT 0
-#define SSKPD_WM1_SHIFT 8
-#define SSKPD_WM2_SHIFT 16
-#define SSKPD_WM3_SHIFT 24
+#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
+#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
+#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
/*
* The two pipe frame counter registers are not synchronized, so
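
This watermark hunk replaces hand-rolled shift/mask pairs with REG_GENMASK()/REG_FIELD_PREP(), so each field's layout is declared exactly once and packing and extraction cannot drift apart. A userspace approximation of those helpers; GENMASK32/FIELD_PREP32/FIELD_GET32 are local stand-ins and rely on the gcc/clang __builtin_ctz():

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) \
        (((~UINT32_C(0)) >> (31 - (h))) & ((~UINT32_C(0)) << (l)))
#define FIELD_PREP32(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET32(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))

#define WM0_PIPE_PRIMARY_MASK   GENMASK32(31, 16)
#define WM0_PIPE_SPRITE_MASK    GENMASK32(15, 8)
#define WM0_PIPE_CURSOR_MASK    GENMASK32(7, 0)

int main(void)
{
        uint32_t val = FIELD_PREP32(WM0_PIPE_PRIMARY_MASK, 0x40) |
                       FIELD_PREP32(WM0_PIPE_SPRITE_MASK, 0x20) |
                       FIELD_PREP32(WM0_PIPE_CURSOR_MASK, 0x10);

        printf("reg=%#010x primary=%#x\n",
               val, FIELD_GET32(WM0_PIPE_PRIMARY_MASK, val));
        return 0;
}

The kernel versions additionally perform compile-time validation of the mask and value; the sketch keeps only the arithmetic.
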
@@ -5632,7 +5503,8 @@
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
-#define DMC_DEBUG3 _MMIO(0x101090)
+#define TGL_DMC_DEBUG3 _MMIO(0x101090)
+#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
@@ -6040,11 +5912,14 @@
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
-#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
-#define ICL_DELAY_PMRSP REG_BIT(22)
-#define DISABLE_FLR_SRC REG_BIT(15)
-#define MASK_WAKEMEM REG_BIT(13)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
+#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
+#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
+#define LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define ICL_DELAY_PMRSP REG_BIT(22)
+#define DISABLE_FLR_SRC REG_BIT(15)
+#define MASK_WAKEMEM REG_BIT(13)
#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
@@ -6181,6 +6056,7 @@
/* south display engine interrupt: ICP/TGP */
#define SDE_GMBUS_ICP (1 << 23)
#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
+#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
@@ -7986,6 +7862,12 @@ enum skl_power_gate {
#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
_TGL_DPLL1_CFGCR0)
+#define _TGL_DPLL0_DIV0 0x164B00
+#define _TGL_DPLL1_DIV0 0x164C00
+#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
+#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
+
#define _TGL_DPLL0_CFGCR1 0x164288
#define _TGL_DPLL1_CFGCR1 0x164290
#define _TGL_TBTPLL_CFGCR1 0x1642A0
@@ -8032,7 +7914,15 @@ enum skl_power_gate {
#define _DKL_PHY6_BASE 0x16D000
/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
+#define _DKL_PCS_DW5 0x14
+#define DKL_PCS_DW5(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PCS_DW5)
+#define DKL_PCS_DW5_CORE_SOFTRESET REG_BIT(11)
+
#define _DKL_PLL_DIV0 0x200
+#define DKL_PLL_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define DKL_PLL_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(DKL_PLL_DIV0_AFC_STARTUP_MASK, (val))
#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
@@ -8042,6 +7932,10 @@ enum skl_power_gate {
#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
+#define DKL_PLL_DIV0_MASK (DKL_PLL_DIV0_INTEG_COEFF_MASK | \
+ DKL_PLL_DIV0_PROP_COEFF_MASK | \
+ DKL_PLL_DIV0_FBPREDIV_MASK | \
+ DKL_PLL_DIV0_FBDIV_INT_MASK)
#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
_DKL_PHY2_BASE) + \
_DKL_PLL_DIV0)
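
DKL_PLL_DIV0_MASK ors the individual coefficient masks into one combined mask, so a single read-modify-write can wipe and reprogram every divider field at once instead of clearing them piecemeal. A sketch of the idiom (field layout here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define DIV0_INTEG_COEFF_MASK   (0x1fu << 16)
#define DIV0_PROP_COEFF_MASK    (0xfu << 12)
#define DIV0_FBPREDIV_MASK      (0xfu << 8)
#define DIV0_FBDIV_INT_MASK     (0xffu << 0)
#define DIV0_MASK               (DIV0_INTEG_COEFF_MASK | DIV0_PROP_COEFF_MASK | \
                                 DIV0_FBPREDIV_MASK | DIV0_FBDIV_INT_MASK)

static uint32_t rmw(uint32_t old, uint32_t clear, uint32_t set)
{
        return (old & ~clear) | set;
}

int main(void)
{
        uint32_t reg = 0xdeadbeef;      /* stale hardware value */

        /* Clear all divider fields, then program new values in one go. */
        reg = rmw(reg, DIV0_MASK, (5u << 16) | (3u << 12) | (2u << 8) | 0x7du);
        printf("%#010x\n", reg);
        return 0;
}
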
@@ -8215,93 +8109,7 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-#define BXT_D_CR_DRP0_DUNIT8 0x1000
-#define BXT_D_CR_DRP0_DUNIT9 0x1200
-#define BXT_D_CR_DRP0_DUNIT_START 8
-#define BXT_D_CR_DRP0_DUNIT_END 11
-#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \
- _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\
- BXT_D_CR_DRP0_DUNIT9))
-#define BXT_DRAM_RANK_MASK 0x3
-#define BXT_DRAM_RANK_SINGLE 0x1
-#define BXT_DRAM_RANK_DUAL 0x3
-#define BXT_DRAM_WIDTH_MASK (0x3 << 4)
-#define BXT_DRAM_WIDTH_SHIFT 4
-#define BXT_DRAM_WIDTH_X8 (0x0 << 4)
-#define BXT_DRAM_WIDTH_X16 (0x1 << 4)
-#define BXT_DRAM_WIDTH_X32 (0x2 << 4)
-#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
-#define BXT_DRAM_SIZE_MASK (0x7 << 6)
-#define BXT_DRAM_SIZE_SHIFT 6
-#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
-#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
-#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
-#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
-#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
-#define BXT_DRAM_TYPE_MASK (0x7 << 22)
-#define BXT_DRAM_TYPE_SHIFT 22
-#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
-#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
-#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
-#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
-
-#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
-#define DG1_GEAR_TYPE REG_BIT(16)
-
-#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
-#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
-#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
-#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
-#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
-#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
-
-#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
-#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
-#define SKL_DRAM_S_SHIFT 16
-#define SKL_DRAM_SIZE_MASK 0x3F
-#define SKL_DRAM_WIDTH_MASK (0x3 << 8)
-#define SKL_DRAM_WIDTH_SHIFT 8
-#define SKL_DRAM_WIDTH_X8 (0x0 << 8)
-#define SKL_DRAM_WIDTH_X16 (0x1 << 8)
-#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
-#define SKL_DRAM_RANK_MASK (0x1 << 10)
-#define SKL_DRAM_RANK_SHIFT 10
-#define SKL_DRAM_RANK_1 (0x0 << 10)
-#define SKL_DRAM_RANK_2 (0x1 << 10)
-#define SKL_DRAM_RANK_MASK (0x1 << 10)
-#define ICL_DRAM_SIZE_MASK 0x7F
-#define ICL_DRAM_WIDTH_MASK (0x3 << 7)
-#define ICL_DRAM_WIDTH_SHIFT 7
-#define ICL_DRAM_WIDTH_X8 (0x0 << 7)
-#define ICL_DRAM_WIDTH_X16 (0x1 << 7)
-#define ICL_DRAM_WIDTH_X32 (0x2 << 7)
-#define ICL_DRAM_RANK_MASK (0x3 << 9)
-#define ICL_DRAM_RANK_SHIFT 9
-#define ICL_DRAM_RANK_1 (0x0 << 9)
-#define ICL_DRAM_RANK_2 (0x1 << 9)
-#define ICL_DRAM_RANK_3 (0x2 << 9)
-#define ICL_DRAM_RANK_4 (0x3 << 9)
-
-#define SA_PERF_STATUS_0_0_0_MCHBAR_PC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918)
-#define DG1_QCLK_RATIO_MASK REG_GENMASK(9, 2)
-#define DG1_QCLK_REFERENCE REG_BIT(10)
-
-#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
-#define DG1_DRAM_T_RDPRE_MASK REG_GENMASK(16, 11)
-#define DG1_DRAM_T_RP_MASK REG_GENMASK(6, 0)
-#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004)
-#define DG1_DRAM_T_RCD_MASK REG_GENMASK(15, 9)
-#define DG1_DRAM_T_RAS_MASK REG_GENMASK(8, 1)
-
-/*
- * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
- * since on HSW we can't write to it using intel_uncore_write.
- */
-#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
-#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
-#define D_COMP_COMP_FORCE (1 << 8)
-#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
#define _WM_LINETIME_A 0x45270
@@ -8591,93 +8399,6 @@ enum skl_power_gate {
#define CGM_PIPE_GAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_GAMMA, _CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4)
#define CGM_PIPE_MODE(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_MODE, _CGM_PIPE_B_MODE)
-/* MIPI DSI registers */
-
-#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
-#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
-
-/* Gen11 DSI */
-#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
- dsi0, dsi1)
-
-#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
-#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
-#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
-#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
-
-#define _ICL_DSI_ESC_CLK_DIV0 0x6b090
-#define _ICL_DSI_ESC_CLK_DIV1 0x6b890
-#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \
- _ICL_DSI_ESC_CLK_DIV0, \
- _ICL_DSI_ESC_CLK_DIV1)
-#define _ICL_DPHY_ESC_CLK_DIV0 0x162190
-#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190
-#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \
- _ICL_DPHY_ESC_CLK_DIV0, \
- _ICL_DPHY_ESC_CLK_DIV1)
-#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16)
-#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16
-#define ICL_ESC_CLK_DIV_MASK 0x1ff
-#define ICL_ESC_CLK_DIV_SHIFT 0
-#define DSI_MAX_ESC_CLK 20000 /* in KHz */
-
-#define _ADL_MIPIO_REG 0x180
-#define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw))
-#define TX_ESC_CLK_DIV_PHY_SEL REGBIT(16)
-#define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16)
-#define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f)
-
-#define _DSI_CMD_FRMCTL_0 0x6b034
-#define _DSI_CMD_FRMCTL_1 0x6b834
-#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
- _DSI_CMD_FRMCTL_0,\
- _DSI_CMD_FRMCTL_1)
-#define DSI_FRAME_UPDATE_REQUEST (1 << 31)
-#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29)
-#define DSI_NULL_PACKET_ENABLE (1 << 28)
-#define DSI_FRAME_IN_PROGRESS (1 << 0)
-
-#define _DSI_INTR_MASK_REG_0 0x6b070
-#define _DSI_INTR_MASK_REG_1 0x6b870
-#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \
- _DSI_INTR_MASK_REG_0,\
- _DSI_INTR_MASK_REG_1)
-
-#define _DSI_INTR_IDENT_REG_0 0x6b074
-#define _DSI_INTR_IDENT_REG_1 0x6b874
-#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \
- _DSI_INTR_IDENT_REG_0,\
- _DSI_INTR_IDENT_REG_1)
-#define DSI_TE_EVENT (1 << 31)
-#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30)
-#define DSI_TX_DATA (1 << 29)
-#define DSI_ULPS_ENTRY_DONE (1 << 28)
-#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27)
-#define DSI_HOST_CHKSUM_ERROR (1 << 26)
-#define DSI_HOST_MULTI_ECC_ERROR (1 << 25)
-#define DSI_HOST_SINGL_ECC_ERROR (1 << 24)
-#define DSI_HOST_CONTENTION_DETECTED (1 << 23)
-#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22)
-#define DSI_HOST_TIMEOUT_ERROR (1 << 21)
-#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20)
-#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19)
-#define DSI_FRAME_UPDATE_DONE (1 << 16)
-#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15)
-#define DSI_INVALID_TX_LENGTH (1 << 13)
-#define DSI_INVALID_VC (1 << 12)
-#define DSI_INVALID_DATA_TYPE (1 << 11)
-#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10)
-#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9)
-#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8)
-#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7)
-#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6)
-#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5)
-#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4)
-#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3)
-#define DSI_EOT_SYNC_ERROR (1 << 2)
-#define DSI_SOT_SYNC_ERROR (1 << 1)
-#define DSI_SOT_ERROR (1 << 0)
-
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
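
The MIPI DSI block removed above was built on _MIPI_PORT()/_MMIO_MIPI(), a two-instance selector that resolves a register to instance A or C by port; judging by the icl_dsi_regs.h include added to i915_irq.c earlier in this diff, these definitions presumably moved to dedicated DSI headers rather than disappearing. The selector pattern itself is easy to reproduce:

#include <stdint.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C };

/* Pick register instance A or C by port, as _MIPI_PORT() did. */
#define MIPI_PORT(port, a, c)   (((port) == PORT_A) ? (a) : (c))

#define MIPIA_DEVICE_READY      0xb000u
#define MIPIC_DEVICE_READY      0xb800u
#define MIPI_DEVICE_READY(port) \
        MIPI_PORT(port, MIPIA_DEVICE_READY, MIPIC_DEVICE_READY)

int main(void)
{
        printf("A: %#x  C: %#x\n",
               (unsigned int)MIPI_DEVICE_READY(PORT_A),
               (unsigned int)MIPI_DEVICE_READY(PORT_C));
        return 0;
}
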
@@ -8693,143 +8414,6 @@ enum skl_power_gate {
#define PIPE_FRMTMSTMP(pipe) \
_MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
-/* BXT MIPI clock controls */
-#define BXT_MAX_VAR_OUTPUT_KHZ 39500
-
-#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090)
-#define BXT_MIPI1_DIV_SHIFT 26
-#define BXT_MIPI2_DIV_SHIFT 10
-#define BXT_MIPI_DIV_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
- BXT_MIPI2_DIV_SHIFT)
-
-/* TX control divider to select actual TX clock output from (8x/var) */
-#define BXT_MIPI1_TX_ESCLK_SHIFT 26
-#define BXT_MIPI2_TX_ESCLK_SHIFT 10
-#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
- BXT_MIPI2_TX_ESCLK_SHIFT)
-#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26)
-#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10)
-#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
- _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
- BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
-#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
- (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
-/* RX upper control divider to select actual RX clock output from 8x */
-#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
-#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
-#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \
- BXT_MIPI2_RX_ESCLK_UPPER_SHIFT)
-#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21)
-#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5)
-#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \
- _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
- BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
-#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
- (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
-/* 8/3X divider to select the actual 8/3X clock output from 8x */
-#define BXT_MIPI1_8X_BY3_SHIFT 19
-#define BXT_MIPI2_8X_BY3_SHIFT 3
-#define BXT_MIPI_8X_BY3_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \
- BXT_MIPI2_8X_BY3_SHIFT)
-#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19)
-#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3)
-#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \
- _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
- BXT_MIPI2_8X_BY3_DIVIDER_MASK)
-#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
- (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
-/* RX lower control divider to select actual RX clock output from 8x */
-#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
-#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
-#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \
- BXT_MIPI2_RX_ESCLK_LOWER_SHIFT)
-#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16)
-#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0)
-#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \
- _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
- BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
-#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
- (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
-
-#define RX_DIVIDER_BIT_1_2 0x3
-#define RX_DIVIDER_BIT_3_4 0xC
-
-/* BXT MIPI mode configure */
-#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
-#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
-#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
-
-#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
-#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
-#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
-
-#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
-#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
-#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
-
-#define BXT_DSI_PLL_CTL _MMIO(0x161000)
-#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
-#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
-#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
-#define BXT_DSIC_16X_BY1 (0 << 10)
-#define BXT_DSIC_16X_BY2 (1 << 10)
-#define BXT_DSIC_16X_BY3 (2 << 10)
-#define BXT_DSIC_16X_BY4 (3 << 10)
-#define BXT_DSIC_16X_MASK (3 << 10)
-#define BXT_DSIA_16X_BY1 (0 << 8)
-#define BXT_DSIA_16X_BY2 (1 << 8)
-#define BXT_DSIA_16X_BY3 (2 << 8)
-#define BXT_DSIA_16X_BY4 (3 << 8)
-#define BXT_DSIA_16X_MASK (3 << 8)
-#define BXT_DSI_FREQ_SEL_SHIFT 8
-#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
-
-#define BXT_DSI_PLL_RATIO_MAX 0x7D
-#define BXT_DSI_PLL_RATIO_MIN 0x22
-#define GLK_DSI_PLL_RATIO_MAX 0x6F
-#define GLK_DSI_PLL_RATIO_MIN 0x22
-#define BXT_DSI_PLL_RATIO_MASK 0xFF
-#define BXT_REF_CLOCK_KHZ 19200
-
-#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
-#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
-#define BXT_DSI_PLL_LOCKED (1 << 30)
-
-#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
-#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
-
- /* BXT port control */
-#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
-#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
-#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
-
-/* ICL DSI MODE control */
-#define _ICL_DSI_IO_MODECTL_0 0x6B094
-#define _ICL_DSI_IO_MODECTL_1 0x6B894
-#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \
- _ICL_DSI_IO_MODECTL_0, \
- _ICL_DSI_IO_MODECTL_1)
-#define COMBO_PHY_MODE_DSI (1 << 0)
-
-/* TGL DSI Chicken register */
-#define _TGL_DSI_CHKN_REG_0 0x6B0C0
-#define _TGL_DSI_CHKN_REG_1 0x6B8C0
-#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
- _TGL_DSI_CHKN_REG_0, \
- _TGL_DSI_CHKN_REG_1)
-#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12)
-#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \
- (byte_clocks))
-
/* Display Stream Splitter Control */
#define DSS_CTL1 _MMIO(0x67400)
#define SPLITTER_ENABLE (1 << 31)
@@ -8868,685 +8452,6 @@ enum skl_power_gate {
_ICL_PIPE_DSS_CTL2_PB, \
_ICL_PIPE_DSS_CTL2_PC)
-#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
-#define STAP_SELECT (1 << 0)
-
-#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
-#define HS_IO_CTRL_SELECT (1 << 0)
-
-#define DPI_ENABLE (1 << 31) /* A + C */
-#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
-#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
-#define DUAL_LINK_MODE_SHIFT 26
-#define DUAL_LINK_MODE_MASK (1 << 26)
-#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
-#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
-#define DITHERING_ENABLE (1 << 25) /* A + C */
-#define FLOPPED_HSTX (1 << 23)
-#define DE_INVERT (1 << 19) /* XXX */
-#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
-#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
-#define AFE_LATCHOUT (1 << 17)
-#define LP_OUTPUT_HOLD (1 << 16)
-#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
-#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
-#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
-#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
-#define CSB_SHIFT 9
-#define CSB_MASK (3 << 9)
-#define CSB_20MHZ (0 << 9)
-#define CSB_10MHZ (1 << 9)
-#define CSB_40MHZ (2 << 9)
-#define BANDGAP_MASK (1 << 8)
-#define BANDGAP_PNW_CIRCUIT (0 << 8)
-#define BANDGAP_LNC_CIRCUIT (1 << 8)
-#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
-#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
-#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
-#define TEARING_EFFECT_SHIFT 2 /* A + C */
-#define TEARING_EFFECT_MASK (3 << 2)
-#define TEARING_EFFECT_OFF (0 << 2)
-#define TEARING_EFFECT_DSI (1 << 2)
-#define TEARING_EFFECT_GPIO (2 << 2)
-#define LANE_CONFIGURATION_SHIFT 0
-#define LANE_CONFIGURATION_MASK (3 << 0)
-#define LANE_CONFIGURATION_4LANE (0 << 0)
-#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
-#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
-
-#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
-#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
-#define TEARING_EFFECT_DELAY_SHIFT 0
-#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
-
-/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
-
-/* MIPI DSI Controller and D-PHY registers */
-
-#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
-#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
-#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
-#define ULPS_STATE_MASK (3 << 1)
-#define ULPS_STATE_ENTER (2 << 1)
-#define ULPS_STATE_EXIT (1 << 1)
-#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
-#define DEVICE_READY (1 << 0)
-
-#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
-#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
-#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
-#define TEARING_EFFECT (1 << 31)
-#define SPL_PKT_SENT_INTERRUPT (1 << 30)
-#define GEN_READ_DATA_AVAIL (1 << 29)
-#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
-#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
-#define RX_PROT_VIOLATION (1 << 26)
-#define RX_INVALID_TX_LENGTH (1 << 25)
-#define ACK_WITH_NO_ERROR (1 << 24)
-#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
-#define LP_RX_TIMEOUT (1 << 22)
-#define HS_TX_TIMEOUT (1 << 21)
-#define DPI_FIFO_UNDERRUN (1 << 20)
-#define LOW_CONTENTION (1 << 19)
-#define HIGH_CONTENTION (1 << 18)
-#define TXDSI_VC_ID_INVALID (1 << 17)
-#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
-#define TXCHECKSUM_ERROR (1 << 15)
-#define TXECC_MULTIBIT_ERROR (1 << 14)
-#define TXECC_SINGLE_BIT_ERROR (1 << 13)
-#define TXFALSE_CONTROL_ERROR (1 << 12)
-#define RXDSI_VC_ID_INVALID (1 << 11)
-#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
-#define RXCHECKSUM_ERROR (1 << 9)
-#define RXECC_MULTIBIT_ERROR (1 << 8)
-#define RXECC_SINGLE_BIT_ERROR (1 << 7)
-#define RXFALSE_CONTROL_ERROR (1 << 6)
-#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
-#define RX_LP_TX_SYNC_ERROR (1 << 4)
-#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
-#define RXEOT_SYNC_ERROR (1 << 2)
-#define RXSOT_SYNC_ERROR (1 << 1)
-#define RXSOT_ERROR (1 << 0)
-
-#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
-#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
-#define CMD_MODE_NOT_SUPPORTED (0 << 13)
-#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
-#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
-#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
-#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
-#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
-#define VID_MODE_FORMAT_MASK (0xf << 7)
-#define VID_MODE_NOT_SUPPORTED (0 << 7)
-#define VID_MODE_FORMAT_RGB565 (1 << 7)
-#define VID_MODE_FORMAT_RGB666_PACKED (2 << 7)
-#define VID_MODE_FORMAT_RGB666 (3 << 7)
-#define VID_MODE_FORMAT_RGB888 (4 << 7)
-#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
-#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
-#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
-#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
-#define DATA_LANES_PRG_REG_SHIFT 0
-#define DATA_LANES_PRG_REG_MASK (7 << 0)
-
-#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
-#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-
-#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
-#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-
-#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
-#define TURN_AROUND_TIMEOUT_MASK 0x3f
-
-#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
-#define DEVICE_RESET_TIMER_MASK 0xffff
-
-#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
-#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
-#define VERTICAL_ADDRESS_SHIFT 16
-#define VERTICAL_ADDRESS_MASK (0xffff << 16)
-#define HORIZONTAL_ADDRESS_SHIFT 0
-#define HORIZONTAL_ADDRESS_MASK 0xffff
-
-#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
-#define DBI_FIFO_EMPTY_HALF (0 << 0)
-#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
-#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
-
-/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-
-#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
-#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-
-#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
-#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-
-#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-
-#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-
-#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
-#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-
-#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
-#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
-
-/* regs above are bits 15:0 */
-
-#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
-#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
-#define DPI_LP_MODE (1 << 6)
-#define BACKLIGHT_OFF (1 << 5)
-#define BACKLIGHT_ON (1 << 4)
-#define COLOR_MODE_OFF (1 << 3)
-#define COLOR_MODE_ON (1 << 2)
-#define TURN_ON (1 << 1)
-#define SHUTDOWN (1 << 0)
-
-#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
-#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
-#define COMMAND_BYTE_SHIFT 0
-#define COMMAND_BYTE_MASK (0x3f << 0)
-
-#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
-#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
-#define MASTER_INIT_TIMER_SHIFT 0
-#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-
-#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
- _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
-#define MAX_RETURN_PKT_SIZE_SHIFT 0
-#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-
-#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
-#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
-#define DISABLE_VIDEO_BTA (1 << 3)
-#define IP_TG_CONFIG (1 << 2)
-#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
-#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
-#define VIDEO_MODE_BURST (3 << 0)
-
-#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
-#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
-#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
-#define BXT_DPHY_DEFEATURE_EN (1 << 8)
-#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
-#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
-#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
-#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
-#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
-#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
-#define CLOCKSTOP (1 << 1)
-#define EOT_DISABLE (1 << 0)
-
-#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
-#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
-#define LP_BYTECLK_SHIFT 0
-#define LP_BYTECLK_MASK (0xffff << 0)
-
-#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4)
-#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-
-#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898)
-#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
-
-/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
-#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
-
-/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
-#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
-#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
-#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
-#define LONG_PACKET_WORD_COUNT_SHIFT 8
-#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
-#define SHORT_PACKET_PARAM_SHIFT 8
-#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
-#define VIRTUAL_CHANNEL_SHIFT 6
-#define VIRTUAL_CHANNEL_MASK (3 << 6)
-#define DATA_TYPE_SHIFT 0
-#define DATA_TYPE_MASK (0x3f << 0)
-/* data type values, see include/video/mipi_display.h */
-
-#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
-#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
-#define DPI_FIFO_EMPTY (1 << 28)
-#define DBI_FIFO_EMPTY (1 << 27)
-#define LP_CTRL_FIFO_EMPTY (1 << 26)
-#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
-#define LP_CTRL_FIFO_FULL (1 << 24)
-#define HS_CTRL_FIFO_EMPTY (1 << 18)
-#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
-#define HS_CTRL_FIFO_FULL (1 << 16)
-#define LP_DATA_FIFO_EMPTY (1 << 10)
-#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
-#define LP_DATA_FIFO_FULL (1 << 8)
-#define HS_DATA_FIFO_EMPTY (1 << 2)
-#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
-#define HS_DATA_FIFO_FULL (1 << 0)
-
-#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
-#define DBI_HS_LP_MODE_MASK (1 << 0)
-#define DBI_LP_MODE (1 << 0)
-#define DBI_HS_MODE (0 << 0)
-
-#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
-#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
-#define EXIT_ZERO_COUNT_SHIFT 24
-#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
-#define TRAIL_COUNT_SHIFT 16
-#define TRAIL_COUNT_MASK (0x1f << 16)
-#define CLK_ZERO_COUNT_SHIFT 8
-#define CLK_ZERO_COUNT_MASK (0xff << 8)
-#define PREPARE_COUNT_SHIFT 0
-#define PREPARE_COUNT_MASK (0x3f << 0)
-
-#define _ICL_DSI_T_INIT_MASTER_0 0x6b088
-#define _ICL_DSI_T_INIT_MASTER_1 0x6b888
-#define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \
- _ICL_DSI_T_INIT_MASTER_0,\
- _ICL_DSI_T_INIT_MASTER_1)
-
-#define _DPHY_CLK_TIMING_PARAM_0 0x162180
-#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
-#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
- _DPHY_CLK_TIMING_PARAM_0,\
- _DPHY_CLK_TIMING_PARAM_1)
-#define _DSI_CLK_TIMING_PARAM_0 0x6b080
-#define _DSI_CLK_TIMING_PARAM_1 0x6b880
-#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
- _DSI_CLK_TIMING_PARAM_0,\
- _DSI_CLK_TIMING_PARAM_1)
-#define CLK_PREPARE_OVERRIDE (1 << 31)
-#define CLK_PREPARE(x) ((x) << 28)
-#define CLK_PREPARE_MASK (0x7 << 28)
-#define CLK_PREPARE_SHIFT 28
-#define CLK_ZERO_OVERRIDE (1 << 27)
-#define CLK_ZERO(x) ((x) << 20)
-#define CLK_ZERO_MASK (0xf << 20)
-#define CLK_ZERO_SHIFT 20
-#define CLK_PRE_OVERRIDE (1 << 19)
-#define CLK_PRE(x) ((x) << 16)
-#define CLK_PRE_MASK (0x3 << 16)
-#define CLK_PRE_SHIFT 16
-#define CLK_POST_OVERRIDE (1 << 15)
-#define CLK_POST(x) ((x) << 8)
-#define CLK_POST_MASK (0x7 << 8)
-#define CLK_POST_SHIFT 8
-#define CLK_TRAIL_OVERRIDE (1 << 7)
-#define CLK_TRAIL(x) ((x) << 0)
-#define CLK_TRAIL_MASK (0xf << 0)
-#define CLK_TRAIL_SHIFT 0
-
-#define _DPHY_DATA_TIMING_PARAM_0 0x162184
-#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
-#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
- _DPHY_DATA_TIMING_PARAM_0,\
- _DPHY_DATA_TIMING_PARAM_1)
-#define _DSI_DATA_TIMING_PARAM_0 0x6B084
-#define _DSI_DATA_TIMING_PARAM_1 0x6B884
-#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
- _DSI_DATA_TIMING_PARAM_0,\
- _DSI_DATA_TIMING_PARAM_1)
-#define HS_PREPARE_OVERRIDE (1 << 31)
-#define HS_PREPARE(x) ((x) << 24)
-#define HS_PREPARE_MASK (0x7 << 24)
-#define HS_PREPARE_SHIFT 24
-#define HS_ZERO_OVERRIDE (1 << 23)
-#define HS_ZERO(x) ((x) << 16)
-#define HS_ZERO_MASK (0xf << 16)
-#define HS_ZERO_SHIFT 16
-#define HS_TRAIL_OVERRIDE (1 << 15)
-#define HS_TRAIL(x) ((x) << 8)
-#define HS_TRAIL_MASK (0x7 << 8)
-#define HS_TRAIL_SHIFT 8
-#define HS_EXIT_OVERRIDE (1 << 7)
-#define HS_EXIT(x) ((x) << 0)
-#define HS_EXIT_MASK (0x7 << 0)
-#define HS_EXIT_SHIFT 0
-
-#define _DPHY_TA_TIMING_PARAM_0 0x162188
-#define _DPHY_TA_TIMING_PARAM_1 0x6c188
-#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
- _DPHY_TA_TIMING_PARAM_0,\
- _DPHY_TA_TIMING_PARAM_1)
-#define _DSI_TA_TIMING_PARAM_0 0x6b098
-#define _DSI_TA_TIMING_PARAM_1 0x6b898
-#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
- _DSI_TA_TIMING_PARAM_0,\
- _DSI_TA_TIMING_PARAM_1)
-#define TA_SURE_OVERRIDE (1 << 31)
-#define TA_SURE(x) ((x) << 16)
-#define TA_SURE_MASK (0x1f << 16)
-#define TA_SURE_SHIFT 16
-#define TA_GO_OVERRIDE (1 << 15)
-#define TA_GO(x) ((x) << 8)
-#define TA_GO_MASK (0xf << 8)
-#define TA_GO_SHIFT 8
-#define TA_GET_OVERRIDE (1 << 7)
-#define TA_GET(x) ((x) << 0)
-#define TA_GET_MASK (0xf << 0)
-#define TA_GET_SHIFT 0
-
-/* DSI transcoder configuration */
-#define _DSI_TRANS_FUNC_CONF_0 0x6b030
-#define _DSI_TRANS_FUNC_CONF_1 0x6b830
-#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
- _DSI_TRANS_FUNC_CONF_0,\
- _DSI_TRANS_FUNC_CONF_1)
-#define OP_MODE_MASK (0x3 << 28)
-#define OP_MODE_SHIFT 28
-#define CMD_MODE_NO_GATE (0x0 << 28)
-#define CMD_MODE_TE_GATE (0x1 << 28)
-#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
-#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
-#define TE_SOURCE_GPIO (1 << 27)
-#define LINK_READY (1 << 20)
-#define PIX_FMT_MASK (0x3 << 16)
-#define PIX_FMT_SHIFT 16
-#define PIX_FMT_RGB565 (0x0 << 16)
-#define PIX_FMT_RGB666_PACKED (0x1 << 16)
-#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
-#define PIX_FMT_RGB888 (0x3 << 16)
-#define PIX_FMT_RGB101010 (0x4 << 16)
-#define PIX_FMT_RGB121212 (0x5 << 16)
-#define PIX_FMT_COMPRESSED (0x6 << 16)
-#define BGR_TRANSMISSION (1 << 15)
-#define PIX_VIRT_CHAN(x) ((x) << 12)
-#define PIX_VIRT_CHAN_MASK (0x3 << 12)
-#define PIX_VIRT_CHAN_SHIFT 12
-#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
-#define PIX_BUF_THRESHOLD_SHIFT 10
-#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
-#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
-#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
-#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
-#define CONTINUOUS_CLK_MASK (0x3 << 8)
-#define CONTINUOUS_CLK_SHIFT 8
-#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
-#define CLK_HS_OR_LP (0x2 << 8)
-#define CLK_HS_CONTINUOUS (0x3 << 8)
-#define LINK_CALIBRATION_MASK (0x3 << 4)
-#define LINK_CALIBRATION_SHIFT 4
-#define CALIBRATION_DISABLED (0x0 << 4)
-#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
-#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
-#define BLANKING_PACKET_ENABLE (1 << 2)
-#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
-#define EOTP_DISABLED (1 << 0)
-
-#define _DSI_CMD_RXCTL_0 0x6b0d4
-#define _DSI_CMD_RXCTL_1 0x6b8d4
-#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
- _DSI_CMD_RXCTL_0,\
- _DSI_CMD_RXCTL_1)
-#define READ_UNLOADS_DW (1 << 16)
-#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
-#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
-#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
-#define RECEIVED_RESET_TRIGGER (1 << 12)
-#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
-#define RECEIVED_CRC_WAS_LOST (1 << 10)
-#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
-#define NUMBER_RX_PLOAD_DW_SHIFT 0
-
-#define _DSI_CMD_TXCTL_0 0x6b0d0
-#define _DSI_CMD_TXCTL_1 0x6b8d0
-#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
- _DSI_CMD_TXCTL_0,\
- _DSI_CMD_TXCTL_1)
-#define KEEP_LINK_IN_HS (1 << 24)
-#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
-#define FREE_HEADER_CREDIT_SHIFT 0x8
-#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
-#define FREE_PLOAD_CREDIT_SHIFT 0
-#define MAX_HEADER_CREDIT 0x10
-#define MAX_PLOAD_CREDIT 0x40
-
-#define _DSI_CMD_TXHDR_0 0x6b100
-#define _DSI_CMD_TXHDR_1 0x6b900
-#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
- _DSI_CMD_TXHDR_0,\
- _DSI_CMD_TXHDR_1)
-#define PAYLOAD_PRESENT (1 << 31)
-#define LP_DATA_TRANSFER (1 << 30)
-#define VBLANK_FENCE (1 << 29)
-#define PARAM_WC_MASK (0xffff << 8)
-#define PARAM_WC_LOWER_SHIFT 8
-#define PARAM_WC_UPPER_SHIFT 16
-#define VC_MASK (0x3 << 6)
-#define VC_SHIFT 6
-#define DT_MASK (0x3f << 0)
-#define DT_SHIFT 0
-
-#define _DSI_CMD_TXPYLD_0 0x6b104
-#define _DSI_CMD_TXPYLD_1 0x6b904
-#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
- _DSI_CMD_TXPYLD_0,\
- _DSI_CMD_TXPYLD_1)
-
-#define _DSI_LP_MSG_0 0x6b0d8
-#define _DSI_LP_MSG_1 0x6b8d8
-#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
- _DSI_LP_MSG_0,\
- _DSI_LP_MSG_1)
-#define LPTX_IN_PROGRESS (1 << 17)
-#define LINK_IN_ULPS (1 << 16)
-#define LINK_ULPS_TYPE_LP11 (1 << 8)
-#define LINK_ENTER_ULPS (1 << 0)
-
-/* DSI timeout registers */
-#define _DSI_HSTX_TO_0 0x6b044
-#define _DSI_HSTX_TO_1 0x6b844
-#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
- _DSI_HSTX_TO_0,\
- _DSI_HSTX_TO_1)
-#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
-#define HSTX_TIMEOUT_VALUE_SHIFT 16
-#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
-#define HSTX_TIMED_OUT (1 << 0)
-
-#define _DSI_LPRX_HOST_TO_0 0x6b048
-#define _DSI_LPRX_HOST_TO_1 0x6b848
-#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
- _DSI_LPRX_HOST_TO_0,\
- _DSI_LPRX_HOST_TO_1)
-#define LPRX_TIMED_OUT (1 << 16)
-#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
-#define LPRX_TIMEOUT_VALUE_SHIFT 0
-#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
-
-#define _DSI_PWAIT_TO_0 0x6b040
-#define _DSI_PWAIT_TO_1 0x6b840
-#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
- _DSI_PWAIT_TO_0,\
- _DSI_PWAIT_TO_1)
-#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
-#define PRESET_TIMEOUT_VALUE_SHIFT 16
-#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
-#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
-#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
-#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
-
-#define _DSI_TA_TO_0 0x6b04c
-#define _DSI_TA_TO_1 0x6b84c
-#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
- _DSI_TA_TO_0,\
- _DSI_TA_TO_1)
-#define TA_TIMED_OUT (1 << 16)
-#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
-#define TA_TIMEOUT_VALUE_SHIFT 0
-#define TA_TIMEOUT_VALUE(x) ((x) << 0)
-
-/* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
-#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
-#define LP_HS_SSW_CNT_SHIFT 16
-#define LP_HS_SSW_CNT_MASK (0xffff << 16)
-#define HS_LP_PWR_SW_CNT_SHIFT 0
-#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-
-#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
-#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
-#define STOP_STATE_STALL_COUNTER_SHIFT 0
-#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-
-#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
-#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
-#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
-#define RX_CONTENTION_DETECTED (1 << 0)
-
-/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
-#define DBI_TYPEC_ENABLE (1 << 31)
-#define DBI_TYPEC_WIP (1 << 30)
-#define DBI_TYPEC_OPTION_SHIFT 28
-#define DBI_TYPEC_OPTION_MASK (3 << 28)
-#define DBI_TYPEC_FREQ_SHIFT 24
-#define DBI_TYPEC_FREQ_MASK (0xf << 24)
-#define DBI_TYPEC_OVERRIDE (1 << 8)
-#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
-#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
-
-
-/* MIPI adapter registers */
-
-#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
-#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
-#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
-#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
-#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
-#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
-#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
-#define READ_REQUEST_PRIORITY_SHIFT 3
-#define READ_REQUEST_PRIORITY_MASK (3 << 3)
-#define READ_REQUEST_PRIORITY_LOW (0 << 3)
-#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
-#define RGB_FLIP_TO_BGR (1 << 2)
-
-#define BXT_PIPE_SELECT_SHIFT 7
-#define BXT_PIPE_SELECT_MASK (7 << 7)
-#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
-#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */
-#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */
-#define GLK_MIPIIO_RESET_RELEASED (1 << 28)
-#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */
-#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */
-#define GLK_LP_WAKE (1 << 22)
-#define GLK_LP11_LOW_PWR_MODE (1 << 21)
-#define GLK_LP00_LOW_PWR_MODE (1 << 20)
-#define GLK_FIREWALL_ENABLE (1 << 16)
-#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10)
-#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10
-#define BXT_DSC_ENABLE (1 << 3)
-#define BXT_RGB_FLIP (1 << 2)
-#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
-#define GLK_MIPIIO_ENABLE (1 << 0)
-
-#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
-#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
-#define DATA_MEM_ADDRESS_SHIFT 5
-#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
-#define DATA_VALID (1 << 0)
-
-#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
-#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
-#define DATA_LENGTH_SHIFT 0
-#define DATA_LENGTH_MASK (0xfffff << 0)
-
-#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
-#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
-#define COMMAND_MEM_ADDRESS_SHIFT 5
-#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
-#define AUTO_PWG_ENABLE (1 << 2)
-#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
-#define COMMAND_VALID (1 << 0)
-
-#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
-#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
-#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
-#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-
-#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
-#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
-
-#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
-#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
-#define READ_DATA_VALID(n) (1 << (n))
-
#define GEN12_GSMBASE _MMIO(0x108100)
#define GEN12_DSMBASE _MMIO(0x1080C0)
@@ -9557,8 +8462,10 @@ enum skl_power_gate {
#define _ICL_PHY_MISC_A 0x64C00
#define _ICL_PHY_MISC_B 0x64C04
-#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \
- _ICL_PHY_MISC_B)
+#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
+#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B)
+#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \
+ ICL_PHY_MISC(port))
#define ICL_PHY_MISC_MUX_DDID (1 << 28)
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
@@ -9928,4 +8835,8 @@ enum skl_power_gate {
#define CLKGATE_DIS_MISC _MMIO(0x46534)
#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+#define GEN12_CULLBIT1 _MMIO(0x6100)
+#define GEN12_CULLBIT2 _MMIO(0x7030)
+#define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index 34d37bbf08cd..d78d78fce431 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -37,6 +37,21 @@
__is_constexpr(__low) && \
((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
+/**
+ * REG_GENMASK64() - Prepare a continuous u64 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK64(__high, __low) \
+ ((u64)(GENMASK_ULL(__high, __low) + \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
+ __is_constexpr(__low) && \
+ ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+
/*
* Local integer constant expression version of is_power_of_2().
*/
@@ -71,6 +86,18 @@
*/
#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+/**
+ * REG_FIELD_GET64() - Extract a u64 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u64 and for consistency with
+ * REG_GENMASK64().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
+
typedef struct {
u32 reg;
} i915_reg_t;
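For illustration, a minimal sketch of how the new 64-bit helpers above compose; the register and field names here are hypothetical, not taken from the driver:

	/* Hypothetical page-aligned address field in a 64-bit register pair. */
	#define SKETCH_FAULT_ADDR_MASK	REG_GENMASK64(47, 12)

	static inline u64 sketch_fault_addr(u64 reg_val)
	{
		/* REG_FIELD_GET64() masks and shifts, keeping full u64 width. */
		return REG_FIELD_GET64(SKETCH_FAULT_ADDR_MASK, reg_val) << 12;
	}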
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5d94f86940f7..582770360ad1 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -44,6 +44,7 @@
#include "i915_active.h"
#include "i915_deps.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_pm.h"
@@ -117,8 +118,10 @@ static void i915_fence_release(struct dma_fence *fence)
rq->guc_prio != GUC_PRIO_FINI);
i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
- if (i915_vma_snapshot_present(&rq->batch_snapshot))
- i915_vma_snapshot_put_onstack(&rq->batch_snapshot);
+ if (rq->batch_res) {
+ i915_vma_resource_put(rq->batch_res);
+ rq->batch_res = NULL;
+ }
/*
* The request is put onto a RCU freelist (i.e. the address
@@ -309,7 +312,7 @@ void i915_request_free_capture_list(struct i915_capture_list *capture)
while (capture) {
struct i915_capture_list *next = capture->next;
- i915_vma_snapshot_put(capture->vma_snapshot);
+ i915_vma_resource_put(capture->vma_res);
kfree(capture);
capture = next;
}
@@ -855,7 +858,7 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
clear_capture_list(rq);
- rq->batch_snapshot.present = false;
+ rq->batch_res = NULL;
init_llist_head(&rq->execute_cb);
}
@@ -961,7 +964,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
__rq_init_watchdog(rq);
assert_capture_list_is_null(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
- GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot));
+ GEM_BUG_ON(rq->batch_res);
/*
* Reserve space in the ring buffer for all the commands required to
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 170ee78c2858..28b1f9db5487 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -40,7 +40,7 @@
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
-#include "i915_vma_snapshot.h"
+#include "i915_vma_resource.h"
#include <uapi/drm/i915_drm.h>
@@ -52,7 +52,7 @@ struct i915_request;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list {
- struct i915_vma_snapshot *vma_snapshot;
+ struct i915_vma_resource *vma_res;
struct i915_capture_list *next;
};
@@ -300,7 +300,7 @@ struct i915_request {
/** Batch buffer pointer for selftest internal use. */
I915_SELFTEST_DECLARE(struct i915_vma *batch);
- struct i915_vma_snapshot batch_snapshot;
+ struct i915_vma_resource *batch_res;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/**
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 247714bab044..76d5211c25eb 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -36,13 +36,14 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct i915_ttm_buddy_resource *bman_res;
struct drm_buddy *mm = &bman->mm;
- unsigned long n_pages;
- unsigned int min_order;
+ unsigned long n_pages, lpfn;
u64 min_page_size;
u64 size;
int err;
- GEM_BUG_ON(place->fpfn || place->lpfn);
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
if (!bman_res)
@@ -52,6 +53,12 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
INIT_LIST_HEAD(&bman_res->blocks);
bman_res->mm = mm;
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+ if (place->fpfn || lpfn != man->size)
+ bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
+
GEM_BUG_ON(!bman_res->base.num_pages);
size = bman_res->base.num_pages << PAGE_SHIFT;
@@ -60,10 +67,16 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
min_page_size = bo->page_alignment << PAGE_SHIFT;
GEM_BUG_ON(min_page_size < mm->chunk_size);
- min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ unsigned long pages;
+
size = roundup_pow_of_two(size);
- min_order = ilog2(size) - ilog2(mm->chunk_size);
+ min_page_size = size;
+
+ pages = size >> ilog2(mm->chunk_size);
+ if (pages > lpfn)
+ lpfn = pages;
}
if (size > mm->size) {
@@ -73,34 +86,26 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
n_pages = size >> ilog2(mm->chunk_size);
- do {
- struct drm_buddy_block *block;
- unsigned int order;
-
- order = fls(n_pages) - 1;
- GEM_BUG_ON(order > mm->max_order);
- GEM_BUG_ON(order < min_order);
-
- do {
- mutex_lock(&bman->lock);
- block = drm_buddy_alloc_blocks(mm, order);
- mutex_unlock(&bman->lock);
- if (!IS_ERR(block))
- break;
-
- if (order-- == min_order) {
- err = -ENOSPC;
- goto err_free_blocks;
- }
- } while (1);
-
- n_pages -= BIT(order);
+ mutex_lock(&bman->lock);
+ err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
+ (u64)lpfn << PAGE_SHIFT,
+ (u64)n_pages << PAGE_SHIFT,
+ min_page_size,
+ &bman_res->blocks,
+ bman_res->flags);
+ mutex_unlock(&bman->lock);
+ if (unlikely(err))
+ goto err_free_blocks;
- list_add_tail(&block->link, &bman_res->blocks);
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
- if (!n_pages)
- break;
- } while (1);
+ mutex_lock(&bman->lock);
+ drm_buddy_block_trim(mm,
+ original_size,
+ &bman_res->blocks);
+ mutex_unlock(&bman->lock);
+ }
*res = &bman_res->base;
return 0;
@@ -268,10 +273,17 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct drm_buddy *mm = &bman->mm;
+ unsigned long flags = 0;
int ret;
+ flags |= DRM_BUDDY_RANGE_ALLOCATION;
+
mutex_lock(&bman->lock);
- ret = drm_buddy_alloc_range(mm, &bman->reserved, start, size);
+ ret = drm_buddy_alloc_blocks(mm, start,
+ start + size,
+ size, mm->chunk_size,
+ &bman->reserved,
+ flags);
mutex_unlock(&bman->lock);
return ret;
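A hedged sketch of how a TTM placement now maps onto the drm_buddy API after this change (the values are illustrative):

	/*
	 * A TOPDOWN allocation restricted to the first 256 MiB. The old code
	 * rejected this via GEM_BUG_ON(place->fpfn || place->lpfn); it now
	 * becomes DRM_BUDDY_TOPDOWN_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION
	 * with the byte range [fpfn << PAGE_SHIFT, lpfn << PAGE_SHIFT) passed
	 * to drm_buddy_alloc_blocks().
	 */
	struct ttm_place place = {
		.fpfn	= 0,
		.lpfn	= SZ_256M >> PAGE_SHIFT,
		.flags	= TTM_PL_FLAG_TOPDOWN,
	};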
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
index 312077941411..72c90b432e87 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
@@ -20,6 +20,7 @@ struct drm_buddy;
*
* @base: struct ttm_resource base class we extend
* @blocks: the list of struct i915_buddy_block for this resource/allocation
+ * @flags: DRM_BUDDY_*_ALLOCATION flags
* @mm: the struct i915_buddy_mm for this resource
*
* Extends the struct ttm_resource to manage an address space allocation with
@@ -28,6 +29,7 @@ struct drm_buddy;
struct i915_ttm_buddy_resource {
struct ttm_resource base;
struct list_head blocks;
+ unsigned long flags;
struct drm_buddy *mm;
};
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 31a105bc1792..c97323973f9b 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
drm_info(&dev_priv->drm,
"balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
- ret = i915_gem_gtt_reserve(&ggtt->vm, node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 68cf1d392250..845cd88f8313 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -38,6 +38,18 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
+#include "i915_vma_resource.h"
+
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+ /*
+ * We may be forced to unbind when the vm is dead, to clean it up.
+ * This is the only exception to the requirement of the object lock
+ * being held.
+ */
+ if (atomic_read(&vma->vm->open))
+ assert_object_held_shared(vma->obj);
+}
static struct kmem_cache *slab_vmas;
@@ -285,7 +297,7 @@ struct i915_vma_work {
struct dma_fence_work base;
struct i915_address_space *vm;
struct i915_vm_pt_stash stash;
- struct i915_vma *vma;
+ struct i915_vma_resource *vma_res;
struct drm_i915_gem_object *pinned;
struct i915_sw_dma_fence_cb cb;
enum i915_cache_level cache_level;
@@ -295,23 +307,24 @@ struct i915_vma_work {
static void __vma_bind(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- struct i915_vma *vma = vw->vma;
+ struct i915_vma_resource *vma_res = vw->vma_res;
+
+ vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
+ vma_res, vw->cache_level, vw->flags);
- vma->ops->bind_vma(vw->vm, &vw->stash,
- vma, vw->cache_level, vw->flags);
}
static void __vma_release(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- if (vw->pinned) {
- __i915_gem_object_unpin_pages(vw->pinned);
+ if (vw->pinned)
i915_gem_object_put(vw->pinned);
- }
i915_vm_free_pt_stash(vw->vm, &vw->stash);
i915_vm_put(vw->vm);
+ if (vw->vma_res)
+ i915_vma_resource_put(vw->vma_res);
}
static const struct dma_fence_work_ops bind_ops = {
@@ -375,12 +388,27 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
#define i915_vma_verify_bind_complete(_vma) 0
#endif
+I915_SELFTEST_EXPORT void
+i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
+ struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
+ obj->mm.rsgt, i915_gem_object_is_readonly(obj),
+ i915_gem_object_is_lmem(obj), obj->mm.region,
+ vma->ops, vma->private, vma->node.start,
+ vma->node.size, vma->size);
+}
+
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
* @work: preallocated worker for allocating and binding the PTE
+ * @vma_res: pointer to a preallocated vma resource. The resource is either
+ * consumed or freed.
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries set up.
@@ -389,10 +417,12 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags,
- struct i915_vma_work *work)
+ struct i915_vma_work *work,
+ struct i915_vma_resource *vma_res)
{
u32 bind_flags;
u32 vma_flags;
+ int ret;
lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -400,11 +430,15 @@ int i915_vma_bind(struct i915_vma *vma,
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
- vma->vm->total)))
+ vma->vm->total))) {
+ i915_vma_resource_free(vma_res);
return -ENODEV;
+ }
- if (GEM_DEBUG_WARN_ON(!flags))
+ if (GEM_DEBUG_WARN_ON(!flags)) {
+ i915_vma_resource_free(vma_res);
return -EINVAL;
+ }
bind_flags = flags;
bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
@@ -413,16 +447,44 @@ int i915_vma_bind(struct i915_vma *vma,
vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
bind_flags &= ~vma_flags;
- if (bind_flags == 0)
+ if (bind_flags == 0) {
+ i915_vma_resource_free(vma_res);
return 0;
+ }
GEM_BUG_ON(!atomic_read(&vma->pages_count));
+ /* Wait for or await async unbinds touching our range */
+ if (work && bind_flags & vma->vm->bind_async_flags)
+ ret = i915_vma_resource_bind_dep_await(vma->vm,
+ &work->base.chain,
+ vma->node.start,
+ vma->node.size,
+ true,
+ GFP_NOWAIT |
+ __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+ else
+ ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
+ vma->node.size, true);
+ if (ret) {
+ i915_vma_resource_free(vma_res);
+ return ret;
+ }
+
+ if (vma->resource || !vma_res) {
+ /* Rebinding with an additional I915_VMA_*_BIND */
+ GEM_WARN_ON(!vma_flags);
+ i915_vma_resource_free(vma_res);
+ } else {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ }
trace_i915_vma_bind(vma, bind_flags);
if (work && bind_flags & vma->vm->bind_async_flags) {
struct dma_fence *prev;
- work->vma = vma;
+ work->vma_res = i915_vma_resource_get(vma->resource);
work->cache_level = cache_level;
work->flags = bind_flags;
@@ -445,17 +507,25 @@ int i915_vma_bind(struct i915_vma *vma,
work->base.dma.error = 0; /* enable the queue_work() */
- __i915_gem_object_pin_pages(vma->obj);
- work->pinned = i915_gem_object_get(vma->obj);
+ /*
+ * If we don't have the refcounted pages list, keep a reference
+ * on the object to avoid waiting for the async bind to
+ * complete in the object destruction path.
+ */
+ if (!work->vma_res->bi.pages_rsgt)
+ work->pinned = i915_gem_object_get(vma->obj);
} else {
if (vma->obj) {
- int ret;
-
ret = i915_gem_object_wait_moving_fence(vma->obj, true);
- if (ret)
+ if (ret) {
+ i915_vma_resource_free(vma->resource);
+ vma->resource = NULL;
+
return ret;
+ }
}
- vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
+ vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
+ bind_flags);
}
if (vma->obj)
@@ -655,7 +725,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
* 0 on success, negative error code otherwise.
*/
static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u64 size, u64 alignment, u64 flags)
{
unsigned long color;
u64 start, end;
@@ -707,7 +778,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
range_overflows(offset, size, end))
return -EINVAL;
- ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+ ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
size, offset, color,
flags);
if (ret)
@@ -746,7 +817,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
}
- ret = i915_gem_gtt_insert(vma->vm, &vma->node,
+ ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
size, alignment, color,
start, end, flags);
if (ret)
@@ -780,9 +851,17 @@ i915_vma_detach(struct i915_vma *vma)
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
unsigned int bound;
- bool pinned = true;
bound = atomic_read(&vma->flags);
+
+ if (flags & PIN_VALIDATE) {
+ flags &= I915_VMA_BIND_MASK;
+
+ return (flags & bound) == flags;
+ }
+
+ /* with the lock mandatory for unbind, we don't race here */
+ flags &= I915_VMA_BIND_MASK;
do {
if (unlikely(flags & ~bound))
return false;
@@ -790,34 +869,10 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
return false;
- if (!(bound & I915_VMA_PIN_MASK))
- goto unpinned;
-
GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
return true;
-
-unpinned:
- /*
- * If pin_count==0, but we are bound, check under the lock to avoid
- * racing with a concurrent i915_vma_unbind().
- */
- mutex_lock(&vma->vm->mutex);
- do {
- if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
- pinned = false;
- break;
- }
-
- if (unlikely(flags & ~bound)) {
- pinned = false;
- break;
- }
- } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
- mutex_unlock(&vma->vm->mutex);
-
- return pinned;
}
static struct scatterlist *
@@ -913,30 +968,39 @@ err_st_alloc:
}
static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj,
- unsigned int offset, unsigned int alignment_pad,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
+add_padding_pages(unsigned int count,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
+ sg = sg_next(sg);
+
+ return sg;
+}
+
+static struct scatterlist *
+remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
+ unsigned int offset, unsigned int alignment_pad,
+ unsigned int width, unsigned int height,
+ unsigned int src_stride, unsigned int dst_stride,
+ struct sg_table *st, struct scatterlist *sg,
+ unsigned int *gtt_offset)
{
unsigned int row;
if (!width || !height)
return sg;
- if (alignment_pad) {
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, alignment_pad * 4096, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = alignment_pad * 4096;
- sg = sg_next(sg);
- }
+ if (alignment_pad)
+ sg = add_padding_pages(alignment_pad, st, sg);
for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE;
@@ -973,18 +1037,98 @@ remap_pages(struct drm_i915_gem_object *obj,
if (!left)
continue;
+ sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
+ }
+
+ *gtt_offset += alignment_pad + dst_stride * height;
+
+ return sg;
+}
+
+static struct scatterlist *
+remap_contiguous_pages(struct drm_i915_gem_object *obj,
+ unsigned int obj_offset,
+ unsigned int count,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ struct scatterlist *iter;
+ unsigned int offset;
+
+ iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
+ GEM_BUG_ON(!iter);
+
+ do {
+ unsigned int len;
+
+ len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
+
st->nents++;
+ count -= len >> PAGE_SHIFT;
+ if (count == 0)
+ return sg;
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a conenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
+}
+
+static struct scatterlist *
+remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
+ unsigned int obj_offset, unsigned int alignment_pad,
+ unsigned int size,
+ struct sg_table *st, struct scatterlist *sg,
+ unsigned int *gtt_offset)
+{
+ if (!size)
+ return sg;
+
+ if (alignment_pad)
+ sg = add_padding_pages(alignment_pad, st, sg);
+
+ sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
+ sg = sg_next(sg);
+
+ *gtt_offset += alignment_pad + size;
+
+ return sg;
+}
+
+static struct scatterlist *
+remap_color_plane_pages(const struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj,
+ int color_plane,
+ struct sg_table *st, struct scatterlist *sg,
+ unsigned int *gtt_offset)
+{
+ unsigned int alignment_pad = 0;
+
+ if (rem_info->plane_alignment)
+ alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
+
+ if (rem_info->plane[color_plane].linear)
+ sg = remap_linear_color_plane_pages(obj,
+ rem_info->plane[color_plane].offset,
+ alignment_pad,
+ rem_info->plane[color_plane].size,
+ st, sg,
+ gtt_offset);
+
+ else
+ sg = remap_tiled_color_plane_pages(obj,
+ rem_info->plane[color_plane].offset,
+ alignment_pad,
+ rem_info->plane[color_plane].width,
+ rem_info->plane[color_plane].height,
+ rem_info->plane[color_plane].src_stride,
+ rem_info->plane[color_plane].dst_stride,
+ st, sg,
+ gtt_offset);
return sg;
}
@@ -1013,21 +1157,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
st->nents = 0;
sg = st->sgl;
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- unsigned int alignment_pad = 0;
-
- if (rem_info->plane_alignment)
- alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
-
- sg = remap_pages(obj,
- rem_info->plane[i].offset, alignment_pad,
- rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
- st, sg);
-
- gtt_offset += alignment_pad +
- rem_info->plane[i].dst_stride * rem_info->plane[i].height;
- }
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
+ sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
i915_sg_trim(st);
@@ -1049,9 +1180,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
- struct scatterlist *sg, *iter;
+ struct scatterlist *sg;
unsigned int count = view->partial.size;
- unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -1062,34 +1192,14 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
- iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
-
- sg = st->sgl;
st->nents = 0;
- do {
- unsigned int len;
-
- len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
+ sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
- return st;
- }
+ sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
+ return st;
err_sg_alloc:
kfree(st);
@@ -1101,7 +1211,6 @@ static int
__i915_vma_get_pages(struct i915_vma *vma)
{
struct sg_table *pages;
- int ret;
/*
* The vma->pages are only valid within the lifespan of the borrowed
@@ -1134,18 +1243,16 @@ __i915_vma_get_pages(struct i915_vma *vma)
break;
}
- ret = 0;
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- pages = NULL;
drm_err(&vma->vm->i915->drm,
- "Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
+ "Failed to get pages for VMA view type %u (%ld)!\n",
+ vma->ggtt_view.type, PTR_ERR(pages));
+ return PTR_ERR(pages);
}
vma->pages = pages;
- return ret;
+ return 0;
}
I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
@@ -1177,25 +1284,14 @@ err_unpin:
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
- struct sg_table *pages = READ_ONCE(vma->pages);
-
GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
if (atomic_sub_return(count, &vma->pages_count) == 0) {
- /*
- * The atomic_sub_return is a read barrier for the READ_ONCE of
- * vma->pages above.
- *
- * READ_ONCE is safe because this is either called from the same
- * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
- *
- * TODO: We're leaving vma->pages dangling, until vma->obj->resv
- * lock is required.
- */
- if (pages != vma->obj->mm.pages) {
- sg_free_table(pages);
- kfree(pages);
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
}
+ vma->pages = NULL;
i915_gem_object_unpin_pages(vma->obj);
}
@@ -1228,6 +1324,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
{
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
+ struct i915_vma_resource *vma_res = NULL;
intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
@@ -1241,7 +1338,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
/* First try and grab the pin without rebinding the vma */
- if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+ if (try_qad_pin(vma, flags))
return 0;
err = i915_vma_get_pages(vma);
@@ -1282,6 +1379,12 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
}
}
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res)) {
+ err = PTR_ERR(vma_res);
+ goto err_fence;
+ }
+
/*
* Differentiate between user/kernel vma inside the aliasing-ppgtt.
*
@@ -1302,7 +1405,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
err = mutex_lock_interruptible_nested(&vma->vm->mutex,
!(flags & PIN_GLOBAL));
if (err)
- goto err_fence;
+ goto err_vma_res;
/* No more allocations allowed now we hold vm->mutex */
@@ -1323,7 +1426,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
}
if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
- __i915_vma_pin(vma);
+ if (!(flags & PIN_VALIDATE))
+ __i915_vma_pin(vma);
goto err_unlock;
}
@@ -1332,7 +1436,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
goto err_unlock;
if (!(bound & I915_VMA_BIND_MASK)) {
- err = i915_vma_insert(vma, size, alignment, flags);
+ err = i915_vma_insert(vma, ww, size, alignment, flags);
if (err)
goto err_active;
@@ -1343,7 +1447,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!vma->pages);
err = i915_vma_bind(vma,
vma->obj->cache_level,
- flags, work);
+ flags, work, vma_res);
+ vma_res = NULL;
if (err)
goto err_remove;
@@ -1352,8 +1457,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- __i915_vma_pin(vma);
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ if (!(flags & PIN_VALIDATE)) {
+ __i915_vma_pin(vma);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ }
GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
@@ -1366,6 +1473,8 @@ err_active:
i915_active_release(&vma->active);
err_unlock:
mutex_unlock(&vma->vm->mutex);
+err_vma_res:
+ i915_vma_resource_free(vma_res);
err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
@@ -1412,7 +1521,12 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
/* Unlike i915_vma_pin, we don't take no for an answer! */
flush_idle_contexts(vm->gt);
if (mutex_lock_interruptible(&vm->mutex) == 0) {
- i915_gem_evict_vm(vm);
+ /*
+ * We pass NULL ww here, as we don't want to unbind
+ * locked objects when called from execbuf once pinning
+ * is removed. This would probably regress badly.
+ */
+ i915_gem_evict_vm(vm, NULL);
mutex_unlock(&vm->mutex);
}
} while (1);
@@ -1516,6 +1630,7 @@ void i915_vma_release(struct kref *ref)
i915_vm_put(vma->vm);
i915_active_fini(&vma->active);
+ GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
}
@@ -1548,8 +1663,16 @@ void i915_vma_parked(struct intel_gt *gt)
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
- INIT_LIST_HEAD(&vma->closed_link);
- __i915_vma_put(vma);
+ if (i915_gem_object_trylock(obj, NULL)) {
+ INIT_LIST_HEAD(&vma->closed_link);
+ __i915_vma_put(vma);
+ i915_gem_object_unlock(obj);
+ } else {
+ /* back you go.. */
+ spin_lock_irq(&gt->closed_lock);
+ list_add(&vma->closed_link, &gt->closed_vma);
+ spin_unlock_irq(&gt->closed_lock);
+ }
i915_gem_object_put(obj);
i915_vm_close(vm);
@@ -1600,8 +1723,6 @@ static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *
{
int err;
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
-
/* Wait for the vma to be bound before we start! */
err = __i915_request_await_bind(rq, vma);
if (err)
@@ -1620,6 +1741,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
assert_object_held(obj);
+ GEM_BUG_ON(!vma->pages);
+
err = __i915_vma_move_to_active(vma, rq);
if (unlikely(err))
return err;
@@ -1662,9 +1785,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-void __i915_vma_evict(struct i915_vma *vma)
+struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
+ struct i915_vma_resource *vma_res = vma->resource;
+ struct dma_fence *unbind_fence;
+
GEM_BUG_ON(i915_vma_is_pinned(vma));
+ assert_vma_held_evict(vma);
if (i915_vma_is_map_and_fenceable(vma)) {
/* Force a pagefault for domain tracking on next user access */
@@ -1694,15 +1821,36 @@ void __i915_vma_evict(struct i915_vma *vma)
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- if (likely(atomic_read(&vma->vm->open))) {
- trace_i915_vma_unbind(vma);
- vma->ops->unbind_vma(vma->vm, vma);
- }
+ /* Object backend must be async capable. */
+ GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
+
+ /* If vm is not open, unbind is a nop. */
+ vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
+ atomic_read(&vma->vm->open);
+ trace_i915_vma_unbind(vma);
+
+ unbind_fence = i915_vma_resource_unbind(vma_res);
+ vma->resource = NULL;
+
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
i915_vma_detach(vma);
+
+ if (!async && unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+
+ /*
+ * Binding itself may not have completed until the unbind fence signals,
+ * so don't drop the pages until that happens, unless the resource is
+ * async_capable.
+ */
+
vma_unbind_pages(vma);
+ return unbind_fence;
}
int __i915_vma_unbind(struct i915_vma *vma)
@@ -1710,6 +1858,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
int ret;
lockdep_assert_held(&vma->vm->mutex);
+ assert_vma_held_evict(vma);
if (!drm_mm_node_allocated(&vma->node))
return 0;
@@ -1729,18 +1878,55 @@ int __i915_vma_unbind(struct i915_vma *vma)
return ret;
GEM_BUG_ON(i915_vma_is_active(vma));
- __i915_vma_evict(vma);
+ __i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
+static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
+{
+ struct dma_fence *fence;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return NULL;
+
+ if (i915_vma_is_pinned(vma) ||
+ &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
+ return ERR_PTR(-EAGAIN);
+
+ /*
+ * We probably need to replace this with awaiting the fences of the
+ * object's dma_resv when the vma active goes away. When doing that
+ * we need to be careful to not add the vma_resource unbind fence
+ * immediately to the object's dma_resv, because then unbinding
+ * the next vma from the object, in case there are many, will
+ * actually await the unbinding of the previous vmas, which is
+ * undesirable.
+ */
+ if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
+ I915_ACTIVE_AWAIT_EXCL |
+ I915_ACTIVE_AWAIT_ACTIVE) < 0) {
+ return ERR_PTR(-EBUSY);
+ }
+
+ fence = __i915_vma_evict(vma, true);
+
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
+
+ return fence;
+}
+
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref = 0;
int err;
+ assert_object_held_shared(vma->obj);
+
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
@@ -1771,6 +1957,79 @@ out_rpm:
return err;
}
+int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref = 0;
+ struct dma_fence *fence;
+ int err;
+
+ /*
+ * We need the dma-resv lock since we add the
+ * unbind fence to the dma-resv object.
+ */
+ assert_object_held(obj);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ if (!obj->mm.rsgt)
+ return -EBUSY;
+
+ err = dma_resv_reserve_shared(obj->base.resv, 1);
+ if (err)
+ return -EBUSY;
+
+ /*
+ * It would be great if we could grab this wakeref from the
+ * async unbind work if needed, but we can't because it uses
+ * kmalloc and it's in the dma-fence signalling critical path.
+ */
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ if (trylock_vm && !mutex_trylock(&vm->mutex)) {
+ err = -EBUSY;
+ goto out_rpm;
+ } else if (!trylock_vm) {
+ err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
+ if (err)
+ goto out_rpm;
+ }
+
+ fence = __i915_vma_unbind_async(vma);
+ mutex_unlock(&vm->mutex);
+ if (IS_ERR_OR_NULL(fence)) {
+ err = PTR_ERR_OR_ZERO(fence);
+ goto out_rpm;
+ }
+
+ dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_fence_put(fence);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return err;
+}
+
+int i915_vma_unbind_unlocked(struct i915_vma *vma)
+{
+ int err;
+
+ i915_gem_object_lock(vma->obj, NULL);
+ err = i915_vma_unbind(vma);
+ i915_gem_object_unlock(vma->obj);
+
+ return err;
+}
+
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
i915_gem_object_make_unshrinkable(vma->obj);
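A hedged sketch of the expected calling convention for the new async unbind; the caller function is hypothetical:

	static int sketch_evict_vma(struct i915_vma *vma)
	{
		int err;

		/* The dma-resv lock must be held; the unbind fence is added to it. */
		err = i915_gem_object_lock(vma->obj, NULL);
		if (err)
			return err;

		/* Returns -EAGAIN/-EBUSY rather than blocking on a busy vma. */
		err = i915_vma_unbind_async(vma, false);
		i915_gem_object_unlock(vma->obj);
		return err;
	}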
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 32719431b3df..011af044ad4f 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -37,6 +37,7 @@
#include "i915_active.h"
#include "i915_request.h"
+#include "i915_vma_resource.h"
#include "i915_vma_types.h"
struct i915_vma *
@@ -204,16 +205,19 @@ struct i915_vma_work *i915_vma_work(void);
int i915_vma_bind(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags,
- struct i915_vma_work *work);
+ struct i915_vma_work *work,
+ struct i915_vma_resource *vma_res);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
-void __i915_vma_evict(struct i915_vma *vma);
+struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm);
+int __must_check i915_vma_unbind_unlocked(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
@@ -337,12 +341,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*/
void i915_vma_unpin_iomap(struct i915_vma *vma);
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
-
/**
* i915_vma_pin_fence - pin fencing state
* @vma: vma to pin fencing for
@@ -428,6 +426,26 @@ static inline int i915_vma_sync(struct i915_vma *vma)
return i915_active_wait(&vma->active);
}
+/**
+ * i915_vma_get_current_resource - Get the current resource of the vma
+ * @vma: The vma to get the current resource from.
+ *
+ * It's illegal to call this function if the vma is not bound.
+ *
+ * Return: A refcounted pointer to the current vma resource
+ * of the vma, assuming the vma is bound.
+ */
+static inline struct i915_vma_resource *
+i915_vma_get_current_resource(struct i915_vma *vma)
+{
+ return i915_vma_resource_get(vma->resource);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+void i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
+ struct i915_vma *vma);
+#endif
+
void i915_vma_module_exit(void);
int i915_vma_module_init(void);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
new file mode 100644
index 000000000000..57ae92ba8af1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>
+
+#include "i915_sw_fence.h"
+#include "i915_vma_resource.h"
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+
+#include "gt/intel_gtt.h"
+
+static struct kmem_cache *slab_vma_resources;
+
+/**
+ * DOC:
+ * We use a per-vm interval tree to keep track of vma_resources
+ * scheduled for unbind but not yet unbound. The tree is protected by
+ * the vm mutex, and nodes are removed just after the unbind fence signals.
+ * The removal takes the vm mutex from a kernel thread, which we need to
+ * keep in mind so that we don't grab the mutex and try to wait for all
+ * pending unbinds to complete, because that will temporarily block many
+ * of the workqueue threads, and people will get angry.
+ *
+ * We should consider using a single ordered fence per VM instead, but that
+ * requires ordering the unbinds and might introduce unnecessary waiting
+ * for unrelated unbinds. The amount of code will probably be roughly the same
+ * due to the simplicity of using the interval tree interface.
+ *
+ * Another drawback of this interval tree is that the complexity of insertion
+ * and removal of fences increases as O(ln(pending_unbinds)) instead of
+ * O(1) for a single fence without interval tree.
+ */
+#define VMA_RES_START(_node) ((_node)->start)
+#define VMA_RES_LAST(_node) ((_node)->start + (_node)->node_size - 1)
+INTERVAL_TREE_DEFINE(struct i915_vma_resource, rb,
+ u64, __subtree_last,
+ VMA_RES_START, VMA_RES_LAST, static, vma_res_itree);
+
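INTERVAL_TREE_DEFINE() above generates the vma_res_itree_*() helpers used throughout this file; a minimal range query over the pending-unbind tree would look like this sketch (the walker function is hypothetical):

	/* Visit all pending unbinds overlapping [start, start + size - 1]. */
	static void sketch_for_each_pending(struct i915_address_space *vm,
					    u64 start, u64 size)
	{
		struct i915_vma_resource *node;
		u64 last = start + size - 1;

		lockdep_assert_held(&vm->mutex);
		for (node = vma_res_itree_iter_first(&vm->pending_unbind,
						     start, last);
		     node;
		     node = vma_res_itree_iter_next(node, start, last))
			;	/* e.g. wait for or await node->unbind_fence */
	}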
+/* Callbacks for the unbind dma-fence. */
+
+/**
+ * i915_vma_resource_alloc - Allocate a vma resource
+ *
+ * Return: A pointer to a cleared struct i915_vma_resource or
+ * a -ENOMEM error pointer if allocation fails.
+ */
+struct i915_vma_resource *i915_vma_resource_alloc(void)
+{
+ struct i915_vma_resource *vma_res =
+ kmem_cache_zalloc(slab_vma_resources, GFP_KERNEL);
+
+ return vma_res ? vma_res : ERR_PTR(-ENOMEM);
+}
+
+/**
+ * i915_vma_resource_free - Free a vma resource
+ * @vma_res: The vma resource to free.
+ */
+void i915_vma_resource_free(struct i915_vma_resource *vma_res)
+{
+ if (vma_res)
+ kmem_cache_free(slab_vma_resources, vma_res);
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+ return "vma unbind fence";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static void unbind_fence_free_rcu(struct rcu_head *head)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(head, typeof(*vma_res), unbind_fence.rcu);
+
+ i915_vma_resource_free(vma_res);
+}
+
+static void unbind_fence_release(struct dma_fence *fence)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(fence, typeof(*vma_res), unbind_fence);
+
+ i915_sw_fence_fini(&vma_res->chain);
+
+ call_rcu(&fence->rcu, unbind_fence_free_rcu);
+}
+
+static struct dma_fence_ops unbind_fence_ops = {
+ .get_driver_name = get_driver_name,
+ .get_timeline_name = get_timeline_name,
+ .release = unbind_fence_release,
+};
+
+static void __i915_vma_resource_unhold(struct i915_vma_resource *vma_res)
+{
+ struct i915_address_space *vm;
+
+ if (!refcount_dec_and_test(&vma_res->hold_count))
+ return;
+
+ dma_fence_signal(&vma_res->unbind_fence);
+
+ vm = vma_res->vm;
+ if (vma_res->wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, vma_res->wakeref);
+
+ vma_res->vm = NULL;
+ if (!RB_EMPTY_NODE(&vma_res->rb)) {
+ mutex_lock(&vm->mutex);
+ vma_res_itree_remove(vma_res, &vm->pending_unbind);
+ mutex_unlock(&vm->mutex);
+ }
+
+ if (vma_res->bi.pages_rsgt)
+ i915_refct_sgt_put(vma_res->bi.pages_rsgt);
+}
+
+/**
+ * i915_vma_resource_unhold - Unhold the signaling of the vma resource unbind
+ * fence.
+ * @vma_res: The vma resource.
+ * @lockdep_cookie: The lockdep cookie returned from i915_vma_resource_hold.
+ *
+ * The function may leave a dma_fence critical section.
+ */
+void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
+ bool lockdep_cookie)
+{
+ dma_fence_end_signalling(lockdep_cookie);
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ unsigned long irq_flags;
+
+ /* Inefficient open-coded might_lock_irqsave() */
+ spin_lock_irqsave(&vma_res->lock, irq_flags);
+ spin_unlock_irqrestore(&vma_res->lock, irq_flags);
+ }
+
+ __i915_vma_resource_unhold(vma_res);
+}
+
+/**
+ * i915_vma_resource_hold - Hold the signaling of the vma resource unbind fence.
+ * @vma_res: The vma resource.
+ * @lockdep_cookie: Pointer to a bool serving as a lockdep cookie that should
+ * be given as an argument to the pairing i915_vma_resource_unhold.
+ *
+ * If returning true, the function enters a dma_fence signalling critical
+ * section if not in one already.
+ *
+ * Return: true if holding successful, false if not.
+ */
+bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
+ bool *lockdep_cookie)
+{
+ bool held = refcount_inc_not_zero(&vma_res->hold_count);
+
+ if (held)
+ *lockdep_cookie = dma_fence_begin_signalling();
+
+ return held;
+}
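A minimal usage sketch for the hold/unhold pair; the capture function is hypothetical, but error capture is the intended user:

	static void sketch_capture_pages(struct i915_vma_resource *vma_res)
	{
		bool lockdep_cookie;

		if (!i915_vma_resource_hold(vma_res, &lockdep_cookie))
			return;	/* Unbind already signaled; nothing to capture. */

		/* vma_res->bi.pages stays valid until the pairing unhold. */

		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}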
+
+static void i915_vma_resource_unbind_work(struct work_struct *work)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(work, typeof(*vma_res), work);
+ struct i915_address_space *vm = vma_res->vm;
+ bool lockdep_cookie;
+
+ lockdep_cookie = dma_fence_begin_signalling();
+ if (likely(atomic_read(&vm->open)))
+ vma_res->ops->unbind_vma(vm, vma_res);
+
+ dma_fence_end_signalling(lockdep_cookie);
+ __i915_vma_resource_unhold(vma_res);
+ i915_vma_resource_put(vma_res);
+}
+
+static int
+i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify state)
+{
+ struct i915_vma_resource *vma_res =
+ container_of(fence, typeof(*vma_res), chain);
+ struct dma_fence *unbind_fence =
+ &vma_res->unbind_fence;
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ dma_fence_get(unbind_fence);
+ if (vma_res->immediate_unbind) {
+ i915_vma_resource_unbind_work(&vma_res->work);
+ } else {
+ INIT_WORK(&vma_res->work, i915_vma_resource_unbind_work);
+ queue_work(system_unbound_wq, &vma_res->work);
+ }
+ break;
+ case FENCE_FREE:
+ i915_vma_resource_put(vma_res);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * i915_vma_resource_unbind - Unbind a vma resource
+ * @vma_res: The vma resource to unbind.
+ *
+ * At this point this function does little more than publish a fence that
+ * signals immediately unless signaling is held back.
+ *
+ * Return: A refcounted pointer to a dma-fence that signals when unbinding is
+ * complete.
+ */
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+{
+ struct i915_address_space *vm = vma_res->vm;
+
+ /* Reference for the sw fence */
+ i915_vma_resource_get(vma_res);
+
+ /* Caller must already have a wakeref in this case. */
+ if (vma_res->needs_wakeref)
+ vma_res->wakeref = intel_runtime_pm_get_if_in_use(&vm->i915->runtime_pm);
+
+ if (atomic_read(&vma_res->chain.pending) <= 1) {
+ RB_CLEAR_NODE(&vma_res->rb);
+ vma_res->immediate_unbind = 1;
+ } else {
+ vma_res_itree_insert(vma_res, &vma_res->vm->pending_unbind);
+ }
+
+ i915_sw_fence_commit(&vma_res->chain);
+
+ return &vma_res->unbind_fence;
+}
+
+/**
+ * __i915_vma_resource_init - Initialize a vma resource.
+ * @vma_res: The vma resource to initialize
+ *
+ * Initializes the private members of a vma resource.
+ */
+void __i915_vma_resource_init(struct i915_vma_resource *vma_res)
+{
+ spin_lock_init(&vma_res->lock);
+ dma_fence_init(&vma_res->unbind_fence, &unbind_fence_ops,
+ &vma_res->lock, 0, 0);
+ refcount_set(&vma_res->hold_count, 1);
+ i915_sw_fence_init(&vma_res->chain, i915_vma_resource_fence_notify);
+}
+
+static void
+i915_vma_resource_color_adjust_range(struct i915_address_space *vm,
+ u64 *start,
+ u64 *end)
+{
+ if (i915_vm_has_cache_coloring(vm)) {
+ if (*start)
+ *start -= I915_GTT_PAGE_SIZE;
+ *end += I915_GTT_PAGE_SIZE;
+ }
+}
+
+/**
+ * i915_vma_resource_bind_dep_sync - Wait for / sync all unbinds touching a
+ * certain vm range.
+ * @vm: The vm to look at.
+ * @offset: The range start.
+ * @size: The range size.
+ * @intr: Whether to wait interruptible.
+ *
+ * The function needs to be called with the vm lock held.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
+ */
+int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
+ u64 offset,
+ u64 size,
+ bool intr)
+{
+ struct i915_vma_resource *node;
+ u64 last = offset + size - 1;
+
+ lockdep_assert_held(&vm->mutex);
+ might_sleep();
+
+ i915_vma_resource_color_adjust_range(vm, &offset, &last);
+ node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
+ while (node) {
+ int ret = dma_fence_wait(&node->unbind_fence, intr);
+
+ if (ret)
+ return ret;
+
+ node = vma_res_itree_iter_next(node, offset, last);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_vma_resource_bind_dep_sync_all - Wait for / sync all unbinds of a vm,
+ * releasing the vm lock while waiting.
+ * @vm: The vm to look at.
+ *
+ * The function may not be called with the vm lock held.
+ * Typically this is called at vm destruction to finish any pending
+ * unbind operations. The vm mutex is released while waiting to avoid
+ * stalling kernel workqueues trying to grab the mutex.
+ */
+void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm)
+{
+ struct i915_vma_resource *node;
+ struct dma_fence *fence;
+
+ do {
+ fence = NULL;
+ mutex_lock(&vm->mutex);
+ node = vma_res_itree_iter_first(&vm->pending_unbind, 0,
+ U64_MAX);
+ if (node)
+ fence = dma_fence_get_rcu(&node->unbind_fence);
+ mutex_unlock(&vm->mutex);
+
+ if (fence) {
+ /*
+ * The wait makes sure the node eventually removes
+ * itself from the tree.
+ */
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+ } while (node);
+}
+
+/**
+ * i915_vma_resource_bind_dep_await - Have a struct i915_sw_fence await all
+ * pending unbinds in a certain range of a vm.
+ * @vm: The vm to look at.
+ * @sw_fence: The struct i915_sw_fence that will be awaiting the unbinds.
+ * @offset: The range start.
+ * @size: The range size.
+ * @intr: Whether to wait interruptible.
+ * @gfp: Allocation mode for memory allocations.
+ *
+ * The function makes @sw_fence await all pending unbinds in a certain
+ * vm range before calling the complete notifier. To be able to await
+ * each individual unbind, the function needs to allocate memory using
+ * the @gfp allocation mode. If that fails, the function will instead
+ * wait for the unbind fence to signal, using @intr to judge whether to
+ * wait interruptibly or not. Note that @gfp should ideally be selected so
+ * as to avoid any expensive memory allocation stalls and rather fail and
+ * synchronize itself. For now the vm mutex is required when calling this
+ * function, which means that @gfp can't call into direct reclaim. In reality
+ * this means that during heavy memory pressure, we will sync in this
+ * function.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
+ */
+int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
+ struct i915_sw_fence *sw_fence,
+ u64 offset,
+ u64 size,
+ bool intr,
+ gfp_t gfp)
+{
+ struct i915_vma_resource *node;
+ u64 last = offset + size - 1;
+
+ lockdep_assert_held(&vm->mutex);
+ might_alloc(gfp);
+ might_sleep();
+
+ i915_vma_resource_color_adjust_range(vm, &offset, &last);
+ node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
+ while (node) {
+ int ret;
+
+ ret = i915_sw_fence_await_dma_fence(sw_fence,
+ &node->unbind_fence,
+ 0, gfp);
+ if (ret < 0) {
+ ret = dma_fence_wait(&node->unbind_fence, intr);
+ if (ret)
+ return ret;
+ }
+
+ node = vma_res_itree_iter_next(node, offset, last);
+ }
+
+ return 0;
+}
+
+void i915_vma_resource_module_exit(void)
+{
+ kmem_cache_destroy(slab_vma_resources);
+}
+
+int __init i915_vma_resource_module_init(void)
+{
+ slab_vma_resources = KMEM_CACHE(i915_vma_resource, SLAB_HWCACHE_ALIGN);
+ if (!slab_vma_resources)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
new file mode 100644
index 000000000000..25913913baa6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_VMA_RESOURCE_H__
+#define __I915_VMA_RESOURCE_H__
+
+#include <linux/dma-fence.h>
+#include <linux/refcount.h>
+
+#include "i915_gem.h"
+#include "i915_scatterlist.h"
+#include "i915_sw_fence.h"
+#include "intel_runtime_pm.h"
+
+struct intel_memory_region;
+
+struct i915_page_sizes {
+ /**
+ * The sg mask of the pages sg_table. i.e the mask of
+ * the lengths for each sg entry.
+ */
+ unsigned int phys;
+
+ /**
+ * The gtt page sizes we are allowed to use given the
+ * sg mask and the supported page sizes. This will
+ * express the smallest unit we can use for the whole
+ * object, as well as the larger sizes we may be able
+ * to use opportunistically.
+ */
+ unsigned int sg;
+};
+
+/**
+ * struct i915_vma_resource - Snapshotted unbind information.
+ * @unbind_fence: Fence to mark unbinding complete. Note that this fence
+ * is not considered published until unbind is scheduled, and as such it
+ * is illegal to access this fence before the unbind is scheduled, other
+ * than for refcounting.
+ * @lock: The @unbind_fence lock.
+ * @hold_count: Number of holders blocking the fence from finishing.
+ * The vma itself is keeping a hold, which is released when unbind
+ * is scheduled.
+ * @work: Work struct for deferred unbind work.
+ * @chain: The struct i915_sw_fence used to await dependencies.
+ * @rb: Rb node for the vm's pending unbind interval tree.
+ * @__subtree_last: Interval tree private member.
+ * @vm: non-refcounted pointer to the vm. This is for internal use only and
+ * this member is cleared after vm_resource unbind.
+ * @mr: The memory region of the object pointed to by the vma.
+ * @ops: Pointer to the backend i915_vma_ops.
+ * @private: Bind backend private info.
+ * @start: Offset into the address space of bind range start.
+ * @node_size: Size of the allocated range manager node.
+ * @vma_size: Bind size.
+ * @page_sizes_gtt: Resulting page sizes from the bind operation.
+ * @bound_flags: Flags indicating binding status.
+ * @allocated: Backend private data. TODO: Should move into @private.
+ * @immediate_unbind: Unbind can be done immediately and doesn't need to be
+ * deferred to a work item awaiting unsignaled fences. This is a hack.
+ * (dma_fence_work uses a fence flag for this, but this seems slightly
+ * cleaner).
+ *
+ * The lifetime of a struct i915_vma_resource is from a binding request to
+ * the actual possible asynchronous unbind has completed.
+ */
+struct i915_vma_resource {
+ struct dma_fence unbind_fence;
+ /* See above for description of the lock. */
+ spinlock_t lock;
+ refcount_t hold_count;
+ struct work_struct work;
+ struct i915_sw_fence chain;
+ struct rb_node rb;
+ u64 __subtree_last;
+ struct i915_address_space *vm;
+ intel_wakeref_t wakeref;
+
+ /**
+ * struct i915_vma_bindinfo - Information needed for async bind
+ * only but that can be dropped after the bind has taken place.
+ * Consider making this a separate argument to the bind_vma
+ * op, coalescing with other arguments like vm, stash, cache_level
+ * and flags
+ * @pages: The pages sg-table.
+ * @page_sizes: Page sizes of the pages.
+ * @pages_rsgt: Refcounted sg-table when delayed object destruction
+ * is supported. May be NULL.
+ * @readonly: Whether the vma should be bound read-only.
+ * @lmem: Whether the vma points to lmem.
+ */
+ struct i915_vma_bindinfo {
+ struct sg_table *pages;
+ struct i915_page_sizes page_sizes;
+ struct i915_refct_sgt *pages_rsgt;
+ bool readonly:1;
+ bool lmem:1;
+ } bi;
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+ struct intel_memory_region *mr;
+#endif
+ const struct i915_vma_ops *ops;
+ void *private;
+ u64 start;
+ u64 node_size;
+ u64 vma_size;
+ u32 page_sizes_gtt;
+
+ u32 bound_flags;
+ bool allocated:1;
+ bool immediate_unbind:1;
+ bool needs_wakeref:1;
+};
+
+bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
+ bool *lockdep_cookie);
+
+void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
+ bool lockdep_cookie);
+
+struct i915_vma_resource *i915_vma_resource_alloc(void);
+
+void i915_vma_resource_free(struct i915_vma_resource *vma_res);
+
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+
+void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
+
+/**
+ * i915_vma_resource_get - Take a reference on a vma resource
+ * @vma_res: The vma resource on which to take a reference.
+ *
+ * Return: The @vma_res pointer
+ */
+static inline struct i915_vma_resource
+*i915_vma_resource_get(struct i915_vma_resource *vma_res)
+{
+ dma_fence_get(&vma_res->unbind_fence);
+ return vma_res;
+}
+
+/**
+ * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
+ * @vma_res: The resource
+ */
+static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
+{
+ dma_fence_put(&vma_res->unbind_fence);
+}
+
+/**
+ * i915_vma_resource_init - Initialize a vma resource.
+ * @vma_res: The vma resource to initialize
+ * @vm: Pointer to the vm.
+ * @pages: The pages sg-table.
+ * @page_sizes: Page sizes of the pages.
+ * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
+ * delayed destruction.
+ * @readonly: Whether the vma should be bound read-only.
+ * @lmem: Whether the vma points to lmem.
+ * @mr: The memory region of the object the vma points to.
+ * @ops: The backend ops.
+ * @private: Bind backend private info.
+ * @start: Offset into the address space of bind range start.
+ * @node_size: Size of the allocated range manager node.
+ * @size: Bind size.
+ *
+ * Initializes a vma resource allocated using i915_vma_resource_alloc().
+ * The reason for having separate allocate and initialize functions is that
+ * initialization may need to be performed from under a lock where
+ * allocation is not allowed.
+ */
+static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
+ struct i915_address_space *vm,
+ struct sg_table *pages,
+ const struct i915_page_sizes *page_sizes,
+ struct i915_refct_sgt *pages_rsgt,
+ bool readonly,
+ bool lmem,
+ struct intel_memory_region *mr,
+ const struct i915_vma_ops *ops,
+ void *private,
+ u64 start,
+ u64 node_size,
+ u64 size)
+{
+ __i915_vma_resource_init(vma_res);
+ vma_res->vm = vm;
+ vma_res->bi.pages = pages;
+ vma_res->bi.page_sizes = *page_sizes;
+ if (pages_rsgt)
+ vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
+ vma_res->bi.readonly = readonly;
+ vma_res->bi.lmem = lmem;
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+ vma_res->mr = mr;
+#endif
+ vma_res->ops = ops;
+ vma_res->private = private;
+ vma_res->start = start;
+ vma_res->node_size = node_size;
+ vma_res->vma_size = size;
+}
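A hedged sketch of the intended split: allocate outside the lock, initialize under it. This mirrors the i915_vma_pin_ww() changes earlier in this patch; the wrapper function is hypothetical:

	static int sketch_bind_prepare(struct i915_vma *vma)
	{
		struct i915_vma_resource *vma_res = i915_vma_resource_alloc();

		if (IS_ERR(vma_res))
			return PTR_ERR(vma_res);

		mutex_lock(&vma->vm->mutex);
		/* No allocations allowed under vm->mutex; init only fills members. */
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
		mutex_unlock(&vma->vm->mutex);
		return 0;
	}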
+
+static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
+{
+ GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
+ if (vma_res->bi.pages_rsgt)
+ i915_refct_sgt_put(vma_res->bi.pages_rsgt);
+ i915_sw_fence_fini(&vma_res->chain);
+}
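A minimal sketch of the alloc/init split described above (locking context assumed, not taken from this patch): allocate where sleeping is allowed, then initialize under the vm mutex:

	struct i915_vma_resource *vma_res;

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res))
		return PTR_ERR(vma_res);

	mutex_lock(&vm->mutex);
	i915_vma_resource_init(vma_res, vm, pages, &page_sizes, rsgt,
			       readonly, lmem, mr, ops, private,
			       start, node_size, size);
	mutex_unlock(&vm->mutex);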
+
+int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
+ u64 first,
+ u64 last,
+ bool intr);
+
+int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
+ struct i915_sw_fence *sw_fence,
+ u64 first,
+ u64 last,
+ bool intr,
+ gfp_t gfp);
+
+void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);
+
+void i915_vma_resource_module_exit(void);
+
+int i915_vma_resource_module_init(void);
+
+#endif
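The module-scope hooks above pair in the usual way; a minimal sketch, assuming a driver init path outside this hunk:

	/* On module load, before any vma resources are created: */
	int err = i915_vma_resource_module_init();
	if (err)
		return err;

	/* ...and the matching teardown on module unload: */
	i915_vma_resource_module_exit();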
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.c b/drivers/gpu/drm/i915/i915_vma_snapshot.c
deleted file mode 100644
index 2949ceea9884..000000000000
--- a/drivers/gpu/drm/i915/i915_vma_snapshot.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#include "i915_vma_snapshot.h"
-#include "i915_vma_types.h"
-#include "i915_vma.h"
-
-/**
- * i915_vma_snapshot_init - Initialize a struct i915_vma_snapshot from
- * a struct i915_vma.
- * @vsnap: The i915_vma_snapshot to init.
- * @vma: A struct i915_vma used to initialize @vsnap.
- * @name: Name associated with the snapshot. The character pointer needs to
- * stay alive over the lifetime of the snapshot.
- */
-void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name)
-{
- if (!i915_vma_is_pinned(vma))
- assert_object_held(vma->obj);
-
- vsnap->name = name;
- vsnap->size = vma->size;
- vsnap->obj_size = vma->obj->base.size;
- vsnap->gtt_offset = vma->node.start;
- vsnap->gtt_size = vma->node.size;
- vsnap->page_sizes = vma->page_sizes.gtt;
- vsnap->pages = vma->pages;
- vsnap->pages_rsgt = NULL;
- vsnap->mr = NULL;
- if (vma->obj->mm.rsgt)
- vsnap->pages_rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
- vsnap->mr = vma->obj->mm.region;
- kref_init(&vsnap->kref);
- vsnap->vma_resource = &vma->active;
- vsnap->onstack = false;
- vsnap->present = true;
-}
-
-/**
- * i915_vma_snapshot_init_onstack - Initialize a struct i915_vma_snapshot from
- * a struct i915_vma, but avoid kfreeing it on last put.
- * @vsnap: The i915_vma_snapshot to init.
- * @vma: A struct i915_vma used to initialize @vsnap.
- * @name: Name associated with the snapshot. The character pointer needs to
- * stay alive over the lifetime of the snapshot.
- */
-void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name)
-{
- i915_vma_snapshot_init(vsnap, vma, name);
- vsnap->onstack = true;
-}
-
-static void vma_snapshot_release(struct kref *ref)
-{
- struct i915_vma_snapshot *vsnap =
- container_of(ref, typeof(*vsnap), kref);
-
- vsnap->present = false;
- if (vsnap->pages_rsgt)
- i915_refct_sgt_put(vsnap->pages_rsgt);
- if (!vsnap->onstack)
- kfree(vsnap);
-}
-
-/**
- * i915_vma_snapshot_put - Put an i915_vma_snapshot pointer reference
- * @vsnap: The pointer reference
- */
-void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap)
-{
- kref_put(&vsnap->kref, vma_snapshot_release);
-}
-
-/**
- * i915_vma_snapshot_put_onstack - Put an onstack i915_vma_snapshot pointer
- * reference and verify that the structure is released
- * @vsnap: The pointer reference
- *
- * This function is intended to be paired with i915_vma_snapshot_init_onstack()
- * and should be called before exiting the scope that declared @vsnap, or
- * before freeing the structure that embeds it, to verify that all references
- * have been released.
- */
-void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap)
-{
- if (!kref_put(&vsnap->kref, vma_snapshot_release))
- GEM_BUG_ON(1);
-}
-
-/**
- * i915_vma_snapshot_resource_pin - Temporarily block the memory the
- * vma snapshot is pointing to from being released.
- * @vsnap: The vma snapshot.
- * @lockdep_cookie: Pointer to bool needed for lockdep support. This needs
- * to be passed to the paired i915_vma_snapshot_resource_unpin.
- *
- * This function will temporarily try to hold up a fence or similar structure
- * and will therefore enter a fence signaling critical section.
- *
- * Return: true if we succeeded in blocking the memory from being released,
- * false otherwise.
- */
-bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
- bool *lockdep_cookie)
-{
- bool pinned = i915_active_acquire_if_busy(vsnap->vma_resource);
-
- if (pinned)
- *lockdep_cookie = dma_fence_begin_signalling();
-
- return pinned;
-}
-
-/**
- * i915_vma_snapshot_resource_unpin - Unblock vma snapshot memory from
- * being released.
- * @vsnap: The vma snapshot.
- * @lockdep_cookie: Cookie returned from the matching i915_vma_snapshot_resource_pin().
- *
- * Might leave a fence signalling critical section and signal a fence.
- */
-void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
- bool lockdep_cookie)
-{
- dma_fence_end_signalling(lockdep_cookie);
-
- return i915_active_release(vsnap->vma_resource);
-}
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.h b/drivers/gpu/drm/i915/i915_vma_snapshot.h
deleted file mode 100644
index 940581df4622..000000000000
--- a/drivers/gpu/drm/i915/i915_vma_snapshot.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-#ifndef _I915_VMA_SNAPSHOT_H_
-#define _I915_VMA_SNAPSHOT_H_
-
-#include <linux/kref.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-struct i915_active;
-struct i915_refct_sgt;
-struct i915_vma;
-struct intel_memory_region;
-struct sg_table;
-
-/**
- * DOC: Simple utilities for snapshotting GPU vma metadata, later used for
- * error capture. We use a separate header for this to avoid issues due to
- * recursive header includes.
- */
-
-/**
- * struct i915_vma_snapshot - Snapshot of vma metadata.
- * @size: The vma size in bytes.
- * @obj_size: The size of the underlying object in bytes.
- * @gtt_offset: The gtt offset the vma is bound to.
- * @gtt_size: The size in bytes allocated for the vma in the GTT.
- * @pages: The struct sg_table pointing to the pages bound.
- * @pages_rsgt: The refcounted sg_table holding the reference for @pages if any.
- * @mr: The memory region pointed for the pages bound.
- * @kref: Reference for this structure.
- * @vma_resource: FIXME: A means to keep the unbind fence from signaling.
- * Temporarily while we have only sync unbinds, and still use the vma
- * active, we use that. With async unbinding we need a signaling refcount
- * for the unbind fence.
- * @page_sizes: The vma GTT page sizes information.
- * @onstack: Whether the structure shouldn't be freed on final put.
- * @present: Whether the structure is present and initialized.
- */
-struct i915_vma_snapshot {
- const char *name;
- size_t size;
- size_t obj_size;
- size_t gtt_offset;
- size_t gtt_size;
- struct sg_table *pages;
- struct i915_refct_sgt *pages_rsgt;
- struct intel_memory_region *mr;
- struct kref kref;
- struct i915_active *vma_resource;
- u32 page_sizes;
- bool onstack:1;
- bool present:1;
-};
-
-void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name);
-
-void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
- struct i915_vma *vma,
- const char *name);
-
-void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap);
-
-void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap);
-
-bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
- bool *lockdep_cookie);
-
-void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
- bool lockdep_cookie);
-
-/**
- * i915_vma_snapshot_alloc - Allocate a struct i915_vma_snapshot
- * @gfp: Allocation mode.
- *
- * Return: A pointer to a struct i915_vma_snapshot if successful.
- * NULL otherwise.
- */
-static inline struct i915_vma_snapshot *i915_vma_snapshot_alloc(gfp_t gfp)
-{
- return kmalloc(sizeof(struct i915_vma_snapshot), gfp);
-}
-
-/**
- * i915_vma_snapshot_get - Take a reference on a struct i915_vma_snapshot
- *
- * Return: A pointer to a struct i915_vma_snapshot.
- */
-static inline struct i915_vma_snapshot *
-i915_vma_snapshot_get(struct i915_vma_snapshot *vsnap)
-{
- kref_get(&vsnap->kref);
- return vsnap;
-}
-
-/**
- * i915_vma_snapshot_present - Whether a struct i915_vma_snapshot is
- * present and initialized.
- *
- * Return: true if present and initialized; false otherwise.
- */
-static inline bool
-i915_vma_snapshot_present(const struct i915_vma_snapshot *vsnap)
-{
- return vsnap && vsnap->present;
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index ca575e129ced..88370dadca82 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -95,6 +95,8 @@ enum i915_cache_level;
*
*/
+struct i915_vma_resource;
+
struct intel_remapped_plane_info {
/* in gtt pages */
u32 offset:31;
@@ -247,22 +249,20 @@ struct i915_vma {
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
-#define I915_VMA_ALLOC_BIT 12
-
-#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR_BIT 12
#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
-#define I915_VMA_GGTT_BIT 14
-#define I915_VMA_CAN_FENCE_BIT 15
-#define I915_VMA_USERFAULT_BIT 16
-#define I915_VMA_GGTT_WRITE_BIT 17
+#define I915_VMA_GGTT_BIT 13
+#define I915_VMA_CAN_FENCE_BIT 14
+#define I915_VMA_USERFAULT_BIT 15
+#define I915_VMA_GGTT_WRITE_BIT 16
#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
-#define I915_VMA_SCANOUT_BIT 18
+#define I915_VMA_SCANOUT_BIT 17
#define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
struct i915_active active;
@@ -291,6 +291,9 @@ struct i915_vma {
struct list_head evict_link;
struct list_head closed_link;
+
+ /** The async vma resource. Protected by the vm_mutex */
+ struct i915_vma_resource *resource;
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 3699b1c539ea..27dcfe6f2429 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -96,7 +96,7 @@ enum intel_platform {
* it is fine for the same bit to be used on multiple parent platforms.
*/
-#define INTEL_SUBPLATFORM_BITS (2)
+#define INTEL_SUBPLATFORM_BITS (3)
#define INTEL_SUBPLATFORM_MASK (BIT(INTEL_SUBPLATFORM_BITS) - 1)
/* HSW/BDW/SKL/KBL/CFL */
@@ -109,6 +109,7 @@ enum intel_platform {
/* DG2 */
#define INTEL_SUBPLATFORM_G10 0
#define INTEL_SUBPLATFORM_G11 1
+#define INTEL_SUBPLATFORM_G12 2
/* ADL-S */
#define INTEL_SUBPLATFORM_RPL_S 0
@@ -134,6 +135,7 @@ enum intel_ppgtt_type {
func(has_reset_engine); \
func(has_global_mocs); \
func(has_gt_uc); \
+ func(has_guc_deprivilege); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 723bd0411a0e..174c95c3e10f 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
struct dram_dimm_info {
diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h
new file mode 100644
index 000000000000..2aad2f0cc8db
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MCHBAR_REGS__
+#define __INTEL_MCHBAR_REGS__
+
+#include "i915_reg_defs.h"
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way. It is not accessible from the CP register read instructions.
+ *
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
+ */
+
+#define MCHBAR_MIRROR_BASE 0x10000
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
+#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34)
+#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
+#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
+#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
+#define G4X_STOLEN_RESERVED_ENABLE (1 << 0)
+
+/* Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8)
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
+/* 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200)
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204)
+#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
+
+/* 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206)
+#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606)
+
+/* Clocking configuration register */
+#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
+#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
+#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+#define CLKCFG_MEM_533 (1 << 4)
+#define CLKCFG_MEM_667 (2 << 4)
+#define CLKCFG_MEM_800 (3 << 4)
+#define CLKCFG_MEM_MASK (7 << 4)
+
+#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
+#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38)
+
+#define TSC1 _MMIO(MCHBAR_MIRROR_BASE + 0x1001)
+#define TSE (1 << 0)
+#define TR1 _MMIO(MCHBAR_MIRROR_BASE + 0x1006)
+#define TSFS _MMIO(MCHBAR_MIRROR_BASE + 0x1020)
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
+/* Memory latency timer register */
+#define MLTR_ILK _MMIO(MCHBAR_MIRROR_BASE + 0x1222)
+/* the unit of memory self-refresh latency time is 0.5us */
+#define MLTR_WM2_MASK REG_GENMASK(13, 8)
+#define MLTR_WM1_MASK REG_GENMASK(5, 0)
+
+#define CSIPLL0 _MMIO(MCHBAR_MIRROR_BASE + 0x2c10)
+#define DDRMPLL1 _MMIO(MCHBAR_MIRROR_BASE + 0x2c20)
+
+#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
+#define ILK_GRDOM_FULL (0 << 1)
+#define ILK_GRDOM_RENDER (1 << 1)
+#define ILK_GRDOM_MEDIA (3 << 1)
+#define ILK_GRDOM_MASK (3 << 1)
+#define ILK_GRDOM_RESET_ENABLE (1 << 0)
+
+#define BXT_D_CR_DRP0_DUNIT8 0x1000
+#define BXT_D_CR_DRP0_DUNIT9 0x1200
+#define BXT_D_CR_DRP0_DUNIT_START 8
+#define BXT_D_CR_DRP0_DUNIT_END 11
+#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \
+ _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\
+ BXT_D_CR_DRP0_DUNIT9))
+#define BXT_DRAM_RANK_MASK 0x3
+#define BXT_DRAM_RANK_SINGLE 0x1
+#define BXT_DRAM_RANK_DUAL 0x3
+#define BXT_DRAM_WIDTH_MASK (0x3 << 4)
+#define BXT_DRAM_WIDTH_SHIFT 4
+#define BXT_DRAM_WIDTH_X8 (0x0 << 4)
+#define BXT_DRAM_WIDTH_X16 (0x1 << 4)
+#define BXT_DRAM_WIDTH_X32 (0x2 << 4)
+#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
+#define BXT_DRAM_SIZE_MASK (0x7 << 6)
+#define BXT_DRAM_SIZE_SHIFT 6
+#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
+#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
+#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
+#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
+#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
+#define BXT_DRAM_TYPE_MASK (0x7 << 22)
+#define BXT_DRAM_TYPE_SHIFT 22
+#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
+#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
+#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
+#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
+
+#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
+#define DG1_DRAM_T_RDPRE_MASK REG_GENMASK(16, 11)
+#define DG1_DRAM_T_RP_MASK REG_GENMASK(6, 0)
+#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004)
+#define DG1_DRAM_T_RCD_MASK REG_GENMASK(15, 9)
+#define DG1_DRAM_T_RAS_MASK REG_GENMASK(8, 1)
+
+#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
+#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
+
+/* snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define MAD_DIMM_ECC_MASK (0x3 << 24)
+#define MAD_DIMM_ECC_OFF (0x0 << 24)
+#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define MAD_DIMM_ECC_ON (0x3 << 24)
+#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
+#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
+#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
+#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
+#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
+#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
+#define MAD_DIMM_A_SELECT (0x1 << 16)
+/* DIMM sizes are in multiples of 256mb. */
+#define MAD_DIMM_B_SIZE_SHIFT 8
+#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define MAD_DIMM_A_SIZE_SHIFT 0
+#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
+
+#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
+#define SKL_DRAM_S_SHIFT 16
+#define SKL_DRAM_SIZE_MASK 0x3F
+#define SKL_DRAM_WIDTH_MASK (0x3 << 8)
+#define SKL_DRAM_WIDTH_SHIFT 8
+#define SKL_DRAM_WIDTH_X8 (0x0 << 8)
+#define SKL_DRAM_WIDTH_X16 (0x1 << 8)
+#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define SKL_DRAM_RANK_SHIFT 10
+#define SKL_DRAM_RANK_1 (0x0 << 10)
+#define SKL_DRAM_RANK_2 (0x1 << 10)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define ICL_DRAM_SIZE_MASK 0x7F
+#define ICL_DRAM_WIDTH_MASK (0x3 << 7)
+#define ICL_DRAM_WIDTH_SHIFT 7
+#define ICL_DRAM_WIDTH_X8 (0x0 << 7)
+#define ICL_DRAM_WIDTH_X16 (0x1 << 7)
+#define ICL_DRAM_WIDTH_X32 (0x2 << 7)
+#define ICL_DRAM_RANK_MASK (0x3 << 9)
+#define ICL_DRAM_RANK_SHIFT 9
+#define ICL_DRAM_RANK_1 (0x0 << 9)
+#define ICL_DRAM_RANK_2 (0x1 << 9)
+#define ICL_DRAM_RANK_3 (0x2 << 9)
+#define ICL_DRAM_RANK_4 (0x3 << 9)
+
+#define SA_PERF_STATUS_0_0_0_MCHBAR_PC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918)
+#define DG1_QCLK_RATIO_MASK REG_GENMASK(9, 2)
+#define DG1_QCLK_REFERENCE REG_BIT(10)
+
+#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define RP0_CAP_MASK REG_GENMASK(7, 0)
+#define RP1_CAP_MASK REG_GENMASK(15, 8)
+#define RPN_CAP_MASK REG_GENMASK(23, 16)
+
+/* snb MCH registers for priority tuning */
+#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define SSKPD_NEW_WM0_MASK_HSW REG_GENMASK64(63, 56)
+#define SSKPD_WM4_MASK_HSW REG_GENMASK64(40, 32)
+#define SSKPD_WM3_MASK_HSW REG_GENMASK64(28, 20)
+#define SSKPD_WM2_MASK_HSW REG_GENMASK64(19, 12)
+#define SSKPD_WM1_MASK_HSW REG_GENMASK64(11, 4)
+#define SSKPD_OLD_WM0_MASK_HSW REG_GENMASK64(3, 0)
+#define SSKPD_WM3_MASK_SNB REG_GENMASK(29, 24)
+#define SSKPD_WM2_MASK_SNB REG_GENMASK(21, 16)
+#define SSKPD_WM1_MASK_SNB REG_GENMASK(13, 8)
+#define SSKPD_WM0_MASK_SNB REG_GENMASK(5, 0)
+
+/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
+#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
+#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
+#define DG1_GEAR_TYPE REG_BIT(16)
+
+/*
+ * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using intel_uncore_write.
+ */
+#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5f0c)
+#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
+#define D_COMP_COMP_FORCE (1 << 8)
+#define D_COMP_COMP_DISABLE (1 << 0)
+
+#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
+
+#endif /* __INTEL_MCHBAR_REGS__ */
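Most of the definitions above decode with a mask/shift pair, while the REG_GENMASK()-based ones pair with REG_FIELD_GET(); a hedged example (the uncore pointer is assumed):

	u32 val = intel_uncore_read(uncore, BXT_D_CR_DRP0_DUNIT(8));
	bool dual_rank = (val & BXT_DRAM_RANK_MASK) == BXT_DRAM_RANK_DUAL;
	u32 t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK,
				 intel_uncore_read(uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR));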
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index da8f82c2342f..4f7a61d5502e 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -130,6 +130,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 6bff77521094..6fd20408f7bf 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -58,6 +58,7 @@ enum intel_pch {
#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
+#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 02084652fe3d..5af16ca4dabd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -50,11 +50,16 @@
#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_irq.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+struct drm_i915_clock_gating_funcs {
+ void (*init_clock_gating)(struct drm_i915_private *i915);
+};
+
/* Stores plane specific WM parameters */
struct skl_wm_params {
bool x_tiled, y_tiled;
@@ -2942,27 +2947,27 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
- wm[0] = (sskpd >> 56) & 0xFF;
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
if (wm[0] == 0)
- wm[0] = sskpd & 0xF;
- wm[1] = (sskpd >> 4) & 0xFF;
- wm[2] = (sskpd >> 12) & 0xFF;
- wm[3] = (sskpd >> 20) & 0x1FF;
- wm[4] = (sskpd >> 32) & 0x1FF;
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
} else if (DISPLAY_VER(dev_priv) >= 6) {
u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
- wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
- wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
- wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
- wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
} else if (DISPLAY_VER(dev_priv) >= 5) {
u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
- wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
- wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
} else {
MISSING_CASE(INTEL_DEVID(dev_priv));
}
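The conversion above is mechanical: REG_FIELD_GET() extracts the field named by a REG_GENMASK() mask and shifts it down. For example (sketch):

	/* Old open-coded form; SSKPD_WM1_MASK_SNB == REG_GENMASK(13, 8): */
	wm[1] = (sskpd >> 8) & 0x3f;
	/* New equivalent: */
	wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);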
@@ -3175,12 +3180,8 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
}
pipe_wm->pipe_enabled = crtc_state->hw.active;
- if (sprstate) {
- pipe_wm->sprites_enabled = sprstate->uapi.visible;
- pipe_wm->sprites_scaled = sprstate->uapi.visible &&
- (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
- drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
- }
+ pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
+ pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
usable_level = max_level;
@@ -3409,29 +3410,28 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
- (r->pri_val << WM1_LP_SR_SHIFT) |
- r->cur_val;
+ WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_PRIMARY(r->pri_val) |
+ WM_LP_CURSOR(r->cur_val);
if (r->enable)
- results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+ results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
if (DISPLAY_VER(dev_priv) >= 8)
- results->wm_lp[wm_lp - 1] |=
- r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
- results->wm_lp[wm_lp - 1] |=
- r->fbc_val << WM1_LP_FBC_SHIFT;
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
+
+ results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
/*
- * Always set WM1S_LP_EN when spr_val != 0, even if the
+ * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
- results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
- } else
- results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+ results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
+ }
}
/* LP0 register values */
@@ -3444,9 +3444,9 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
continue;
results->wm_pipe[pipe] =
- (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
- (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
- r->cur_val;
+ WM0_PIPE_PRIMARY(r->pri_val) |
+ WM0_PIPE_SPRITE(r->spr_val) |
+ WM0_PIPE_CURSOR(r->cur_val);
}
}
@@ -3538,24 +3538,24 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_values *previous = &dev_priv->wm.hw;
bool changed = false;
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
- previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
+ previous->wm_lp[2] &= ~WM_LP_ENABLE;
intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
- previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
+ previous->wm_lp[1] &= ~WM_LP_ENABLE;
intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
- previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
+ previous->wm_lp[0] &= ~WM_LP_ENABLE;
intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
/*
- * Don't touch WM1S_LP_EN here.
+ * Don't touch WM_LP_SPRITE_ENABLE here.
* Doing so could cause underruns.
*/
@@ -3781,48 +3781,55 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state;
- const struct intel_bw_state *old_bw_state;
- u32 new_mask = 0;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
- /*
- * Just return if we can't control SAGV or don't have it.
- * This is different from situation when we have SAGV but just can't
- * afford it due to DBuf limitation - in case if SAGV is completely
- * disabled in a BIOS, we are not even allowed to send a PCode request,
- * as it will throw an error. So have to check it here.
- */
- if (!intel_has_sagv(dev_priv))
+ if (!new_bw_state)
return;
- new_bw_state = intel_atomic_get_new_bw_state(state);
+ if (!intel_can_enable_sagv(i915, new_bw_state))
+ intel_disable_sagv(i915);
+}
+
+static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
if (!new_bw_state)
return;
- if (DISPLAY_VER(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
- intel_disable_sagv(dev_priv);
- return;
- }
+ if (intel_can_enable_sagv(i915, new_bw_state))
+ intel_enable_sagv(i915);
+}
- old_bw_state = intel_atomic_get_old_bw_state(state);
- /*
- * Nothing to mask
- */
- if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
return;
+ old_mask = old_bw_state->qgv_points_mask;
new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
- /*
- * If new mask is zero - means there is nothing to mask,
- * we can only unmask, which should be done in unmask.
- */
- if (!new_mask)
+ if (old_mask == new_mask)
return;
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
/*
* Restrict required qgv points before updating the configuration.
* According to BSpec we can't mask and unmask qgv points at the same
@@ -3832,12 +3839,41 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
icl_pcode_restrict_qgv_points(dev_priv, new_mask);
}
-void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state;
- const struct intel_bw_state *old_bw_state;
- u32 new_mask = 0;
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
/*
* Just return if we can't control SAGV or don't have it.
@@ -3846,34 +3882,33 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So have to check it here.
*/
- if (!intel_has_sagv(dev_priv))
+ if (!intel_has_sagv(i915))
return;
- new_bw_state = intel_atomic_get_new_bw_state(state);
- if (!new_bw_state)
- return;
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_pre_plane_update(state);
+ else
+ skl_sagv_pre_plane_update(state);
+}
- if (DISPLAY_VER(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
- intel_enable_sagv(dev_priv);
- return;
- }
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
- old_bw_state = intel_atomic_get_old_bw_state(state);
/*
- * Nothing to unmask
+ * Just return if we can't control SAGV or don't have it.
+ * This is different from situation when we have SAGV but just can't
+ * afford it due to DBuf limitation - in case if SAGV is completely
+ * disabled in a BIOS, we are not even allowed to send a PCode request,
+ * as it will throw an error. So have to check it here.
*/
- if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+ if (!intel_has_sagv(i915))
return;
- new_mask = new_bw_state->qgv_points_mask;
-
- /*
- * Allow required qgv points after updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_post_plane_update(state);
+ else
+ skl_sagv_post_plane_update(state);
}
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
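The split preserves the ordering the BSpec requires: QGV points may only be masked before the configuration change and unmasked after it. A worked example with illustrative masks: with old qgv_points_mask = 0x3 and new qgv_points_mask = 0x5, the pre-plane-update hook restricts to old | new = 0x7 so nothing is unmasked early, and the post-plane-update hook relaxes to new = 0x5, unmasking bit 1 only once the new configuration is in place.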
@@ -4005,6 +4040,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return ret;
}
+ if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+ intel_can_enable_sagv(dev_priv, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
@@ -4020,20 +4066,18 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
intel_can_enable_sagv(dev_priv, new_bw_state);
}
- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
- intel_can_enable_sagv(dev_priv, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
return 0;
}
+static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
+ u16 start, u16 end)
+{
+ entry->start = start;
+ entry->end = end;
+
+ return end;
+}
+
static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->dbuf.size /
@@ -4172,8 +4216,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
int ret;
if (new_dbuf_state->weight[pipe] == 0) {
- new_dbuf_state->ddb[pipe].start = 0;
- new_dbuf_state->ddb[pipe].end = 0;
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
goto out;
}
@@ -4189,8 +4232,10 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
start = ddb_range_size * weight_start / weight_total;
end = ddb_range_size * weight_end / weight_total;
- new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start;
- new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end;
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
+ ddb_slices.start - mbus_offset + start,
+ ddb_slices.start - mbus_offset + end);
+
out:
if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
@@ -4268,11 +4313,11 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
-static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
- struct skl_ddb_entry *entry, u32 reg)
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
- entry->start = REG_FIELD_GET(PLANE_BUF_START_MASK, reg);
- entry->end = REG_FIELD_GET(PLANE_BUF_END_MASK, reg);
+ skl_ddb_entry_init(entry,
+ REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
+ REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
if (entry->end)
entry->end++;
}
@@ -4290,7 +4335,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb_y, val);
return;
}
@@ -4304,7 +4349,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 11) {
val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb_y, val);
} else {
val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
@@ -4313,8 +4358,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
swap(val, val2);
- skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
- skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
+ skl_ddb_entry_init_from_hw(ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb_uv, val2);
}
}
@@ -4342,55 +4387,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-/*
- * Determines the downscale amount of a plane for the purposes of watermark calculations.
- * The bspec defines downscale amount as:
- *
- * """
- * Horizontal down scale amount = maximum[1, Horizontal source size /
- * Horizontal destination size]
- * Vertical down scale amount = maximum[1, Vertical source size /
- * Vertical destination size]
- * Total down scale amount = Horizontal down scale amount *
- * Vertical down scale amount
- * """
- *
- * Return value is provided in 16.16 fixed point form to retain fractional part.
- * Caller should take care of dividing & rounding off the value.
- */
-static uint_fixed_16_16_t
-skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 src_w, src_h, dst_w, dst_h;
- uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
- uint_fixed_16_16_t downscale_h, downscale_w;
-
- if (drm_WARN_ON(&dev_priv->drm,
- !intel_wm_plane_visible(crtc_state, plane_state)))
- return u32_to_fixed16(0);
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- *
- * n.b., src is 16.16 fixed point, dst is whole integer.
- */
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
-
- fp_w_ratio = div_fixed16(src_w, dst_w);
- fp_h_ratio = div_fixed16(src_h, dst_h);
- downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
- downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
-
- return mul_fixed16(downscale_w, downscale_h);
-}
-
struct dbuf_slice_conf_entry {
u8 active_pipes;
u8 dbuf_mask[I915_MAX_PIPES];
@@ -4831,7 +4827,7 @@ static bool check_mbus_joined(u8 active_pipes,
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes)
return dbuf_slices[i].join_mbus;
}
@@ -4848,7 +4844,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes &&
dbuf_slices[i].join_mbus == join_mbus)
return dbuf_slices[i].dbuf_mask[pipe];
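Both fixed loops walk sentinel-terminated tables; the old condition compared the index against the entry's own active_pipes value and terminated only by accident. A minimal sketch of the idiom (entry values illustrative):

	static const struct dbuf_slice_conf_entry table[] = {
		{ .active_pipes = BIT(PIPE_A), .dbuf_mask = { [PIPE_A] = BIT(0) } },
		{}	/* all-zero terminator: active_pipes == 0 */
	};

	for (i = 0; table[i].active_pipes != 0; i++)
		; /* match the requested active_pipes here */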
@@ -4946,10 +4942,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- u32 data_rate;
- u32 width = 0, height = 0;
- uint_fixed_16_16_t down_scale_amount;
- u64 rate;
+ int width, height;
if (!plane_state->uapi.visible)
return 0;
@@ -4983,14 +4976,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height /= 2;
}
- data_rate = width * height;
-
- down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
-
- rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
-
- rate *= fb->format->cpp[color_plane];
- return rate;
+ return width * height * fb->format->cpp[color_plane];
}
static u64
@@ -5147,9 +5133,31 @@ static bool icl_need_wm1_wa(struct drm_i915_private *i915,
(IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
}
+struct skl_plane_ddb_iter {
+ u64 data_rate;
+ u16 total[I915_MAX_PLANES];
+ u16 uv_total[I915_MAX_PLANES];
+ u16 start, size;
+};
+
+static u16
+skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ const struct skl_wm_level *wm,
+ u64 data_rate)
+{
+ u16 extra;
+
+ extra = min_t(u16, iter->size,
+ DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate));
+ iter->size -= extra;
+ iter->data_rate -= data_rate;
+
+ return wm->min_ddb_alloc + extra;
+}
+
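A worked example of the proportional split above (numbers illustrative): with iter.size = 100 blocks and iter.data_rate = 4000 still to place, a plane whose data_rate is 1000 receives extra = min(100, DIV64_U64_ROUND_UP(100 * 1000, 4000)) = 25 blocks on top of its wm->min_ddb_alloc, leaving iter.size = 75 and iter.data_rate = 3000 for the planes that follow.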
static int
-skl_allocate_plane_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
@@ -5158,10 +5166,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
int num_active = hweight8(dbuf_state->active_pipes);
- u16 alloc_size, start = 0;
- u16 total[I915_MAX_PLANES] = {};
- u16 uv_total[I915_MAX_PLANES] = {};
- u64 total_data_rate;
+ struct skl_plane_ddb_iter iter = {};
enum plane_id plane_id;
u32 blocks;
int level;
@@ -5174,24 +5179,21 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
return 0;
if (DISPLAY_VER(dev_priv) >= 11)
- total_data_rate =
- icl_get_total_relative_data_rate(state, crtc);
+ iter.data_rate = icl_get_total_relative_data_rate(state, crtc);
else
- total_data_rate =
- skl_get_total_relative_data_rate(state, crtc);
+ iter.data_rate = skl_get_total_relative_data_rate(state, crtc);
- alloc_size = skl_ddb_entry_size(alloc);
- if (alloc_size == 0)
+ iter.size = skl_ddb_entry_size(alloc);
+ if (iter.size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
- alloc_size -= total[PLANE_CURSOR];
- crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
- alloc->end - total[PLANE_CURSOR];
- crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+ iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= iter.total[PLANE_CURSOR];
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR],
+ alloc->end - iter.total[PLANE_CURSOR], alloc->end);
- if (total_data_rate == 0)
+ if (iter.data_rate == 0)
return 0;
/*
@@ -5205,7 +5207,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+ if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) {
drm_WARN_ON(&dev_priv->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
@@ -5218,8 +5220,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
blocks += wm->uv_wm[level].min_ddb_alloc;
}
- if (blocks <= alloc_size) {
- alloc_size -= blocks;
+ if (blocks <= iter.size) {
+ iter.size -= blocks;
break;
}
}
@@ -5228,7 +5230,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
drm_dbg_kms(&dev_priv->drm,
"Requested display configuration exceeds system DDB limitations");
drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
- blocks, alloc_size);
+ blocks, iter.size);
return -EINVAL;
}
@@ -5240,8 +5242,6 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- u64 rate;
- u16 extra;
if (plane_id == PLANE_CURSOR)
continue;
@@ -5250,32 +5250,24 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
* We've accounted for all active planes; remaining planes are
* all disabled.
*/
- if (total_data_rate == 0)
+ if (iter.data_rate == 0)
break;
- rate = crtc_state->plane_data_rate[plane_id];
- extra = min_t(u16, alloc_size,
- DIV64_U64_ROUND_UP(alloc_size * rate,
- total_data_rate));
- total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
- alloc_size -= extra;
- total_data_rate -= rate;
+ iter.total[plane_id] =
+ skl_allocate_plane_ddb(&iter, &wm->wm[level],
+ crtc_state->plane_data_rate[plane_id]);
- if (total_data_rate == 0)
+ if (iter.data_rate == 0)
break;
- rate = crtc_state->uv_plane_data_rate[plane_id];
- extra = min_t(u16, alloc_size,
- DIV64_U64_ROUND_UP(alloc_size * rate,
- total_data_rate));
- uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
- alloc_size -= extra;
- total_data_rate -= rate;
+ iter.uv_total[plane_id] =
+ skl_allocate_plane_ddb(&iter, &wm->uv_wm[level],
+ crtc_state->uv_plane_data_rate[plane_id]);
}
- drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
+ drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
/* Set the actual DDB start/end points for each plane */
- start = alloc->start;
+ iter.start = alloc->start;
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
@@ -5287,20 +5279,16 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
/* Gen11+ uses a separate plane for UV watermarks */
drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 11 && uv_total[plane_id]);
+ DISPLAY_VER(dev_priv) >= 11 && iter.uv_total[plane_id]);
/* Leave disabled planes at (0,0) */
- if (total[plane_id]) {
- plane_alloc->start = start;
- start += total[plane_id];
- plane_alloc->end = start;
- }
+ if (iter.total[plane_id])
+ iter.start = skl_ddb_entry_init(plane_alloc, iter.start,
+ iter.start + iter.total[plane_id]);
- if (uv_total[plane_id]) {
- uv_plane_alloc->start = start;
- start += uv_total[plane_id];
- uv_plane_alloc->end = start;
- }
+ if (iter.uv_total[plane_id])
+ iter.start = skl_ddb_entry_init(uv_plane_alloc, iter.start,
+ iter.start + iter.uv_total[plane_id]);
}
/*
@@ -5315,7 +5303,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.optimal.planes[plane_id];
skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
- total[plane_id], uv_total[plane_id]);
+ iter.total[plane_id],
+ iter.uv_total[plane_id]);
if (icl_need_wm1_wa(dev_priv, plane_id) &&
level == 1 && wm->wm[0].enable) {
@@ -5334,9 +5323,9 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- skl_check_wm_level(&wm->trans_wm, total[plane_id]);
- skl_check_wm_level(&wm->sagv.wm0, total[plane_id]);
- skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]);
+ skl_check_wm_level(&wm->trans_wm, iter.total[plane_id]);
+ skl_check_wm_level(&wm->sagv.wm0, iter.total[plane_id]);
+ skl_check_wm_level(&wm->sagv.trans_wm, iter.total[plane_id]);
}
return 0;
@@ -6226,7 +6215,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- ret = skl_allocate_plane_ddb(state, crtc);
+ ret = skl_crtc_allocate_plane_ddb(state, crtc);
if (ret)
return ret;
@@ -6803,9 +6792,9 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* multiple pipes are active.
*/
active->wm[0].enable = true;
- active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
- active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
- active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+ active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
+ active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
+ active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
} else {
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -7229,12 +7218,12 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
*/
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM_LP_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM_LP_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM_LP_ENABLE);
/*
- * Don't touch WM1S_LP_EN here.
+ * Don't touch WM_LP_SPRITE_ENABLE here.
* Doing so could cause underruns.
*/
}
@@ -7437,7 +7426,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
u32 tmp;
tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
- if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+ if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12)
drm_dbg_kms(&dev_priv->drm,
"Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
tmp);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 3293ac71bcf8..6ed5786bcd29 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -77,6 +77,9 @@ track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
depot_stack_handle_t stack, *stacks;
unsigned long flags;
+ if (rpm->no_wakeref_tracking)
+ return -1;
+
stack = __save_depot_stack();
if (!stack)
return -1;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 47a85fab4130..d9160e3ff4af 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -51,6 +51,7 @@ struct intel_runtime_pm {
bool available;
bool suspended;
bool irqs_enabled;
+ bool no_wakeref_tracking;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index a4b16b9e2e55..ac1a796b2808 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -122,6 +122,15 @@ static const struct intel_step_info dg2_g11_revid_step_tbl[] = {
[0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
};
+static const struct intel_step_info dg2_g12_revid_step_tbl[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_C0 },
+};
+
+static const struct intel_step_info adls_rpls_revids[] = {
+ [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_D0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
+};
+
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -135,12 +144,18 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_DG2_G11(i915)) {
revids = dg2_g11_revid_step_tbl;
size = ARRAY_SIZE(dg2_g11_revid_step_tbl);
+ } else if (IS_DG2_G12(i915)) {
+ revids = dg2_g12_revid_step_tbl;
+ size = ARRAY_SIZE(dg2_g12_revid_step_tbl);
} else if (IS_XEHPSDV(i915)) {
revids = xehpsdv_revids;
size = ARRAY_SIZE(xehpsdv_revids);
} else if (IS_ALDERLAKE_P(i915)) {
revids = adlp_revids;
size = ARRAY_SIZE(adlp_revids);
+ } else if (IS_ADLS_RPLS(i915)) {
+ revids = adls_rpls_revids;
+ size = ARRAY_SIZE(adls_rpls_revids);
} else if (IS_ALDERLAKE_S(i915)) {
revids = adls_revids;
size = ARRAY_SIZE(adls_revids);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 850ebfae31af..dd8fdd5863de 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1496,7 +1496,7 @@ ilk_dummy_write(struct intel_uncore *uncore)
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
- __raw_uncore_write32(uncore, MI_MODE, 0);
+ __raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}
static void
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index f06d21005106..322fb9eeb880 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -43,6 +43,7 @@
/* Default WOPCM size is 2MB from Gen11, 1MB on previous platforms */
#define GEN11_WOPCM_SIZE SZ_2M
#define GEN9_WOPCM_SIZE SZ_1M
+#define MAX_WOPCM_SIZE SZ_8M
/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
#define WOPCM_RESERVED_SIZE SZ_16K
@@ -207,6 +208,14 @@ static bool __wopcm_regs_locked(struct intel_uncore *uncore,
return true;
}
+static bool __wopcm_regs_writable(struct intel_uncore *uncore)
+{
+ if (!HAS_GUC_DEPRIVILEGE(uncore->i915))
+ return true;
+
+ return intel_uncore_read(uncore, GUC_SHIM_CONTROL2) & GUC_IS_PRIVILEGED;
+}
+
/**
* intel_wopcm_init() - Initialize the WOPCM structure.
* @wopcm: pointer to intel_wopcm.
@@ -224,18 +233,19 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw);
u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
+ u32 wopcm_size = wopcm->size;
u32 guc_wopcm_base;
u32 guc_wopcm_size;
if (!guc_fw_size)
return;
- GEM_BUG_ON(!wopcm->size);
+ GEM_BUG_ON(!wopcm_size);
GEM_BUG_ON(wopcm->guc.base);
GEM_BUG_ON(wopcm->guc.size);
- GEM_BUG_ON(guc_fw_size >= wopcm->size);
- GEM_BUG_ON(huc_fw_size >= wopcm->size);
- GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
+ GEM_BUG_ON(guc_fw_size >= wopcm_size);
+ GEM_BUG_ON(huc_fw_size >= wopcm_size);
+ GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm_size);
if (i915_inject_probe_failure(i915))
return;
@@ -243,6 +253,24 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+ /*
+ * Note that to keep things simple (i.e. avoid different
+ * defines per platform) our WOPCM math doesn't always use the
+ * actual WOPCM size, but a value that is less than or equal to it.
+ * This is perfectly fine when i915 programs the registers, but
+ * on platforms with GuC deprivilege the registers are not
+ * writable from i915 and are instead pre-programmed by the
+ * bios/IFWI, so there might be a mismatch of sizes.
+ * Instead of handling the size difference, we trust that the
+ * programmed values make sense and disable the relevant check
+ * by using the maximum possible WOPCM size in the verification
+ * math. In the extremely unlikely case that the registers
+ * were pre-programmed with an invalid value, we will still
+ * gracefully fail later during the GuC/HuC dma.
+ */
+ if (!__wopcm_regs_writable(gt->uncore))
+ wopcm_size = MAX_WOPCM_SIZE;
+
goto check;
}
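A hedged numeric illustration of the fallback above: if the IFWI pre-programmed a GuC region of, say, [1M, 3M) while the driver-side define assumes a 2M WOPCM, validating against the define would falsely reject a layout the hardware is already committed to; validating against MAX_WOPCM_SIZE (8M) accepts it, and a genuinely bogus split still fails later at GuC/HuC DMA time.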
@@ -257,17 +285,17 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
* Need to clamp guc_wopcm_base now to make sure the following math is
* correct. Formal check of whole WOPCM layout will be done below.
*/
- guc_wopcm_base = min(guc_wopcm_base, wopcm->size - ctx_rsvd);
+ guc_wopcm_base = min(guc_wopcm_base, wopcm_size - ctx_rsvd);
/* Aligned remainings of usable WOPCM space can be assigned to GuC. */
- guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
+ guc_wopcm_size = wopcm_size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
check:
- if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
+ if (__check_layout(i915, wopcm_size, guc_wopcm_base, guc_wopcm_size,
guc_fw_size, huc_fw_size)) {
wopcm->guc.base = guc_wopcm_base;
wopcm->guc.size = guc_wopcm_size;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
index 16990a3f2f85..586be769104f 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_PXP_PM_H__
#define __INTEL_PXP_PM_H__
-#include "intel_pxp_types.h"
+struct intel_pxp;
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index b5576888cd78..e5dd82e7e480 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -6,9 +6,10 @@
#include <linux/random.h>
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
-#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -41,7 +42,7 @@ static int switch_to_context(struct i915_gem_context *ctx)
static void trash_stolen(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
const u64 slot = ggtt->error_capture.start;
const resource_size_t size = resource_size(&i915->dsm);
unsigned long page;
@@ -99,7 +100,7 @@ static void igt_pm_suspend(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_ggtt_suspend(&i915->ggtt);
+ i915_ggtt_suspend(to_gt(i915)->ggtt);
i915_gem_suspend_late(i915);
}
}
@@ -109,7 +110,7 @@ static void igt_pm_hibernate(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_ggtt_suspend(&i915->ggtt);
+ i915_ggtt_suspend(to_gt(i915)->ggtt);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
@@ -125,7 +126,7 @@ static void igt_pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_ggtt_resume(&i915->ggtt);
+ i915_ggtt_resume(to_gt(i915)->ggtt);
i915_gem_resume(i915);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 75b709c26dd3..8c6517d29b8e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -22,6 +22,7 @@
*
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
@@ -117,7 +118,7 @@ static int igt_evict_something(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_something(&ggtt->vm,
+ err = i915_gem_evict_something(&ggtt->vm, NULL,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -132,7 +133,7 @@ static int igt_evict_something(void *arg)
/* Everything is unpinned, we should be able to evict something */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_something(&ggtt->vm,
+ err = i915_gem_evict_something(&ggtt->vm, NULL,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -204,7 +205,7 @@ static int igt_evict_for_vma(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
@@ -216,7 +217,7 @@ static int igt_evict_for_vma(void *arg)
/* Everything is unpinned, we should be able to evict the node */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
@@ -297,7 +298,7 @@ static int igt_evict_for_cache_color(void *arg)
/* Remove just the second vma */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
@@ -310,7 +311,7 @@ static int igt_evict_for_cache_color(void *arg)
target.color = I915_CACHE_L3_LLC;
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
@@ -331,6 +332,7 @@ static int igt_evict_vm(void *arg)
{
struct intel_gt *gt = arg;
struct i915_ggtt *ggtt = gt->ggtt;
+ struct i915_gem_ww_ctx ww;
LIST_HEAD(objects);
int err;
@@ -342,7 +344,7 @@ static int igt_evict_vm(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm);
+ err = i915_gem_evict_vm(&ggtt->vm, NULL);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
@@ -352,9 +354,12 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(ggtt);
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm);
- mutex_unlock(&ggtt->vm.mutex);
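+ /* i915_gem_evict_vm() may lock objects, so retry the eviction under a ww transaction */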
+ for_i915_gem_ww(&ww, err, false) {
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_vm(&ggtt->vm, &ww);
+ mutex_unlock(&ggtt->vm.mutex);
+ }
+
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -402,7 +407,7 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &hole,
+ err = i915_gem_gtt_insert(&ggtt->vm, NULL, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->vm.total,
PIN_NOEVICT);
@@ -422,7 +427,7 @@ static int igt_evict_contexts(void *arg)
goto out_locked;
}
- if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
+ if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->vm.total,
PIN_NOEVICT)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 575705c3bce9..e7e6c4b2c81d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -26,12 +26,14 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "i915_random.h"
#include "i915_selftest.h"
+#include "i915_vma_resource.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -238,11 +240,11 @@ static int lowlevel_hole(struct i915_address_space *vm,
unsigned long end_time)
{
I915_RND_STATE(seed_prng);
- struct i915_vma *mock_vma;
+ struct i915_vma_resource *mock_vma_res;
unsigned int size;
- mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
- if (!mock_vma)
+ mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
+ if (!mock_vma_res)
return -ENOMEM;
/* Keep creating larger objects until one cannot fit into the hole */
@@ -268,7 +270,7 @@ static int lowlevel_hole(struct i915_address_space *vm,
break;
} while (count >>= 1);
if (!count) {
- kfree(mock_vma);
+ kfree(mock_vma_res);
return -ENOMEM;
}
GEM_BUG_ON(!order);
@@ -342,12 +344,12 @@ alloc_vm_end:
break;
}
- mock_vma->pages = obj->mm.pages;
- mock_vma->node.size = BIT_ULL(size);
- mock_vma->node.start = addr;
+ mock_vma_res->bi.pages = obj->mm.pages;
+ mock_vma_res->node_size = BIT_ULL(size);
+ mock_vma_res->start = addr;
with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
- vm->insert_entries(vm, mock_vma,
+ vm->insert_entries(vm, mock_vma_res,
I915_CACHE_NONE, 0);
}
count = n;
@@ -370,7 +372,7 @@ alloc_vm_end:
cleanup_freed_objects(vm->i915);
}
- kfree(mock_vma);
+ kfree(mock_vma_res);
return 0;
}
@@ -385,7 +387,7 @@ static void close_object_list(struct list_head *objects,
vma = i915_vma_instance(obj, vm, NULL);
if (!IS_ERR(vma))
- ignored = i915_vma_unbind(vma);
+ ignored = i915_vma_unbind_unlocked(vma);
list_del(&obj->st_link);
i915_gem_object_put(obj);
@@ -496,7 +498,7 @@ static int fill_hole(struct i915_address_space *vm,
goto err;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
__func__, p->name, vma->node.start, vma->node.size,
@@ -569,7 +571,7 @@ static int fill_hole(struct i915_address_space *vm,
goto err;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
__func__, p->name, vma->node.start, vma->node.size,
@@ -655,7 +657,7 @@ static int walk_hole(struct i915_address_space *vm,
goto err_put;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("%s unbind failed at %llx + %llx with err=%d\n",
__func__, addr, vma->size, err);
@@ -732,13 +734,13 @@ static int pot_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, vma->size);
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
err = -EINVAL;
goto err_obj;
}
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
GEM_BUG_ON(err);
}
@@ -832,13 +834,13 @@ static int drunk_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, BIT_ULL(size));
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
err = -EINVAL;
goto err_obj;
}
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
GEM_BUG_ON(err);
if (igt_timeout(end_time,
@@ -906,7 +908,7 @@ static int __shrink_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, size);
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
err = -EINVAL;
break;
}
@@ -1122,7 +1124,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
u64 hole_start, hole_end, last = 0;
struct drm_mm_node *node;
IGT_TIMEOUT(end_time);
@@ -1182,7 +1184,7 @@ static int igt_ggtt_page(void *arg)
const unsigned int count = PAGE_SIZE/sizeof(u32);
I915_RND_STATE(prng);
struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
struct drm_mm_node tmp;
@@ -1279,6 +1281,7 @@ static void track_vma_bind(struct i915_vma *vma)
atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
+ vma->resource->bi.pages = vma->pages;
mutex_lock(&vma->vm->mutex);
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
@@ -1336,6 +1339,33 @@ static int igt_mock_drunk(void *arg)
return exercise_mock(ggtt->vm.i915, drunk_hole);
}
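+/*
+ * Reserve a GTT node for the vma at a fixed offset and, on success, back
+ * it with a freshly allocated vma resource.
+ */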
+static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
+{
+ struct i915_address_space *vm = vma->vm;
+ struct i915_vma_resource *vma_res;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res))
+ return PTR_ERR(vma_res);
+
+ mutex_lock(&vm->mutex);
+ err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
+ offset,
+ obj->cache_level,
+ 0);
+ if (!err) {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ } else {
+ kfree(vma_res);
+ }
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
static int igt_gtt_reserve(void *arg)
{
struct i915_ggtt *ggtt = arg;
@@ -1370,20 +1400,13 @@ static int igt_gtt_reserve(void *arg)
}
list_add(&obj->st_link, &objects);
-
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- obj->base.size,
- total,
- obj->cache_level,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = reserve_gtt_with_resource(vma, total);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1429,13 +1452,7 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- obj->base.size,
- total,
- obj->cache_level,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = reserve_gtt_with_resource(vma, total);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1465,7 +1482,7 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("i915_vma_unbind failed with err=%d!\n", err);
goto out;
@@ -1476,13 +1493,7 @@ static int igt_gtt_reserve(void *arg)
2 * I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- obj->base.size,
- offset,
- obj->cache_level,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = reserve_gtt_with_resource(vma, offset);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1509,6 +1520,31 @@ out:
return err;
}
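+/*
+ * Insert the vma's node anywhere in the GTT and, on success, back it
+ * with a freshly allocated vma resource.
+ */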
+static int insert_gtt_with_resource(struct i915_vma *vma)
+{
+ struct i915_address_space *vm = vma->vm;
+ struct i915_vma_resource *vma_res;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res))
+ return PTR_ERR(vma_res);
+
+ mutex_lock(&vm->mutex);
+ err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
+ obj->cache_level, 0, vm->total, 0);
+ if (!err) {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ } else {
+ kfree(vma_res);
+ }
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
static int igt_gtt_insert(void *arg)
{
struct i915_ggtt *ggtt = arg;
@@ -1552,7 +1588,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
+ err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1593,12 +1629,7 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
- obj->base.size, 0, obj->cache_level,
- 0, ggtt->vm.total,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = insert_gtt_with_resource(vma);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
i915_gem_object_put(obj);
@@ -1647,18 +1678,13 @@ static int igt_gtt_insert(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
offset = vma->node.start;
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("i915_vma_unbind failed with err=%d!\n", err);
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
- obj->base.size, 0, obj->cache_level,
- 0, ggtt->vm.total,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = insert_gtt_with_resource(vma);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1702,12 +1728,7 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
- obj->base.size, 0, obj->cache_level,
- 0, ggtt->vm.total,
- 0);
- mutex_unlock(&ggtt->vm.mutex);
+ err = insert_gtt_with_resource(vma);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1737,26 +1758,28 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
- struct i915_ggtt *ggtt;
+ struct intel_gt *gt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
- if (!ggtt) {
- err = -ENOMEM;
+ /* allocate the ggtt */
+ err = intel_gt_assign_ggtt(to_gt(i915));
+ if (err)
goto out_put;
- }
- mock_init_ggtt(i915, ggtt);
- err = i915_subtests(tests, ggtt);
+ gt = to_gt(i915);
+
+ mock_init_ggtt(gt);
+
+ err = i915_subtests(tests, gt->ggtt);
mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(ggtt);
- kfree(ggtt);
+ mock_fini_ggtt(gt->ggtt);
+
out_put:
mock_destroy_device(i915);
return err;
@@ -1939,6 +1962,7 @@ static int igt_cs_tlb(void *arg)
struct i915_vm_pt_stash stash = {};
struct i915_request *rq;
struct i915_gem_ww_ctx ww;
+ struct i915_vma_resource *vma_res;
u64 offset;
offset = igt_random_offset(&prng,
@@ -1959,6 +1983,13 @@ static int igt_cs_tlb(void *arg)
if (err)
goto end;
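+ /* The raw vm->insert_entries() calls below now take a vma resource */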
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res)) {
+ i915_vma_put_pages(vma);
+ err = PTR_ERR(vma_res);
+ goto end;
+ }
+
i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_vm_lock_objects(vm, &ww);
@@ -1980,33 +2011,41 @@ end_ww:
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
- if (err)
+ if (err) {
+ kfree(vma_res);
goto end;
+ }
+ i915_vma_resource_init_from_vma(vma_res, vma);
/* Prime the TLB with the dummy pages */
for (i = 0; i < count; i++) {
- vma->node.start = offset + i * PAGE_SIZE;
- vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+ vma_res->start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
+ 0);
- rq = submit_batch(ce, vma->node.start);
+ rq = submit_batch(ce, vma_res->start);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
goto end;
}
i915_request_put(rq);
}
-
+ i915_vma_resource_fini(vma_res);
i915_vma_put_pages(vma);
err = context_sync(ce);
if (err) {
pr_err("%s: dummy setup timed out\n",
ce->engine->name);
+ kfree(vma_res);
goto end;
}
vma = i915_vma_instance(act, vm, NULL);
if (IS_ERR(vma)) {
+ kfree(vma_res);
err = PTR_ERR(vma);
goto end;
}
@@ -2014,19 +2053,22 @@ end_ww:
i915_gem_object_lock(act, NULL);
err = i915_vma_get_pages(vma);
i915_gem_object_unlock(act);
- if (err)
+ if (err) {
+ kfree(vma_res);
goto end;
+ }
+ i915_vma_resource_init_from_vma(vma_res, vma);
/* Replace the TLB with target batches */
for (i = 0; i < count; i++) {
struct i915_request *rq;
u32 *cs = batch + i * 64 / sizeof(*cs);
u64 addr;
- vma->node.start = offset + i * PAGE_SIZE;
- vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+ vma_res->start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
- addr = vma->node.start + i * 64;
+ addr = vma_res->start + i * 64;
cs[4] = MI_NOOP;
cs[6] = lower_32_bits(addr);
cs[7] = upper_32_bits(addr);
@@ -2035,6 +2077,8 @@ end_ww:
rq = submit_batch(ce, addr);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
goto end;
}
@@ -2051,6 +2095,8 @@ end_ww:
}
end_spin(batch, count - 1);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
i915_vma_put_pages(vma);
err = context_sync(ce);
@@ -2114,7 +2160,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_cs_tlb),
};
- GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
+ GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 92a859b34190..c56a0c2cd2f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -26,6 +26,7 @@
#include <linux/pm_qos.h>
#include <linux/sort.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
@@ -782,6 +783,115 @@ out_spin:
return err;
}
+/*
+ * Test to prove a non-preemptible request can be cancelled and a subsequent
+ * request on the same context can successfully complete after cancellation.
+ *
+ * Testing methodology is to create a non-preemptible request and submit it,
+ * wait for the spinner to start, create a NOP request and submit it, cancel
+ * the spinner, wait for the spinner to complete and verify it failed with an
+ * error, and finally wait for the NOP request to complete and verify it
+ * succeeded without an error. The preemption timeout is also reduced /
+ * restored so the test runs in a timely manner.
+ */
+static int __cancel_reset(struct drm_i915_private *i915,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq, *nop;
+ unsigned long preempt_timeout_ms;
+ int err = 0;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT ||
+ !intel_has_reset_engine(engine->gt))
+ return 0;
+
+ preempt_timeout_ms = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 100;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ goto out_restore;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling active non-preemptable request\n",
+ engine->name);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ nop = intel_context_create_request(ce);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto out_rq;
+ }
+ i915_request_get(nop);
+ i915_request_add(nop);
+
+ i915_request_cancel(rq, -EINTR);
+
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel hung request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_nop;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ goto out_nop;
+ }
+
+ if (i915_request_wait(nop, 0, HZ) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to complete nop request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_nop;
+ }
+
+ if (nop->fence.error != 0) {
+ pr_err("%s: Nop request errored (%u)\n",
+ engine->name, nop->fence.error);
+ err = -EINVAL;
+ }
+
+out_nop:
+ i915_request_put(nop);
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+out_restore:
+ engine->props.preempt_timeout_ms = preempt_timeout_ms;
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
static int live_cancel_request(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -814,6 +924,14 @@ static int live_cancel_request(void *arg)
return err;
if (err2)
return err2;
+
+ /* Expects reset so call outside of igt_live_test_* */
+ err = __cancel_reset(i915, engine);
+ if (err)
+ return err;
+
+ if (igt_flush_test(i915))
+ return -EIO;
}
return 0;
@@ -843,7 +961,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
intel_gt_chipset_flush(to_gt(i915));
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 2d6d7bd13c3c..c4e932368b37 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -24,6 +24,7 @@
#include <linux/random.h>
#include "gt/intel_gt_pm.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 5c5809dfe9b2..6921ba128015 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -25,6 +25,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gem/selftests/mock_context.h"
#include "i915_scatterlist.h"
@@ -340,7 +341,7 @@ static int igt_vma_pin1(void *arg)
if (!err) {
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
goto out;
@@ -691,7 +692,7 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Unbinding returned %i\n", err);
goto out_object;
@@ -852,7 +853,7 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Unbinding returned %i\n", err);
goto out_object;
@@ -891,7 +892,7 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
- err = i915_vma_unbind(vma);
+ err = i915_vma_unbind_unlocked(vma);
if (err) {
pr_err("Unbinding returned %i\n", err);
goto out_object;
@@ -922,26 +923,28 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
- struct i915_ggtt *ggtt;
+ struct intel_gt *gt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
- if (!ggtt) {
- err = -ENOMEM;
+ /* allocate the ggtt */
+ err = intel_gt_assign_ggtt(to_gt(i915));
+ if (err)
goto out_put;
- }
- mock_init_ggtt(i915, ggtt);
- err = i915_subtests(tests, ggtt);
+ gt = to_gt(i915);
+
+ mock_init_ggtt(gt);
+
+ err = i915_subtests(tests, gt->ggtt);
mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(ggtt);
- kfree(ggtt);
+ mock_fini_ggtt(gt->ggtt);
+
out_put:
mock_destroy_device(i915);
return err;
@@ -982,7 +985,7 @@ static int igt_vma_remapped_gtt(void *arg)
intel_wakeref_t wakeref;
int err = 0;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return 0;
obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index b84594601d30..b484e12df417 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -19,7 +19,7 @@ int igt_flush_test(struct drm_i915_private *i915)
cond_resched();
- if (intel_gt_wait_for_idle(gt, HZ) == -ETIME) {
+ if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 24d87d0fc747..0c22594ae274 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -6,6 +6,7 @@
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
+#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"
#include "igt_spinner.h"
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 8aa7b1d33865..573d9b2e1a4a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -69,7 +69,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&i915->ggtt);
+ mock_fini_ggtt(to_gt(i915)->ggtt);
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
@@ -161,6 +161,8 @@ struct drm_i915_private *mock_gem_device(void)
i915_params_copy(&i915->params, &i915_modparams);
intel_runtime_pm_init_early(&i915->runtime_pm);
+ /* wakeref tracking has significant overhead */
+ i915->runtime_pm.no_wakeref_tracking = true;
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
@@ -194,8 +196,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- mock_init_ggtt(i915, &i915->ggtt);
- to_gt(i915)->vm = i915_vm_get(&i915->ggtt.vm);
+ /* allocate the ggtt */
+ ret = intel_gt_assign_ggtt(to_gt(i915));
+ if (ret)
+ goto err_unlock;
+
+ mock_init_ggtt(to_gt(i915));
+ to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
to_gt(i915)->info.engine_mask = BIT(0);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 1802baf80a17..568840e7ca66 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -33,23 +33,23 @@ static void mock_insert_page(struct i915_address_space *vm,
}
static void mock_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level level, u32 flags)
{
}
static void mock_bind_ppgtt(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
+ vma_res->bound_flags |= flags;
}
static void mock_unbind_ppgtt(struct i915_address_space *vm,
- struct i915_vma *vma)
+ struct i915_vma_resource *vma_res)
{
}
@@ -93,23 +93,23 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
static void mock_bind_ggtt(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
+ struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags)
{
}
static void mock_unbind_ggtt(struct i915_address_space *vm,
- struct i915_vma *vma)
+ struct i915_vma_resource *vma_res)
{
}
-void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
+void mock_init_ggtt(struct intel_gt *gt)
{
- memset(ggtt, 0, sizeof(*ggtt));
+ struct i915_ggtt *ggtt = gt->ggtt;
- ggtt->vm.gt = to_gt(i915);
- ggtt->vm.i915 = i915;
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = gt->i915;
ggtt->vm.is_ggtt = true;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
@@ -128,7 +128,6 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
- to_gt(i915)->ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index e3f224f43beb..d6eb90bd7f3f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -27,8 +27,9 @@
struct drm_i915_private;
struct i915_ggtt;
+struct intel_gt;
-void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
+void mock_init_ggtt(struct intel_gt *gt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name);
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 7374f1952762..5c2b2277afbf 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -2,6 +2,7 @@ config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index cb685fe2039b..a57812ec36b1 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -341,6 +341,9 @@ static struct platform_driver * const drivers[] = {
static int __init imx_drm_init(void)
{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_drm_init);
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 542c4af70661..dcf44cb00821 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -65,8 +65,10 @@ struct ingenic_dma_hwdescs {
struct jz_soc_info {
bool needs_dev_clk;
bool has_osd;
+ bool has_alpha;
bool map_noncoherent;
bool use_extended_hwdesc;
+ bool plane_f0_not_working;
unsigned int max_width, max_height;
const u32 *formats_f0, *formats_f1;
unsigned int num_formats_f0, num_formats_f1;
@@ -453,7 +455,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- if (plane == &priv->f0)
+ if (priv->soc_info->plane_f0_not_working && plane == &priv->f0)
return -EINVAL;
crtc_state = drm_atomic_get_existing_crtc_state(state,
@@ -1055,6 +1057,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
long parent_rate;
unsigned int i, clone_mask = 0;
int ret, irq;
+ u32 osdc = 0;
soc_info = of_device_get_match_data(dev);
if (!soc_info) {
@@ -1312,7 +1315,10 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
/* Enable OSD if available */
if (soc_info->has_osd)
- regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN);
+ osdc |= JZ_LCD_OSDC_OSDEN;
+ if (soc_info->has_alpha)
+ osdc |= JZ_LCD_OSDC_ALPHAEN;
+ regmap_write(priv->map, JZ_REG_LCD_OSDC, osdc);
mutex_init(&priv->clk_mutex);
priv->clock_nb.notifier_call = ingenic_drm_update_pixclk;
@@ -1511,7 +1517,9 @@ static const struct jz_soc_info jz4770_soc_info = {
static const struct jz_soc_info jz4780_soc_info = {
.needs_dev_clk = true,
.has_osd = true,
+ .has_alpha = true,
.use_extended_hwdesc = true,
+ .plane_f0_not_working = true, /* REVISIT */
.max_width = 4096,
.max_height = 2048,
.formats_f1 = jz4770_formats_f1,
@@ -1543,6 +1551,9 @@ static int ingenic_drm_init(void)
{
int err;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) {
err = platform_driver_register(ingenic_ipu_driver_ptr);
if (err)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index ed2424350773..76fef0880504 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -628,7 +629,7 @@ static struct platform_driver kmb_platform_driver = {
},
};
-module_platform_driver(kmb_platform_driver);
+drm_module_platform_driver(kmb_platform_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Keembay Display driver");
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
index eeb155826d27..31b5a3e21911 100644
--- a/drivers/gpu/drm/lib/drm_random.c
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -7,10 +7,11 @@
#include "drm_random.h"
-static inline u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
{
return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
}
+EXPORT_SYMBOL(drm_prandom_u32_max_state);
void drm_random_reorder(unsigned int *order, unsigned int count,
struct rnd_state *state)
diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h
index 4a3e94dfa0c0..5543bf0474bc 100644
--- a/drivers/gpu/drm/lib/drm_random.h
+++ b/drivers/gpu/drm/lib/drm_random.h
@@ -22,5 +22,7 @@ unsigned int *drm_random_order(unsigned int count,
void drm_random_reorder(unsigned int *order,
unsigned int count,
struct rnd_state *state);
+u32 drm_prandom_u32_max_state(u32 ep_ro,
+ struct rnd_state *state);
#endif /* !__DRM_RANDOM_H__ */
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index d0c2b1422b3b..55bb1ec3c4f7 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -214,6 +214,7 @@ static const struct drm_gem_object_funcs lima_gem_funcs = {
.vmap = lima_gem_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = lima_gem_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 390c969f74ad..e82931712d8a 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -409,7 +409,8 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
drm_sched_increase_karma(&task->base);
- lima_sched_build_error_task_list(task);
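+ /* Only build the error dump when error task capture is enabled */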
+ if (lima_max_error_tasks)
+ lima_sched_build_error_task_list(task);
pipe->task_error(pipe);
@@ -490,7 +491,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
lima_job_hang_limit,
msecs_to_jiffies(timeout), NULL,
- NULL, name);
+ NULL, name, pipe->ldev->dev);
}
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 5b5afc6aaf8e..0b2910e69b42 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -491,6 +491,9 @@ static int __init mcde_drm_register(void)
{
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
ret = platform_register_drivers(component_drivers,
ARRAY_SIZE(component_drivers));
if (ret)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index d661edf7e0fe..8cc0a1283d7c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -56,6 +56,7 @@ struct mtk_drm_crtc {
struct cmdq_pkt cmdq_handle;
u32 cmdq_event;
u32 cmdq_vblank_cnt;
+ wait_queue_head_t cb_blocking_queue;
#endif
struct device *mmsys_dev;
@@ -314,6 +315,7 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
}
mtk_crtc->cmdq_vblank_cnt = 0;
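+ /* Unblock the crtc disable path waiting for the cmdq callback */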
+ wake_up(&mtk_crtc->cb_blocking_queue);
}
#endif
@@ -700,6 +702,13 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
mtk_crtc->pending_planes = true;
mtk_drm_crtc_update_config(mtk_crtc, false);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ /* Wait for planes to be disabled by cmdq */
+ if (mtk_crtc->cmdq_client.chan)
+ wait_event_timeout(mtk_crtc->cb_blocking_queue,
+ mtk_crtc->cmdq_vblank_cnt == 0,
+ msecs_to_jiffies(500));
+#endif
/* Wait for planes to be disabled */
drm_crtc_wait_one_vblank(crtc);
@@ -976,6 +985,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->cmdq_client.chan = NULL;
}
}
+
+ /* for sending blocking cmd in crtc disable */
+ init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
}
#endif
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 56ff8c57ef8f..dd029307be7d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -236,6 +236,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
struct device *dma_dev;
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
if (!iommu_present(&platform_bus_type))
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5d90d2eb0019..ccb0511b9cd5 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev)
mtk_dsi_poweroff(dsi);
}
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+ int ret;
+
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to encoder init to drm\n");
+ return ret;
+ }
+
+ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+
+ ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ goto err_cleanup_encoder;
+
+ dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+ if (IS_ERR(dsi->connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ ret = PTR_ERR(dsi->connector);
+ goto err_cleanup_encoder;
+ }
+ drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+ return 0;
+
+err_cleanup_encoder:
+ drm_encoder_cleanup(&dsi->encoder);
+ return ret;
+}
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret;
+ struct drm_device *drm = data;
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
+ return ret;
+
+ return device_reset_optional(dev);
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+ .bind = mtk_dsi_bind,
+ .unbind = mtk_dsi_unbind,
+};
+
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);
+ struct device *dev = host->dev;
+ int ret;
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
+ dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->next_bridge))
+ return PTR_ERR(dsi->next_bridge);
+
+ drm_bridge_add(&dsi->bridge);
+
+ ret = component_add(host->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ DRM_ERROR("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&dsi->bridge);
+ return ret;
+ }
return 0;
}
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+
+ component_del(host->dev, &mtk_dsi_component_ops);
+ drm_bridge_remove(&dsi->bridge);
+ return 0;
+}
+
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
int ret;
@@ -891,24 +974,33 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
u8 read_data[16];
void *src_addr;
u8 irq_flag = CMD_DONE_INT_FLAG;
+ u32 dsi_mode;
+ int ret;
- if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
- DRM_ERROR("dsi engine is not command mode\n");
- return -EINVAL;
+ dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
+ if (dsi_mode & MODE) {
+ mtk_dsi_stop(dsi);
+ ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
+ if (ret)
+ goto restore_dsi_mode;
}
if (MTK_DSI_HOST_IS_READ(msg->type))
irq_flag |= LPRX_RD_RDY_INT_FLAG;
- if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
- return -ETIME;
+ ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag);
+ if (ret)
+ goto restore_dsi_mode;
- if (!MTK_DSI_HOST_IS_READ(msg->type))
- return 0;
+ if (!MTK_DSI_HOST_IS_READ(msg->type)) {
+ recv_cnt = 0;
+ goto restore_dsi_mode;
+ }
if (!msg->rx_buf) {
DRM_ERROR("dsi receive buffer size may be NULL\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto restore_dsi_mode;
}
for (i = 0; i < 16; i++)
@@ -933,78 +1025,25 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
recv_cnt, *((u8 *)(msg->tx_buf)));
- return recv_cnt;
+restore_dsi_mode:
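+ /* Re-enter video mode if that is how we found the link */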
+ if (dsi_mode & MODE) {
+ mtk_dsi_set_mode(dsi);
+ mtk_dsi_start(dsi);
+ }
+
+ return ret < 0 ? ret : recv_cnt;
}
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
+ .detach = mtk_dsi_host_detach,
.transfer = mtk_dsi_host_transfer,
};
-static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
-{
- int ret;
-
- ret = drm_simple_encoder_init(drm, &dsi->encoder,
- DRM_MODE_ENCODER_DSI);
- if (ret) {
- DRM_ERROR("Failed to encoder init to drm\n");
- return ret;
- }
-
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
-
- ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
- if (ret)
- goto err_cleanup_encoder;
-
- dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
- if (IS_ERR(dsi->connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- ret = PTR_ERR(dsi->connector);
- goto err_cleanup_encoder;
- }
- drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
-
- return 0;
-
-err_cleanup_encoder:
- drm_encoder_cleanup(&dsi->encoder);
- return ret;
-}
-
-static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
-{
- int ret;
- struct drm_device *drm = data;
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- ret = mtk_dsi_encoder_init(drm, dsi);
- if (ret)
- return ret;
-
- return device_reset_optional(dev);
-}
-
-static void mtk_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- drm_encoder_cleanup(&dsi->encoder);
-}
-
-static const struct component_ops mtk_dsi_component_ops = {
- .bind = mtk_dsi_bind,
- .unbind = mtk_dsi_unbind,
-};
-
static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
- struct drm_panel *panel;
struct resource *regs;
int irq_num;
int ret;
@@ -1021,19 +1060,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dsi->next_bridge);
- if (ret)
- goto err_unregister_host;
-
- if (panel) {
- dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(dsi->next_bridge)) {
- ret = PTR_ERR(dsi->next_bridge);
- goto err_unregister_host;
- }
- }
-
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1098,14 +1124,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
- drm_bridge_add(&dsi->bridge);
-
- ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to add component: %d\n", ret);
- goto err_unregister_host;
- }
-
return 0;
err_unregister_host:
@@ -1118,8 +1136,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
- drm_bridge_remove(&dsi->bridge);
- component_del(&pdev->dev, &mtk_dsi_component_ops);
mipi_dsi_host_unregister(&dsi->host);
return 0;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 26aeaf0ab86e..93a7a033a3e8 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -542,7 +543,7 @@ static struct platform_driver meson_drm_platform_driver = {
},
};
-module_platform_driver(meson_drm_platform_driver);
+drm_module_platform_driver(meson_drm_platform_driver);
MODULE_AUTHOR("Jasper St. Pierre <jstpierre@mecheye.net>");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 740108a006ba..217844d71ab5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_module.h>
#include <drm/drm_pciids.h>
#include "mgag200_drv.h"
@@ -375,24 +376,7 @@ static struct pci_driver mgag200_pci_driver = {
.remove = mgag200_pci_remove,
};
-static int __init mgag200_init(void)
-{
- if (drm_firmware_drivers_only() && mgag200_modeset == -1)
- return -EINVAL;
-
- if (mgag200_modeset == 0)
- return -EINVAL;
-
- return pci_register_driver(&mgag200_pci_driver);
-}
-
-static void __exit mgag200_exit(void)
-{
- pci_unregister_driver(&mgag200_pci_driver);
-}
-
-module_init(mgag200_init);
-module_exit(mgag200_exit);
+drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 555666e3f960..ff19c5a94d6f 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -512,6 +512,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
struct msm_kms *kms;
int ret, i;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 3bbf574c3bdc..367a6aaa3a20 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -89,7 +89,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ret = drm_sched_init(&ring->sched, &msm_sched_ops,
num_hw_submissions, 0, sched_timeout,
- NULL, NULL, to_msm_bo(ring->bo)->name);
+ NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
if (ret) {
goto fail;
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 375f26d4a417..9d71c55a31c0 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -258,8 +259,7 @@ static int mxsfb_load(struct drm_device *drm,
ret = mxsfb_attach_bridge(mxsfb);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ dev_err_probe(drm->dev, ret, "Cannot connect bridge\n");
goto err_vblank;
}
@@ -419,7 +419,7 @@ static struct platform_driver mxsfb_platform_driver = {
},
};
-module_platform_driver(mxsfb_platform_driver);
+drm_module_platform_driver(mxsfb_platform_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DRM/KMS driver");
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index ae2f2abc8f5a..daf9f87477ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -101,7 +101,6 @@ nv40_backlight_init(struct nouveau_encoder *encoder,
if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return -ENODEV;
- props->type = BACKLIGHT_RAW;
props->max_brightness = 31;
*ops = &nv40_bl_ops;
return 0;
@@ -294,7 +293,8 @@ nv50_backlight_init(struct nouveau_backlight *bl,
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
- if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
+ if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
+ nv_conn->base.status != connector_status_connected)
return -ENODEV;
if (nv_conn->type == DCB_CONNECTOR_eDP) {
@@ -342,7 +342,6 @@ nv50_backlight_init(struct nouveau_backlight *bl,
else
*ops = &nva3_bl_ops;
- props->type = BACKLIGHT_RAW;
props->max_brightness = 100;
return 0;
@@ -410,6 +409,7 @@ nouveau_backlight_init(struct drm_connector *connector)
goto fail_alloc;
}
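+ /* All backends report raw brightness values */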
+ props.type = BACKLIGHT_RAW;
bl->dev = backlight_device_register(backlight_name, connector->kdev,
nv_encoder, ops, &props);
if (IS_ERR(bl->dev)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2b460835a438..2cd0932b3d68 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -708,10 +708,12 @@ nouveau_display_create(struct drm_device *dev)
&disp->disp);
if (ret == 0) {
nouveau_display_create_properties(dev);
- if (disp->disp.object.oclass < NV50_DISP)
+ if (disp->disp.object.oclass < NV50_DISP) {
+ dev->mode_config.fb_modifiers_not_supported = true;
ret = nv04_display_create(dev);
- else
+ } else {
ret = nv50_display_create(dev);
+ }
}
} else {
ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 266809e511e2..46a5a1016e37 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -925,8 +925,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
mutex_lock(&svmm->mutex);
- ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
- npages * sizeof(args->p.phys[0]), NULL);
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args,
+ struct_size(args, p.phys, npages), NULL);
mutex_unlock(&svmm->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index a11637b0f6cc..d063d0dc13c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -21,6 +21,9 @@
*
* Authors: Ben Skeggs
*/
+
+#include <linux/string_helpers.h>
+
#include "aux.h"
#include "pad.h"
@@ -94,7 +97,7 @@ void
nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor)
{
struct nvkm_i2c_pad *pad = aux->pad;
- AUX_TRACE(aux, "monitor: %s", monitor ? "yes" : "no");
+ AUX_TRACE(aux, "monitor: %s", str_yes_no(monitor));
if (monitor)
nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX);
else
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2720a58ccd90..eaf67b9e5f12 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -727,6 +727,9 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
DBG("%s", dev_name(dev));
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&omap_drm_driver, dev);
if (IS_ERR(ddev))
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 434c2861bb40..bb2e47229c68 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -106,6 +106,7 @@ config DRM_PANEL_EDP
depends on PM
select VIDEOMODE_HELPERS
select DRM_DP_AUX_BUS
+ select DRM_DP_HELPER
help
DRM panel driver for dumb eDP panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
@@ -292,6 +293,18 @@ config DRM_PANEL_NOVATEK_NT35510
around the Novatek NT35510 display controller, such as some
Hydis panels.
+config DRM_PANEL_NOVATEK_NT35560
+ tristate "Novatek NT35560 DSI command mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for the Novatek NT35560
+ display controller. It supports DSI in both command and video
+ mode and drives several panels, such as the Sony ACX424AKM and
+ ACX424AKP.
+
config DRM_PANEL_NOVATEK_NT35950
tristate "Novatek NT35950 DSI panel"
depends on OF
@@ -592,17 +605,6 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
-config DRM_PANEL_SONY_ACX424AKP
- tristate "Sony ACX424AKP DSI command mode panel"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- select VIDEOMODE_HELPERS
- help
- Say Y here if you want to enable the Sony ACX424 display
- panel. This panel supports DSI in both command and video
- mode.
-
config DRM_PANEL_SONY_ACX565AKM
tristate "Sony ACX565AKM panel"
depends on GPIOLIB && OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d99fbbce49d1..5740911f637c 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35560) += panel-novatek-nt35560.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
@@ -60,7 +61,6 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
-obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index a394a15dc3fb..f7bfcf63d48e 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
@@ -222,6 +223,8 @@ struct panel_edp {
struct gpio_desc *enable_gpio;
struct gpio_desc *hpd_gpio;
+ const struct edp_panel_entry *detected_panel;
+
struct edid *edid;
struct drm_display_mode override_mode;
@@ -606,6 +609,28 @@ static int panel_edp_get_timings(struct drm_panel *panel,
return p->desc->num_timings;
}
+static int detected_panel_show(struct seq_file *s, void *data)
+{
+ struct drm_panel *panel = s->private;
+ struct panel_edp *p = to_panel_edp(panel);
+
+ if (IS_ERR(p->detected_panel))
+ seq_puts(s, "UNKNOWN\n");
+ else if (!p->detected_panel)
+ seq_puts(s, "HARDCODED\n");
+ else
+ seq_printf(s, "%s\n", p->detected_panel->name);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(detected_panel);
+
+static void panel_edp_debugfs_init(struct drm_panel *panel, struct dentry *root)
+{
+ debugfs_create_file("detected_panel", 0600, root, panel, &detected_panel_fops);
+}
+
static const struct drm_panel_funcs panel_edp_funcs = {
.disable = panel_edp_disable,
.unprepare = panel_edp_unprepare,
@@ -613,6 +638,7 @@ static const struct drm_panel_funcs panel_edp_funcs = {
.enable = panel_edp_enable,
.get_modes = panel_edp_get_modes,
.get_timings = panel_edp_get_timings,
+ .debugfs_init = panel_edp_debugfs_init,
};
#define PANEL_EDP_BOUNDS_CHECK(to_check, bounds, field) \
@@ -666,7 +692,6 @@ static const struct edp_panel_entry *find_edp_panel(u32 panel_id);
static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
{
- const struct edp_panel_entry *edp_panel;
struct panel_desc *desc;
u32 panel_id;
char vend[4];
@@ -705,14 +730,14 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
}
drm_edid_decode_panel_id(panel_id, vend, &product_id);
- edp_panel = find_edp_panel(panel_id);
+ panel->detected_panel = find_edp_panel(panel_id);
/*
* We're using non-optimized timings and want it really obvious that
* someone needs to add an entry to the table, so we'll do a WARN_ON
* splat.
*/
- if (WARN_ON(!edp_panel)) {
+ if (WARN_ON(!panel->detected_panel)) {
dev_warn(dev,
"Unknown panel %s %#06x, using conservative timings\n",
vend, product_id);
@@ -734,12 +759,14 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
*/
desc->delay.unprepare = 2000;
desc->delay.enable = 200;
+
+ panel->detected_panel = ERR_PTR(-EINVAL);
} else {
dev_info(dev, "Detected %s %s (%#06x)\n",
- vend, edp_panel->name, product_id);
+ vend, panel->detected_panel->name, product_id);
/* Update the delay; everything else comes from EDID */
- desc->delay = *edp_panel->delay;
+ desc->delay = *panel->detected_panel->delay;
}
ret = 0;
@@ -1605,6 +1632,47 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
+static const struct drm_display_mode sharp_lq140m1jw46_mode[] = {
+ {
+ .clock = 346500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 69,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 144370,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 69,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ },
+};
+
+static const struct panel_desc sharp_lq140m1jw46 = {
+ .modes = sharp_lq140m1jw46_mode,
+ .num_modes = ARRAY_SIZE(sharp_lq140m1jw46_mode),
+ .bpc = 8,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ .hpd_absent = 80,
+ .enable = 50,
+ .unprepare = 500,
+ },
+};
+
static const struct drm_display_mode starry_kr122ea0sra_mode = {
.clock = 147000,
.hdisplay = 1920,
@@ -1719,6 +1787,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq123p1jx31",
.data = &sharp_lq123p1jx31,
}, {
+ .compatible = "sharp,lq140m1jw46",
+ .data = &sharp_lq140m1jw46,
+ }, {
.compatible = "starry,kr122ea0sra",
.data = &starry_kr122ea0sra,
}, {
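A note on the debugfs hookup above: DEFINE_SHOW_ATTRIBUTE(detected_panel) is what produces the detected_panel_fops passed to debugfs_create_file(). As a rough sketch (abbreviated from <linux/seq_file.h>), the macro expands to a single_open()-based file_operations:

	static int detected_panel_open(struct inode *inode, struct file *file)
	{
		return single_open(file, detected_panel_show, inode->i_private);
	}

	static const struct file_operations detected_panel_fops = {
		.owner   = THIS_MODULE,
		.open    = detected_panel_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

Reading the file then prints UNKNOWN, HARDCODED, or the matched panel name, per detected_panel_show().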
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
new file mode 100644
index 000000000000..1b6042321ea1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Novatek NT35560-based panel controller.
+ *
+ * Supported panels include:
+ * Sony ACX424AKM - a 480x854 AMOLED DSI panel
+ * Sony ACX424AKP - a 480x864 AMOLED DSI panel
+ *
+ * Copyright (C) Linaro Ltd. 2019-2021
+ * Author: Linus Walleij
+ * Based on code and know-how from Marcus Lorentzon
+ * Copyright (C) ST-Ericsson SA 2010
+ * Based on code and know-how from Johan Olson and Joakim Wesslen
+ * Copyright (C) Sony Ericsson Mobile Communications 2010
+ */
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define NT35560_DCS_READ_ID1 0xDA
+#define NT35560_DCS_READ_ID2 0xDB
+#define NT35560_DCS_READ_ID3 0xDC
+#define NT35560_DCS_SET_MDDI 0xAE
+
+/*
+ * Sony seems to use vendor ID 0x81
+ */
+#define DISPLAY_SONY_ACX424AKP_ID1 0x8103
+#define DISPLAY_SONY_ACX424AKP_ID2 0x811a
+#define DISPLAY_SONY_ACX424AKP_ID3 0x811b
+/*
+ * The fourth ID looks like a bug, vendor IDs begin at 0x80
+ * and panel 00 ... seems like default values.
+ */
+#define DISPLAY_SONY_ACX424AKP_ID4 0x8000
+
+struct nt35560_config {
+ const struct drm_display_mode *vid_mode;
+ const struct drm_display_mode *cmd_mode;
+};
+
+struct nt35560 {
+ const struct nt35560_config *conf;
+ struct drm_panel panel;
+ struct device *dev;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ bool video_mode;
+};
+
+static const struct drm_display_mode sony_acx424akp_vid_mode = {
+ .clock = 27234,
+ .hdisplay = 480,
+ .hsync_start = 480 + 15,
+ .hsync_end = 480 + 15 + 0,
+ .htotal = 480 + 15 + 0 + 15,
+ .vdisplay = 864,
+ .vsync_start = 864 + 14,
+ .vsync_end = 864 + 14 + 1,
+ .vtotal = 864 + 14 + 1 + 11,
+ .width_mm = 48,
+ .height_mm = 84,
+ .flags = DRM_MODE_FLAG_PVSYNC,
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode using the maximum HS frequency.
+ */
+static const struct drm_display_mode sony_acx424akp_cmd_mode = {
+ .clock = 35478,
+ .hdisplay = 480,
+ .hsync_start = 480 + 154,
+ .hsync_end = 480 + 154 + 16,
+ .htotal = 480 + 154 + 16 + 32,
+ .vdisplay = 864,
+ .vsync_start = 864 + 1,
+ .vsync_end = 864 + 1 + 1,
+ .vtotal = 864 + 1 + 1 + 1,
+ /*
+ * A nominal refresh rate; experiments at the maximum "pixel"
+ * clock speed (HS clock 420 MHz) yield around 117 Hz.
+ */
+ .width_mm = 48,
+ .height_mm = 84,
+};
+
+static const struct nt35560_config sony_acx424akp_data = {
+ .vid_mode = &sony_acx424akp_vid_mode,
+ .cmd_mode = &sony_acx424akp_cmd_mode,
+};
+
+static const struct drm_display_mode sony_acx424akm_vid_mode = {
+ .clock = 27234,
+ .hdisplay = 480,
+ .hsync_start = 480 + 15,
+ .hsync_end = 480 + 15 + 0,
+ .htotal = 480 + 15 + 0 + 15,
+ .vdisplay = 854,
+ .vsync_start = 854 + 14,
+ .vsync_end = 854 + 14 + 1,
+ .vtotal = 854 + 14 + 1 + 11,
+ .width_mm = 46,
+ .height_mm = 82,
+ .flags = DRM_MODE_FLAG_PVSYNC,
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode using the maximum HS frequency.
+ */
+static const struct drm_display_mode sony_acx424akm_cmd_mode = {
+ .clock = 35478,
+ .hdisplay = 480,
+ .hsync_start = 480 + 154,
+ .hsync_end = 480 + 154 + 16,
+ .htotal = 480 + 154 + 16 + 32,
+ .vdisplay = 854,
+ .vsync_start = 854 + 1,
+ .vsync_end = 854 + 1 + 1,
+ .vtotal = 854 + 1 + 1 + 1,
+ .width_mm = 46,
+ .height_mm = 82,
+};
+
+static const struct nt35560_config sony_acx424akm_data = {
+ .vid_mode = &sony_acx424akm_vid_mode,
+ .cmd_mode = &sony_acx424akm_cmd_mode,
+};
+
+static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt35560, panel);
+}
+
+#define FOSC 20 /* 20 MHz */
+#define SCALE_FACTOR_NS_DIV_MHZ 1000
+
+static int nt35560_set_brightness(struct backlight_device *bl)
+{
+ struct nt35560 *nt = bl_get_data(bl);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int period_ns = 1023;
+ int duty_ns = bl->props.brightness;
+ u8 pwm_ratio;
+ u8 pwm_div;
+ u8 par;
+ int ret;
+
+ if (backlight_is_blank(bl)) {
+ /* Disable backlight */
+ par = 0x00;
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &par, 1);
+ if (ret) {
+ dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ /* Calculate the PWM duty cycle in n/256's */
+ pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1);
+ pwm_div = max(1,
+ ((FOSC * period_ns) / 256) /
+ SCALE_FACTOR_NS_DIV_MHZ);
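+ /*
+ * Worked example (illustrative): brightness 512 against the fixed
+ * 1023 "period" yields pwm_ratio = (512 * 256) / 1023 - 1 = 127,
+ * roughly 50% duty cycle, while pwm_div = max(1, (20 * 1023 / 256)
+ * / 1000) clamps to 1.
+ */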
+
+ /* Set up the PWM duty cycle as ONE byte (differs from the standard) */
+ dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &pwm_ratio, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Sequence to write PWMDIV:
+ * address data
+ * 0xF3 0xAA CMD2 Unlock
+ * 0x00 0x01 Enter CMD2 page 0
+ * 0X7D 0x01 No reload MTP of CMD2 P1
+ * 0x22 PWMDIV
+ * 0x7F 0xAA CMD2 page 1 lock
+ */
+ par = 0xaa;
+ ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret);
+ return ret;
+ }
+ par = 0x01;
+ ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret);
+ return ret;
+ }
+ par = 0x01;
+ ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret);
+ return ret;
+ }
+ ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret);
+ return ret;
+ }
+ par = 0xaa;
+ ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret);
+ return ret;
+ }
+
+ /* Enable backlight */
+ par = 0x24;
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &par, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops nt35560_bl_ops = {
+ .update_status = nt35560_set_brightness,
+};
+
+static const struct backlight_properties nt35560_bl_props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 512,
+ .max_brightness = 1023,
+};
+
+static int nt35560_read_id(struct nt35560 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 vendor, version, panel;
+ u16 val;
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "could not vendor ID byte\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "could not read device version byte\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1);
+ if (ret < 0) {
+ dev_err(nt->dev, "could not read panel ID byte\n");
+ return ret;
+ }
+
+ if (vendor == 0x00) {
+ dev_err(nt->dev, "device vendor ID is zero\n");
+ return -ENODEV;
+ }
+
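+ /*
+ * Illustrative example: vendor 0x81 and panel 0x1a combine below
+ * to 0x811a, matching DISPLAY_SONY_ACX424AKP_ID2 above.
+ */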
+ val = (vendor << 8) | panel;
+ switch (val) {
+ case DISPLAY_SONY_ACX424AKP_ID1:
+ case DISPLAY_SONY_ACX424AKP_ID2:
+ case DISPLAY_SONY_ACX424AKP_ID3:
+ case DISPLAY_SONY_ACX424AKP_ID4:
+ dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
+ break;
+ default:
+ dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
+ break;
+ }
+
+ return 0;
+}
+
+static int nt35560_power_on(struct nt35560 *nt)
+{
+ int ret;
+
+ ret = regulator_enable(nt->supply);
+ if (ret) {
+ dev_err(nt->dev, "failed to enable supply (%d)\n", ret);
+ return ret;
+ }
+
+ /* Assert RESET */
+ gpiod_set_value_cansleep(nt->reset_gpio, 1);
+ udelay(20);
+ /* De-assert RESET */
+ gpiod_set_value_cansleep(nt->reset_gpio, 0);
+ usleep_range(11000, 20000);
+
+ return 0;
+}
+
+static void nt35560_power_off(struct nt35560 *nt)
+{
+ /* Assert RESET */
+ gpiod_set_value_cansleep(nt->reset_gpio, 1);
+ usleep_range(11000, 20000);
+
+ regulator_disable(nt->supply);
+}
+
+static int nt35560_prepare(struct drm_panel *panel)
+{
+ struct nt35560 *nt = panel_to_nt35560(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ const u8 mddi = 3;
+ int ret;
+
+ ret = nt35560_power_on(nt);
+ if (ret)
+ return ret;
+
+ ret = nt35560_read_id(nt);
+ if (ret) {
+ dev_err(nt->dev, "failed to read panel ID (%d)\n", ret);
+ goto err_power_off;
+ }
+
+ /* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ ret = mipi_dsi_dcs_set_tear_on(dsi,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret) {
+ dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret);
+ goto err_power_off;
+ }
+
+ /*
+ * Set MDDI
+ *
+ * This presumably deactivates the Qualcomm MDDI interface and
+ * selects DSI, similar code is found in other drivers such as the
+ * Sharp LS043T1LE01 which makes us suspect that this panel may be
+ * using a Novatek NT35565 or similar display driver chip that shares
+ * this command. Due to the lack of documentation we cannot know for
+ * sure.
+ */
+ ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI,
+ &mddi, sizeof(mddi));
+ if (ret < 0) {
+ dev_err(nt->dev, "failed to set MDDI (%d)\n", ret);
+ goto err_power_off;
+ }
+
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
+ goto err_power_off;
+ }
+ msleep(140);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
+ goto err_power_off;
+ }
+ if (nt->video_mode) {
+ /* In video mode turn peripheral on */
+ ret = mipi_dsi_turn_on_peripheral(dsi);
+ if (ret) {
+ dev_err(nt->dev, "failed to turn on peripheral\n");
+ goto err_power_off;
+ }
+ }
+
+ return 0;
+
+err_power_off:
+ nt35560_power_off(nt);
+ return ret;
+}
+
+static int nt35560_unprepare(struct drm_panel *panel)
+{
+ struct nt35560 *nt = panel_to_nt35560(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
+ return ret;
+ }
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
+ return ret;
+ }
+ msleep(85);
+
+ nt35560_power_off(nt);
+
+ return 0;
+}
+
+
+static int nt35560_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt35560 *nt = panel_to_nt35560(panel);
+ const struct nt35560_config *conf = nt->conf;
+ struct drm_display_mode *mode;
+
+ if (nt->video_mode)
+ mode = drm_mode_duplicate(connector->dev,
+ conf->vid_mode);
+ else
+ mode = drm_mode_duplicate(connector->dev,
+ conf->cmd_mode);
+ if (!mode) {
+ dev_err(panel->dev, "bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs nt35560_drm_funcs = {
+ .unprepare = nt35560_unprepare,
+ .prepare = nt35560_prepare,
+ .get_modes = nt35560_get_modes,
+};
+
+static int nt35560_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt35560 *nt;
+ int ret;
+
+ nt = devm_kzalloc(dev, sizeof(struct nt35560), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+ nt->video_mode = of_property_read_bool(dev->of_node,
+ "enforce-video-mode");
+
+ mipi_dsi_set_drvdata(dsi, nt);
+ nt->dev = dev;
+
+ nt->conf = of_device_get_match_data(dev);
+ if (!nt->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ /*
+ * FIXME: these come from the ST-Ericsson vendor driver for the
+ * HREF520 and seem to reflect limitations in the PLLs on that
+ * platform; if you have the datasheet, please cross-check the
+ * actual max rates.
+ */
+ dsi->lp_rate = 19200000;
+ dsi->hs_rate = 420160000;
+
+ if (nt->video_mode)
+ /* Burst mode using event for sync */
+ dsi->mode_flags =
+ MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST;
+ else
+ dsi->mode_flags =
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ nt->supply = devm_regulator_get(dev, "vddi");
+ if (IS_ERR(nt->supply))
+ return PTR_ERR(nt->supply);
+
+ /* This asserts RESET by default */
+ nt->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(nt->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
+ "failed to request GPIO\n");
+
+ drm_panel_init(&nt->panel, dev, &nt35560_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ nt->panel.backlight = devm_backlight_device_register(dev, "nt35560", dev, nt,
+ &nt35560_bl_ops, &nt35560_bl_props);
+ if (IS_ERR(nt->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(nt->panel.backlight),
+ "failed to register backlight device\n");
+
+ drm_panel_add(&nt->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&nt->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nt35560_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt35560 *nt = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&nt->panel);
+
+ return 0;
+}
+
+static const struct of_device_id nt35560_of_match[] = {
+ {
+ .compatible = "sony,acx424akp",
+ .data = &sony_acx424akp_data,
+ },
+ {
+ .compatible = "sony,acx424akm",
+ .data = &sony_acx424akm_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nt35560_of_match);
+
+static struct mipi_dsi_driver nt35560_driver = {
+ .probe = nt35560_probe,
+ .remove = nt35560_remove,
+ .driver = {
+ .name = "panel-novatek-nt35560",
+ .of_match_table = nt35560_of_match,
+ },
+};
+module_mipi_dsi_driver(nt35560_driver);
+
+MODULE_AUTHOR("Linus Wallei <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("MIPI-DSI Novatek NT35560 Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
deleted file mode 100644
index 9536d56a94a5..000000000000
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ /dev/null
@@ -1,490 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * MIPI-DSI Sony ACX424AKP panel driver. This is a 480x864
- * AMOLED panel with a command-only DSI interface.
- *
- * Copyright (C) Linaro Ltd. 2019
- * Author: Linus Walleij
- * Based on code and know-how from Marcus Lorentzon
- * Copyright (C) ST-Ericsson SA 2010
- */
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-
-#include <video/mipi_display.h>
-
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_panel.h>
-
-#define ACX424_DCS_READ_ID1 0xDA
-#define ACX424_DCS_READ_ID2 0xDB
-#define ACX424_DCS_READ_ID3 0xDC
-#define ACX424_DCS_SET_MDDI 0xAE
-
-/*
- * Sony seems to use vendor ID 0x81
- */
-#define DISPLAY_SONY_ACX424AKP_ID1 0x811b
-#define DISPLAY_SONY_ACX424AKP_ID2 0x811a
-/*
- * The third ID looks like a bug, vendor IDs begin at 0x80
- * and panel 00 ... seems like default values.
- */
-#define DISPLAY_SONY_ACX424AKP_ID3 0x8000
-
-struct acx424akp {
- struct drm_panel panel;
- struct device *dev;
- struct regulator *supply;
- struct gpio_desc *reset_gpio;
- bool video_mode;
-};
-
-static const struct drm_display_mode sony_acx424akp_vid_mode = {
- .clock = 27234,
- .hdisplay = 480,
- .hsync_start = 480 + 15,
- .hsync_end = 480 + 15 + 0,
- .htotal = 480 + 15 + 0 + 15,
- .vdisplay = 864,
- .vsync_start = 864 + 14,
- .vsync_end = 864 + 14 + 1,
- .vtotal = 864 + 14 + 1 + 11,
- .width_mm = 48,
- .height_mm = 84,
- .flags = DRM_MODE_FLAG_PVSYNC,
-};
-
-/*
- * The timings are not very helpful as the display is used in
- * command mode using the maximum HS frequency.
- */
-static const struct drm_display_mode sony_acx424akp_cmd_mode = {
- .clock = 35478,
- .hdisplay = 480,
- .hsync_start = 480 + 154,
- .hsync_end = 480 + 154 + 16,
- .htotal = 480 + 154 + 16 + 32,
- .vdisplay = 864,
- .vsync_start = 864 + 1,
- .vsync_end = 864 + 1 + 1,
- .vtotal = 864 + 1 + 1 + 1,
- /*
- * Some desired refresh rate, experiments at the maximum "pixel"
- * clock speed (HS clock 420 MHz) yields around 117Hz.
- */
- .width_mm = 48,
- .height_mm = 84,
-};
-
-static inline struct acx424akp *panel_to_acx424akp(struct drm_panel *panel)
-{
- return container_of(panel, struct acx424akp, panel);
-}
-
-#define FOSC 20 /* 20Mhz */
-#define SCALE_FACTOR_NS_DIV_MHZ 1000
-
-static int acx424akp_set_brightness(struct backlight_device *bl)
-{
- struct acx424akp *acx = bl_get_data(bl);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
- int period_ns = 1023;
- int duty_ns = bl->props.brightness;
- u8 pwm_ratio;
- u8 pwm_div;
- u8 par;
- int ret;
-
- if (backlight_is_blank(bl)) {
- /* Disable backlight */
- par = 0x00;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret) {
- dev_err(acx->dev, "failed to disable display backlight (%d)\n", ret);
- return ret;
- }
- return 0;
- }
-
- /* Calculate the PWM duty cycle in n/256's */
- pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1);
- pwm_div = max(1,
- ((FOSC * period_ns) / 256) /
- SCALE_FACTOR_NS_DIV_MHZ);
-
- /* Set up PWM dutycycle ONE byte (differs from the standard) */
- dev_dbg(acx->dev, "calculated duty cycle %02x\n", pwm_ratio);
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
- &pwm_ratio, 1);
- if (ret < 0) {
- dev_err(acx->dev, "failed to set display PWM ratio (%d)\n", ret);
- return ret;
- }
-
- /*
- * Sequence to write PWMDIV:
- * address data
- * 0xF3 0xAA CMD2 Unlock
- * 0x00 0x01 Enter CMD2 page 0
- * 0X7D 0x01 No reload MTP of CMD2 P1
- * 0x22 PWMDIV
- * 0x7F 0xAA CMD2 page 1 lock
- */
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
- if (ret < 0) {
- dev_err(acx->dev, "failed to unlock CMD 2 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
- if (ret < 0) {
- dev_err(acx->dev, "failed to enter page 1 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
- if (ret < 0) {
- dev_err(acx->dev, "failed to disable MTP reload (%d)\n", ret);
- return ret;
- }
- ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
- if (ret < 0) {
- dev_err(acx->dev, "failed to set PWM divisor (%d)\n", ret);
- return ret;
- }
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
- if (ret < 0) {
- dev_err(acx->dev, "failed to lock CMD 2 (%d)\n", ret);
- return ret;
- }
-
- /* Enable backlight */
- par = 0x24;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret < 0) {
- dev_err(acx->dev, "failed to enable display backlight (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static const struct backlight_ops acx424akp_bl_ops = {
- .update_status = acx424akp_set_brightness,
-};
-
-static const struct backlight_properties acx424akp_bl_props = {
- .type = BACKLIGHT_RAW,
- .brightness = 512,
- .max_brightness = 1023,
-};
-
-static int acx424akp_read_id(struct acx424akp *acx)
-{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
- u8 vendor, version, panel;
- u16 val;
- int ret;
-
- ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID1, &vendor, 1);
- if (ret < 0) {
- dev_err(acx->dev, "could not vendor ID byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID2, &version, 1);
- if (ret < 0) {
- dev_err(acx->dev, "could not read device version byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID3, &panel, 1);
- if (ret < 0) {
- dev_err(acx->dev, "could not read panel ID byte\n");
- return ret;
- }
-
- if (vendor == 0x00) {
- dev_err(acx->dev, "device vendor ID is zero\n");
- return -ENODEV;
- }
-
- val = (vendor << 8) | panel;
- switch (val) {
- case DISPLAY_SONY_ACX424AKP_ID1:
- case DISPLAY_SONY_ACX424AKP_ID2:
- case DISPLAY_SONY_ACX424AKP_ID3:
- dev_info(acx->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n",
- vendor, version, panel);
- break;
- default:
- dev_info(acx->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n",
- vendor, version, panel);
- break;
- }
-
- return 0;
-}
-
-static int acx424akp_power_on(struct acx424akp *acx)
-{
- int ret;
-
- ret = regulator_enable(acx->supply);
- if (ret) {
- dev_err(acx->dev, "failed to enable supply (%d)\n", ret);
- return ret;
- }
-
- /* Assert RESET */
- gpiod_set_value_cansleep(acx->reset_gpio, 1);
- udelay(20);
- /* De-assert RESET */
- gpiod_set_value_cansleep(acx->reset_gpio, 0);
- usleep_range(11000, 20000);
-
- return 0;
-}
-
-static void acx424akp_power_off(struct acx424akp *acx)
-{
- /* Assert RESET */
- gpiod_set_value_cansleep(acx->reset_gpio, 1);
- usleep_range(11000, 20000);
-
- regulator_disable(acx->supply);
-}
-
-static int acx424akp_prepare(struct drm_panel *panel)
-{
- struct acx424akp *acx = panel_to_acx424akp(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
- const u8 mddi = 3;
- int ret;
-
- ret = acx424akp_power_on(acx);
- if (ret)
- return ret;
-
- ret = acx424akp_read_id(acx);
- if (ret) {
- dev_err(acx->dev, "failed to read panel ID (%d)\n", ret);
- goto err_power_off;
- }
-
- /* Enabe tearing mode: send TE (tearing effect) at VBLANK */
- ret = mipi_dsi_dcs_set_tear_on(dsi,
- MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret) {
- dev_err(acx->dev, "failed to enable vblank TE (%d)\n", ret);
- goto err_power_off;
- }
-
- /*
- * Set MDDI
- *
- * This presumably deactivates the Qualcomm MDDI interface and
- * selects DSI, similar code is found in other drivers such as the
- * Sharp LS043T1LE01 which makes us suspect that this panel may be
- * using a Novatek NT35565 or similar display driver chip that shares
- * this command. Due to the lack of documentation we cannot know for
- * sure.
- */
- ret = mipi_dsi_dcs_write(dsi, ACX424_DCS_SET_MDDI,
- &mddi, sizeof(mddi));
- if (ret < 0) {
- dev_err(acx->dev, "failed to set MDDI (%d)\n", ret);
- goto err_power_off;
- }
-
- /* Exit sleep mode */
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(acx->dev, "failed to exit sleep mode (%d)\n", ret);
- goto err_power_off;
- }
- msleep(140);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(acx->dev, "failed to turn display on (%d)\n", ret);
- goto err_power_off;
- }
- if (acx->video_mode) {
- /* In video mode turn peripheral on */
- ret = mipi_dsi_turn_on_peripheral(dsi);
- if (ret) {
- dev_err(acx->dev, "failed to turn on peripheral\n");
- goto err_power_off;
- }
- }
-
- return 0;
-
-err_power_off:
- acx424akp_power_off(acx);
- return ret;
-}
-
-static int acx424akp_unprepare(struct drm_panel *panel)
-{
- struct acx424akp *acx = panel_to_acx424akp(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret) {
- dev_err(acx->dev, "failed to turn display off (%d)\n", ret);
- return ret;
- }
-
- /* Enter sleep mode */
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(acx->dev, "failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
- msleep(85);
-
- acx424akp_power_off(acx);
-
- return 0;
-}
-
-
-static int acx424akp_get_modes(struct drm_panel *panel,
- struct drm_connector *connector)
-{
- struct acx424akp *acx = panel_to_acx424akp(panel);
- struct drm_display_mode *mode;
-
- if (acx->video_mode)
- mode = drm_mode_duplicate(connector->dev,
- &sony_acx424akp_vid_mode);
- else
- mode = drm_mode_duplicate(connector->dev,
- &sony_acx424akp_cmd_mode);
- if (!mode) {
- dev_err(panel->dev, "bad mode or failed to add mode\n");
- return -EINVAL;
- }
- drm_mode_set_name(mode);
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
-
- drm_mode_probed_add(connector, mode);
-
- return 1; /* Number of modes */
-}
-
-static const struct drm_panel_funcs acx424akp_drm_funcs = {
- .unprepare = acx424akp_unprepare,
- .prepare = acx424akp_prepare,
- .get_modes = acx424akp_get_modes,
-};
-
-static int acx424akp_probe(struct mipi_dsi_device *dsi)
-{
- struct device *dev = &dsi->dev;
- struct acx424akp *acx;
- int ret;
-
- acx = devm_kzalloc(dev, sizeof(struct acx424akp), GFP_KERNEL);
- if (!acx)
- return -ENOMEM;
- acx->video_mode = of_property_read_bool(dev->of_node,
- "enforce-video-mode");
-
- mipi_dsi_set_drvdata(dsi, acx);
- acx->dev = dev;
-
- dsi->lanes = 2;
- dsi->format = MIPI_DSI_FMT_RGB888;
- /*
- * FIXME: these come from the ST-Ericsson vendor driver for the
- * HREF520 and seems to reflect limitations in the PLLs on that
- * platform, if you have the datasheet, please cross-check the
- * actual max rates.
- */
- dsi->lp_rate = 19200000;
- dsi->hs_rate = 420160000;
-
- if (acx->video_mode)
- /* Burst mode using event for sync */
- dsi->mode_flags =
- MIPI_DSI_MODE_VIDEO |
- MIPI_DSI_MODE_VIDEO_BURST;
- else
- dsi->mode_flags =
- MIPI_DSI_CLOCK_NON_CONTINUOUS;
-
- acx->supply = devm_regulator_get(dev, "vddi");
- if (IS_ERR(acx->supply))
- return PTR_ERR(acx->supply);
-
- /* This asserts RESET by default */
- acx->reset_gpio = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
- if (IS_ERR(acx->reset_gpio))
- return dev_err_probe(dev, PTR_ERR(acx->reset_gpio),
- "failed to request GPIO\n");
-
- drm_panel_init(&acx->panel, dev, &acx424akp_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
- acx->panel.backlight = devm_backlight_device_register(dev, "acx424akp", dev, acx,
- &acx424akp_bl_ops, &acx424akp_bl_props);
- if (IS_ERR(acx->panel.backlight))
- return dev_err_probe(dev, PTR_ERR(acx->panel.backlight),
- "failed to register backlight device\n");
-
- drm_panel_add(&acx->panel);
-
- ret = mipi_dsi_attach(dsi);
- if (ret < 0) {
- drm_panel_remove(&acx->panel);
- return ret;
- }
-
- return 0;
-}
-
-static int acx424akp_remove(struct mipi_dsi_device *dsi)
-{
- struct acx424akp *acx = mipi_dsi_get_drvdata(dsi);
-
- mipi_dsi_detach(dsi);
- drm_panel_remove(&acx->panel);
-
- return 0;
-}
-
-static const struct of_device_id acx424akp_of_match[] = {
- { .compatible = "sony,acx424akp" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, acx424akp_of_match);
-
-static struct mipi_dsi_driver acx424akp_driver = {
- .probe = acx424akp_probe,
- .remove = acx424akp_remove,
- .driver = {
- .name = "panel-sony-acx424akp",
- .of_match_table = acx424akp_of_match,
- },
-};
-module_mipi_dsi_driver(acx424akp_driver);
-
-MODULE_AUTHOR("Linus Wallei <linus.walleij@linaro.org>");
-MODULE_DESCRIPTION("MIPI-DSI Sony acx424akp Panel Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 34f2bae1ec8c..36fadcf9634e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -20,6 +20,7 @@ enum panfrost_hw_feature {
HW_FEATURE_AARCH64_MMU,
HW_FEATURE_TLS_HASHING,
HW_FEATURE_THREAD_GROUP_SPLIT,
+ HW_FEATURE_IDVS_GROUP_SIZE,
HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
};
@@ -74,6 +75,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g76 (\
@@ -87,6 +89,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_TLS_HASHING) | \
+ BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
#define hw_features_g31 (\
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index ead65f5fa2bc..293e799e2fe8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -206,6 +206,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
/**
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 15cec831a99a..aa89926742fd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -145,6 +145,9 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
JM_FORCE_COHERENCY_FEATURES_SHIFT;
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE))
+ quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;
+
if (quirks)
gpu_write(pfdev, GPU_JM_CONFIG, quirks);
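With the new HW_FEATURE_IDVS_GROUP_SIZE bit (added to two of the feature masks above), this quirk programs a default IDVS group size into GPU_JM_CONFIG. Illustrative arithmetic, using the register definitions from the panfrost_regs.h hunk below:

	/* 6-bit field at bit 16 (JM_MAX_IDVS_GROUP_SIZE is 0x3F) */
	u32 quirks = JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;
	/* 0xF << 16 == 0x000f0000 */
	gpu_write(pfdev, GPU_JM_CONFIG, quirks);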
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 908d79520853..a6925dbb6224 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -812,7 +812,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
nentries, 0,
msecs_to_jiffies(JOB_TIMEOUT_MS),
pfdev->reset.wq,
- NULL, "pan_js");
+ NULL, "pan_js", pfdev->dev);
if (ret) {
dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
goto err_sched;
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 6c5a11ef1ee8..16e776cc82ea 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -208,6 +208,7 @@
#define JM_MAX_JOB_THROTTLE_LIMIT 0x3F
#define JM_FORCE_COHERENCY_FEATURES_SHIFT 2
#define JM_IDVS_GROUP_SIZE_SHIFT 16
+#define JM_DEFAULT_IDVS_GROUP_SIZE 0xF
#define JM_MAX_IDVS_GROUP_SIZE 0x3F
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index e4b16421500b..1cb6f0c224bb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
@@ -269,6 +270,16 @@ static struct pci_driver qxl_pci_driver = {
.driver.pm = &qxl_pm_ops,
};
+static const struct drm_ioctl_desc qxl_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, DRM_AUTH),
+};
+
static struct drm_driver qxl_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -282,6 +293,7 @@ static struct drm_driver qxl_driver = {
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
+ .num_ioctls = ARRAY_SIZE(qxl_ioctls),
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -292,24 +304,7 @@ static struct drm_driver qxl_driver = {
.release = qxl_drm_release,
};
-static int __init qxl_init(void)
-{
- if (drm_firmware_drivers_only() && qxl_modeset == -1)
- return -EINVAL;
-
- if (qxl_modeset == 0)
- return -EINVAL;
- qxl_driver.num_ioctls = qxl_max_ioctls;
- return pci_register_driver(&qxl_pci_driver);
-}
-
-static void __exit qxl_exit(void)
-{
- pci_unregister_driver(&qxl_pci_driver);
-}
-
-module_init(qxl_init);
-module_exit(qxl_exit);
+drm_module_pci_driver_if_modeset(qxl_pci_driver, qxl_modeset);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
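The open-coded init/exit pair is replaced by drm_module_pci_driver_if_modeset() from the newly included <drm/drm_module.h>. A hypothetical sketch of the behavior it provides (the exact helper names and error codes live in the header, not reproduced here):

	static int __init qxl_init(void)
	{
		/* honour nomodeset unless qxl_modeset explicitly overrides it */
		if (drm_firmware_drivers_only() && qxl_modeset == -1)
			return -ENODEV;
		if (qxl_modeset == 0)
			return -ENODEV;
		return pci_register_driver(&qxl_pci_driver);
	}

	static void __exit qxl_exit(void)
	{
		pci_unregister_driver(&qxl_pci_driver);
	}

	module_init(qxl_init);
	module_exit(qxl_exit);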
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 9796099ff18f..47c169673088 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -65,7 +65,6 @@ struct iosys_map;
#define QXL_DEBUGFS_MAX_COMPONENTS 32
extern int qxl_num_crtc;
-extern int qxl_max_ioctls;
#define QXL_INTERRUPT_MASK (\
QXL_INTERRUPT_DISPLAY |\
@@ -261,9 +260,6 @@ struct qxl_device {
int qxl_debugfs_fence_init(struct qxl_device *rdev);
-extern const struct drm_ioctl_desc qxl_ioctls[];
-extern int qxl_max_ioctl;
-
int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
void qxl_device_fini(struct qxl_device *qdev);
@@ -457,4 +453,13 @@ struct qxl_drv_surface *
qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
+/* qxl_ioctl.c */
+int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 38aabcbe2238..30f58b21372a 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -33,8 +33,7 @@
* TODO: allocating a new gem(in qxl_bo) for each request.
* This is wasteful since bo's are page aligned.
*/
-static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data;
@@ -61,8 +60,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int qxl_map_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_map *qxl_map = data;
@@ -272,8 +270,7 @@ out_free_reloc:
return ret;
}
-static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_execbuffer *execbuffer = data;
@@ -297,8 +294,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_update_area *update_area = data;
@@ -347,8 +343,7 @@ out:
return ret;
}
-static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_getparam *param = data;
@@ -366,8 +361,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -388,8 +382,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
return -ENOSYS;
}
-static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data;
@@ -422,23 +415,3 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
param->handle = handle;
return ret;
}
-
-const struct drm_ioctl_desc qxl_ioctls[] = {
- DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
- DRM_AUTH),
- DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
- DRM_AUTH),
- DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
- DRM_AUTH),
- DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
- DRM_AUTH),
-
- DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
- DRM_AUTH),
-};
-
-int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index f15b20da5315..c1bbfbe28bda 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <asm/unaligned.h>
@@ -722,7 +723,7 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
break;
}
if (arg != ATOM_COND_ALWAYS)
- SDEBUG(" taken: %s\n", execute ? "yes" : "no");
+ SDEBUG(" taken: %s\n", str_yes_no(execute));
SDEBUG(" target: 0x%04X\n", target);
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 895776c421d4..08f83bf2c330 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2462,8 +2462,6 @@ struct radeon_device {
struct radeon_vm_manager vm_manager;
struct mutex gpu_clock_mutex;
/* memory stats */
- atomic64_t vram_usage;
- atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
atomic_t gpu_reset_counter;
/* ACPI interface */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 573154268d43..b9a07677a71e 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1596,6 +1596,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.preferred_depth = 24;
rdev->ddev->mode_config.prefer_shadow = 1;
+ rdev->ddev->mode_config.fb_modifiers_not_supported = true;
+
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
ret = radeon_modeset_create_props(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 11ad210919c8..965161b8565b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -241,6 +241,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct drm_radeon_info *info = data;
struct radeon_mode_info *minfo = &rdev->mode_info;
uint32_t *value, value_tmp, *value_ptr, value_size;
+ struct ttm_resource_manager *man;
uint64_t value64;
struct drm_crtc *crtc;
int i, found;
@@ -550,12 +551,14 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_VRAM_USAGE:
value = (uint32_t*)&value64;
value_size = sizeof(uint64_t);
- value64 = atomic64_read(&rdev->vram_usage);
+ man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
+ value64 = ttm_resource_manager_usage(man);
break;
case RADEON_INFO_GTT_USAGE:
value = (uint32_t*)&value64;
value_size = sizeof(uint64_t);
- value64 = atomic64_read(&rdev->gtt_usage);
+ man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
+ value64 = ttm_resource_manager_usage(man);
break;
case RADEON_INFO_ACTIVE_CU_COUNT:
if (rdev->family >= CHIP_BONAIRE)
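Both usage queries now read TTM's own per-manager accounting instead of driver-side atomics. A minimal sketch of the pattern (the helper name radeon_vram_usage() is hypothetical):

	static u64 radeon_vram_usage(struct radeon_device *rdev)
	{
		struct ttm_resource_manager *man =
			ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

		return ttm_resource_manager_usage(man);
	}

The same pattern with TTM_PL_TT covers the GTT query, which is why the vram_usage/gtt_usage atomics can be deleted from struct radeon_device above.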
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 87536d205593..91a72cd14304 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -49,27 +49,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-static void radeon_update_memory_usage(struct ttm_buffer_object *bo,
- unsigned int mem_type, int sign)
-{
- struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
-
- switch (mem_type) {
- case TTM_PL_TT:
- if (sign > 0)
- atomic64_add(bo->base.size, &rdev->gtt_usage);
- else
- atomic64_sub(bo->base.size, &rdev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- if (sign > 0)
- atomic64_add(bo->base.size, &rdev->vram_usage);
- else
- atomic64_sub(bo->base.size, &rdev->vram_usage);
- break;
- }
-}
-
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct radeon_bo *bo;
@@ -434,7 +413,9 @@ void radeon_bo_fini(struct radeon_device *rdev)
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
u64 real_vram_size = rdev->mc.real_vram_size;
- u64 vram_usage = atomic64_read(&rdev->vram_usage);
+ struct ttm_resource_manager *man =
+ ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
+ u64 vram_usage = ttm_resource_manager_usage(man);
/* This function is based on the current VRAM usage.
*
@@ -723,16 +704,10 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
return radeon_bo_get_surface_reg(bo);
}
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- unsigned int old_type,
- struct ttm_resource *new_mem)
+void radeon_bo_move_notify(struct ttm_buffer_object *bo)
{
struct radeon_bo *rbo;
- radeon_update_memory_usage(bo, old_type, -1);
- if (new_mem)
- radeon_update_memory_usage(bo, new_mem->mem_type, 1);
-
if (!radeon_ttm_bo_is_radeon_bo(bo))
return;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 1afc7992ef91..0a6ef49e990a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -160,9 +160,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
u32 *tiling_flags, u32 *pitch);
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
-extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- unsigned int old_type,
- struct ttm_resource *new_mem);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo);
extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0d1283cdc8fb..44594d16611f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -199,7 +199,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = bo->resource;
struct radeon_device *rdev;
struct radeon_bo *rbo;
- int r, old_type;
+ int r;
if (new_mem->mem_type == TTM_PL_TT) {
r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
@@ -216,9 +216,6 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
return -EINVAL;
- /* Save old type for statistics update */
- old_type = old_mem->mem_type;
-
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ttm_bo_move_null(bo, new_mem);
@@ -264,7 +261,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
atomic64_add(bo->base.size, &rdev->num_bytes_moved);
- radeon_bo_move_notify(bo, old_type, new_mem);
+ radeon_bo_move_notify(bo);
return 0;
}
@@ -679,16 +676,6 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}
-static void
-radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
-{
- unsigned int old_type = TTM_PL_SYSTEM;
-
- if (bo->resource)
- old_type = bo->resource->mem_type;
- radeon_bo_move_notify(bo, old_type, NULL);
-}
-
static struct ttm_device_funcs radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
@@ -697,7 +684,6 @@ static struct ttm_device_funcs radeon_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
- .delete_mem_notify = &radeon_bo_delete_mem_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0558d928d98d..bc0f44299bb9 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -470,8 +470,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
void *ptr;
-
- int i, r;
+ long r;
+ int i;
if (offset & 0x3F) {
DRM_ERROR("UVD messages must be 64 byte aligned!\n");
@@ -481,13 +481,13 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0) {
- DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
return r ? r : -ETIME;
}
r = radeon_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+ DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
return r;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 5a8131ef81d5..982e450233ed 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -701,6 +701,9 @@ static struct platform_driver rcar_du_platform_driver = {
static int __init rcar_du_init(void)
{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
rcar_du_of_init(rcar_du_of_table);
return platform_driver_register(&rcar_du_platform_driver);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index bec207de4544..ac190e2b1f7a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -457,6 +457,9 @@ static int __init rockchip_drm_init(void)
{
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
num_rockchip_sub_drivers = 0;
ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_DRM_ROCKCHIP);
ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f91fb31ab7a7..b81fceb0b8a2 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -491,7 +491,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
if (r == -ENOENT)
drm_sched_job_done(s_job);
else if (r)
- DRM_ERROR("fence add callback failed (%d)\n",
+ DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
r);
} else
drm_sched_job_done(s_job);
@@ -957,7 +957,7 @@ static int drm_sched_main(void *param)
if (r == -ENOENT)
drm_sched_job_done(sched_job);
else if (r)
- DRM_ERROR("fence add callback failed (%d)\n",
+ DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
} else {
@@ -991,7 +991,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
unsigned hw_submission, unsigned hang_limit,
long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name)
+ atomic_t *score, const char *name, struct device *dev)
{
int i, ret;
sched->ops = ops;
@@ -1001,6 +1001,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->timeout_wq = timeout_wq ? : system_wq;
sched->hang_limit = hang_limit;
sched->score = score ? score : &sched->_score;
+ sched->dev = dev;
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
drm_sched_rq_init(sched, &sched->sched_rq[i]);
@@ -1018,7 +1019,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
if (IS_ERR(sched->thread)) {
ret = PTR_ERR(sched->thread);
sched->thread = NULL;
- DRM_ERROR("Failed to create scheduler for %s.\n", name);
+ DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
return ret;
}
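Every drm_sched_init() caller now passes a struct device so errors are attributed via DRM_DEV_ERROR(); the panfrost_job.c hunk above shows one conversion. A hypothetical call site under the new signature:

	ret = drm_sched_init(&ring->sched, &my_sched_ops, /* ops: hypothetical */
			     hw_submission, hang_limit,
			     msecs_to_jiffies(timeout_ms),
			     NULL,	/* timeout_wq: fall back to system_wq */
			     NULL,	/* score: use the scheduler's own */
			     "my-ring", dev);
	if (ret)
		dev_err(dev, "failed to create scheduler: %d\n", ret);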
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index 0856e4b12f70..5ba5f9138c95 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -4,4 +4,5 @@ test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
test-drm_damage_helper.o test-drm_dp_mst_helper.o \
test-drm_rect.o
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o \
+ test-drm_buddy.o
diff --git a/drivers/gpu/drm/selftests/drm_buddy_selftests.h b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
new file mode 100644
index 000000000000..455b756c4ae5
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as igt__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * Tests are executed in order by igt/drm_buddy
+ */
+selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */
+selftest(buddy_alloc_limit, igt_buddy_alloc_limit)
+selftest(buddy_alloc_range, igt_buddy_alloc_range)
+selftest(buddy_alloc_optimistic, igt_buddy_alloc_optimistic)
+selftest(buddy_alloc_pessimistic, igt_buddy_alloc_pessimistic)
+selftest(buddy_alloc_smoke, igt_buddy_alloc_smoke)
+selftest(buddy_alloc_pathological, igt_buddy_alloc_pathological)
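The header is consumed as an X-macro list: each includer defines selftest() to generate what it needs before the #include. A hypothetical expansion producing forward declarations for the test functions:

	#define selftest(name, func) static int func(void *arg);
	#include "drm_buddy_selftests.h"
	#undef selftest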
diff --git a/drivers/gpu/drm/selftests/test-drm_buddy.c b/drivers/gpu/drm/selftests/test-drm_buddy.c
new file mode 100644
index 000000000000..fa997f89522b
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_buddy.c
@@ -0,0 +1,992 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#define pr_fmt(fmt) "drm_buddy: " fmt
+
+#include <linux/module.h>
+#include <linux/prime_numbers.h>
+#include <linux/sched/signal.h>
+
+#include <drm/drm_buddy.h>
+
+#include "../lib/drm_random.h"
+
+#define TESTS "drm_buddy_selftests.h"
+#include "drm_selftest.h"
+
+#define IGT_TIMEOUT(name__) \
+ unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
+
+static unsigned int random_seed;
+
+static inline u64 get_size(int order, u64 chunk_size)
+{
+ return (1 << order) * chunk_size;
+}
+
+__printf(2, 3)
+static bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
+{
+ va_list va;
+
+ if (!signal_pending(current)) {
+ cond_resched();
+ if (time_before(jiffies, timeout))
+ return false;
+ }
+
+ if (fmt) {
+ va_start(va, fmt);
+ vprintk(fmt, va);
+ va_end(va);
+ }
+
+ return true;
+}
+
+static inline const char *yesno(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+static void __igt_dump_block(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ bool buddy)
+{
+ pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
+ block->header,
+ drm_buddy_block_state(block),
+ drm_buddy_block_order(block),
+ drm_buddy_block_offset(block),
+ drm_buddy_block_size(mm, block),
+ yesno(!block->parent),
+ yesno(buddy));
+}
+
+static void igt_dump_block(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+
+ __igt_dump_block(mm, block, false);
+
+ buddy = drm_get_buddy(block);
+ if (buddy)
+ __igt_dump_block(mm, buddy, true);
+}
+
+static int igt_check_block(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = drm_buddy_block_state(block);
+
+ if (block_state != DRM_BUDDY_ALLOCATED &&
+ block_state != DRM_BUDDY_FREE &&
+ block_state != DRM_BUDDY_SPLIT) {
+ pr_err("block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = drm_buddy_block_size(mm, block);
+ offset = drm_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ pr_err("block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ if (!is_power_of_2(block_size)) {
+ pr_err("block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ pr_err("block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ pr_err("block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ pr_err("block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = drm_get_buddy(block);
+
+ if (!buddy && block->parent) {
+ pr_err("buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ pr_err("buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_size(mm, buddy) != block_size) {
+ pr_err("buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_state(buddy) == block_state &&
+ block_state == DRM_BUDDY_FREE) {
+ pr_err("block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
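+
+/*
+ * Illustrative note: buddies are the two halves of one split parent,
+ * so a block of size S at offset O has its buddy at O ^ S -- e.g. a
+ * 0x1000-byte block at offset 0x3000 pairs with the one at 0x2000,
+ * which is the invariant igt_check_block() verifies above.
+ */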
+
+static int igt_check_blocks(struct drm_buddy *mm,
+ struct list_head *blocks,
+ u64 expected_size,
+ bool is_contiguous)
+{
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = igt_check_block(mm, block);
+
+ if (!drm_buddy_block_is_allocated(block)) {
+			pr_err("block not allocated\n");
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += drm_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ pr_err("size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev block, dump:\n");
+ igt_dump_block(mm, prev);
+ }
+
+ pr_err("bad block, dump:\n");
+ igt_dump_block(mm, block);
+
+ return err;
+}
+
+static int igt_check_mm(struct drm_buddy *mm)
+{
+ struct drm_buddy_block *root;
+ struct drm_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ pr_err("n_roots is zero\n");
+ return -EINVAL;
+ }
+
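+	/*
+	 * drm_buddy_init() creates one root block per set bit of the mm
+	 * size: e.g. a hypothetical 10-page mm (binary 1010) gets an
+	 * order-3 and an order-1 root.
+	 */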
+ if (mm->n_roots != hweight64(mm->size)) {
+ pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct drm_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ pr_err("root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = igt_check_block(mm, root);
+
+ if (!drm_buddy_block_is_free(root)) {
+ pr_err("root not free\n");
+ err = -EINVAL;
+ }
+
+ order = drm_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ pr_err("max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct drm_buddy_block,
+ link);
+ if (block != root) {
+ pr_err("root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += drm_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ pr_err("expected mm size=%llx, found=%llx\n", mm->size,
+ total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev root(%u), dump:\n", i - 1);
+ igt_dump_block(mm, prev);
+ }
+
+ if (root) {
+ pr_err("bad root(%u), dump:\n", i);
+ igt_dump_block(mm, root);
+ }
+
+ return err;
+}
+
+static void igt_mm_config(u64 *size, u64 *chunk_size)
+{
+ DRM_RND_STATE(prng, random_seed);
+ u32 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
+
+ prandom_seed_state(&prng, random_seed);
+
+ /* Let size be a random number of pages up to 8 GB (2M pages) */
+ s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+ /* Let the chunk size be a random power of 2 less than size */
+ ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
+ /* Round size down to the chunk size */
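+	/*
+	 * ms is a power of two, so -ms is all-ones above that bit and the
+	 * AND clears any partial chunk: e.g. for a hypothetical ms = 8,
+	 * this masks with ...11111000.
+	 */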
+ s &= -ms;
+
+ /* Convert from pages to bytes */
+ *chunk_size = (u64)ms << 12;
+ *size = (u64)s << 12;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+ u64 mm_size, size, min_page_size, start = 0;
+ struct drm_buddy_block *block;
+ const int max_order = 3;
+ unsigned long flags = 0;
+ int order, top, err;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ LIST_HEAD(tmp);
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
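+	 *
+	 * E.g. with the max_order = 3 used here (an 8-page mm), one pass
+	 * allocates an order-2, order-1 and order-0 block (4 + 2 + 1
+	 * pages), leaving a single page free.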
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ BUG_ON(mm.max_order != max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ }
+
+ for (order = top; order--; ) {
+ size = min_page_size = get_size(order, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
+ min_page_size, &tmp, flags);
+ if (err) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+ goto err;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ size = min_page_size = get_size(0, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (err) {
+ pr_info("buddy_alloc hit -ENOMEM for hole\n");
+ goto err;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &holes);
+
+ size = min_page_size = get_size(top, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (!err) {
+			pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!\n",
+ top, max_order);
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ drm_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ size = min_page_size = get_size(order, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (!err) {
+			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!\n",
+ order);
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (err)
+ err = 0;
+
+err:
+ list_splice_tail(&holes, &blocks);
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_smoke(void *arg)
+{
+ u64 mm_size, min_page_size, chunk_size, start = 0;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ int *order;
+ int err, i;
+
+ DRM_RND_STATE(prng, random_seed);
+ IGT_TIMEOUT(end_time);
+
+ igt_mm_config(&mm_size, &chunk_size);
+
+ err = drm_buddy_init(&mm, mm_size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ order = drm_random_order(mm.max_order + 1, &prng);
+	if (!order) {
+		err = -ENOMEM;
+		goto out_fini;
+	}
+
+ for (i = 0; i <= mm.max_order; ++i) {
+ struct drm_buddy_block *block;
+ int max_order = order[i];
+ bool timeout = false;
+ LIST_HEAD(blocks);
+ u64 total, size;
+ LIST_HEAD(tmp);
+ int order;
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort\n");
+ break;
+ }
+
+ order = max_order;
+ total = 0;
+
+ do {
+retry:
+ size = min_page_size = get_size(order, chunk_size);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
+ min_page_size, &tmp, flags);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ pr_err("buddy_alloc with order=%d failed(%d)\n",
+ order, err);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ break;
+ }
+
+ list_move_tail(&block->link, &blocks);
+
+ if (drm_buddy_block_order(block) != order) {
+ pr_err("buddy_alloc order mismatch\n");
+ err = -EINVAL;
+ break;
+ }
+
+ total += drm_buddy_block_size(&mm, block);
+
+ if (__igt_timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
+ } while (total < mm.size);
+
+ if (!err)
+ err = igt_check_blocks(&mm, &blocks, total, false);
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+ if (err || timeout)
+ break;
+
+ cond_resched();
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ kfree(order);
+out_fini:
+ drm_buddy_fini(&mm);
+
+ return err;
+}
+
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+ u64 mm_size, size, min_page_size, start = 0;
+ struct drm_buddy_block *block, *bn;
+ const unsigned int max_order = 16;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
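+	 *
+	 * With max_order = 16 the mm spans 2^16 pages, and orders 0..15
+	 * consume 2^0 + ... + 2^15 = 2^16 - 1 of them, so exactly one
+	 * page remains for the final order-0 allocation.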
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order < max_order; order++) {
+ size = min_page_size = get_size(order, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (err) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ goto err;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ size = min_page_size = get_size(0, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (err) {
+ pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+ goto err;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--; ) {
+ size = min_page_size = get_size(order, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (!err) {
+			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!\n",
+ order);
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ size = min_page_size = get_size(order, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (err) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ order);
+ goto err;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ size = min_page_size = get_size(max_order, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (err) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+ goto err;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+err:
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+ u64 mm_size, size, min_page_size, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ const int max_order = 16;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+ int order, err;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
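+	 *
+	 * The size of (2^(max_order + 1) - 1) pages is all-ones in binary,
+	 * so drm_buddy_init() creates exactly one root block of each order
+	 * from 0 to max_order.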
+ */
+
+ mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+ err = drm_buddy_init(&mm,
+ mm_size,
+ PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ size = min_page_size = get_size(order, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (err) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ goto err;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ size = min_page_size = get_size(0, PAGE_SIZE);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
+ if (!err) {
+		pr_info("buddy_alloc unexpectedly succeeded, it should be full!\n");
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_blocks has no blocks\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ list_move_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ } else {
+ err = 0;
+ }
+
+err:
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_range(void *arg)
+{
+ unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
+ u64 offset, size, rem, chunk_size, end;
+ unsigned long page_num;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ int err;
+
+ igt_mm_config(&size, &chunk_size);
+
+ err = drm_buddy_init(&mm, size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort, abort, abort!\n");
+ goto err_fini;
+ }
+
+ rem = mm.size;
+ offset = 0;
+
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct drm_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+ end = offset + size;
+
+ err = drm_buddy_alloc_blocks(&mm, offset, end, size, mm.chunk_size, &tmp, flags);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("alloc_range hit -ENOMEM with size=%llx\n",
+ size);
+ } else {
+ pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
+ offset, size, err);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct drm_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_range has no blocks\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (drm_buddy_block_offset(block) != offset) {
+ pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ drm_buddy_block_offset(block), offset);
+ err = -EINVAL;
+ }
+
+ if (!err)
+ err = igt_check_blocks(&mm, &tmp, size, true);
+
+ list_splice_tail(&tmp, &blocks);
+
+ if (err)
+ break;
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+
+ cond_resched();
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+err_fini:
+ drm_buddy_fini(&mm);
+
+ return err;
+}
+
+static int igt_buddy_alloc_limit(void *arg)
+{
+ u64 end, size = U64_MAX, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ LIST_HEAD(allocated);
+ struct drm_buddy mm;
+ int err;
+
+ size = end = round_down(size, 4096);
+ err = drm_buddy_init(&mm, size, PAGE_SIZE);
+ if (err)
+ return err;
+
+ if (mm.max_order != DRM_BUDDY_MAX_ORDER) {
+ pr_err("mm.max_order(%d) != %d\n",
+ mm.max_order, DRM_BUDDY_MAX_ORDER);
+ err = -EINVAL;
+ goto out_fini;
+ }
+
+ err = drm_buddy_alloc_blocks(&mm, start, end, size,
+ PAGE_SIZE, &allocated, flags);
+
+ if (unlikely(err))
+ goto out_free;
+
+ block = list_first_entry_or_null(&allocated,
+ struct drm_buddy_block,
+ link);
+
+ if (!block) {
+ err = -EINVAL;
+ goto out_fini;
+ }
+
+ if (drm_buddy_block_order(block) != mm.max_order) {
+ pr_err("block order(%d) != %d\n",
+ drm_buddy_block_order(block), mm.max_order);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ if (drm_buddy_block_size(&mm, block) !=
+ BIT_ULL(mm.max_order) * PAGE_SIZE) {
+ pr_err("block size(%llu) != %llu\n",
+ drm_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+out_free:
+ drm_buddy_free_list(&mm, &allocated);
+out_fini:
+ drm_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_sanitycheck(void *ignored)
+{
+ pr_info("%s - ok!\n", __func__);
+ return 0;
+}
+
+#include "drm_selftest.c"
+
+static int __init test_drm_buddy_init(void)
+{
+ int err;
+
+ while (!random_seed)
+ random_seed = get_random_int();
+
+ pr_info("Testing DRM buddy manager (struct drm_buddy), with random_seed=0x%x\n",
+ random_seed);
+ err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
+
+ return err > 0 ? 0 : err;
+}
+
+static void __exit test_drm_buddy_exit(void)
+{
+}
+
+module_init(test_drm_buddy_init);
+module_exit(test_drm_buddy_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
index 61b44d3a6a61..f6d66285c5fc 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -323,7 +323,6 @@ static struct drm_device mock_drm_device = {
.max_width = MAX_WIDTH,
.min_height = MIN_HEIGHT,
.max_height = MAX_HEIGHT,
- .allow_fb_modifiers = true,
.funcs = &mock_config_funcs,
},
};
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 80078a9fd7f6..731cbad7520f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -295,7 +296,7 @@ static struct platform_driver shmob_drm_platform_driver = {
},
};
-module_platform_driver(shmob_drm_platform_driver);
+drm_module_platform_driver(shmob_drm_platform_driver);
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver");
diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig
new file mode 100644
index 000000000000..5861c3ab7c45
--- /dev/null
+++ b/drivers/gpu/drm/solomon/Kconfig
@@ -0,0 +1,21 @@
+config DRM_SSD130X
+ tristate "DRM support for Solomon SSD130x OLED displays"
+ depends on DRM
+ select BACKLIGHT_CLASS_DEVICE
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+	  DRM driver for the SSD1305, SSD1306, SSD1307 and SSD1309 Solomon
+	  OLED controllers. This is only the core driver; a driver for the
+	  appropriate bus transport in your chip must also be selected.
+
+ If M is selected the module will be called ssd130x.
+
+config DRM_SSD130X_I2C
+ tristate "DRM support for Solomon SSD130x OLED displays (I2C bus)"
+ depends on DRM_SSD130X && I2C
+ select REGMAP_I2C
+ help
+ Say Y here if the SSD130x OLED display is connected via I2C bus.
+
+ If M is selected the module will be called ssd130x-i2c.
diff --git a/drivers/gpu/drm/solomon/Makefile b/drivers/gpu/drm/solomon/Makefile
new file mode 100644
index 000000000000..4bfc5acb0447
--- /dev/null
+++ b/drivers/gpu/drm/solomon/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DRM_SSD130X) += ssd130x.o
+obj-$(CONFIG_DRM_SSD130X_I2C) += ssd130x-i2c.o
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
new file mode 100644
index 000000000000..3126aeda4ced
--- /dev/null
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DRM driver for Solomon SSD130x OLED displays (I2C bus)
+ *
+ * Copyright 2022 Red Hat Inc.
+ * Author: Javier Martinez Canillas <javierm@redhat.com>
+ *
+ * Based on drivers/video/fbdev/ssd1307fb.c
+ * Copyright 2012 Free Electrons
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "ssd130x.h"
+
+#define DRIVER_NAME "ssd130x-i2c"
+#define DRIVER_DESC "DRM driver for Solomon SSD130x OLED displays (I2C)"
+
+static const struct regmap_config ssd130x_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ssd130x_i2c_probe(struct i2c_client *client)
+{
+ struct ssd130x_device *ssd130x;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ssd130x_i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ssd130x = ssd130x_probe(&client->dev, regmap);
+ if (IS_ERR(ssd130x))
+ return PTR_ERR(ssd130x);
+
+ i2c_set_clientdata(client, ssd130x);
+
+ return 0;
+}
+
+static int ssd130x_i2c_remove(struct i2c_client *client)
+{
+ struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
+
+ return ssd130x_remove(ssd130x);
+}
+
+static void ssd130x_i2c_shutdown(struct i2c_client *client)
+{
+ struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
+
+ ssd130x_shutdown(ssd130x);
+}
+
+static struct ssd130x_deviceinfo ssd130x_ssd1305_deviceinfo = {
+ .default_vcomh = 0x34,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 7,
+};
+
+static struct ssd130x_deviceinfo ssd130x_ssd1306_deviceinfo = {
+ .default_vcomh = 0x20,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 8,
+ .need_chargepump = 1,
+};
+
+static struct ssd130x_deviceinfo ssd130x_ssd1307_deviceinfo = {
+ .default_vcomh = 0x20,
+ .default_dclk_div = 2,
+ .default_dclk_frq = 12,
+ .need_pwm = 1,
+};
+
+static struct ssd130x_deviceinfo ssd130x_ssd1309_deviceinfo = {
+ .default_vcomh = 0x34,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 10,
+};
+
+static const struct of_device_id ssd130x_of_match[] = {
+ {
+ .compatible = "solomon,ssd1305fb-i2c",
+ .data = &ssd130x_ssd1305_deviceinfo,
+ },
+ {
+ .compatible = "solomon,ssd1306fb-i2c",
+ .data = &ssd130x_ssd1306_deviceinfo,
+ },
+ {
+ .compatible = "solomon,ssd1307fb-i2c",
+ .data = &ssd130x_ssd1307_deviceinfo,
+ },
+ {
+ .compatible = "solomon,ssd1309fb-i2c",
+ .data = &ssd130x_ssd1309_deviceinfo,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ssd130x_of_match);
+
+static struct i2c_driver ssd130x_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ssd130x_of_match,
+ },
+ .probe_new = ssd130x_i2c_probe,
+ .remove = ssd130x_i2c_remove,
+ .shutdown = ssd130x_i2c_shutdown,
+};
+module_i2c_driver(ssd130x_i2c_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
new file mode 100644
index 000000000000..92c1902f53e4
--- /dev/null
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DRM driver for Solomon SSD130x OLED displays
+ *
+ * Copyright 2022 Red Hat Inc.
+ * Author: Javier Martinez Canillas <javierm@redhat.com>
+ *
+ * Based on drivers/video/fbdev/ssd1307fb.c
+ * Copyright 2012 Free Electrons
+ */
+
+#include <linux/backlight.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+
+#include "ssd130x.h"
+
+#define DRIVER_NAME "ssd130x"
+#define DRIVER_DESC "DRM driver for Solomon SSD130x OLED displays"
+#define DRIVER_DATE "20220131"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define SSD130X_DATA 0x40
+#define SSD130X_COMMAND 0x80
+
+#define SSD130X_SET_ADDRESS_MODE 0x20
+#define SSD130X_SET_COL_RANGE 0x21
+#define SSD130X_SET_PAGE_RANGE 0x22
+#define SSD130X_CONTRAST 0x81
+#define SSD130X_SET_LOOKUP_TABLE 0x91
+#define SSD130X_CHARGE_PUMP 0x8d
+#define SSD130X_SEG_REMAP_ON 0xa1
+#define SSD130X_DISPLAY_OFF 0xae
+#define SSD130X_SET_MULTIPLEX_RATIO 0xa8
+#define SSD130X_DISPLAY_ON 0xaf
+#define SSD130X_START_PAGE_ADDRESS 0xb0
+#define SSD130X_SET_COM_SCAN_DIR 0xc0
+#define SSD130X_SET_DISPLAY_OFFSET 0xd3
+#define SSD130X_SET_CLOCK_FREQ 0xd5
+#define SSD130X_SET_AREA_COLOR_MODE 0xd8
+#define SSD130X_SET_PRECHARGE_PERIOD 0xd9
+#define SSD130X_SET_COM_PINS_CONFIG 0xda
+#define SSD130X_SET_VCOMH 0xdb
+
+#define SSD130X_SET_COM_SCAN_DIR_MASK GENMASK(3, 2)
+#define SSD130X_SET_COM_SCAN_DIR_SET(val) FIELD_PREP(SSD130X_SET_COM_SCAN_DIR_MASK, (val))
+#define SSD130X_SET_CLOCK_DIV_MASK GENMASK(3, 0)
+#define SSD130X_SET_CLOCK_DIV_SET(val) FIELD_PREP(SSD130X_SET_CLOCK_DIV_MASK, (val))
+#define SSD130X_SET_CLOCK_FREQ_MASK GENMASK(7, 4)
+#define SSD130X_SET_CLOCK_FREQ_SET(val) FIELD_PREP(SSD130X_SET_CLOCK_FREQ_MASK, (val))
+#define SSD130X_SET_PRECHARGE_PERIOD1_MASK GENMASK(3, 0)
+#define SSD130X_SET_PRECHARGE_PERIOD1_SET(val) FIELD_PREP(SSD130X_SET_PRECHARGE_PERIOD1_MASK, (val))
+#define SSD130X_SET_PRECHARGE_PERIOD2_MASK GENMASK(7, 4)
+#define SSD130X_SET_PRECHARGE_PERIOD2_SET(val) FIELD_PREP(SSD130X_SET_PRECHARGE_PERIOD2_MASK, (val))
+#define SSD130X_SET_COM_PINS_CONFIG1_MASK GENMASK(4, 4)
+#define SSD130X_SET_COM_PINS_CONFIG1_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG1_MASK, !(val))
+#define SSD130X_SET_COM_PINS_CONFIG2_MASK GENMASK(5, 5)
+#define SSD130X_SET_COM_PINS_CONFIG2_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG2_MASK, (val))
+
+#define SSD130X_SET_ADDRESS_MODE_HORIZONTAL 0x00
+#define SSD130X_SET_ADDRESS_MODE_VERTICAL 0x01
+#define SSD130X_SET_ADDRESS_MODE_PAGE 0x02
+
+#define SSD130X_SET_AREA_COLOR_MODE_ENABLE 0x1e
+#define SSD130X_SET_AREA_COLOR_MODE_LOW_POWER 0x05
+
+#define MAX_CONTRAST 255
+
+static inline struct ssd130x_device *drm_to_ssd130x(struct drm_device *drm)
+{
+ return container_of(drm, struct ssd130x_device, drm);
+}
+
+/*
+ * Helper to write data (SSD130X_DATA) to the device.
+ */
+static int ssd130x_write_data(struct ssd130x_device *ssd130x, u8 *values, int count)
+{
+ return regmap_bulk_write(ssd130x->regmap, SSD130X_DATA, values, count);
+}
+
+/*
+ * Helper to write a command (SSD130X_COMMAND). The first variadic argument
+ * is the command to write and the following are the command options.
+ *
+ * Note that the ssd130x protocol requires each command and option to be
+ * written as an SSD130X_COMMAND device register value. That is why a call
+ * to regmap_write(..., SSD130X_COMMAND, ...) is done for each argument.
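+ *
+ * For example, a hypothetical contrast update:
+ *
+ *   ssd130x_write_cmd(ssd130x, 2, SSD130X_CONTRAST, 0x7f);
+ *
+ * issues two SSD130X_COMMAND register writes, one for the 0x81 command
+ * byte and one for the 0x7f option byte.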
+ */
+static int ssd130x_write_cmd(struct ssd130x_device *ssd130x, int count,
+ /* u8 cmd, u8 option, ... */...)
+{
+ va_list ap;
+ u8 value;
+ int ret;
+
+ va_start(ap, count);
+
+ do {
+ value = va_arg(ap, int);
+ ret = regmap_write(ssd130x->regmap, SSD130X_COMMAND, value);
+ if (ret)
+ goto out_end;
+ } while (--count);
+
+out_end:
+ va_end(ap);
+
+ return ret;
+}
+
+static int ssd130x_set_col_range(struct ssd130x_device *ssd130x,
+ u8 col_start, u8 cols)
+{
+ u8 col_end = col_start + cols - 1;
+ int ret;
+
+ if (col_start == ssd130x->col_start && col_end == ssd130x->col_end)
+ return 0;
+
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD130X_SET_COL_RANGE, col_start, col_end);
+ if (ret < 0)
+ return ret;
+
+ ssd130x->col_start = col_start;
+ ssd130x->col_end = col_end;
+ return 0;
+}
+
+static int ssd130x_set_page_range(struct ssd130x_device *ssd130x,
+ u8 page_start, u8 pages)
+{
+ u8 page_end = page_start + pages - 1;
+ int ret;
+
+ if (page_start == ssd130x->page_start && page_end == ssd130x->page_end)
+ return 0;
+
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD130X_SET_PAGE_RANGE, page_start, page_end);
+ if (ret < 0)
+ return ret;
+
+ ssd130x->page_start = page_start;
+ ssd130x->page_end = page_end;
+ return 0;
+}
+
+static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
+{
+ struct device *dev = ssd130x->dev;
+ struct pwm_state pwmstate;
+
+ ssd130x->pwm = pwm_get(dev, NULL);
+ if (IS_ERR(ssd130x->pwm)) {
+ dev_err(dev, "Could not get PWM from firmware description!\n");
+ return PTR_ERR(ssd130x->pwm);
+ }
+
+ pwm_init_state(ssd130x->pwm, &pwmstate);
+ pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
+ pwm_apply_state(ssd130x->pwm, &pwmstate);
+
+ /* Enable the PWM */
+ pwm_enable(ssd130x->pwm);
+
+ dev_dbg(dev, "Using PWM%d with a %lluns period.\n",
+ ssd130x->pwm->pwm, pwm_get_period(ssd130x->pwm));
+
+ return 0;
+}
+
+static void ssd130x_reset(struct ssd130x_device *ssd130x)
+{
+ if (!ssd130x->reset)
+ return;
+
+ /* Reset the screen */
+ gpiod_set_value_cansleep(ssd130x->reset, 1);
+ udelay(4);
+ gpiod_set_value_cansleep(ssd130x->reset, 0);
+ udelay(4);
+}
+
+static int ssd130x_power_on(struct ssd130x_device *ssd130x)
+{
+ struct device *dev = ssd130x->dev;
+ int ret;
+
+ ssd130x_reset(ssd130x);
+
+ ret = regulator_enable(ssd130x->vcc_reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable VCC: %d\n", ret);
+ return ret;
+ }
+
+ if (ssd130x->device_info->need_pwm) {
+ ret = ssd130x_pwm_enable(ssd130x);
+ if (ret) {
+ dev_err(dev, "Failed to enable PWM: %d\n", ret);
+ regulator_disable(ssd130x->vcc_reg);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ssd130x_power_off(struct ssd130x_device *ssd130x)
+{
+ pwm_disable(ssd130x->pwm);
+ pwm_put(ssd130x->pwm);
+
+ regulator_disable(ssd130x->vcc_reg);
+}
+
+static int ssd130x_init(struct ssd130x_device *ssd130x)
+{
+ u32 precharge, dclk, com_invdir, compins, chargepump;
+ int ret;
+
+ /* Set initial contrast */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_CONTRAST, ssd130x->contrast);
+ if (ret < 0)
+ return ret;
+
+ /* Set segment re-map */
+ if (ssd130x->seg_remap) {
+ ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_SEG_REMAP_ON);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Set COM direction */
+ com_invdir = (SSD130X_SET_COM_SCAN_DIR |
+ SSD130X_SET_COM_SCAN_DIR_SET(ssd130x->com_invdir));
+ ret = ssd130x_write_cmd(ssd130x, 1, com_invdir);
+ if (ret < 0)
+ return ret;
+
+ /* Set multiplex ratio value */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_MULTIPLEX_RATIO, ssd130x->height - 1);
+ if (ret < 0)
+ return ret;
+
+ /* set display offset value */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_DISPLAY_OFFSET, ssd130x->com_offset);
+ if (ret < 0)
+ return ret;
+
+ /* Set clock frequency */
+ dclk = (SSD130X_SET_CLOCK_DIV_SET(ssd130x->dclk_div - 1) |
+ SSD130X_SET_CLOCK_FREQ_SET(ssd130x->dclk_frq));
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_CLOCK_FREQ, dclk);
+ if (ret < 0)
+ return ret;
+
+ /* Set Area Color Mode ON/OFF & Low Power Display Mode */
+ if (ssd130x->area_color_enable || ssd130x->low_power) {
+ u32 mode = 0;
+
+ if (ssd130x->area_color_enable)
+ mode |= SSD130X_SET_AREA_COLOR_MODE_ENABLE;
+
+ if (ssd130x->low_power)
+ mode |= SSD130X_SET_AREA_COLOR_MODE_LOW_POWER;
+
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_AREA_COLOR_MODE, mode);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Set precharge period in number of ticks from the internal clock */
+ precharge = (SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep1) |
+		     SSD130X_SET_PRECHARGE_PERIOD2_SET(ssd130x->prechargep2));
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_PRECHARGE_PERIOD, precharge);
+ if (ret < 0)
+ return ret;
+
+ /* Set COM pins configuration */
+ compins = BIT(1);
+ compins |= (SSD130X_SET_COM_PINS_CONFIG1_SET(ssd130x->com_seq) |
+ SSD130X_SET_COM_PINS_CONFIG2_SET(ssd130x->com_lrremap));
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_COM_PINS_CONFIG, compins);
+ if (ret < 0)
+ return ret;
+
+ /* Set VCOMH */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_VCOMH, ssd130x->vcomh);
+ if (ret < 0)
+ return ret;
+
+ /* Turn on the DC-DC Charge Pump */
+ chargepump = BIT(4);
+
+ if (ssd130x->device_info->need_chargepump)
+ chargepump |= BIT(2);
+
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_CHARGE_PUMP, chargepump);
+ if (ret < 0)
+ return ret;
+
+ /* Set lookup table */
+ if (ssd130x->lookup_table_set) {
+ int i;
+
+ ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_SET_LOOKUP_TABLE);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ssd130x->lookup_table); i++) {
+ u8 val = ssd130x->lookup_table[i];
+
+ if (val < 31 || val > 63)
+ dev_warn(ssd130x->dev,
+ "lookup table index %d value out of range 31 <= %d <= 63\n",
+ i, val);
+ ret = ssd130x_write_cmd(ssd130x, 1, val);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Switch to horizontal addressing mode */
+ return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE,
+ SSD130X_SET_ADDRESS_MODE_HORIZONTAL);
+}
+
+static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
+ struct drm_rect *rect)
+{
+ unsigned int x = rect->x1;
+ unsigned int y = rect->y1;
+ unsigned int width = drm_rect_width(rect);
+ unsigned int height = drm_rect_height(rect);
+ unsigned int line_length = DIV_ROUND_UP(width, 8);
+ unsigned int pages = DIV_ROUND_UP(y % 8 + height, 8);
+ u32 array_idx = 0;
+ int ret, i, j, k;
+ u8 *data_array = NULL;
+
+ data_array = kcalloc(width, pages, GFP_KERNEL);
+ if (!data_array)
+ return -ENOMEM;
+
+ /*
+ * The screen is divided in pages, each having a height of 8
+ * pixels, and the width of the screen. When sending a byte of
+ * data to the controller, it gives the 8 bits for the current
+	 * column. I.e., the first byte holds the 8 bits of the first
+ * column, then the 8 bits for the second column, etc.
+	 *
+ * Representation of the screen, assuming it is 5 bits
+ * wide. Each letter-number combination is a bit that controls
+ * one pixel.
+ *
+ * A0 A1 A2 A3 A4
+ * B0 B1 B2 B3 B4
+ * C0 C1 C2 C3 C4
+ * D0 D1 D2 D3 D4
+ * E0 E1 E2 E3 E4
+ * F0 F1 F2 F3 F4
+ * G0 G1 G2 G3 G4
+ * H0 H1 H2 H3 H4
+ *
+ * If you want to update this screen, you need to send 5 bytes:
+ * (1) A0 B0 C0 D0 E0 F0 G0 H0
+ * (2) A1 B1 C1 D1 E1 F1 G1 H1
+ * (3) A2 B2 C2 D2 E2 F2 G2 H2
+ * (4) A3 B3 C3 D3 E3 F3 G3 H3
+ * (5) A4 B4 C4 D4 E4 F4 G4 H4
+ */
+
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ goto out_free;
+
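+	/*
+	 * A rect that starts mid-page still touches every page it overlaps,
+	 * hence pages = DIV_ROUND_UP(y % 8 + height, 8) above: e.g. a
+	 * hypothetical rect with y = 6 and height = 4 covers rows 6..9 and
+	 * so needs pages 0 and 1.
+	 */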
+ ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
+ if (ret < 0)
+ goto out_free;
+
+ for (i = y / 8; i < y / 8 + pages; i++) {
+ int m = 8;
+
+ /* Last page may be partial */
+ if (8 * (i + 1) > ssd130x->height)
+ m = ssd130x->height % 8;
+ for (j = x; j < x + width; j++) {
+ u8 data = 0;
+
+ for (k = 0; k < m; k++) {
+ u8 byte = buf[(8 * i + k) * line_length + j / 8];
+ u8 bit = (byte >> (j % 8)) & 1;
+
+ data |= bit << k;
+ }
+ data_array[array_idx++] = data;
+ }
+ }
+
+ ret = ssd130x_write_data(ssd130x, data_array, width * pages);
+
+out_free:
+ kfree(data_array);
+ return ret;
+}
+
+static void ssd130x_clear_screen(struct ssd130x_device *ssd130x)
+{
+ u8 *buf = NULL;
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = ssd130x->width,
+ .y1 = 0,
+ .y2 = ssd130x->height,
+ };
+
+ buf = kcalloc(ssd130x->width, ssd130x->height, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ ssd130x_update_rect(ssd130x, buf, &fullscreen);
+
+ kfree(buf);
+}
+
+static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *map,
+ struct drm_rect *rect)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+ void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ int ret = 0;
+ u8 *buf = NULL;
+
+ buf = kcalloc(fb->width, fb->height, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ drm_fb_xrgb8888_to_mono_reversed(buf, 0, vmap, fb, rect);
+
+ ssd130x_update_rect(ssd130x, buf, rect);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int ssd130x_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
+
+ if (mode->hdisplay != ssd130x->mode.hdisplay &&
+ mode->vdisplay != ssd130x->mode.vdisplay)
+ return MODE_ONE_SIZE;
+
+ if (mode->hdisplay != ssd130x->mode.hdisplay)
+ return MODE_ONE_WIDTH;
+
+ if (mode->vdisplay != ssd130x->mode.vdisplay)
+ return MODE_ONE_HEIGHT;
+
+ return MODE_OK;
+}
+
+static void ssd130x_display_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_device *drm = &ssd130x->drm;
+ int idx, ret;
+
+ ret = ssd130x_power_on(ssd130x);
+ if (ret)
+ return;
+
+ ret = ssd130x_init(ssd130x);
+ if (ret)
+ goto out_power_off;
+
+ if (!drm_dev_enter(drm, &idx))
+ goto out_power_off;
+
+ ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &plane_state->dst);
+
+ ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
+
+ backlight_enable(ssd130x->bl_dev);
+
+ drm_dev_exit(idx);
+
+ return;
+out_power_off:
+ ssd130x_power_off(ssd130x);
+}
+
+static void ssd130x_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
+ struct drm_device *drm = &ssd130x->drm;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ ssd130x_clear_screen(ssd130x);
+
+ backlight_disable(ssd130x->bl_dev);
+
+ ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_OFF);
+
+ ssd130x_power_off(ssd130x);
+
+ drm_dev_exit(idx);
+}
+
+static void ssd130x_display_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_plane_state)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_device *drm = &ssd130x->drm;
+ struct drm_rect src_clip, dst_clip;
+ int idx;
+
+ if (!fb)
+ return;
+
+ if (!pipe->crtc.state->active)
+ return;
+
+ if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
+ return;
+
+ dst_clip = plane_state->dst;
+ if (!drm_rect_intersect(&dst_clip, &src_clip))
+ return;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs ssd130x_pipe_funcs = {
+ .mode_valid = ssd130x_display_pipe_mode_valid,
+ .enable = ssd130x_display_pipe_enable,
+ .disable = ssd130x_display_pipe_disable,
+ .update = ssd130x_display_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+};
+
+static int ssd130x_connector_get_modes(struct drm_connector *connector)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev);
+	struct drm_display_mode *mode;
+ struct device *dev = ssd130x->dev;
+
+ mode = drm_mode_duplicate(connector->dev, &ssd130x->mode);
+ if (!mode) {
+		dev_err(dev, "Failed to duplicate mode\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ drm_set_preferred_mode(connector, mode->hdisplay, mode->vdisplay);
+
+ /* There is only a single mode */
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = {
+ .get_modes = ssd130x_connector_get_modes,
+};
+
+static const struct drm_connector_funcs ssd130x_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_mode_config_funcs ssd130x_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const uint32_t ssd130x_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+DEFINE_DRM_GEM_FOPS(ssd130x_fops);
+
+static const struct drm_driver ssd130x_drm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &ssd130x_fops,
+};
+
+static int ssd130x_update_bl(struct backlight_device *bdev)
+{
+ struct ssd130x_device *ssd130x = bl_get_data(bdev);
+ int brightness = backlight_get_brightness(bdev);
+ int ret;
+
+ ssd130x->contrast = brightness;
+
+ ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_CONTRAST);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd130x_write_cmd(ssd130x, 1, ssd130x->contrast);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops ssd130xfb_bl_ops = {
+ .update_status = ssd130x_update_bl,
+};
+
+static void ssd130x_parse_properties(struct ssd130x_device *ssd130x)
+{
+ struct device *dev = ssd130x->dev;
+
+ if (device_property_read_u32(dev, "solomon,width", &ssd130x->width))
+ ssd130x->width = 96;
+
+ if (device_property_read_u32(dev, "solomon,height", &ssd130x->height))
+ ssd130x->height = 16;
+
+ if (device_property_read_u32(dev, "solomon,page-offset", &ssd130x->page_offset))
+ ssd130x->page_offset = 1;
+
+ if (device_property_read_u32(dev, "solomon,col-offset", &ssd130x->col_offset))
+ ssd130x->col_offset = 0;
+
+ if (device_property_read_u32(dev, "solomon,com-offset", &ssd130x->com_offset))
+ ssd130x->com_offset = 0;
+
+ if (device_property_read_u32(dev, "solomon,prechargep1", &ssd130x->prechargep1))
+ ssd130x->prechargep1 = 2;
+
+ if (device_property_read_u32(dev, "solomon,prechargep2", &ssd130x->prechargep2))
+ ssd130x->prechargep2 = 2;
+
+ if (!device_property_read_u8_array(dev, "solomon,lookup-table",
+ ssd130x->lookup_table,
+ ARRAY_SIZE(ssd130x->lookup_table)))
+ ssd130x->lookup_table_set = 1;
+
+ ssd130x->seg_remap = !device_property_read_bool(dev, "solomon,segment-no-remap");
+ ssd130x->com_seq = device_property_read_bool(dev, "solomon,com-seq");
+ ssd130x->com_lrremap = device_property_read_bool(dev, "solomon,com-lrremap");
+ ssd130x->com_invdir = device_property_read_bool(dev, "solomon,com-invdir");
+ ssd130x->area_color_enable =
+ device_property_read_bool(dev, "solomon,area-color-enable");
+ ssd130x->low_power = device_property_read_bool(dev, "solomon,low-power");
+
+ ssd130x->contrast = 127;
+ ssd130x->vcomh = ssd130x->device_info->default_vcomh;
+
+ /* Setup display timing */
+ if (device_property_read_u32(dev, "solomon,dclk-div", &ssd130x->dclk_div))
+ ssd130x->dclk_div = ssd130x->device_info->default_dclk_div;
+ if (device_property_read_u32(dev, "solomon,dclk-frq", &ssd130x->dclk_frq))
+ ssd130x->dclk_frq = ssd130x->device_info->default_dclk_frq;
+}
+
+static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
+{
+ struct drm_display_mode *mode = &ssd130x->mode;
+ struct device *dev = ssd130x->dev;
+ struct drm_device *drm = &ssd130x->drm;
+ unsigned long max_width, max_height;
+ int ret;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret) {
+ dev_err(dev, "DRM mode config init failed: %d\n", ret);
+ return ret;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ mode->clock = 1;
+ mode->hdisplay = mode->htotal = ssd130x->width;
+ mode->hsync_start = mode->hsync_end = ssd130x->width;
+ mode->vdisplay = mode->vtotal = ssd130x->height;
+ mode->vsync_start = mode->vsync_end = ssd130x->height;
+ mode->width_mm = 27;
+ mode->height_mm = 27;
+
+ max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ drm->mode_config.min_width = mode->hdisplay;
+ drm->mode_config.max_width = max_width;
+ drm->mode_config.min_height = mode->vdisplay;
+ drm->mode_config.max_height = max_height;
+ drm->mode_config.preferred_depth = 32;
+ drm->mode_config.funcs = &ssd130x_mode_config_funcs;
+
+ ret = drm_connector_init(drm, &ssd130x->connector, &ssd130x_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(dev, "DRM connector init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(&ssd130x->connector, &ssd130x_connector_helper_funcs);
+
+ ret = drm_simple_display_pipe_init(drm, &ssd130x->pipe, &ssd130x_pipe_funcs,
+ ssd130x_formats, ARRAY_SIZE(ssd130x_formats),
+ NULL, &ssd130x->connector);
+ if (ret) {
+ dev_err(dev, "DRM simple display pipeline init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_plane_enable_fb_damage_clips(&ssd130x->pipe.plane);
+
+ drm_mode_config_reset(drm);
+
+ return 0;
+}
+
+static int ssd130x_get_resources(struct ssd130x_device *ssd130x)
+{
+ struct device *dev = ssd130x->dev;
+
+ ssd130x->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ssd130x->reset))
+ return dev_err_probe(dev, PTR_ERR(ssd130x->reset),
+ "Failed to get reset gpio\n");
+
+ ssd130x->vcc_reg = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ssd130x->vcc_reg))
+ return dev_err_probe(dev, PTR_ERR(ssd130x->vcc_reg),
+ "Failed to get VCC regulator\n");
+
+ return 0;
+}
+
+struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap)
+{
+ struct ssd130x_device *ssd130x;
+ struct backlight_device *bl;
+ struct drm_device *drm;
+ int ret;
+
+ ssd130x = devm_drm_dev_alloc(dev, &ssd130x_drm_driver,
+ struct ssd130x_device, drm);
+ if (IS_ERR(ssd130x))
+ return ERR_PTR(dev_err_probe(dev, PTR_ERR(ssd130x),
+ "Failed to allocate DRM device\n"));
+
+ drm = &ssd130x->drm;
+
+ ssd130x->dev = dev;
+ ssd130x->regmap = regmap;
+ ssd130x->device_info = device_get_match_data(dev);
+
+ ssd130x_parse_properties(ssd130x);
+
+ ret = ssd130x_get_resources(ssd130x);
+ if (ret)
+ return ERR_PTR(ret);
+
+ bl = devm_backlight_device_register(dev, dev_name(dev), dev, ssd130x,
+ &ssd130xfb_bl_ops, NULL);
+ if (IS_ERR(bl))
+ return ERR_PTR(dev_err_probe(dev, PTR_ERR(bl),
+ "Unable to register backlight device\n"));
+
+ bl->props.brightness = ssd130x->contrast;
+ bl->props.max_brightness = MAX_CONTRAST;
+ ssd130x->bl_dev = bl;
+
+ ret = ssd130x_init_modeset(ssd130x);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n"));
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return ssd130x;
+}
+EXPORT_SYMBOL_GPL(ssd130x_probe);
+
+int ssd130x_remove(struct ssd130x_device *ssd130x)
+{
+ drm_dev_unplug(&ssd130x->drm);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssd130x_remove);
+
+void ssd130x_shutdown(struct ssd130x_device *ssd130x)
+{
+ drm_atomic_helper_shutdown(&ssd130x->drm);
+}
+EXPORT_SYMBOL_GPL(ssd130x_shutdown);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
new file mode 100644
index 000000000000..cd21cdccb566
--- /dev/null
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Header file for:
+ * DRM driver for Solomon SSD130x OLED displays
+ *
+ * Copyright 2022 Red Hat Inc.
+ * Author: Javier Martinez Canillas <javierm@redhat.com>
+ *
+ * Based on drivers/video/fbdev/ssd1307fb.c
+ * Copyright 2012 Free Electrons
+ */
+
+#ifndef __SSD130X_H__
+#define __SSD130X_H__
+
+#include <drm/drm_drv.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <linux/regmap.h>
+
+struct ssd130x_deviceinfo {
+ u32 default_vcomh;
+ u32 default_dclk_div;
+ u32 default_dclk_frq;
+ int need_pwm;
+ int need_chargepump;
+};
+
+struct ssd130x_device {
+ struct drm_device drm;
+ struct device *dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_display_mode mode;
+ struct drm_connector connector;
+ struct i2c_client *client;
+
+ struct regmap *regmap;
+
+ const struct ssd130x_deviceinfo *device_info;
+
+ unsigned area_color_enable : 1;
+ unsigned com_invdir : 1;
+ unsigned com_lrremap : 1;
+ unsigned com_seq : 1;
+ unsigned lookup_table_set : 1;
+ unsigned low_power : 1;
+ unsigned seg_remap : 1;
+ u32 com_offset;
+ u32 contrast;
+ u32 dclk_div;
+ u32 dclk_frq;
+ u32 height;
+ u8 lookup_table[4];
+ u32 page_offset;
+ u32 col_offset;
+ u32 prechargep1;
+ u32 prechargep2;
+
+ struct backlight_device *bl_dev;
+ struct pwm_device *pwm;
+ struct gpio_desc *reset;
+ struct regulator *vcc_reg;
+ u32 vcomh;
+ u32 width;
+ /* Cached address ranges */
+ u8 col_start;
+ u8 col_end;
+ u8 page_start;
+ u8 page_end;
+};
+
+struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap);
+int ssd130x_remove(struct ssd130x_device *ssd130x);
+void ssd130x_shutdown(struct ssd130x_device *ssd130x);
+
+#endif /* __SSD130X_H__ */
diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig
index 3edeaeca0e65..9a9c7ebfc716 100644
--- a/drivers/gpu/drm/sprd/Kconfig
+++ b/drivers/gpu/drm/sprd/Kconfig
@@ -3,7 +3,6 @@ config DRM_SPRD
depends on ARCH_SPRD || COMPILE_TEST
depends on DRM && OF
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 06a3414ee43a..1637203ea103 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -790,6 +790,11 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O resource\n");
+ return -EINVAL;
+ }
+
ctx->base = devm_ioremap(dev, res->start, resource_size(res));
if (!ctx->base) {
dev_err(dev, "failed to map dpu registers\n");
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index a077e2d4d721..a60ecdd67d98 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -43,7 +43,6 @@ static void sprd_drm_mode_config_init(struct drm_device *drm)
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 8192;
drm->mode_config.max_height = 8192;
- drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.funcs = &sprd_drm_mode_config_funcs;
drm->mode_config.helper_private = &sprd_drm_mode_config_helper;
@@ -155,7 +154,7 @@ static void sprd_drm_shutdown(struct platform_device *pdev)
struct drm_device *drm = platform_get_drvdata(pdev);
if (!drm) {
- drm_warn(drm, "drm device is not available, no shutdown\n");
+ dev_warn(&pdev->dev, "drm device is not available, no shutdown\n");
return;
}
@@ -186,6 +185,9 @@ static struct platform_driver *sprd_drm_drivers[] = {
static int __init sprd_drm_init(void)
{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
return platform_register_drivers(sprd_drm_drivers,
ARRAY_SIZE(sprd_drm_drivers));
}
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 911b3cddc264..12b67a5d5923 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -907,6 +907,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi,
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O resource\n");
+ return -EINVAL;
+ }
+
ctx->base = devm_ioremap(dev, res->start, resource_size(res));
if (!ctx->base) {
drm_err(dsi->drm, "failed to map dsi host registers\n");
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index c7efb43b83ee..860b2230aa08 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -287,6 +287,9 @@ static struct platform_driver * const drivers[] = {
static int sti_drm_init(void)
{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(sti_drm_init);
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 9f441aadf2d5..0da7cce2a1a2 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -241,7 +242,7 @@ static struct platform_driver stm_drm_platform_driver = {
},
};
-module_platform_driver(stm_drm_platform_driver);
+drm_module_platform_driver(stm_drm_platform_driver);
MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index b630614b3d72..a3fd441dd9ad 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -19,6 +19,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -441,7 +442,7 @@ static struct platform_driver sun4i_drv_platform_driver = {
.pm = &sun4i_drv_drm_pm_ops,
},
};
-module_platform_driver(sun4i_drv_platform_driver);
+drm_module_platform_driver(sun4i_drv_platform_driver);
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 18c319b804c0..6ed55ebaec8c 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -5,6 +5,7 @@ config DRM_TEGRA
depends on COMMON_CLK
depends on DRM
depends on OF
+ select DRM_DP_AUX_BUS
select DRM_DP_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 8ca500977a46..5847dcad2478 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <drm/dp/drm_dp_helper.h>
+#include <drm/dp/drm_dp_aux_bus.h>
#include <drm/drm_panel.h>
#include "dp.h"
@@ -570,6 +571,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
list_add_tail(&dpaux->list, &dpaux_list);
mutex_unlock(&dpaux_lock);
+ err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e9de91a4e7e8..9464f522e257 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1410,6 +1410,9 @@ static int __init host1x_drm_init(void)
{
int err;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
err = host1x_driver_register(&host1x_drm_driver);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 223ab2ceb7e6..3762d87759d9 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -63,7 +63,7 @@ static void falcon_copy_firmware_image(struct falcon *falcon,
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
+ virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 7c784e90e40e..04cfff89ee51 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include "tidss_dispc.h"
@@ -251,7 +252,7 @@ static struct platform_driver tidss_platform_driver = {
},
};
-module_platform_driver(tidss_platform_driver);
+drm_module_platform_driver(tidss_platform_driver);
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("TI Keystone DSS Driver");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index cc567c87057d..eee3c447fbac 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -604,6 +604,9 @@ static struct platform_driver tilcdc_platform_driver = {
static int __init tilcdc_drm_init(void)
{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
DBG("init");
tilcdc_panel_init();
return platform_driver_register(&tilcdc_platform_driver);
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index f8531c50a072..f0fa3b15c341 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -15,6 +15,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -427,7 +428,7 @@ static struct platform_driver arcpgu_platform_driver = {
},
};
-module_platform_driver(arcpgu_platform_driver);
+drm_module_platform_driver(arcpgu_platform_driver);
MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>");
MODULE_DESCRIPTION("ARC PGU DRM driver");
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 29ee2eda7b7d..9a477f0fddee 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -41,6 +41,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res)
{
+ struct ttm_resource_manager *man;
+
res->start = 0;
res->num_pages = PFN_UP(bo->base.size);
res->mem_type = place->mem_type;
@@ -50,6 +52,11 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
res->bo = bo;
+
+ man = ttm_manager_type(bo->bdev, place->mem_type);
+ spin_lock(&bo->bdev->lru_lock);
+ man->usage += bo->base.size;
+ spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
@@ -65,6 +72,9 @@ EXPORT_SYMBOL(ttm_resource_init);
void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
+ spin_lock(&man->bdev->lru_lock);
+ man->usage -= res->bo->base.size;
+ spin_unlock(&man->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
@@ -153,19 +163,20 @@ void ttm_resource_set_bo(struct ttm_resource *res,
*
* @man: memory manager object to init
* @bdev: ttm device this manager belongs to
- * @p_size: size managed area in pages.
+ * @size: size of managed resources in arbitrary units
*
* Initialise core parts of a manager object.
*/
void ttm_resource_manager_init(struct ttm_resource_manager *man,
struct ttm_device *bdev,
- unsigned long p_size)
+ uint64_t size)
{
unsigned i;
spin_lock_init(&man->move_lock);
man->bdev = bdev;
- man->size = p_size;
+ man->size = size;
+ man->usage = 0;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&man->lru[i]);
@@ -227,6 +238,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
/**
+ * ttm_resource_manager_usage
+ *
+ * @man: A memory manager object.
+ *
+ * Return how many resources are currently used.
+ */
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
+{
+ uint64_t usage;
+
+ spin_lock(&man->bdev->lru_lock);
+ usage = man->usage;
+ spin_unlock(&man->bdev->lru_lock);
+ return usage;
+}
+EXPORT_SYMBOL(ttm_resource_manager_usage);
+
+/**
* ttm_resource_manager_debug
*
* @man: manager type to dump.
@@ -238,6 +267,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
drm_printf(p, " use_type: %d\n", man->use_type);
drm_printf(p, " use_tt: %d\n", man->use_tt);
drm_printf(p, " size: %llu\n", man->size);
+ drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man));
if (man->func->debug)
man->func->debug(man, p);
}
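The ttm_resource hunks follow one rule throughout: man->usage is only ever touched under bdev->lru_lock, and readers go through the locked accessor so the 64-bit counter is never read torn on 32-bit machines. Reduced to its essentials (a sketch; field and lock names taken from the hunks above):

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>

/* Sketch of the accounting pattern: every access is locked. */
static void usage_add(struct ttm_resource_manager *man, u64 size)
{
	spin_lock(&man->bdev->lru_lock);
	man->usage += size;	/* grows in ttm_resource_init() */
	spin_unlock(&man->bdev->lru_lock);
}

static u64 usage_read(struct ttm_resource_manager *man)
{
	u64 usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;	/* never read without the lock */
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}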
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 7fa71c8bb828..6d9d2921abf4 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -43,6 +43,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -266,7 +267,7 @@ static struct platform_driver tve200_driver = {
.probe = tve200_probe,
.remove = tve200_remove,
};
-module_platform_driver(tve200_driver);
+drm_module_platform_driver(tve200_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 6e3113f419f4..8b3229a37c6d 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -59,6 +59,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
/* gem_create_object function for allocating a BO struct and doing
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index e76b24bb8828..29fd13109e43 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -6,6 +6,7 @@
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
+#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -148,15 +149,15 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV),
V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX));
seq_printf(m, "MMU: %s\n",
- (ident2 & V3D_HUB_IDENT2_WITH_MMU) ? "yes" : "no");
+ str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
- (ident1 & V3D_HUB_IDENT1_WITH_TFU) ? "yes" : "no");
+ str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
seq_printf(m, "TSY: %s\n",
- (ident1 & V3D_HUB_IDENT1_WITH_TSY) ? "yes" : "no");
+ str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
seq_printf(m, "MSO: %s\n",
- (ident1 & V3D_HUB_IDENT1_WITH_MSO) ? "yes" : "no");
+ str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_MSO));
seq_printf(m, "L3C: %s (%dkb)\n",
- (ident1 & V3D_HUB_IDENT1_WITH_L3C) ? "yes" : "no",
+ str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_L3C),
V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB));
for (core = 0; core < cores; core++) {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index c7ed2e1cbab6..92bc0faee84f 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -798,7 +798,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (!render->base.perfmon) {
ret = -ENOENT;
- goto fail;
+ goto fail_perfmon;
}
}
@@ -847,6 +847,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
drm_gem_unlock_reservations(last_job->bo,
last_job->bo_count, &acquire_ctx);
fail:
@@ -1027,7 +1028,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
args->perfmon_id);
if (!job->base.perfmon) {
ret = -ENOENT;
- goto fail;
+ goto fail_perfmon;
}
}
@@ -1056,6 +1057,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
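Both v3d hunks fix the same unwind bug: jumping to fail after the reservations were taken skipped drm_gem_unlock_reservations(), so a failed perfmon lookup leaked the locks. The general kernel idiom, in isolation (a sketch with hypothetical setup_a/setup_b/setup_c names):

int setup_a(void);
int setup_b(void);
int setup_c(void);
void teardown_a(void);
void teardown_b(void);

/* Labels unwind, in reverse order, exactly what has succeeded. */
static int example(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = setup_b();
	if (ret)
		goto fail_a;	/* only A succeeded */

	ret = setup_c();
	if (ret)
		goto fail_b;	/* A and B succeeded */

	return 0;

fail_b:
	teardown_b();
fail_a:
	teardown_a();
	return ret;
}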
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index e0cb7d0697a7..39459ae96f30 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -391,7 +391,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_bin_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_bin");
+ NULL, "v3d_bin", v3d->drm.dev);
if (ret) {
dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
return ret;
@@ -401,7 +401,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_render_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_render");
+ NULL, "v3d_render", v3d->drm.dev);
if (ret) {
dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
ret);
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_tfu_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_tfu");
+ NULL, "v3d_tfu", v3d->drm.dev);
if (ret) {
dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
ret);
@@ -426,7 +426,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_csd_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_csd");
+ NULL, "v3d_csd", v3d->drm.dev);
if (ret) {
dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
ret);
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_cache_clean_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_cache_clean");
+ NULL, "v3d_cache_clean", v3d->drm.dev);
if (ret) {
dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
ret);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index f35d9e44c6b7..f4f2bd79a7cb 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
+#include <drm/drm_module.h>
#include "vbox_drv.h"
@@ -190,24 +191,7 @@ static const struct drm_driver driver = {
DRM_GEM_VRAM_DRIVER,
};
-static int __init vbox_init(void)
-{
- if (drm_firmware_drivers_only() && vbox_modeset == -1)
- return -EINVAL;
-
- if (vbox_modeset == 0)
- return -EINVAL;
-
- return pci_register_driver(&vbox_pci_driver);
-}
-
-static void __exit vbox_exit(void)
-{
- pci_unregister_driver(&vbox_pci_driver);
-}
-
-module_init(vbox_init);
-module_exit(vbox_exit);
+drm_module_pci_driver_if_modeset(vbox_pci_driver, vbox_modeset);
MODULE_AUTHOR("Oracle Corporation");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index e6cc47470e03..783890e8d43a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -525,9 +525,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (ret)
return ret;
- ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
- if (ret)
- return ret;
+ /*
+ * post_crtc_powerdown will have called pm_runtime_put, so we
+ * don't need it here; otherwise we'd get the reference counting
+ * wrong.
+ */
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 9300d3354c51..752f921735c6 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1493,15 +1493,10 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
- const struct of_device_id *match;
dma_cap_mask_t dma_mask;
int ret;
- match = of_match_device(vc4_dsi_dt_match, dev);
- if (!match)
- return -ENODEV;
-
- dsi->variant = match->data;
+ dsi->variant = of_device_get_match_data(dev);
vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 92b1530aa17b..6c58b0fd13fb 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1012,30 +1012,15 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
}
-static struct drm_connector_state *
-vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_connector_state *conn_state;
- struct drm_connector *connector;
- unsigned int i;
-
- for_each_new_connector_in_state(state, connector, conn_state, i) {
- if (conn_state->best_encoder == encoder)
- return conn_state;
- }
-
- return NULL;
-}
-
static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
+ drm_atomic_get_new_connector_state(state, connector);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long pixel_rate = vc4_conn_state->pixel_rate;
unsigned long bvb_rate, hsm_rate;
@@ -1249,9 +1234,8 @@ static void vc4_hdmi_encoder_atomic_mode_set(struct drm_encoder *encoder,
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
mutex_lock(&vc4_hdmi->mutex);
- memcpy(&vc4_hdmi->saved_adjusted_mode,
- &crtc_state->adjusted_mode,
- sizeof(vc4_hdmi->saved_adjusted_mode));
+ drm_mode_copy(&vc4_hdmi->saved_adjusted_mode,
+ &crtc_state->adjusted_mode);
mutex_unlock(&vc4_hdmi->mutex);
}
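drm_mode_copy() is preferred over memcpy() here because struct drm_display_mode embeds list linkage; a raw copy would clone the source's list pointers into the destination and corrupt whatever list the destination mode sat on. The idea in miniature (a sketch with a simplified struct and hypothetical names):

#include <linux/list.h>
#include <linux/string.h>

struct mode_like {
	struct list_head head;	/* linkage the destination must keep */
	int hdisplay, vdisplay;	/* payload that is safe to copy */
};

/* Copy the payload while preserving the destination's own list head. */
static void mode_like_copy(struct mode_like *dst, const struct mode_like *src)
{
	struct list_head keep = dst->head;

	memcpy(dst, src, sizeof(*dst));
	dst->head = keep;
}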
@@ -1773,6 +1757,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
return PTR_ERR(codec_pdev);
}
+ vc4_hdmi->audio.codec_pdev = codec_pdev;
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
@@ -1812,6 +1797,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
+static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi)
+{
+ platform_device_unregister(vc4_hdmi->audio.codec_pdev);
+ vc4_hdmi->audio.codec_pdev = NULL;
+}
+
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
@@ -2684,6 +2675,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
kfree(vc4_hdmi->hdmi_regset.regs);
kfree(vc4_hdmi->hd_regset.regs);
+ vc4_hdmi_audio_exit(vc4_hdmi);
vc4_hdmi_cec_exit(vc4_hdmi);
vc4_hdmi_hotplug_exit(vc4_hdmi);
vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 933a468e10f3..1076faeab616 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -117,6 +117,7 @@ struct vc4_hdmi_audio {
struct snd_soc_dai_link_component platform;
struct snd_dmaengine_dai_dma_data dma_data;
struct hdmi_audio_infoframe infoframe;
+ struct platform_device *codec_pdev;
bool streaming;
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index b6954e2f75e6..853dd9aa397e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -23,6 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -31,7 +33,7 @@
static void virtio_gpu_add_bool(struct seq_file *m, const char *name,
bool value)
{
- seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
+ seq_printf(m, "%-16s : %s\n", name, str_yes_no(value));
}
static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index baef2c5f2aaf..f293e6ad52da 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -124,6 +124,7 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index fe36efdb7ff5..26eb5478394a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,9 +32,10 @@
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_module.h>
#include <drm/drm_sysfs.h>
-#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
@@ -1643,26 +1644,7 @@ out_error:
return ret;
}
-static int __init vmwgfx_init(void)
-{
- int ret;
-
- if (drm_firmware_drivers_only())
- return -EINVAL;
-
- ret = pci_register_driver(&vmw_pci_driver);
- if (ret)
- DRM_ERROR("Failed initializing DRM.\n");
- return ret;
-}
-
-static void __exit vmwgfx_exit(void)
-{
- pci_unregister_driver(&vmw_pci_driver);
-}
-
-module_init(vmwgfx_init);
-module_exit(vmwgfx_exit);
+drm_module_pci_driver(vmw_pci_driver);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dd2ff441068e..d49de4905efa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -4501,7 +4501,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto mksstats_out;
}
- ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
+ ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 5001b87aebe8..59d6a2dd4c2e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -621,52 +621,6 @@ out_no_object:
return ret;
}
-
-/**
- * vmw_wait_dma_fence - Wait for a dma fence
- *
- * @fman: pointer to a fence manager
- * @fence: DMA fence to wait on
- *
- * This function handles the case when the fence is actually a fence
- * array. If that's the case, it'll wait on each of the child fence
- */
-int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
- struct dma_fence *fence)
-{
- struct dma_fence_array *fence_array;
- int ret = 0;
- int i;
-
-
- if (dma_fence_is_signaled(fence))
- return 0;
-
- if (!dma_fence_is_array(fence))
- return dma_fence_wait(fence, true);
-
- /* From i915: Note that if the fence-array was created in
- * signal-on-any mode, we should *not* decompose it into its individual
- * fences. However, we don't currently store which mode the fence-array
- * is operating in. Fortunately, the only user of signal-on-any is
- * private to amdgpu and we should not see any incoming fence-array
- * from sync-file being in signal-on-any mode.
- */
-
- fence_array = to_dma_fence_array(fence);
- for (i = 0; i < fence_array->num_fences; i++) {
- struct dma_fence *child = fence_array->fences[i];
-
- ret = dma_fence_wait(child, true);
-
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-
/*
* vmw_fence_fifo_down - signal all unsignaled fence objects.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 079ab4f3ba51..a7eee579c76a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -104,9 +104,6 @@ extern int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
-extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
- struct dma_fence *fence);
-
extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index e63088c2121d..0d8e6bd1ccbf 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -495,6 +495,9 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
struct drm_device *drm_dev;
int ret;
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
DRM_INFO("Creating %s\n", xen_drm_driver.desc);
drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index ac37053412a1..824b510e337b 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -286,7 +287,7 @@ static struct platform_driver zynqmp_dpsub_driver = {
},
};
-module_platform_driver(zynqmp_dpsub_driver);
+drm_module_platform_driver(zynqmp_dpsub_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver");
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index e08e331e46ae..f87a8705f518 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -137,8 +137,15 @@ void host1x_syncpt_restore(struct host1x *host)
struct host1x_syncpt *sp_base = host->syncpt;
unsigned int i;
- for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
+ for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
+ /*
+ * Unassign syncpt from channels for purposes of Tegra186
+ * syncpoint protection. This prevents any channel from
+ * accessing it until it is reassigned.
+ */
+ host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
host1x_hw_syncpt_restore(host, sp_base + i);
+ }
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
@@ -227,27 +234,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
void *ref;
struct host1x_waitlist *waiter;
int err = 0, check_count = 0;
- u32 val;
if (value)
- *value = 0;
-
- /* first check cache */
- if (host1x_syncpt_is_expired(sp, thresh)) {
- if (value)
- *value = host1x_syncpt_load(sp);
+ *value = host1x_syncpt_load(sp);
+ if (host1x_syncpt_is_expired(sp, thresh))
return 0;
- }
-
- /* try to read from register */
- val = host1x_hw_syncpt_load(sp->host, sp);
- if (host1x_syncpt_is_expired(sp, thresh)) {
- if (value)
- *value = val;
-
- goto done;
- }
if (!timeout) {
err = -EAGAIN;
@@ -352,13 +344,6 @@ int host1x_syncpt_init(struct host1x *host)
for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
-
- /*
- * Unassign syncpt from channels for purposes of Tegra186
- * syncpoint protection. This prevents any channel from
- * accessing it until it is reassigned.
- */
- host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
}
for (i = 0; i < host->info->nb_bases; i++)
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2503be0253d3..19fa734a9a79 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
{
union cmd_response cmd_resp;
- /* Get response with status within a max of 800 ms timeout */
+ /* Get response with status within a max of 1600 ms timeout */
if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
(cmd_resp.response_v2.response == sensor_sts &&
cmd_resp.response_v2.status == 0 && (sid == 0xff ||
- cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
return cmd_resp.response_v2.response;
return SENSOR_DISABLED;
@@ -53,6 +53,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = info.period;
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -70,6 +71,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -83,12 +85,51 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
union sfh_cmd_base cmd_base;
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = 0;
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
+{
+ if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
+ writel(0, privdata->mmio + AMD_P2C_MSG(4));
+ writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+ }
+}
+
+static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->clear_intr)
+ privdata->mp2_ops->clear_intr(privdata);
+}
+
+static irqreturn_t amd_sfh_irq_handler(int irq, void *data)
+{
+ amd_sfh_clear_intr(data);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+{
+ int rc;
+
+ pci_intx(privdata->pdev, true);
+
+ rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
+ amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
+ if (rc) {
+ dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n",
+ privdata->pdev->irq, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -193,6 +234,8 @@ static void amd_mp2_pci_remove(void *privdata)
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
+ pci_intx(mp2->pdev, false);
+ amd_sfh_clear_intr(mp2);
}
static const struct amd_mp2_ops amd_sfh_ops_v2 = {
@@ -200,6 +243,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.stop = amd_stop_sensor_v2,
.stop_all = amd_stop_all_sensor_v2,
.response = amd_sfh_wait_response_v2,
+ .clear_intr = amd_sfh_clear_intr_v2,
+ .init_intr = amd_sfh_irq_init_v2,
};
static const struct amd_mp2_ops amd_sfh_ops = {
@@ -225,6 +270,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata)
}
}
+static int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->init_intr)
+ return privdata->mp2_ops->init_intr(privdata);
+
+ return 0;
+}
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
@@ -261,9 +314,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mp2_select_ops(privdata);
+ rc = amd_sfh_irq_init(privdata);
+ if (rc) {
+ dev_err(&pdev->dev, "amd_sfh_irq_init failed\n");
+ return rc;
+ }
+
rc = amd_sfh_hid_client_init(privdata);
- if (rc)
+ if (rc) {
+ amd_sfh_clear_intr(privdata);
+ dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
return rc;
+ }
+
+ amd_sfh_clear_intr(privdata);
return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
}
@@ -290,6 +354,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
}
}
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
@@ -312,6 +379,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
}
}
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index ae30e059f847..97b99861fae2 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -49,7 +49,7 @@ union sfh_cmd_base {
} s;
struct {
u32 cmd_id : 4;
- u32 intr_enable : 1;
+ u32 intr_disable : 1;
u32 rsvd1 : 3;
u32 length : 7;
u32 mem_type : 1;
@@ -141,5 +141,7 @@ struct amd_mp2_ops {
void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
void (*stop_all)(struct amd_mp2_dev *privdata);
int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+ void (*clear_intr)(struct amd_mp2_dev *privdata);
+ int (*init_intr)(struct amd_mp2_dev *privdata);
};
#endif
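The intr_enable to intr_disable rename above is a semantics fix in a register layout that the driver models as a union: bitfield members are filled in one by one and the raw cmd_base.ul word is then written to MMIO in a single store. Abbreviated sketch of that pattern (field widths copied from the hunk; note that C bitfield ordering is ABI-dependent, which the real header accepts for this platform):

#include <linux/types.h>

union cmd_word {
	u32 ul;				/* raw view, written to MMIO */
	struct {
		u32 cmd_id       : 4;
		u32 intr_disable : 1;	/* 1 = firmware skips the interrupt */
		u32 rsvd1        : 3;
		u32 length       : 7;
		u32 rest         : 17;	/* remaining fields elided */
	} v2;
};

/* Usage: fill cmd.v2 field by field, then write cmd.ul to the register. */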
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
index be41f83b0289..76095bd53c65 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -27,6 +27,7 @@
#define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
#define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+#define ILLUMINANCE_MASK GENMASK(14, 0)
int get_report_descriptor(int sensor_idx, u8 *rep_desc)
{
@@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
get_common_inputs(&als_input.common_property, report_id);
/* For ALS, V2 platforms use the C2P_MSG5 register instead of the DRAM access method */
if (supported_input == V2_STATUS)
- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+ als_input.illuminance_value =
+ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
else
als_input.illuminance_value =
(int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 24802a4a636e..7dc89dc6b0f0 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -691,49 +691,49 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 8e960d7b233b..9b42b0cdeef0 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
err_free:
+ usb_put_dev(udev);
kfree(priv);
return ret;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85975031389b..78bd3ddda442 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1370,6 +1370,7 @@
#define USB_VENDOR_ID_UGTIZER 0x2179
#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
#define USB_VENDOR_ID_VIEWSONIC 0x0543
#define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 9af1dc8ae3a2..c066ba901867 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
index b4dad66fa954..ec6c73f75ffe 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
struct regulator *vdd;
struct notifier_block nb;
- struct mutex regulator_mutex;
struct gpio_desc *reset_gpio;
const struct goodix_i2c_hid_timing_data *timings;
};
@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
container_of(nb, struct i2c_hid_of_goodix, nb);
int ret = NOTIFY_OK;
- mutex_lock(&ihid_goodix->regulator_mutex);
-
switch (event) {
case REGULATOR_EVENT_PRE_DISABLE:
gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
break;
}
- mutex_unlock(&ihid_goodix->regulator_mutex);
-
return ret;
}
@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
if (!ihid_goodix)
return -ENOMEM;
- mutex_init(&ihid_goodix->regulator_mutex);
-
ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
* long. Holding the controller in reset apparently draws extra
* power.
*/
- mutex_lock(&ihid_goodix->regulator_mutex);
ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
- if (ret) {
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ if (ret)
return dev_err_probe(&client->dev, ret,
"regulator notifier request failed\n");
- }
/*
* If someone else is holding the regulator on (or the regulator is
* an always-on one) we might never be told to deassert reset. Do it
- * now. Here we'll assume that someone else might have _just
- * barely_ turned the regulator on so we'll do the full
- * "post_power_delay" just in case.
+ * now... and temporarily bump the regulator reference count just to
+ * make sure it is impossible for this to race with our own notifier!
+ * We also assume that someone else might have _just barely_ turned
+ * the regulator on so we'll do the full "post_power_delay" just in
+ * case.
*/
- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
goodix_i2c_hid_deassert_reset(ihid_goodix, true);
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ regulator_disable(ihid_goodix->vdd);
+ }
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
}
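The Goodix rework replaces the mutex with plain regulator refcounting: while the probe path holds its own enable reference, no other consumer can drive the supply through REGULATOR_EVENT_PRE_DISABLE, so the reset deassert cannot race the notifier. The pattern on its own (a sketch; vdd is assumed to be an already-acquired regulator):

#include <linux/regulator/consumer.h>

/* Pin the supply on around work that must not race a disable. */
static int with_supply_pinned(struct regulator *vdd)
{
	int ret;

	ret = regulator_enable(vdd);	/* bump the enable refcount */
	if (ret)
		return ret;

	/* ... work that assumes the supply stays up ... */

	regulator_disable(vdd);		/* drop the temporary reference */
	return 0;
}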
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index eb2833d2b5d0..832885198643 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -13,7 +13,7 @@
#include "hv_utils_transport.h"
static DEFINE_SPINLOCK(hvt_list_lock);
-static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
+static LIST_HEAD(hvt_list);
static void hvt_reset(struct hvutil_transport *hvt)
{
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 17bf55fe3169..12a2b37e87f3 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2028,8 +2028,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
kobj->kset = dev->channels_kset;
ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
"%u", relid);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
ret = sysfs_create_group(kobj, &vmbus_chan_group);
@@ -2038,6 +2040,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
* The calling functions' error handling paths will clean up the
* empty channel directory.
*/
+ kobject_put(kobj);
dev_err(device, "Unable to set up channel sysfs files\n");
return ret;
}
@@ -2079,7 +2082,6 @@ struct hv_device *vmbus_device_create(const guid_t *type,
return child_device_obj;
}
-static u64 vmbus_dma_mask = DMA_BIT_MASK(64);
/*
* vmbus_device_register - Register the child device
*/
@@ -2120,8 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj)
}
hv_debug_add_dev_dir(child_device_obj);
- child_device_obj->device.dma_mask = &vmbus_dma_mask;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
return 0;
err_kset_unregister:
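The vmbus change moves the DMA mask from one static u64 shared by every child onto each hv_device, then sets it through dma_set_mask() so it is validated per device; with shared storage, a later dma_set_mask() on one child would silently rewrite the mask of all of them. Generic sketch of the per-device form (hypothetical helper name):

#include <linux/dma-mapping.h>

/* Give the device its own mask storage, then negotiate the width. */
static int child_setup_dma(struct device *dev, u64 *mask_storage)
{
	dev->dma_mask = mask_storage;	/* per-device, never shared */
	return dma_set_mask(dev, DMA_BIT_MASK(64));
}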
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3501a3ead4ba..3ae961986fc3 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
- /*
- * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
- * so ignore that error but forward any other error.
- */
- if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
- return PTR_ERR(tzd);
+ if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) != -ENODEV)
+ return PTR_ERR(tzd);
+ dev_info(dev, "temp%d_input not attached to any thermal zone\n",
+ index + 1);
+ devm_kfree(dev, tdata);
+ return 0;
+ }
err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
if (err)
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 414204f5704c..9c9e9f4ccb9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -59,7 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 },
[NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 },
[NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 },
- [NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 },
+ [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 },
[NTC_LAST] = { },
};
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 776ee2237be2..ac2fbee1ba9c 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -911,6 +911,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
pmbus_update_sensor_data(client, s2);
regval = status & mask;
+ if (regval) {
+ ret = pmbus_write_byte_data(client, page, reg, regval);
+ if (ret)
+ goto unlock;
+ }
if (s1 && s2) {
s64 v1, v2;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 42da31c1ab70..8a6c6ee28556 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -488,7 +488,7 @@ config I2C_BRCMSTB
config I2C_CADENCE
tristate "Cadence I2C Controller"
- depends on ARCH_ZYNQ || ARM64 || XTENSA
+ depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.
@@ -680,7 +680,7 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST
select I2C_SLAVE
help
Say Y here if you want to use the IIC bus controller on
@@ -935,7 +935,7 @@ config I2C_QCOM_GENI
config I2C_QUP
tristate "Qualcomm QUP based I2C controller"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Qualcomm SoCs.
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index dfc534065595..5149454eef4a 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -23,6 +23,11 @@
#define BCM2835_I2C_FIFO 0x10
#define BCM2835_I2C_DIV 0x14
#define BCM2835_I2C_DEL 0x18
+/*
+ * 16-bit field for the number of SCL cycles to wait after rising SCL
+ * before deciding the slave is not responding. 0 disables the
+ * timeout detection.
+ */
#define BCM2835_I2C_CLKT 0x1c
#define BCM2835_I2C_C_READ BIT(0)
@@ -474,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
adap->quirks = of_device_get_match_data(&pdev->dev);
+ /*
+ * Disable the hardware clock stretching timeout. SMBUS
+ * specifies a limit for how long the device can stretch the
+ * clock, but core I2C doesn't.
+ */
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
ret = i2c_add_adapter(adap);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 490ee3962645..b00f35c0b066 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
/* set the data in/out register size for compatible SoCs */
if (of_device_is_compatible(dev->device->of_node,
- "brcmstb,brcmper-i2c"))
+ "brcm,brcmper-i2c"))
dev->data_regsz = sizeof(u8);
else
dev->data_regsz = sizeof(u32);
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index c1de8eb66169..cf54f1cb4c57 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
cci->master[idx].adap.quirks = &cci->data->quirks;
cci->master[idx].adap.algo = &cci_algo;
cci->master[idx].adap.dev.parent = dev;
- cci->master[idx].adap.dev.of_node = child;
+ cci->master[idx].adap.dev.of_node = of_node_get(child);
cci->master[idx].master = idx;
cci->master[idx].cci = cci;
@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
continue;
ret = i2c_add_adapter(&cci->master[i].adap);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cci->master[i].adap.dev.of_node);
goto error_i2c;
+ }
}
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
return 0;
error_i2c:
- for (; i >= 0; i--) {
- if (cci->master[i].cci)
+ for (--i ; i >= 0; i--) {
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
}
error:
disable_irq(cci->irq);
@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
int i;
for (i = 0; i < cci->data->num_masters; i++) {
- if (cci->master[i].cci)
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
cci_halt(cci, i);
}
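The CCI fix is textbook OF refcounting: a child node pointer stashed for later use (here in adap.dev.of_node) must carry its own reference from of_node_get(), and every path that unwinds the stash (adapter-add failure, probe rollback, remove) must balance it with of_node_put(). In isolation (a sketch; helper names are hypothetical):

#include <linux/of.h>

/* Take a reference when caching the node... */
static void cache_child(struct device_node **slot, struct device_node *child)
{
	*slot = of_node_get(child);
}

/* ...and drop it on every teardown path. */
static void uncache_child(struct device_node **slot)
{
	of_node_put(*slot);
	*slot = NULL;
}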
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index e6081dd0a880..d11f668016a6 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "Unable to register iio device\n");
- goto err_trigger_unregister;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
err_trigger_unregister:
bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
err_buffer_cleanup:
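This runtime-PM unwind recurs in the kxcjk-1013, mma9551, mma9553, bmg160, kmx61 and bmc150_magn hunks below: once probe has called pm_runtime_enable() and pm_runtime_use_autosuspend(), any later failure must undo both, or the next probe attempt hits an unbalanced-enable state. The counterpart pair, isolated (a sketch):

#include <linux/pm_runtime.h>

/* Error-path counterparts of the runtime-PM setup done in probe. */
static void pm_probe_unwind(struct device *dev)
{
	pm_runtime_dont_use_autosuspend(dev);	/* undo use_autosuspend() */
	pm_runtime_disable(dev);		/* undo pm_runtime_enable() */
}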
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 32989d91b982..f7fd9e046588 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -173,12 +173,20 @@ struct fxls8962af_data {
u16 upper_thres;
};
-const struct regmap_config fxls8962af_regmap_conf = {
+const struct regmap_config fxls8962af_i2c_regmap_conf = {
.reg_bits = 8,
.val_bits = 8,
.max_register = FXLS8962AF_MAX_REG,
};
-EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf);
+EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf);
+
+const struct regmap_config fxls8962af_spi_regmap_conf = {
+ .reg_bits = 8,
+ .pad_bits = 8,
+ .val_bits = 8,
+ .max_register = FXLS8962AF_MAX_REG,
+};
+EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf);
enum {
fxls8962af_idx_x,
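The regmap split is needed because this part's SPI transactions carry a dummy byte between the register address and the data while its I2C transactions do not; regmap expresses that with .pad_bits, so one shared config can no longer serve both buses. The distinction by itself (values as in the hunk; struct names hypothetical):

#include <linux/regmap.h>

/* I2C: 8-bit register, 8-bit value, nothing in between. */
static const struct regmap_config example_i2c_conf = {
	.reg_bits = 8,
	.val_bits = 8,
};

/* SPI: same map, but a pad byte follows the address on the wire. */
static const struct regmap_config example_spi_conf = {
	.reg_bits = 8,
	.pad_bits = 8,	/* dummy byte between address and data */
	.val_bits = 8,
};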
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
index cfb004b20455..6bde9891effb 100644
--- a/drivers/iio/accel/fxls8962af-i2c.c
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client)
{
struct regmap *regmap;
- regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf);
+ regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to initialize i2c regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c
index 57108d3d480b..6f4dff3238d3 100644
--- a/drivers/iio/accel/fxls8962af-spi.c
+++ b/drivers/iio/accel/fxls8962af-spi.c
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi)
{
struct regmap *regmap;
- regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf);
+ regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to initialize spi regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
index b67572c3ef06..9cbe98c3ba9a 100644
--- a/drivers/iio/accel/fxls8962af.h
+++ b/drivers/iio/accel/fxls8962af.h
@@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
int fxls8962af_core_remove(struct device *dev);
extern const struct dev_pm_ops fxls8962af_pm_ops;
-extern const struct regmap_config fxls8962af_regmap_conf;
+extern const struct regmap_config fxls8962af_i2c_regmap_conf;
+extern const struct regmap_config fxls8962af_spi_regmap_conf;
#endif /* _FXLS8962AF_H_ */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 0fe570316848..ac74cdcd2bc8 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1590,11 +1590,14 @@ static int kxcjk1013_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 4c359fb05480..c53a3398b14c 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto out_poweroff;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
out_poweroff:
mma9551_set_device_state(client, false);
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 0570ab1cc064..5ff6bc70708b 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto out_poweroff;
+ goto err_pm_cleanup;
}
dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
out_poweroff:
mma9551_set_device_state(client, false);
return ret;
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index bc2cfa5f9592..b400bbe291aa 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -76,7 +76,7 @@
#define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x)
#define AD7124_CONFIG_PGA_MSK GENMASK(2, 0)
#define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x)
-#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6)
+#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5)
#define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x)
/* AD7124_FILTER_X */
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 42ea8bc7e780..adc5ceaef8c9 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev,
struct z188_adc *adc;
struct iio_dev *indio_dev;
struct resource *mem;
+ int ret;
indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
if (!indio_dev)
@@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev,
adc->mem = mem;
mcb_set_drvdata(dev, indio_dev);
- return iio_device_register(indio_dev);
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+err_unmap:
+ iounmap(adc->base);
err:
mcb_release_mem(mem);
return -ENXIO;
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index d84ae6b008c1..e8fc4d01f30b 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
mutex_lock(&priv->slock);
size = 0;
- for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) {
+ for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) {
size += tsc2046_adc_group_set_layout(priv, group, ch_idx);
tsc2046_adc_group_set_cmd(priv, group, ch_idx);
group++;
@@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv)
* enabled.
*/
size = 0;
- for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++)
+ for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++)
size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx);
priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index 5271073bb74e..acd230a6af35 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -134,7 +134,6 @@ struct ad74413r_state {
#define AD74413R_CH_EN_MASK(x) BIT(x)
#define AD74413R_REG_DIN_COMP_OUT 0x25
-#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x
#define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x))
#define AD74413R_ADC_RESULT_MAX GENMASK(15, 0)
@@ -288,7 +287,7 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
unsigned int offset = 0;
int ret;
- for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ for_each_set_bit_from(offset, mask, chip->ngpio) {
unsigned int real_offset = st->gpo_gpio_offsets[offset];
ret = ad74413r_set_gpo_config(st, real_offset,
@@ -316,7 +315,7 @@ static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (ret)
return ret;
- status &= AD74413R_DIN_COMP_OUT_SHIFT_X(real_offset);
+ status &= BIT(real_offset);
return status ? 1 : 0;
}
@@ -334,11 +333,10 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
if (ret)
return ret;
- for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ for_each_set_bit_from(offset, mask, chip->ngpio) {
unsigned int real_offset = st->comp_gpio_offsets[offset];
- if (val & BIT(real_offset))
- *bits |= offset;
+ __assign_bit(offset, bits, val & BIT(real_offset));
}
return ret;
@@ -840,7 +838,7 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
{
struct ad74413r_state *st = iio_priv(indio_dev);
struct spi_transfer *xfer = st->adc_samples_xfer;
- u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE];
+ u8 *rx_buf = st->adc_samples_buf.rx_buf;
u8 *tx_buf = st->adc_samples_tx_buf;
unsigned int channel;
int ret = -EINVAL;
@@ -894,9 +892,10 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
spi_message_add_tail(xfer, &st->adc_samples_msg);
- xfer++;
tx_buf += AD74413R_FRAME_SIZE;
- rx_buf += AD74413R_FRAME_SIZE;
+ if (xfer != st->adc_samples_xfer)
+ rx_buf += AD74413R_FRAME_SIZE;
+ xfer++;
}
xfer->rx_buf = rx_buf;
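The gpio_get_multiple fix above is worth dwelling on: the old "*bits |= offset" OR-ed the bit index (not a mask) into the word and could never clear a bit that had gone low, whereas __assign_bit() sets or clears bit nr according to the value. Minimal demonstration (a sketch):

#include <linux/bitops.h>

/*
 * __assign_bit(nr, addr, value) behaves as value ? __set_bit() :
 * __clear_bit(), so stale 1s are cleared when the line reads low.
 */
static void copy_level(unsigned long *bits, unsigned int nr, bool level)
{
	__assign_bit(nr, bits, level);
}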
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 6cdeb50143af..3f3c478e9baa 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -348,7 +348,7 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
vcm = regulator_get_voltage(st->reg);
- if (vcm >= 0 && vcm < 1800000)
+ if (vcm < 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
else if (vcm > 1800000 && vcm < 2600000)
mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 17b939a367ad..81a6d09788bd 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index ed129321a14d..f9b4540db1f4 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct adis_data *adis16480_data;
+ irq_handler_t trigger_handler = NULL;
struct iio_dev *indio_dev;
struct adis16480 *st;
int ret;
@@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi)
st->clk_freq = st->chip_info->int_clk;
}
+ /* Only use our trigger handler if burst mode is supported */
+ if (adis16480_data->burst_len)
+ trigger_handler = adis16480_trigger_handler;
+
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
- adis16480_trigger_handler);
+ trigger_handler);
if (ret)
return ret;
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 1dabfd615dab..f89724481df9 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client,
ret = iio_device_register(data->acc_indio_dev);
if (ret < 0) {
dev_err(&client->dev, "Failed to register acc iio device\n");
- goto err_buffer_cleanup_mag;
+ goto err_pm_cleanup;
}
ret = iio_device_register(data->mag_indio_dev);
@@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client,
err_iio_unregister_acc:
iio_device_unregister(data->acc_indio_dev);
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
err_buffer_cleanup_mag:
if (client->irq > 0)
iio_triggered_buffer_cleanup(data->mag_indio_dev);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 727b4b6ac696..93f0c6bce502 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
+ /*
+ * we need to wait for sensor settling time before
+ * reading data in order to avoid corrupted samples
+ */
delay = 1000000000 / sensor->odr;
- usleep_range(delay, 2 * delay);
+ usleep_range(3 * delay, 4 * delay);
err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
if (err < 0)
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index f96f53175349..3d4d21f979fa 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_disable_runtime_pm;
+ goto err_pm_cleanup;
}
dev_dbg(dev, "Registered device %s\n", name);
return 0;
-err_disable_runtime_pm:
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c447526288f4..50c53409ceb6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3370,22 +3370,30 @@ err:
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
- if (!src_addr || !src_addr->sa_family) {
- src_addr = (struct sockaddr *) &id->route.addr.src_addr;
- src_addr->sa_family = dst_addr->sa_family;
- if (IS_ENABLED(CONFIG_IPV6) &&
- dst_addr->sa_family == AF_INET6) {
- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
- } else if (dst_addr->sa_family == AF_IB) {
- ((struct sockaddr_ib *) src_addr)->sib_pkey =
- ((struct sockaddr_ib *) dst_addr)->sib_pkey;
- }
- }
- return rdma_bind_addr(id, src_addr);
+ struct sockaddr_storage zero_sock = {};
+
+ if (src_addr && src_addr->sa_family)
+ return rdma_bind_addr(id, src_addr);
+
+ /*
+	 * When src_addr is not specified, automatically supply a wildcard (any) address
+ */
+ zero_sock.ss_family = dst_addr->sa_family;
+ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src_addr6 =
+ (struct sockaddr_in6 *)&zero_sock;
+ struct sockaddr_in6 *dst_addr6 =
+ (struct sockaddr_in6 *)dst_addr;
+
+ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ id->route.addr.dev_addr.bound_dev_if =
+ dst_addr6->sin6_scope_id;
+ } else if (dst_addr->sa_family == AF_IB) {
+ ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+ ((struct sockaddr_ib *)dst_addr)->sib_pkey;
+ }
+ return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
}
/*
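
The cma_bind_addr() rework builds the wildcard source address in a stack-local, zero-initialized sockaddr_storage instead of writing into id->route, so nothing half-initialized is left behind on failure. It relies on an all-zero sockaddr already encoding the any-address and port 0, leaving only the family (and a few family-specific fields) to fill in. A small user-space sketch of the same idea:

/* User-space analogue of building a wildcard source address from a
 * zero-initialized sockaddr_storage, as the cma_bind_addr() fix does.
 * A zeroed sockaddr_in/in6 already encodes INADDR_ANY / in6addr_any
 * and port 0, so only the family needs to be set. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_storage zero_sock;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&zero_sock, 0, sizeof(zero_sock));
	zero_sock.ss_family = AF_INET;	/* addr/port stay 0 => any */

	if (bind(fd, (struct sockaddr *)&zero_sock,
		 sizeof(struct sockaddr_in)) < 0) {
		perror("bind");
		close(fd);
		return 1;
	}
	puts("bound to wildcard address, ephemeral port");
	close(fd);
	return 0;
}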
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 0a3b28142c05..41c272980f91 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = {
};
static const struct attribute_group port_diagc_group = {
- .name = "linkcontrol",
+ .name = "diag_counters",
.attrs = port_diagc_attributes,
};
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 7c3f98e57889..759b85f03331 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2682,6 +2682,8 @@ static void rtrs_clt_dev_release(struct device *dev)
struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
dev);
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
kfree(clt);
}
@@ -2711,6 +2713,8 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
return ERR_PTR(-ENOMEM);
}
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
uuid_gen(&clt->paths_uuid);
INIT_LIST_HEAD_RCU(&clt->paths_list);
clt->paths_num = paths_num;
@@ -2727,53 +2731,51 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
init_waitqueue_head(&clt->permits_wait);
mutex_init(&clt->paths_ev_mutex);
mutex_init(&clt->paths_mutex);
+ device_initialize(&clt->dev);
- clt->dev.class = rtrs_clt_dev_class;
- clt->dev.release = rtrs_clt_dev_release;
err = dev_set_name(&clt->dev, "%s", sessname);
if (err)
- goto err;
+ goto err_put;
+
/*
* Suppress user space notification until
* sysfs files are created
*/
dev_set_uevent_suppress(&clt->dev, true);
- err = device_register(&clt->dev);
- if (err) {
- put_device(&clt->dev);
- goto err;
- }
+ err = device_add(&clt->dev);
+ if (err)
+ goto err_put;
clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
if (!clt->kobj_paths) {
err = -ENOMEM;
- goto err_dev;
+ goto err_del;
}
err = rtrs_clt_create_sysfs_root_files(clt);
if (err) {
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
- goto err_dev;
+ goto err_del;
}
dev_set_uevent_suppress(&clt->dev, false);
kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
return clt;
-err_dev:
- device_unregister(&clt->dev);
-err:
+err_del:
+ device_del(&clt->dev);
+err_put:
free_percpu(clt->pcpu_path);
- kfree(clt);
+ put_device(&clt->dev);
return ERR_PTR(err);
}
static void free_clt(struct rtrs_clt_sess *clt)
{
- free_permits(clt);
free_percpu(clt->pcpu_path);
- mutex_destroy(&clt->paths_ev_mutex);
- mutex_destroy(&clt->paths_mutex);
- /* release callback will free clt in last put */
+
+ /*
+	 * The release callback will free clt and destroy the mutexes on the last put
+ */
device_unregister(&clt->dev);
}
@@ -2890,6 +2892,7 @@ void rtrs_clt_close(struct rtrs_clt_sess *clt)
rtrs_clt_destroy_path_files(clt_path, NULL);
kobject_put(&clt_path->kobj);
}
+ free_permits(clt);
free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);
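
The alloc_clt()/free_clt() rework switches to the split device_initialize()/device_add() pattern: once device_initialize() has run, the structure is owned by the embedded refcount, so every exit path must drop a reference with put_device() and let the release callback do the freeing, never kfree() directly. A user-space analogue of that discipline, with an explicit refcount standing in for the kobject one:

/* Sketch of the refcount/release discipline the rtrs-clt fix adopts:
 * after "initialization", exits drop a reference instead of calling
 * free(), and the release callback is the single place that frees.
 * Illustrative only, not the kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	puts("release: freeing object");
	free(o);
}

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		o->release(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refs = 1;			/* like device_initialize() */
	o->release = obj_release;

	obj_get(o);			/* a temporary user of the object */
	obj_put(o);

	/* simulate a failure after initialization: */
	obj_put(o);			/* like put_device(), never free() */
	return 0;
}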
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index e174e853f8a4..285b766e4e70 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
spin_unlock(&host->target_lock);
/*
- * Wait for tl_err and target port removal tasks.
+ * srp_queue_remove_work() queues a call to
+ * srp_remove_target(). The latter function cancels
+ * target->tl_err_work so waiting for the remove works to
+ * finish is sufficient.
*/
- flush_workqueue(system_long_wq);
flush_workqueue(srp_remove_wq);
kfree(host);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ccaeb2426385..c3139bc2aa0d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev)
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);
+ /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
+ if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+ }
+
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index a472489ccbad..164f6c757f6b 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client)
"Marking SMBus companion %s as gone\n",
dev_name(&smbdev->client->dev));
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &smbdev->psmouse->ps2dev.serio->dev);
serio_rescan(smbdev->psmouse->ps2dev.serio);
} else {
list_del(&smbdev->node);
@@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse)
kfree(smbdev);
} else {
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev);
psmouse_dbg(smbdev->psmouse,
"posting removal request for SMBus companion %s\n",
dev_name(&smbdev->client->dev));
@@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse,
if (smbdev->client) {
/* We have our companion device */
+ if (!device_link_add(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev,
+ DL_FLAG_STATELESS))
+ psmouse_warn(psmouse,
+				     "failed to set up link with SMBus companion %s\n",
+ dev_name(&smbdev->client->dev));
return 0;
}
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 7c82c4f5fa6b..129ebc810de8 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -571,8 +571,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);
#ifdef CONFIG_OF
static const struct of_device_id zinitix_of_match[] = {
+ { .compatible = "zinitix,bt402" },
+ { .compatible = "zinitix,bt403" },
+ { .compatible = "zinitix,bt404" },
+ { .compatible = "zinitix,bt412" },
+ { .compatible = "zinitix,bt413" },
+ { .compatible = "zinitix,bt431" },
+ { .compatible = "zinitix,bt432" },
+ { .compatible = "zinitix,bt531" },
{ .compatible = "zinitix,bt532" },
+ { .compatible = "zinitix,bt538" },
{ .compatible = "zinitix,bt541" },
+ { .compatible = "zinitix,bt548" },
+ { .compatible = "zinitix,bt554" },
+ { .compatible = "zinitix,at100" },
{ }
};
MODULE_DEVICE_TABLE(of, zinitix_of_match);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dcbd6d201619..997ace47bbd5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(md->queue);
+ blk_mark_disk_dead(md->disk);
/*
* Take suspend_lock so that presuspend and postsuspend methods
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4e61b28a002f..8d718aa56d33 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
- int retries = 0;
do {
u32 status;
int err;
+ int retries = 0;
- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
- mmc_wait_for_req(host, mrq);
+ mmc_wait_for_req(host, mrq);
- err = mmc_send_status(card, &status);
- if (err)
- goto error_exit;
-
- if (!mmc_host_is_spi(host) &&
- !mmc_ready_for_data(status)) {
- err = mmc_blk_fix_state(card, req);
+ err = mmc_send_status(card, &status);
if (err)
goto error_exit;
- }
- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
- continue;
+ if (!mmc_host_is_spi(host) &&
+ !mmc_ready_for_data(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
- retries = 0;
+ if (!mrq->cmd->error)
+ break;
+ }
if (mrq->cmd->error ||
mrq->data->error ||
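
The mmc_blk_read_single() restructure scopes the retry counter to a single sector and re-prepares the request on every attempt, rather than relying on continue to re-enter a loop whose setup had already run. A stand-alone sketch of the resulting bounded-retry shape, with a stubbed try_read() standing in for the real request:

/* Sketch of the restructured retry loop: the counter covers one
 * sector, the request is rebuilt on every attempt, and the loop exits
 * early once a try succeeds. try_read() is a stand-in. */
#include <stdio.h>
#include <stdbool.h>

#define READ_SINGLE_RETRIES 2

static int attempts;

static bool try_read(void)
{
	/* pretend the first two attempts fail */
	return ++attempts > 2;
}

int main(void)
{
	int retries = 0;
	bool ok = false;

	while (retries++ <= READ_SINGLE_RETRIES) {
		/* re-issue the request from scratch each time */
		ok = try_read();
		if (ok)
			break;
	}
	printf("read %s after %d attempt(s)\n",
	       ok ? "succeeded" : "failed", attempts);
	return ok ? 0 : 1;
}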
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 6ed6c51fac69..d503821a3e60 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
}
}
- if (erasesize)
- div_u64_rem(len, (uint32_t)erasesize, &rem);
-
if (len == 0 || erasesize == 0 || erasesize > len
- || erasesize > UINT_MAX || rem) {
+ || erasesize > UINT_MAX) {
parse_err("illegal erasesize or len\n");
ret = -EINVAL;
goto error;
}
+ div_u64_rem(len, (uint32_t)erasesize, &rem);
+ if (rem) {
+		parse_err("len is not a multiple of erasesize\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
ret = register_device(name, start, len, (uint32_t)erasesize);
if (ret)
goto error;
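
The phram_setup() fix reorders validation: erasesize is rejected as zero (or otherwise illegal) before the remainder is computed, so div_u64_rem() can never be reached with a zero divisor, and a non-multiple length now gets its own error message. A compact user-space sketch of that ordering, using plain % in place of div_u64_rem():

/* Sketch of the phram_setup() validation order: check the divisor
 * first, then the remainder, so the modulo is always safe and each
 * failure gets a distinct message. */
#include <stdio.h>
#include <stdint.h>

static int validate(uint64_t len, uint64_t erasesize)
{
	if (len == 0 || erasesize == 0 || erasesize > len ||
	    erasesize > UINT32_MAX) {
		fprintf(stderr, "illegal erasesize or len\n");
		return -1;
	}
	if (len % erasesize) {		/* safe: erasesize != 0 here */
		fprintf(stderr, "len is not a multiple of erasesize\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return validate(1024 * 1024, 4096) ? 1 : 0;
}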
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 70f492dce158..eef87b28d6c8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.stride = 1;
config.read_only = true;
config.root_only = true;
+ config.ignore_wp = true;
config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
config.priv = mtd;
@@ -833,6 +834,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
+ config.ignore_wp = true;
config.reg_read = reg_read;
config.size = size;
config.of_node = np;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 20408b7db540..d986ab4e4c35 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -42,7 +42,8 @@ config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
- select OMAP_GPMC if ARCH_K3
+ select MEMORY
+ select OMAP_GPMC
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index f75929783b94..aee78f5f4f15 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);
- if (!ret) {
+ if (ret != -EBADMSG) {
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
if (*err_addr)
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 1b64c5a5140d..ded4df473928 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
this->hw.must_apply_timings = false;
ret = gpmi_nfc_apply_timings(this);
if (ret)
- return ret;
+ goto out_pm;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2414,6 +2414,7 @@ unmap:
this->bch = false;
+out_pm:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index efe0ffe4f1ab..9054559e52dd 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
struct ingenic_ecc *ecc;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(pdev)) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 7c6efa3b6255..1a77542c6d67 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,7 +2,6 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
@@ -3073,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;
- ret = qcom_nandc_alloc(nandc);
- if (ret)
- goto err_nandc_alloc;
-
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
goto err_core_clk;
@@ -3085,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
goto err_aon_clk;
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ goto err_nandc_alloc;
+
ret = qcom_nandc_setup(nandc);
if (ret)
goto err_setup;
@@ -3096,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
return 0;
err_setup:
+ qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
-
return ret;
}
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
index 06a818cd2433..4311b89d8df0 100644
--- a/drivers/mtd/parsers/qcomsmempart.c
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
+ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ int ret, i, j, tmpparts, numparts = 0;
struct smem_flash_pentry *pentry;
struct smem_flash_ptable *ptable;
- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
struct mtd_partition *parts;
- int ret, i, numparts;
char *name, *c;
if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
pr_debug("Parsing partition table info from SMEM\n");
ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
if (IS_ERR(ptable)) {
- pr_err("Error reading partition table header\n");
+ if (PTR_ERR(ptable) != -EPROBE_DEFER)
+ pr_err("Error reading partition table header\n");
return PTR_ERR(ptable);
}
@@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
}
/* Ensure that # of partitions is less than the max we have allocated */
- numparts = le32_to_cpu(ptable->numparts);
- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ tmpparts = le32_to_cpu(ptable->numparts);
+ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
pr_err("Partition numbers exceed the max limit\n");
return -EINVAL;
}
@@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
return PTR_ERR(ptable);
}
+ for (i = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] != '\0')
+ numparts++;
+ }
+
parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
- for (i = 0; i < numparts; i++) {
+ for (i = 0, j = 0; i < tmpparts; i++) {
pentry = &ptable->pentry[i];
if (pentry->name[0] == '\0')
continue;
@@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
for (c = name; *c != '\0'; c++)
*c = tolower(*c);
- parts[i].name = name;
- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
- parts[i].mask_flags = pentry->attr;
- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ parts[j].name = name;
+ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+ parts[j].mask_flags = pentry->attr;
+ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
i, pentry->name, le32_to_cpu(pentry->offset),
le32_to_cpu(pentry->length), pentry->attr);
+ j++;
}
pr_debug("SMEM partition table found: ver: %d len: %d\n",
- le32_to_cpu(ptable->version), numparts);
+ le32_to_cpu(ptable->version), tmpparts);
*pparts = parts;
return numparts;
out_free_parts:
- while (--i >= 0)
- kfree(parts[i].name);
+ while (--j >= 0)
+ kfree(parts[j].name);
kfree(parts);
*pparts = NULL;
@@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
for (i = 0; i < nr_parts; i++)
kfree(pparts[i].name);
+
+ kfree(pparts);
}
static const struct of_device_id qcomsmem_of_match_table[] = {
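
The parser fix above switches to a two-pass scheme: the first pass counts entries with non-empty names so the kcalloc() is exactly sized, and the second pass copies them with an independent output index j, so holes in the source table do not leave zeroed gaps in the returned array. A self-contained sketch of the pattern with made-up data:

/* Sketch of the two-pass count-then-compact pattern from
 * parse_qcomsmem_part(): size the allocation from a counting pass,
 * then copy valid entries with a separate output index. */
#include <stdio.h>
#include <stdlib.h>

struct entry { char name[8]; };

int main(void)
{
	struct entry table[] = { {"boot"}, {""}, {"rootfs"}, {""}, {"data"} };
	int total = sizeof(table) / sizeof(table[0]);
	int numparts = 0, i, j;
	struct entry *parts;

	for (i = 0; i < total; i++)	/* pass 1: count valid entries */
		if (table[i].name[0] != '\0')
			numparts++;

	parts = calloc(numparts, sizeof(*parts));
	if (!parts)
		return 1;

	for (i = 0, j = 0; i < total; i++) {	/* pass 2: compact copy */
		if (table[i].name[0] == '\0')
			continue;
		parts[j++] = table[i];
	}

	for (j = 0; j < numparts; j++)
		printf("part %d: %s\n", j, parts[j].name);
	free(parts);
	return 0;
}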
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 9fd1d6cba3cd..a86b1f71762e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
@@ -1995,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
@@ -2279,6 +2279,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
}
/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
@@ -2313,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
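
Converting agg_select_timer to an atomic_t is not enough on its own, because "decrement unless already zero" is a read-modify-write that two contexts could interleave; bond_agg_timer_advance() therefore loops on atomic_cmpxchg() so that exactly one caller observes the 1 -> 0 transition. A runnable C11 analogue of that loop:

/* C11 sketch of the cmpxchg loop in bond_agg_timer_advance():
 * decrement a shared countdown without going below zero, and report
 * "expired" only to the caller that performed the 1 -> 0 transition. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int agg_select_timer = 3;

static bool timer_advance(void)
{
	int val, nval;

	while (1) {
		val = atomic_load(&agg_select_timer);
		if (!val)
			return false;	/* already expired, nothing to do */
		nval = val - 1;
		if (atomic_compare_exchange_weak(&agg_select_timer,
						 &val, nval))
			break;		/* we performed val -> nval */
	}
	return nval == 0;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("tick %d -> expired=%d\n", i, timer_advance());
	return 0;
}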
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 238b56d77c36..aebeb46e6fa6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2379,10 +2379,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index c0c91440340a..0029d279616f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -82,6 +82,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_TAG_LAN9303
select REGMAP
help
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d55784d19fa4..3969d89fa4db 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -10,6 +10,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
@@ -1310,7 +1317,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 320ee7fe91a8..8a7a8093a156 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -2176,8 +2176,8 @@ static int gswip_remove(struct platform_device *pdev)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
- mdiobus_free(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 55dbda04ea62..243f8ad6d06e 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -26,7 +26,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
struct dsa_switch *ds = dev->ds;
u8 port_member = 0, cpu_port;
const struct dsa_port *dp;
- int i;
+ int i, j;
if (!dsa_is_user_port(ds, port))
return;
@@ -45,13 +45,33 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
continue;
if (!dsa_port_bridge_same(dp, other_dp))
continue;
+ if (other_p->stp_state != BR_STATE_FORWARDING)
+ continue;
- if (other_p->stp_state == BR_STATE_FORWARDING &&
- p->stp_state == BR_STATE_FORWARDING) {
+ if (p->stp_state == BR_STATE_FORWARDING) {
val |= BIT(port);
port_member |= BIT(i);
}
+		/* Retain port [i]'s relationship to ports other than [port] */
+ for (j = 0; j < ds->num_ports; j++) {
+ const struct dsa_port *third_dp;
+ struct ksz_port *third_p;
+
+ if (j == i)
+ continue;
+ if (j == port)
+ continue;
+ if (!dsa_is_user_port(ds, j))
+ continue;
+ third_p = &dev->ports[j];
+ if (third_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+ third_dp = dsa_to_port(ds, j);
+ if (dsa_port_bridge_same(other_dp, third_dp))
+ val |= BIT(j);
+ }
+
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8530dbe403f4..ab1676553714 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2284,6 +2284,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index da595242bc13..f50604f3e541 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c6412c523637..b4381cd41979 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 774c1f1a57c3..eedb48d945ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4f94136a011a..b1c98d1408b8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4747,8 +4747,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return rc;
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
req->mask = cpu_to_le32(vnic->rx_mask);
return hwrm_req_send_silent(bp, req);
}
@@ -7787,6 +7789,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return 0;
}
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+ if (!bp->fw_health)
+ return;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ } else {
+ bnxt_try_map_fw_health_reg(bp);
+ }
+}
+
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8639,6 +8654,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
vnic->uc_filter_count = 1;
vnic->rx_mask = 0;
+ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+ goto skip_rx_mask;
+
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
@@ -8648,7 +8666,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (bp->dev->flags & IFF_ALLMULTI) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
bnxt_mc_list_updated(bp, &mask);
@@ -8659,6 +8677,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (rc)
goto err_out;
+skip_rx_mask:
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -9850,8 +9869,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
resc_reinit = true;
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
fw_reset = true;
- else if (bp->fw_health && !bp->fw_health->status_reliable)
- bnxt_try_map_fw_health_reg(bp);
+ else
+ bnxt_remap_fw_health_regs(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -10330,13 +10349,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
goto half_open_err;
}
- rc = bnxt_alloc_mem(bp, false);
+ rc = bnxt_alloc_mem(bp, true);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
- rc = bnxt_init_nic(bp, false);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -10344,7 +10365,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
dev_close(bp->dev);
return rc;
}
@@ -10354,9 +10375,10 @@ half_open_err:
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
- bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_hwrm_resource_free(bp, false, true);
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}
void bnxt_reenable_sriov(struct bnxt *bp)
@@ -10772,7 +10794,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (dev->flags & IFF_MULTICAST) {
mc_update = bnxt_mc_list_updated(bp, &mask);
}
@@ -10849,9 +10871,10 @@ skip_uc:
!bnxt_promisc_ok(bp))
vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc && vnic->mc_list_count) {
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 440dfeb4948b..666fc1e7a7d2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1921,6 +1921,7 @@ struct bnxt {
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4da31b1b84f9..f6e21fac0e69 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -367,6 +367,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
}
}
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED 0
+#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
+
static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
@@ -374,8 +384,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
struct hwrm_fw_livepatch_query_input *query_req;
struct hwrm_fw_livepatch_output *patch_resp;
struct hwrm_fw_livepatch_input *patch_req;
+ u16 flags, live_patch_state;
+ bool activated = false;
u32 installed = 0;
- u16 flags;
u8 target;
int rc;
@@ -394,7 +405,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
hwrm_req_drop(bp, query_req);
return rc;
}
- patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
patch_resp = hwrm_req_hold(bp, patch_req);
@@ -407,12 +417,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
flags = le16_to_cpu(query_resp->status_flags);
- if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
+
+ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
continue;
- if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
- !strncmp(query_resp->active_ver, query_resp->install_ver,
- sizeof(query_resp->active_ver)))
+
+ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+ activated = true;
continue;
+ }
+
+ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
patch_req->fw_target = target;
rc = hwrm_req_send(bp, patch_req);
@@ -424,8 +442,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
if (!rc && !installed) {
- NL_SET_ERR_MSG_MOD(extack, "No live patches found");
- rc = -ENOENT;
+ if (activated) {
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+ rc = -EEXIST;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
}
hwrm_req_drop(bp, query_req);
hwrm_req_drop(bp, patch_req);
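
The livepatch rework decodes the INSTALL and ACTIVE status flags into one of four states up front, which is what lets it pick ACTIVATE versus DEACTIVATE and report "already activated" precisely. A tiny sketch of that mask-then-switch decoding, with illustrative flag values:

/* Sketch of two-flag state decoding: INSTALL and ACTIVE combine into
 * four states, and masking first makes the switch exhaustive. The
 * flag values here are illustrative, not the firmware's. */
#include <stdio.h>

#define FLAG_INSTALL	0x1
#define FLAG_ACTIVE	0x2
#define STATE_MASK	(FLAG_INSTALL | FLAG_ACTIVE)

#define NOT_INSTALLED	0
#define INSTALLED	FLAG_INSTALL
#define REMOVED		FLAG_ACTIVE
#define ACTIVATED	STATE_MASK

int main(void)
{
	unsigned int flags = FLAG_INSTALL | FLAG_ACTIVE;

	switch (flags & STATE_MASK) {
	case NOT_INSTALLED:	puts("not installed");	break;
	case INSTALLED:		puts("installed");	break;
	case REMOVED:		puts("removed");	break;
	case ACTIVATED:		puts("activated");	break;
	}
	return 0;
}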
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 003330e8cd58..8aaa2335f848 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -25,6 +25,7 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
@@ -1969,6 +1970,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
fec->active_fec |= ETHTOOL_FEC_LLRS;
break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_OFF;
+ break;
}
return 0;
}
@@ -3454,7 +3458,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
if (!skb)
return -ENOMEM;
data = skb_put(skb, pkt_size);
- eth_broadcast_addr(data);
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
@@ -3548,9 +3552,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- rc = bnxt_close_nic(bp, false, false);
- if (rc)
+ bnxt_ulp_stop(bp);
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ bnxt_ulp_start(bp, rc);
return;
+ }
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -3560,6 +3567,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
+ bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -3585,7 +3593,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- rc = bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 566c9487ef55..b01d42928a53 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -644,17 +644,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Last byte of resp contains valid bit */
valid = ((u8 *)ctx->resp) + len - 1;
- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
break;
- usleep_range(1, 5);
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i), req_type,
+ hwrm_total_timeout(i) + j, req_type,
le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index d52bd2d63aec..c98032e38188 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -90,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
}
-#define HWRM_VALID_BIT_DELAY_USEC 150
+#define HWRM_VALID_BIT_DELAY_USEC 50000
static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
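
Raising HWRM_VALID_BIT_DELAY_USEC to 50 ms only works because the polling loop in __hwrm_send() now escalates: it busy-waits in 1 us steps for the first 10 us so fast firmware responses stay fast, then sleeps roughly 20-30 us per iteration so the long budget does not spin a CPU. A user-space sketch of that two-phase poll, with a nanosleep()-based stand-in for udelay()/usleep_range():

/* Sketch of the two-phase poll: a tight phase for quick completions,
 * a relaxed phase for the long tail. delay_us() is an illustrative
 * stand-in for the kernel delay helpers. */
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define VALID_BIT_DELAY_USEC 50000

static void delay_us(long us)
{
	struct timespec ts = { 0, us * 1000L };
	nanosleep(&ts, NULL);
}

static volatile bool valid;	/* set by "firmware" in real code */

int main(void)
{
	long j;

	for (j = 0; j < VALID_BIT_DELAY_USEC; ) {
		if (valid)
			break;
		if (j < 10) {
			delay_us(1);	/* tight phase */
			j++;
		} else {
			delay_us(25);	/* relaxed phase */
			j += 20;
		}
	}
	printf("polled ~%ld us, valid=%d\n", j, valid);
	return 0;
}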
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 691605c15265..d5356db7539a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
return 0;
}
-static void ftgmac100_adjust_link(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- bool tx_pause, rx_pause;
- int new_speed;
-
- /* We store "no link" as speed 0 */
- if (!phydev->link)
- new_speed = 0;
- else
- new_speed = phydev->speed;
-
- /* Grab pause settings from PHY if configured to do so */
- if (priv->aneg_pause) {
- rx_pause = tx_pause = phydev->pause;
- if (phydev->asym_pause)
- tx_pause = !rx_pause;
- } else {
- rx_pause = priv->rx_pause;
- tx_pause = priv->tx_pause;
- }
-
- /* Link hasn't changed, do nothing */
- if (phydev->speed == priv->cur_speed &&
- phydev->duplex == priv->cur_duplex &&
- rx_pause == priv->rx_pause &&
- tx_pause == priv->tx_pause)
- return;
-
- /* Print status if we have a link or we had one and just lost it,
- * don't print otherwise.
- */
- if (new_speed || priv->cur_speed)
- phy_print_status(phydev);
-
- priv->cur_speed = new_speed;
- priv->cur_duplex = phydev->duplex;
- priv->rx_pause = rx_pause;
- priv->tx_pause = tx_pause;
-
- /* Link is down, do nothing else */
- if (!new_speed)
- return;
-
- /* Disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Reset the adapter asynchronously */
- schedule_work(&priv->reset_task);
-}
-
-static int ftgmac100_mii_probe(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct platform_device *pdev = to_platform_device(priv->dev);
- struct device_node *np = pdev->dev.of_node;
- struct phy_device *phydev;
- phy_interface_t phy_intf;
- int err;
-
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
-
- phydev = phy_find_first(priv->mii_bus);
- if (!phydev) {
- netdev_info(netdev, "%s: no PHY found\n", netdev->name);
- return -ENODEV;
- }
-
- phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, phy_intf);
-
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
- return PTR_ERR(phydev);
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phydev);
-
- /* Display what we found */
- phy_attached_info(phydev);
-
- return 0;
-}
-
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
return err;
}
-static void ftgmac100_reset_task(struct work_struct *work)
+static void ftgmac100_reset(struct ftgmac100 *priv)
{
- struct ftgmac100 *priv = container_of(work, struct ftgmac100,
- reset_task);
struct net_device *netdev = priv->netdev;
int err;
@@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+
+ ftgmac100_reset(priv);
+}
+
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
+
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
+ return;
+
+ /* Print status if we have a link or we had one and just lost it,
+ * don't print otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
+
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
+
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
+
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+	/* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
+	 * order consistent to prevent deadlock.
+ */
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+
+ ftgmac100_reset(priv);
+
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+
+}
+
+static int ftgmac100_mii_probe(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+
+ phydev = phy_find_first(priv->mii_bus);
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, phydev_name(phydev),
+ &ftgmac100_adjust_link, phy_intf);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phydev);
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
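
The ftgmac100 rework calls the reset synchronously from adjust_link(), which runs with the phydev lock held, while the reset path takes other locks before the phy lock; dropping the phy lock around the reset and re-taking it afterwards keeps a single global lock order and avoids an ABBA deadlock. A pthread sketch of that discipline (lock names are illustrative):

/* Sketch of the lock-ordering fix: the caller holds A, the reset path
 * takes B then A, so the caller drops A around the reset to preserve
 * the global B-then-A order. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* phy lock  */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* other lock */

static void reset_path(void)
{
	pthread_mutex_lock(&lock_b);	/* reset takes B then A */
	pthread_mutex_lock(&lock_a);
	puts("reset done under B+A");
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
}

static void adjust_link(void)
{
	pthread_mutex_lock(&lock_a);	/* caller's context holds A */

	pthread_mutex_unlock(&lock_a);	/* drop A before the reset */
	reset_path();
	pthread_mutex_lock(&lock_a);	/* re-take A afterwards */

	pthread_mutex_unlock(&lock_a);
}

int main(void)
{
	adjust_link();
	return 0;
}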
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index dd9385d15f6b..0f90d2d5bb60 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4338,7 +4338,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index d6eefbbf163f..cacd454ac696 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 29617a86b299..dee05a353dbd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -5917,10 +5917,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
be64_to_cpu(session_token));
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_SESSION_ERR_DETECTED, session_token, 0, 0);
- if (rc)
+ if (rc) {
netdev_err(netdev,
"H_VIOCTL initiated failover failed, rc %ld\n",
rc);
+ goto last_resort;
+ }
+
+ return count;
last_resort:
netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0c4b7dfb3b35..31b03fe78d3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5372,15 +5372,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
/* There is no need to reset BW when mqprio mode is on. */
if (pf->flags & I40E_FLAG_TC_MQPRIO)
return 0;
-
- if (!vsi->mqprio_qopt.qopt.hw) {
- if (pf->flags & I40E_FLAG_DCB_ENABLED)
- goto skip_reset;
-
- if (IS_ENABLED(CONFIG_I40E_DCB) &&
- i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
- goto skip_reset;
-
+ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&pf->pdev->dev,
@@ -5388,8 +5380,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
vsi->seid);
return ret;
}
-
-skip_reset:
memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a9fa701aaa95..473b1f6be9de 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -280,7 +280,6 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
- ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index a6d7d3eff186..e2af99a763ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -3340,7 +3340,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
!ice_fw_supports_report_dflt_cfg(hw)) {
- struct ice_link_default_override_tlv tlv;
+ struct ice_link_default_override_tlv tlv = { 0 };
status = ice_get_link_default_override(&tlv, pi);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 864692b157b6..73edc24d81d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -44,6 +44,7 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
ctrl_vsi->rxq_map[vf->vf_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
+ rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
vf->repr->mac_rule);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c187cf04fcf..53256aca27c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1684,6 +1684,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
+
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+ ICE_FLOW_SEG_HDR_ESP);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 17a9bb461dc3..f3c346e13b7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1799,7 +1799,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
+ mutex_lock(&pf->vf[i].cfg_lock);
ice_reset_vf(&pf->vf[i], false);
+ mutex_unlock(&pf->vf[i].cfg_lock);
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index dc1b0e9e6df5..695b6dd61dc2 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -47,6 +47,7 @@ enum ice_protocol_type {
enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
+ ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ae291d442539..000c39d163a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1533,9 +1533,12 @@ exit:
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
struct timespec64 now, then;
+ int ret;
then = ns_to_timespec64(delta);
- ice_ptp_gettimex64(info, &now, NULL);
+ ret = ice_ptp_gettimex64(info, &now, NULL);
+ if (ret)
+ return ret;
now = timespec64_add(now, then);
return ice_ptp_settime64(info, (const struct timespec64 *)&now);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 11ae0bee3590..475ec2afa210 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4537,6 +4537,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_NVGRE:
prof_type = ICE_PROF_TUN_GRE;
break;
+ case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
break;
@@ -5305,7 +5306,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_ice_add_adv_rule;
- if (rinfo->tun_type != ICE_NON_TUN) {
+ if (rinfo->tun_type != ICE_NON_TUN &&
+ rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
s_rule->pdata.lkup_tx_rx.hdr,
pkt_offsets);
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e8aab664270a..65cf32eb4046 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -709,7 +709,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+
headers->l4_key.dst_port = match.key->dst;
headers->l4_mask.dst_port = match.mask->dst;
}
@@ -718,7 +718,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+
headers->l4_key.src_port = match.key->src;
headers->l4_mask.src_port = match.mask->src;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 39b80124d282..408f78e3eb13 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -500,8 +500,6 @@ void ice_free_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
unsigned int tmp, i;
- set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
-
if (!pf->vf)
return;
@@ -519,22 +517,26 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- ice_dis_vf_qs(&pf->vf[i]);
-
tmp = pf->num_alloc_vfs;
pf->num_qps_per_vf = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
- if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ mutex_lock(&vf->cfg_lock);
+
+ ice_dis_vf_qs(vf);
+
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
- ice_dis_vf_mappings(&pf->vf[i]);
- set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
- ice_free_vf_res(&pf->vf[i]);
+ ice_dis_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_free_vf_res(vf);
}
- mutex_destroy(&pf->vf[i].cfg_lock);
+ mutex_unlock(&vf->cfg_lock);
+
+ mutex_destroy(&vf->cfg_lock);
}
if (ice_sriov_free_msix_res(pf))
@@ -570,7 +572,6 @@ void ice_free_vfs(struct ice_pf *pf)
i);
clear_bit(ICE_VF_DIS, pf->state);
- clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
@@ -1498,6 +1499,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
+ mutex_lock(&vf->cfg_lock);
+
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -1512,6 +1515,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
}
if (ice_is_eswitch_mode_switchdev(pf))
@@ -1562,6 +1567,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
u32 reg;
int i;
+ lockdep_assert_held(&vf->cfg_lock);
+
dev = ice_pf_to_dev(pf);
if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
@@ -2061,9 +2068,12 @@ void ice_process_vflr_event(struct ice_pf *pf)
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx))
+ if (reg & BIT(bit_idx)) {
/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ mutex_lock(&vf->cfg_lock);
ice_reset_vf(vf, true);
+ mutex_unlock(&vf->cfg_lock);
+ }
}
}
@@ -2140,7 +2150,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
if (!vf)
return;
+ mutex_lock(&vf->cfg_lock);
ice_vc_reset_vf(vf);
+ mutex_unlock(&vf->cfg_lock);
}
/**
@@ -4625,10 +4637,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
struct device *dev;
int err = 0;
- /* if de-init is underway, don't process messages from VF */
- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
- return;
-
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;
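
The virtchnl changes replace the coarse ICE_VF_DEINIT_IN_PROGRESS flag with per-VF serialization: every path that resets or reconfigures a VF (VFLR, mailbox overflow, teardown) now takes vf->cfg_lock, and ice_reset_vf() enforces the contract with lockdep. A condensed sketch of that convention, using real kernel locking primitives but hypothetical structures:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct my_vf {
	struct mutex cfg_lock;	/* serializes VF reset/configuration */
};

/* Callee documents and enforces the locking contract. */
static void my_vf_reset(struct my_vf *vf)
{
	lockdep_assert_held(&vf->cfg_lock);
	/* ... reset work, safe against concurrent configuration ... */
}

/* Every event path wraps the reset in the per-VF lock. */
static void my_vf_handle_event(struct my_vf *vf)
{
	mutex_lock(&vf->cfg_lock);
	my_vf_reset(vf);
	mutex_unlock(&vf->cfg_lock);
}
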
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 105247582684..143ca8be5eb5 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2704,6 +2704,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
static struct platform_device *port_platdev[3];
+static void mv643xx_eth_shared_of_remove(void)
+{
+ int n;
+
+ for (n = 0; n < 3; n++) {
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+}
+
static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
struct device_node *pnp)
{
@@ -2740,7 +2750,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
return -EINVAL;
}
- of_get_mac_address(pnp, ppd.mac_addr);
+ ret = of_get_mac_address(pnp, ppd.mac_addr);
+ if (ret)
+ return ret;
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2804,21 +2816,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
of_node_put(pnp);
+ mv643xx_eth_shared_of_remove();
return ret;
}
}
return 0;
}
-static void mv643xx_eth_shared_of_remove(void)
-{
- int n;
-
- for (n = 0; n < 3; n++) {
- platform_device_del(port_platdev[n]);
- port_platdev[n] = NULL;
- }
-}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7cdbf8b8bbf6..1a835b48791b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6870,6 +6870,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
dev->dev.of_node = port_node;
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
@@ -6940,9 +6943,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.supported_interfaces);
}
- port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
-
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 26efa33de56f..9cc844bd00f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -16,11 +16,13 @@ struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
+ bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
+ struct mlx5e_mpls_info mpls_info;
struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 06ec30cdb269..58cc33f1363d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -27,8 +27,13 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
int err;
+ /* It's redundant to do ct clear more than once. */
+ if (clear_action && parse_state->ct_clear)
+ return 0;
+
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
&attr->parse_attr->mod_hdr_acts,
act, parse_state->extack);
@@ -40,6 +45,8 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->ct_clear = clear_action;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index c614fc7fdc9c..2e615e0ba972 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -177,6 +177,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
return -ENOMEM;
parse_state->encap = false;
+
+ if (parse_state->mpls_push) {
+ memcpy(&parse_attr->mpls_info[esw_attr->out_count],
+ &parse_state->mpls_info, sizeof(parse_state->mpls_info));
+ parse_state->mpls_push = false;
+ }
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we handle encap */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
index 784fc4f68b1e..89ca88c78840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -22,6 +22,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
return true;
}
+static void
+copy_mpls_info(struct mlx5e_mpls_info *mpls_info,
+ const struct flow_action_entry *act)
+{
+ mpls_info->label = act->mpls_push.label;
+ mpls_info->tc = act->mpls_push.tc;
+ mpls_info->bos = act->mpls_push.bos;
+ mpls_info->ttl = act->mpls_push.ttl;
+}
+
static int
tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -29,6 +39,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
parse_state->mpls_push = true;
+ copy_mpls_info(&parse_state->mpls_info, act);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index f832c26ff2c3..70b40ae384e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -35,6 +35,7 @@ enum {
struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 9918ed8c059b..d39d0dae22fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -750,6 +750,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
+ const struct mlx5e_mpls_info *mpls_info;
unsigned long tbl_time_before = 0;
struct mlx5e_encap_entry *e;
struct mlx5e_encap_key key;
@@ -760,6 +761,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = parse_attr->tun_info[out_index];
+ mpls_info = &parse_attr->mpls_info[out_index];
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
@@ -810,6 +812,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
goto out_err_init;
}
e->tun_info = tun_info;
+ memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info));
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
if (err)
goto out_err_init;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 60952b33b568..c5b1617d556f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[],
struct mlx5e_encap_entry *r)
{
const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+ const struct mlx5e_mpls_info *mpls_info = &r->mpls_info;
struct udphdr *udp = (struct udphdr *)(buf);
struct mpls_shim_hdr *mpls;
- u32 tun_id;
- tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
mpls = (struct mpls_shim_hdr *)(udp + 1);
*ip_proto = IPPROTO_UDP;
udp->dest = tun_key->tp_dst;
- *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+ *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos);
return 0;
}
@@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
void *headers_v)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct flow_match_enc_keyid enc_keyid;
struct flow_match_mpls match;
void *misc2_c;
void *misc2_v;
- misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters_2);
- misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters_2);
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
- return 0;
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
- return 0;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
-
- if (!enc_keyid.mask->keyid)
- return 0;
-
if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return -EOPNOTSUPP;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+ return 0;
+
flow_rule_match_mpls(rule, &match);
/* Only support matching the first LSE */
if (match.mask->used_lses != 1)
return -EOPNOTSUPP;
+ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_label,
match.mask->ls[0].mpls_label);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 57d755db1cf5..6e80585d731f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
__func__, size_read);
- return 0;
+ return size_read;
}
i += size_read;
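
Returning 0 after a failed page read made the caller report success with a short, partially filled buffer. Forwarding the negative value is the general rule for accumulate-and-read loops; a standalone sketch with a hypothetical query callback:

/* Accumulate partial reads; forward the callee's negative errno
 * instead of masking it with a "successful" 0.
 */
static int read_eeprom(int (*query)(char *dst, int off, int len),
		       char *buf, int total)
{
	int i = 0;

	while (i < total) {
		int size_read = query(buf + i, i, total - i);

		if (size_read < 0)
			return size_read;	/* was: return 0 */
		if (size_read == 0)
			break;			/* no more data */
		i += size_read;
	}
	return i;				/* bytes actually read */
}
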
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b01dacb6f527..b3f7520dfd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -183,6 +183,13 @@ struct mlx5e_decap_entry {
struct rcu_head rcu;
};
+struct mlx5e_mpls_info {
+ u32 label;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+};
+
struct mlx5e_encap_entry {
/* attached neigh hash entry */
struct mlx5e_neigh_hash_entry *nhe;
@@ -196,6 +203,7 @@ struct mlx5e_encap_entry {
struct list_head route_list;
struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
+ struct mlx5e_mpls_info mpls_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ee0a8f5206e3..6530d7bd5045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1349,7 +1349,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
}
/* True when explicitly set via priv flag, or XDP prog is loaded */
- if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
+ get_cqe_tls_offload(cqe))
goto csum_unnecessary;
/* CQE csum doesn't cover padding octets in short ethernet
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 8c9163d2c646..08a75654f5f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
buf[count] = st.st_func(priv);
netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+ count++;
}
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 26e326fe503c..00f1d16db456 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1254,9 +1254,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
-
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
@@ -1272,6 +1269,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
struct ethtool_fec_stats *fec_stats)
{
+ if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ return;
+
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, fec_stats);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2022fa4a9598..b27532a9301e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3204,6 +3204,18 @@ actions_match_supported(struct mlx5e_priv *priv,
return false;
}
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return false;
+ }
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
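
The first new check relies on a compact idiom: `~actions & (FWD | DROP)` is zero exactly when both bits are set in actions, so negating it detects a rule that tries to forward and drop at the same time. A standalone illustration:

#include <assert.h>
#include <stdio.h>

#define ACT_FWD  (1u << 0)
#define ACT_DROP (1u << 1)

/* True iff every bit in 'required' is set in 'actions'. */
static int has_all(unsigned int actions, unsigned int required)
{
	return !(~actions & required);
}

int main(void)
{
	assert(!has_all(ACT_FWD, ACT_FWD | ACT_DROP));
	assert(!has_all(ACT_DROP, ACT_FWD | ACT_DROP));
	assert(has_all(ACT_FWD | ACT_DROP, ACT_FWD | ACT_DROP));
	puts("fwd+drop detected only when both bits are set");
	return 0;
}
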
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 11bbcd5f5b8b..694c54066955 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
}
int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 min_rate, u32 max_rate)
+ u32 max_rate, u32 min_rate)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9a7b25692505..cfcd72bad9af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return false;
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- mlx5_ecpf_vport_exists(esw->dev))
- return false;
-
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b628917e38e4..537c82b9aa53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2074,6 +2074,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
+ } else {
+ up_write_ref_node(&fte->node, false);
}
kfree(handle);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index df58cba37930..1e8ec4f236b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2c774f367199..bba72b220cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
/* Check log_max_qp from HCA caps to set in current profile */
if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
- prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
@@ -1840,10 +1840,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
+ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 7f6fd9c5e371..e289cfdbce07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,7 +4,6 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
struct mlx5dr_icm_pool {
enum mlx5dr_icm_type icm_type;
@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
kvfree(icm_mr);
}
-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
{
- chunk->ste_arr = kvzalloc(chunk->num_of_entries *
- sizeof(chunk->ste_arr[0]), GFP_KERNEL);
- if (!chunk->ste_arr)
- return -ENOMEM;
-
- chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
- DR_STE_SIZE_REDUCED, GFP_KERNEL);
- if (!chunk->hw_ste_arr)
- goto out_free_ste_arr;
-
- chunk->miss_list = kvmalloc(chunk->num_of_entries *
- sizeof(chunk->miss_list[0]), GFP_KERNEL);
- if (!chunk->miss_list)
- goto out_free_hw_ste_arr;
+ /* We support only one STE size, for both ConnectX-5 and later
+ * devices. Once support is added for the match STE with a larger
+ * tag (32B instead of 16B), the STE size for devices later than
+ * ConnectX-5 will need to account for that.
+ */
+ return DR_STE_SIZE_REDUCED;
+}
- return 0;
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ int index = offset / DR_STE_SIZE;
-out_free_hw_ste_arr:
- kvfree(chunk->hw_ste_arr);
-out_free_ste_arr:
- kvfree(chunk->ste_arr);
- return -ENOMEM;
+ chunk->ste_arr = &buddy->ste_arr[index];
+ chunk->miss_list = &buddy->miss_list[index];
+ chunk->hw_ste_arr = buddy->hw_ste_arr +
+ index * dr_icm_buddy_get_ste_size(buddy);
}
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
- kvfree(chunk->miss_list);
- kvfree(chunk->hw_ste_arr);
- kvfree(chunk->ste_arr);
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+
+ memset(chunk->hw_ste_arr, 0,
+ chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ memset(chunk->ste_arr, 0,
+ chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
}
static enum mlx5dr_icm_type
@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
kvfree(chunk);
}
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ int num_of_entries =
+ mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+ buddy->ste_arr = kvcalloc(num_of_entries,
+ sizeof(struct mlx5dr_ste), GFP_KERNEL);
+ if (!buddy->ste_arr)
+ return -ENOMEM;
+
+ /* Preallocate full STE size on non-ConnectX-5 devices since
+ * we need to support both full and reduced with the same cache.
+ */
+ buddy->hw_ste_arr = kvcalloc(num_of_entries,
+ dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+ if (!buddy->hw_ste_arr)
+ goto free_ste_arr;
+
+ buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
+ if (!buddy->miss_list)
+ goto free_hw_ste_arr;
+
+ return 0;
+
+free_hw_ste_arr:
+ kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+ kvfree(buddy->ste_arr);
+ return -ENOMEM;
+}
+
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ kvfree(buddy->ste_arr);
+ kvfree(buddy->hw_ste_arr);
+ kvfree(buddy->miss_list);
+}
+
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy;
@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
buddy->icm_mr = icm_mr;
buddy->pool = pool;
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ /* Reduce allocations by preallocating and reusing the STE structures */
+ if (dr_icm_buddy_init_ste_cache(buddy))
+ goto err_cleanup_buddy;
+ }
+
/* add it to the -start- of the list in order to search in it first */
list_add(&buddy->list_node, &pool->buddy_mem_list);
return 0;
+err_cleanup_buddy:
+ mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
kvfree(buddy);
free_mr:
@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
mlx5dr_buddy_cleanup(buddy);
+ if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_buddy_cleanup_ste_cache(buddy);
+
kvfree(buddy);
}
@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
chunk->byte_size =
mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
chunk->seg = seg;
+ chunk->buddy_mem = buddy_mem_pool;
- if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
- mlx5dr_err(pool->dmn,
- "Failed to init ste arrays (order: %d)\n",
- chunk_size);
- goto out_free_chunk;
- }
+ if (pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_chunk_ste_init(chunk, offset);
buddy_mem_pool->used_memory += chunk->byte_size;
- chunk->buddy_mem = buddy_mem_pool;
INIT_LIST_HEAD(&chunk->chunk_list);
/* chunk now is part of the used_list */
list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
return chunk;
-
-out_free_chunk:
- kvfree(chunk);
- return NULL;
}
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
- if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
- return true;
+ int allow_hot_size;
+
+ /* sync when hot memory reaches half of the pool size */
+ allow_hot_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) / 2;
- return false;
+ return pool->hot_memory_size > allow_hot_size;
}
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
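
The ICM rework above trades per-chunk kvzalloc/kvfree churn for a single preallocation per buddy: the buddy keeps arrays sized for the largest chunk order, a chunk merely indexes into them by its ICM offset, and cleanup becomes a memset instead of three frees. A simplified standalone sketch of the slicing (the sizes here are hypothetical):

#include <stddef.h>
#include <string.h>

#define ENTRY_SIZE	64	/* hypothetical ICM entry size (DR_STE_SIZE) */
#define CACHE_STRIDE	48	/* hypothetical cached (reduced) STE size */

struct buddy {
	unsigned char *hw_ste_arr;	/* one backing array for all chunks */
};

struct chunk {
	unsigned char *hw_ste;		/* slice inside buddy->hw_ste_arr */
	int num_entries;
};

/* init: no allocation, just pointer arithmetic into the cache */
static void chunk_init(struct chunk *c, struct buddy *b,
		       int offset, int num_entries)
{
	int index = offset / ENTRY_SIZE;

	c->num_entries = num_entries;
	c->hw_ste = b->hw_ste_arr + (size_t)index * CACHE_STRIDE;
}

/* cleanup: no free, just scrub the slice for the next user */
static void chunk_cleanup(struct chunk *c)
{
	memset(c->hw_ste, 0, (size_t)c->num_entries * CACHE_STRIDE);
}
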
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index e87cf498c77b..38971fe1dfe1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
return (spec->dmac_47_16 || spec->dmac_15_0);
}
-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
- spec->src_ip_63_32 || spec->src_ip_31_0);
-}
-
-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
- spec->dst_ip_63_32 || spec->dst_ip_31_0);
-}
-
static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
{
return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
@@ -503,11 +491,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.outer))
+ if (DR_MASK_IS_DST_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.outer))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
@@ -610,11 +598,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.inner))
+ if (DR_MASK_IS_DST_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.inner))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 7e61742e58a0..187e29b409b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
used_hw_action_num);
}
+static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
+ struct mlx5dr_match_spec *spec)
+{
+ if (spec->ip_version) {
+ if (spec->ip_version != 0xf) {
+ mlx5dr_err(dmn,
+ "Partial ip_version mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+ } else if (spec->ethertype != 0xffff &&
+ (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
+ mlx5dr_err(dmn,
+ "Partial/no ethertype mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
struct mlx5dr_match_param *value)
{
- if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+ if (value)
+ return 0;
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
mlx5dr_err(dmn,
"Partial mask source_port is not supported\n");
@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
}
}
+ if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->outer))
+ return -EINVAL;
+
+ if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->inner))
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1b3d484b99be..55fcb751e24a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -798,6 +798,16 @@ struct mlx5dr_match_param {
(_misc3)->icmpv4_code || \
(_misc3)->icmpv4_header_data)
+#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
+ (_spec)->src_ip_95_64 || \
+ (_spec)->src_ip_63_32 || \
+ (_spec)->src_ip_31_0)
+
+#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
+ (_spec)->dst_ip_95_64 || \
+ (_spec)->dst_ip_63_32 || \
+ (_spec)->dst_ip_31_0)
+
struct mlx5dr_esw_caps {
u64 drop_icm_address_rx;
u64 drop_icm_address_tx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index a476da2424f8..3f311462bedf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
+/* We want to support a rule with 32 destinations, which means we need
+ * room for the 32 destinations plus, typically, a counter and one more
+ * action for a multi-destination flow table.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
- num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -499,14 +504,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
- if (term_actions->reformat)
+ if (term_actions->reformat) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->reformat;
+ }
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c7c93131b762..dfa223415fe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
* sync_ste command sets them free.
*/
struct list_head hot_list;
+
+ /* Memory optimisation */
+ struct mlx5dr_ste *ste_arr;
+ struct list_head *miss_list;
+ u8 *hw_ste_arr;
};
int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index e6de86552df0..fd3ceb74620d 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -549,14 +549,18 @@ EXPORT_SYMBOL(ocelot_vlan_add);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool del_pvid = false;
int err;
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ del_pvid = true;
+
err = ocelot_vlan_member_del(ocelot, port, vid);
if (err)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
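
The ocelot change avoids reading state after the call that may have freed it: ocelot_vlan_member_del() can release the struct that the port's pvid_vlan points to, so the old post-deletion check risked a use-after-free. Sampling the condition first is the general cure; a standalone sketch:

#include <stdbool.h>
#include <stdlib.h>

struct vlan { int vid; };
struct port { struct vlan *pvid_vlan; };

/* May free the object that p->pvid_vlan points at. */
static void vlan_member_del(struct port *p, int vid)
{
	if (p->pvid_vlan && p->pvid_vlan->vid == vid)
		free(p->pvid_vlan);	/* pointer now dangles */
}

static void vlan_del(struct port *p, int vid)
{
	/* sample the state BEFORE the call that may invalidate it */
	bool del_pvid = p->pvid_vlan && p->pvid_vlan->vid == vid;

	vlan_member_del(p, vid);

	if (del_pvid)			/* no dereference of a stale pointer */
		p->pvid_vlan = NULL;
}
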
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 784292b16290..1543e47456d5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -723,6 +723,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_gretap(netdev))
return true;
+ if (netif_is_ip6gretap(netdev))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 0a326e04e692..cb43651ea9ba 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
int port, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- int ida_idx = NFP_MAX_MAC_INDEX, err;
struct nfp_tun_offloaded_mac *entry;
+ int ida_idx = -1, err;
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
@@ -997,7 +997,7 @@ err_remove_hash:
err_free_entry:
kfree(entry);
err_free_ida:
- if (ida_idx != NFP_MAX_MAC_INDEX)
+ if (ida_idx != -1)
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
return err;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b900ab5aef2a..64c7e26c3b75 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1433,6 +1433,8 @@ static int temac_probe(struct platform_device *pdev)
lp->indirect_lock = devm_kmalloc(&pdev->dev,
sizeof(*lp->indirect_lock),
GFP_KERNEL);
+ if (!lp->indirect_lock)
+ return -ENOMEM;
spin_lock_init(lp->indirect_lock);
}
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index b1fc153125d9..45c3c4a1101b 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -668,11 +668,11 @@ static void sixpack_close(struct tty_struct *tty)
*/
netif_stop_queue(sp->dev);
+ unregister_netdev(sp->dev);
+
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
- unregister_netdev(sp->dev);
-
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index f3438d3e104a..2bc730fd260e 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2975,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
ca8210_hw->phy->cca_ed_level = -9800;
ca8210_hw->phy->symbol_duration = 16;
- ca8210_hw->phy->lifs_period = 40;
- ca8210_hw->phy->sifs_period = 12;
+ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
ca8210_hw->flags =
IEEE802154_HW_AFILT |
IEEE802154_HW_OMIT_CKSUM |
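
The ca8210 values were previously stored as raw symbol counts, while the fix implies the fields are consumed in microseconds. With the 16 µs O-QPSK symbol at 2.4 GHz, a LIFS of 40 symbols is 640 µs and a SIFS of 12 symbols is 192 µs:

#include <stdio.h>

int main(void)
{
	/* IEEE 802.15.4 O-QPSK at 2.4 GHz: one symbol lasts 16 us */
	const unsigned int symbol_duration_us = 16;
	const unsigned int lifs_symbols = 40, sifs_symbols = 12;

	/* the phy fields expect microseconds, not symbol counts */
	printf("LIFS = %u us\n", lifs_symbols * symbol_duration_us); /* 640 */
	printf("SIFS = %u us\n", sifs_symbols * symbol_duration_us); /* 192 */
	return 0;
}
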
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index eaa6fb3224bc..62723a7faa2d 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
mctp_serial_push(dev, c[i]);
}
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+
+ cancel_work_sync(&dev->tx_work);
+}
+
static const struct net_device_ops mctp_serial_netdev_ops = {
.ndo_start_xmit = mctp_serial_tx,
+ .ndo_uninit = mctp_serial_uninit,
};
static void mctp_serial_setup(struct net_device *ndev)
@@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty)
int idx = dev->idx;
unregister_netdev(dev->netdev);
- cancel_work_sync(&dev->tx_work);
ida_free(&mctp_serial_ida, idx);
}
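
Cancelling the TX work in close() left a window where a packet queued just before unregister_netdev() could re-schedule the work after the cancel. Hooking .ndo_uninit instead lets the netdev core flush the work while the queue is already stopped. A minimal kernel-style sketch of the wiring, with the private struct abbreviated:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct serial_priv {
	struct work_struct tx_work;
};

/* Invoked from unregister_netdev() after TX has been stopped:
 * the last point where queued work can safely be flushed.
 */
static void serial_uninit(struct net_device *ndev)
{
	struct serial_priv *priv = netdev_priv(ndev);

	cancel_work_sync(&priv->tx_work);
}

static const struct net_device_ops serial_netdev_ops = {
	.ndo_uninit = serial_uninit,
};
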
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 5f4cd24a0241..4eba5a91075c 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus)
if (ret)
return ret;
- return clk_prepare_enable(priv->mdio_clk);
+ ret = clk_prepare_enable(priv->mdio_clk);
+ if (ret == 0)
+ mdelay(10);
+
+ return ret;
}
static int ipq4019_mdio_probe(struct platform_device *pdev)
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 4300261e2f9e..378ee779061c 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
if (err)
goto err_fib6_rt_nh_del;
- fib6_event->rt_arr[i]->trap = true;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
}
return 0;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
- fib6_event->rt_arr[i]->trap = false;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
return err;
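
WRITE_ONCE() only helps when the lockless reader uses the matching READ_ONCE(): together they force single, untorn accesses and keep the compiler from reordering or splitting the plain load/store. The pairing, as a kernel-style sketch:

#include <linux/compiler.h>
#include <linux/types.h>

struct rt_stub {
	bool trap;
};

/* writer side (fib notifier context, holds its own lock) */
static void rt_set_trap(struct rt_stub *rt, bool on)
{
	WRITE_ONCE(rt->trap, on);	/* single, untorn store */
}

/* reader side (datapath, runs without the writer's lock) */
static bool rt_trapped(const struct rt_stub *rt)
{
	return READ_ONCE(rt->trap);	/* single, untorn load */
}
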
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
index b7a5ae20edd5..68ee434f9dea 100644
--- a/drivers/net/phy/mediatek-ge.c
+++ b/drivers/net/phy/mediatek-ge.c
@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
static int mt7531_phy_config_init(struct phy_device *phydev)
{
- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
-
mtk_gephy_config_init(phydev);
/* PHY link down power saving enable */
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index eb3817d70f2b..9b4dfa3001d6 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -583,6 +583,11 @@ static const struct usb_device_id products[] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
* wire-incompatible with true CDC Ethernet implementations.
* (And, it seems, needlessly so...)
@@ -640,6 +645,13 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
/* reported with some C860 units */
.idProduct = 0x9050, /* C-860 */
ZAURUS_MASTER_INTERFACE,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 82bb5ed94c48..c0b8b4aa78f3 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -659,6 +659,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FN990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e303b522efb5..15f91d691bba 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
+ unsigned int len;
int nframes;
int x;
- int offset;
+ unsigned int offset;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
@@ -1790,8 +1790,8 @@ next_ndp:
break;
}
- /* sanity checking */
- if (((offset + len) > skb_in->len) ||
+ /* sanity checking - watch out for integer wrap */
+ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index b658510cc9a4..5a53e63d33a6 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
- if (len > ETH_FRAME_LEN)
+ if (len > ETH_FRAME_LEN || len > skb->len)
return 0;
/* the last packet of current skb */
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 8e717a0b559b..7984f2157d22 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
{
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -315,6 +320,13 @@ static const struct usb_device_id products [] = {
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
/* reported with some C860 units */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 0eb13e5df517..d99140960a82 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
{
struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
- char *alt_path;
+ char *alt_path = NULL;
int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
@@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
+ if (fwctx->req->board_type)
+ alt_path = brcm_alt_fw_path(first->path,
+ fwctx->req->board_type);
if (alt_path) {
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c21c0c68849a..85e704283755 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
-config IWLWIFI_BCAST_FILTERING
- bool "Enable broadcast filtering"
- depends on IWLMVM
- help
- Say Y here to enable default bcast filtering configuration.
-
- Enabling broadcast filtering will drop any incoming wireless
- broadcast frames, except some very specific predefined
- patterns (e.g. incoming arp requests).
-
- If unsure, don't enable this option, as some programs might
- expect incoming broadcasts for their normal operations.
-
menu "Debugging Options"
config IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 790c96df58cb..c17ab53fcd8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -888,10 +888,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 0703e41403a6..35b8856e511f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -502,11 +502,6 @@ enum iwl_legacy_cmds {
DEBUG_LOG_MSG = 0xf7,
/**
- * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
- */
- BCAST_FILTER_CMD = 0xcf,
-
- /**
* @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
*/
MCAST_FILTER_CMD = 0xd0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
index dd62a63956b3..e44c70b7c790 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
-#define MAX_BCAST_FILTERS 8
-#define MAX_BCAST_FILTER_ATTRS 2
-
-/**
- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
- * start of ip payload).
- */
-enum iwl_mvm_bcast_filter_attr_offset {
- BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
- BCAST_FILTER_OFFSET_IP_END = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
- * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
- * @offset: starting offset of this pattern.
- * @reserved1: reserved
- * @val: value to match - big endian (MSB is the first
- * byte to match from offset pos).
- * @mask: mask to match (big endian).
- */
-struct iwl_fw_bcast_filter_attr {
- u8 offset_type;
- u8 offset;
- __le16 reserved1;
- __be32 val;
- __be32 mask;
-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
-
-/**
- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
- */
-enum iwl_mvm_bcast_filter_frame_type {
- BCAST_FILTER_FRAME_TYPE_ALL = 0,
- BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter - broadcast filter
- * @discard: discard frame (1) or let it pass (0).
- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
- * @reserved1: reserved
- * @num_attrs: number of valid attributes in this filter.
- * @attrs: attributes of this filter. a filter is considered matched
- * only when all its attributes are matched (i.e. AND relationship)
- */
-struct iwl_fw_bcast_filter {
- u8 discard;
- u8 frame_type;
- u8 num_attrs;
- u8 reserved1;
- struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
-} __packed; /* BCAST_FILTER_S_VER_1 */
-
-/**
- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
- * @default_discard: default action for this mac (discard (1) / pass (0)).
- * @reserved1: reserved
- * @attached_filters: bitmap of relevant filters for this mac.
- */
-struct iwl_fw_bcast_mac {
- u8 default_discard;
- u8 reserved1;
- __le16 attached_filters;
-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
-
-/**
- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
- * @disable: enable (0) / disable (1)
- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
- * @reserved1: reserved
- * @filters: broadcast filters
- * @macs: broadcast filtering configuration per-mac
- */
-struct iwl_bcast_filter_cmd {
- u8 disable;
- u8 max_bcast_filters;
- u8 max_macs;
- u8 reserved1;
- struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
- struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
-
#endif /* __iwl_fw_api_filter_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 173a6991587b..4a7723eb8c1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -752,7 +752,6 @@ struct iwl_lq_cmd {
u8 iwl_fw_rate_idx_to_plcp(int idx);
u32 iwl_new_rate_from_v1(u32 rate_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index e4ebda64cd52..efc6540d31af 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -181,7 +181,6 @@ struct iwl_ucode_capa {
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
@@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index a21c3befd93b..a835214611ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw)
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
u32 iwl_new_rate_from_v1(u32 rate_v1)
{
u32 rate_v2 = 0;
@@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
} else {
u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
- WARN_ON(legacy_rate < 0);
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
rate_v2 |= legacy_rate;
if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
@@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
}
IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return -1;
-}
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index f90d4662c164..8e10ba88afb3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -329,6 +329,7 @@ enum {
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
#define CSR_HW_REV_TYPE_105 (0x0000110)
#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_3160 (0x0000164)
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 83e3b731ad29..6651e78b39ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1707,6 +1707,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
+ /* drv has just been freed by the release */
+ failure = false;
free:
if (failure)
iwl_dealloc_ucode(drv);
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index d9733aaf6f6e..2f7f0f994ca3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -146,6 +146,7 @@ struct iwl_mei_filters {
* @csme_taking_ownership: true when CSME is taking ownership. Used to remember
* to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
* flow.
+ * @link_prot_state: true when we are in link protection PASSIVE
* @csa_throttle_end_wk: used when &csa_throttled is true
* @data_q_lock: protects the access to the data queues which are
* accessed without the mutex.
@@ -165,6 +166,7 @@ struct iwl_mei {
bool amt_enabled;
bool csa_throttled;
bool csme_taking_ownership;
+ bool link_prot_state;
struct delayed_work csa_throttle_end_wk;
spinlock_t data_q_lock;
@@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
- dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
- ret);
mem->ctrl = NULL;
return ret;
@@ -669,6 +669,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
+ mei->link_prot_state = status->link_prot_state;
+
/*
* Update the Rfkill state in case the host does not own the device:
* if we are in Link Protection, ask to not touch the device, else,
@@ -1663,9 +1665,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* we have already a SAP connection */
- if (iwl_mei_is_connected())
+ if (iwl_mei_is_connected()) {
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
+ ops->rfkill(priv, mei->link_prot_state);
+ }
}
ret = 0;
@@ -1784,6 +1788,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
#endif /* CONFIG_DEBUG_FS */
+#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
+
/*
* iwl_mei_probe - the probe function called by the mei bus enumeration
*
@@ -1795,6 +1801,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
static int iwl_mei_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
+ int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
struct iwl_mei *mei;
int ret;
@@ -1812,15 +1819,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
- /*
- * The CSME firmware needs to boot the internal WLAN client. Wait here
- * so that the DMA map request will succeed.
- */
- msleep(20);
+ do {
+ ret = iwl_mei_alloc_shared_mem(cldev);
+ if (!ret)
+ break;
+ /*
+ * The CSME firmware needs to boot the internal WLAN client.
+ * This can take time in certain configurations (usually
+ * upon resume and when the whole CSME firmware is shut down
+ * during suspend).
+ *
+ * Wait a bit before retrying and hope we'll succeed next time.
+ */
- ret = iwl_mei_alloc_shared_mem(cldev);
- if (ret)
+ dev_dbg(&cldev->dev,
+ "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
+ ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
+ msleep(100);
+ alloc_retry--;
+ } while (alloc_retry);
+
+ if (ret) {
+ dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+ ret);
goto free;
+ }
iwl_mei_init_shared_mem(mei);
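
Note: the fixed 20 ms pre-sleep becomes a bounded retry loop, since CSME can take much longer to boot its internal WLAN client around suspend/resume. A self-contained sketch of the same retry shape; the failure simulation is invented for illustration:

#include <stdio.h>
#include <unistd.h>

#define ALLOC_RETRY_MAX 3

/* stand-in for iwl_mei_alloc_shared_mem(): fails twice, then succeeds */
static int fails_left = 2;
static int alloc_shared_mem(void)
{
	return fails_left-- > 0 ? -11 /* -EAGAIN */ : 0;
}

static int alloc_with_retry(void)
{
	int retry = ALLOC_RETRY_MAX;
	int ret;

	do {
		ret = alloc_shared_mem();
		if (!ret)
			break;
		fprintf(stderr, "alloc failed: %d, attempt %d/%d\n",
			ret, retry, ALLOC_RETRY_MAX);
		usleep(100 * 1000);	/* like msleep(100) before retrying */
	} while (--retry);

	return ret;	/* 0 on success, the last error otherwise */
}

int main(void) { return alloc_with_retry() ? 1 : 0; }
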
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 5f966af69720..468102a95e1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
bool match;
if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
- !pskb_may_pull(skb, skb_network_offset(skb) +
- sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
+ !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
return false;
iphdrlen = ip_hdrlen(skb);
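
Note: the dropped expression, sizeof(ip_hdrlen(skb) - sizeof(*iphdr)), evaluates to the size of the subtraction's *type*, not to the remaining header length, so only a handful of bytes were ever pulled. A two-line demonstration of the pitfall:

#include <stdio.h>

int main(void)
{
	unsigned long iphdrlen = 60, base = 20;	/* IPv4 header with options */

	/* the buggy form asked for sizeof(unsigned long), not for 40 bytes */
	printf("sizeof(expr) = %zu, value = %lu\n",
	       sizeof(iphdrlen - base), iphdrlen - base);
	return 0;
}
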
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index fb4920b01dbb..63432c24eb59 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1369,189 +1369,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
return count;
}
-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- const struct iwl_fw_bcast_filter *filter;
- char *buf;
- int bufsz = 1024;
- int i, j, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
- filter = &cmd.filters[i];
-
- ADD_TEXT("Filter [%d]:\n", i);
- ADD_TEXT("\tDiscard=%d\n", filter->discard);
- ADD_TEXT("\tFrame Type: %s\n",
- filter->frame_type ? "IPv4" : "Generic");
-
- for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
- const struct iwl_fw_bcast_filter_attr *attr;
-
- attr = &filter->attrs[j];
- if (!attr->mask)
- break;
-
- ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
- j, attr->offset,
- attr->offset_type ? "IP End" :
- "Payload Start",
- be32_to_cpu(attr->mask),
- be32_to_cpu(attr->val),
- le16_to_cpu(attr->reserved1));
- }
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- int pos, next_pos;
- struct iwl_fw_bcast_filter filter = {};
- struct iwl_bcast_filter_cmd cmd;
- u32 filter_id, attr_id, mask, value;
- int err = 0;
-
- if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
- &filter.frame_type, &pos) != 3)
- return -EINVAL;
-
- if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
- filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
- return -EINVAL;
-
- for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
- attr_id++) {
- struct iwl_fw_bcast_filter_attr *attr =
- &filter.attrs[attr_id];
-
- if (pos >= count)
- break;
-
- if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
- &attr->offset, &attr->offset_type,
- &mask, &value, &next_pos) != 4)
- return -EINVAL;
-
- attr->mask = cpu_to_be32(mask);
- attr->val = cpu_to_be32(value);
- if (mask)
- filter.num_attrs++;
-
- pos += next_pos;
- }
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
- &filter, sizeof(filter));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- char *buf;
- int bufsz = 1024;
- int i, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
- const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
-
- ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
- i, mac->default_discard, mac->attached_filters);
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_bcast_filter_cmd cmd;
- struct iwl_fw_bcast_mac mac = {};
- u32 mac_id, attached_filters;
- int err = 0;
-
- if (!mvm->bcast_filters)
- return -ENOENT;
-
- if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
- &attached_filters) != 3)
- return -EINVAL;
-
- if (mac_id >= ARRAY_SIZE(cmd.macs) ||
- mac.default_discard > 1 ||
- attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
- return -EINVAL;
-
- mac.attached_filters = cpu_to_le16(attached_filters);
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
- &mac, sizeof(mac));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-#endif
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1881,11 +1698,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
-#endif
-
#ifdef CONFIG_ACPI
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
@@ -2097,21 +1909,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
- bcast_dir = debugfs_create_dir("bcast_filtering",
- mvm->debugfs_dir);
-
- debugfs_create_bool("override", 0600, bcast_dir,
- &mvm->dbgfs_bcast_filtering.override);
-
- MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
- bcast_dir, 0600);
- MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
- bcast_dir, 0600);
- }
-#endif
-
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6f4690e56a46..ae589b3b8c46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1741,7 +1741,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0)
ret = iwl_mvm_sar_geo_init(mvm);
- else if (ret < 0)
+ if (ret < 0)
goto error;
ret = iwl_mvm_sgom_init(mvm);
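
Note: with `else if`, a failure returned by iwl_mvm_sar_geo_init() (which only runs when iwl_mvm_sar_init() returned 0) could never reach the error label. A stripped-down sketch of the corrected chained-init shape, with stubbed steps:

static int sar_init(void)     { return 0; }
static int sar_geo_init(void) { return -1; }	/* pretend this step fails */

static int bring_up(void)
{
	int ret = sar_init();

	if (ret == 0)
		ret = sar_geo_init();	/* may replace ret with a new error */
	if (ret < 0)			/* now catches failures from either step */
		return ret;
	return 0;
}

int main(void) { return bring_up() == -1 ? 0 : 1; }	/* geo failure propagates */
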
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 65f4fe3ef504..4ac599f6ad22 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-/*
- * Use the reserved field to indicate magic values.
- * these values will only be used internally by the driver,
- * and won't make it to the fw (reserved will be 0).
- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
- * be the vif's ip address. in case there is not a single
- * ip address (0, or more than 1), this attribute will
- * be skipped.
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
- * the LSB bytes of the vif's mac address
- */
-enum {
- BC_FILTER_MAGIC_NONE = 0,
- BC_FILTER_MAGIC_IP,
- BC_FILTER_MAGIC_MAC,
-};
-
-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
- {
- /* arp */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
- .attrs = {
- {
- /* frame type - arp, hw type - ethernet */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header),
- .val = cpu_to_be32(0x08060001),
- .mask = cpu_to_be32(0xffffffff),
- },
- {
- /* arp dest ip */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header) + 2 +
- sizeof(struct arphdr) +
- ETH_ALEN + sizeof(__be32) +
- ETH_ALEN,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
- },
- },
- },
- {
- /* dhcp offer bcast */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
- .attrs = {
- {
- /* udp dest port - 68 (bootp client)*/
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = offsetof(struct udphdr, dest),
- .val = cpu_to_be32(0x00440000),
- .mask = cpu_to_be32(0xffff0000),
- },
- {
- /* dhcp - lsb bytes of client hw address */
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = 38,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
- },
- },
- },
- /* last filter must be empty */
- {},
-};
-#endif
-
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.max_peers = IWL_MVM_TOF_MAX_APS,
.report_ap_tsf = 1,
@@ -693,11 +620,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* assign default bcast filtering configuration */
- mvm->bcast_filters = iwl_mvm_default_bcast_filters;
-#endif
-
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -1853,162 +1775,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-struct iwl_bcast_iter_data {
- struct iwl_mvm *mvm;
- struct iwl_bcast_filter_cmd *cmd;
- u8 current_filter;
-};
-
-static void
-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
- const struct iwl_fw_bcast_filter *in_filter,
- struct iwl_fw_bcast_filter *out_filter)
-{
- struct iwl_fw_bcast_filter_attr *attr;
- int i;
-
- memcpy(out_filter, in_filter, sizeof(*out_filter));
-
- for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
- attr = &out_filter->attrs[i];
-
- if (!attr->mask)
- break;
-
- switch (attr->reserved1) {
- case cpu_to_le16(BC_FILTER_MAGIC_IP):
- if (vif->bss_conf.arp_addr_cnt != 1) {
- attr->mask = 0;
- continue;
- }
-
- attr->val = vif->bss_conf.arp_addr_list[0];
- break;
- case cpu_to_le16(BC_FILTER_MAGIC_MAC):
- attr->val = *(__be32 *)&vif->addr[2];
- break;
- default:
- break;
- }
- attr->reserved1 = 0;
- out_filter->num_attrs++;
- }
-}
-
-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_bcast_iter_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct iwl_bcast_filter_cmd *cmd = data->cmd;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_fw_bcast_mac *bcast_mac;
- int i;
-
- if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
- return;
-
- bcast_mac = &cmd->macs[mvmvif->id];
-
- /*
- * enable filtering only for associated stations, but not for P2P
- * Clients
- */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
- !vif->bss_conf.assoc)
- return;
-
- bcast_mac->default_discard = 1;
-
- /* copy all configured filters */
- for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
- /*
- * Make sure we don't exceed our filters limit.
- * if there is still a valid filter to be configured,
- * be on the safe side and just allow bcast for this mac.
- */
- if (WARN_ON_ONCE(data->current_filter >=
- ARRAY_SIZE(cmd->filters))) {
- bcast_mac->default_discard = 0;
- bcast_mac->attached_filters = 0;
- break;
- }
-
- iwl_mvm_set_bcast_filter(vif,
- &mvm->bcast_filters[i],
- &cmd->filters[data->current_filter]);
-
- /* skip current filter if it contains no attributes */
- if (!cmd->filters[data->current_filter].num_attrs)
- continue;
-
- /* attach the filter to current mac */
- bcast_mac->attached_filters |=
- cpu_to_le16(BIT(data->current_filter));
-
- data->current_filter++;
- }
-}
-
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd)
-{
- struct iwl_bcast_iter_data iter_data = {
- .mvm = mvm,
- .cmd = cmd,
- };
-
- if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
- return false;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
- cmd->max_macs = ARRAY_SIZE(cmd->macs);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* use debugfs filters/macs if override is configured */
- if (mvm->dbgfs_bcast_filtering.override) {
- memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
- sizeof(cmd->filters));
- memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
- sizeof(cmd->macs));
- return true;
- }
-#endif
-
- /* if no filters are configured, do nothing */
- if (!mvm->bcast_filters)
- return false;
-
- /* configure and attach these filters for each associated sta vif */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bcast_filter_iterator, &iter_data);
-
- return true;
-}
-
-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- struct iwl_bcast_filter_cmd cmd;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
- return 0;
-
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
-}
-#else
-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- return 0;
-}
-#endif
-
static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -2520,7 +2286,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
iwl_mvm_recalc_multicast(mvm);
- iwl_mvm_configure_bcast_filter(mvm);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -2570,11 +2335,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
- if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
- iwl_mvm_configure_bcast_filter(mvm);
- }
-
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_apply_fw_smps_request(vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1dcbb0eb63c3..d78f40730594 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -884,17 +884,6 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* broadcast filters to configure for each associated station */
- const struct iwl_fw_bcast_filter *bcast_filters;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct {
- bool override;
- struct iwl_bcast_filter_cmd cmd;
- } dbgfs_bcast_filtering;
-#endif
-#endif
-
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
struct iwl_mvm_int_sta snif_sta;
@@ -1593,8 +1582,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -2225,7 +2212,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
- mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+ mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 87630d38dc52..1f8b97995b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -469,7 +469,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(MCC_CHUB_UPDATE_CMD),
HCMD_NAME(MARKER_CMD),
HCMD_NAME(BT_PROFILE_NOTIFICATION),
- HCMD_NAME(BCAST_FILTER_CMD),
HCMD_NAME(MCAST_FILTER_CMD),
HCMD_NAME(REPLY_SF_CFG_CMD),
HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6fa2c12f7955..9213f8518f10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
struct ieee80211_tx_rate *r = &info->status.rates[0];
if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 6)
+ TX_CMD, 0) <= 6)
rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
info->status.antenna =
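
Note: the predicate was inverted — the legacy v1 rate layout needs conversion when the TX_CMD notification version is old (<= 6), not when it is new. A toy illustration of version-gating a format conversion; the conversion itself is invented:

static unsigned int rate_from_v1(unsigned int v1) { return v1 | 0x100; }	/* toy */

static unsigned int normalize_rate(unsigned int raw, int notif_ver)
{
	if (notif_ver <= 6)		/* old firmware reports the v1 layout */
		raw = rate_from_v1(raw);
	return raw;			/* callers always see the new layout */
}

int main(void) { return normalize_rate(1, 6) == 0x101 ? 0 : 1; }
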
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0febdcacbd42..94f40c4d2421 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a63386a01232..ef14584fc0a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1329,8 +1329,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 8d54f9face2f..fc5725f6daee 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2336,6 +2336,15 @@ static void hw_scan_work(struct work_struct *work)
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ if (!ieee80211_tx_prepare_skb(hwsim->hw,
+ hwsim->hw_scan_vif,
+ probe,
+ hwsim->tmp_chan->band,
+ NULL)) {
+ kfree_skb(probe);
+ continue;
+ }
+
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
@@ -3770,6 +3779,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
}
txi->flags |= IEEE80211_TX_STAT_ACK;
}
+
+ if (hwsim_flags & HWSIM_TX_CTL_NO_ACK)
+ txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
out:
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d24b7a7993aa..990360d75cb6 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
+ xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
}
kfree(str);
}
@@ -824,15 +824,11 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
- NULL, hotplug_status_changed,
- "%s/%s", dev->nodename,
- "hotplug-status");
- if (err)
- goto err;
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+ hotplug_status_changed,
+ "%s/%s", dev->nodename, "hotplug-status");
+ if (!err)
be->have_hotplug_status_watch = 1;
- }
netif_tx_wake_all_queues(be->vif->dev);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 79005ea1a33e..fd4720d37cc0 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1723,7 +1723,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0;
}
-static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
struct nvme_ctrl *ctrl = ns->ctrl;
@@ -1739,7 +1739,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return 0;
+ return;
+
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
* The NVMe over Fabrics specification only supports metadata as
@@ -1747,7 +1748,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* remap the separate metadata buffer from the block layer.
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
- return -EINVAL;
+ return;
ns->features |= NVME_NS_EXT_LBAS;
@@ -1774,8 +1775,6 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
else
ns->features |= NVME_NS_METADATA_SUPPORTED;
}
-
- return 0;
}
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@@ -1916,9 +1915,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
- ret = nvme_configure_metadata(ns, id);
- if (ret)
- goto out_unfreeze;
+ nvme_configure_metadata(ns, id);
nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(ns->disk, ns, id);
@@ -1934,7 +1931,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- goto out;
+ return ret;
}
if (nvme_ns_head_multipath(ns->head)) {
@@ -1949,16 +1946,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
return 0;
out_unfreeze:
- blk_mq_unfreeze_queue(ns->disk->queue);
-out:
/*
* If probing fails due to an unsupported feature, hide the block device,
* but still allow other access.
*/
if (ret == -ENODEV) {
ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
+ blk_mq_unfreeze_queue(ns->disk->queue);
return ret;
}
@@ -4574,7 +4571,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
- blk_set_queue_dying(ns->queue);
+ blk_mark_disk_dead(ns->disk);
nvme_start_ns_queue(ns);
set_capacity_and_notify(ns->disk, 0);
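
Note: the reworked exit path makes two guarantees — the queue is always unfrozen exactly once, and a namespace whose probe fails with -ENODEV is hidden (and marked ready) rather than failing the scan. A compact sketch of that error policy; the flag names here are illustrative stand-ins:

#include <stdio.h>

#define FLAG_HIDDEN	0x1	/* stands in for GENHD_FL_HIDDEN */
#define FLAG_READY	0x2	/* stands in for NVME_NS_READY */

static void unfreeze(void) { puts("unfreeze"); }

static int finish_update(int ret, unsigned int *flags)
{
	if (ret == -19 /* -ENODEV */) {
		*flags |= FLAG_HIDDEN | FLAG_READY;	/* usable, just hidden */
		ret = 0;
	}
	unfreeze();		/* single exit: runs on success and failure */
	return ret;
}

int main(void)
{
	unsigned int flags = 0;

	return finish_update(-19, &flags);	/* returns 0, disk hidden */
}
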
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f8bf6606eb2f..ff775235534c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- blk_set_queue_dying(head->disk->queue);
+ blk_mark_disk_dead(head->disk);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 891a36d02e7c..65e00c64a588 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -44,6 +44,8 @@ struct nvme_tcp_request {
u32 data_len;
u32 pdu_len;
u32 pdu_sent;
+ u32 h2cdata_left;
+ u32 h2cdata_offset;
u16 ttag;
__le16 status;
struct list_head entry;
@@ -95,6 +97,7 @@ struct nvme_tcp_queue {
struct nvme_tcp_request *request;
int queue_size;
+ u32 maxh2cdata;
size_t cmnd_capsule_len;
struct nvme_tcp_ctrl *ctrl;
unsigned long flags;
@@ -572,23 +575,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
return ret;
}
-static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
- struct nvme_tcp_r2t_pdu *pdu)
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_data_pdu *data = req->pdu;
struct nvme_tcp_queue *queue = req->queue;
struct request *rq = blk_mq_rq_from_pdu(req);
+ u32 h2cdata_sent = req->pdu_len;
u8 hdgst = nvme_tcp_hdgst_len(queue);
u8 ddgst = nvme_tcp_ddgst_len(queue);
req->state = NVME_TCP_SEND_H2C_PDU;
req->offset = 0;
- req->pdu_len = le32_to_cpu(pdu->r2t_length);
+ req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
req->pdu_sent = 0;
+ req->h2cdata_left -= req->pdu_len;
+ req->h2cdata_offset += h2cdata_sent;
memset(data, 0, sizeof(*data));
data->hdr.type = nvme_tcp_h2c_data;
- data->hdr.flags = NVME_TCP_F_DATA_LAST;
+ if (!req->h2cdata_left)
+ data->hdr.flags = NVME_TCP_F_DATA_LAST;
if (queue->hdr_digest)
data->hdr.flags |= NVME_TCP_F_HDGST;
if (queue->data_digest)
@@ -597,9 +603,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
data->hdr.pdo = data->hdr.hlen + hdgst;
data->hdr.plen =
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
- data->ttag = pdu->ttag;
+ data->ttag = req->ttag;
data->command_id = nvme_cid(rq);
- data->data_offset = pdu->r2t_offset;
+ data->data_offset = cpu_to_le32(req->h2cdata_offset);
data->data_length = cpu_to_le32(req->pdu_len);
}
@@ -609,6 +615,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
struct nvme_tcp_request *req;
struct request *rq;
u32 r2t_length = le32_to_cpu(pdu->r2t_length);
+ u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
@@ -633,14 +640,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return -EPROTO;
}
- if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+ if (unlikely(r2t_offset < req->data_sent)) {
dev_err(queue->ctrl->ctrl.device,
"req %d unexpected r2t offset %u (expected %zu)\n",
- rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+ rq->tag, r2t_offset, req->data_sent);
return -EPROTO;
}
- nvme_tcp_setup_h2c_data_pdu(req, pdu);
+ req->pdu_len = 0;
+ req->h2cdata_left = r2t_length;
+ req->h2cdata_offset = r2t_offset;
+ req->ttag = pdu->ttag;
+
+ nvme_tcp_setup_h2c_data_pdu(req);
nvme_tcp_queue_request(req, false, true);
return 0;
@@ -928,6 +940,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
int req_data_len = req->data_len;
+ u32 h2cdata_left = req->h2cdata_left;
while (true) {
struct page *page = nvme_tcp_req_cur_page(req);
@@ -972,7 +985,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DDGST;
req->offset = 0;
} else {
- nvme_tcp_done_send_req(queue);
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
}
return 1;
}
@@ -1030,9 +1046,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
- ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
- offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+ if (!req->h2cdata_left)
+ ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+ else
+ ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE);
if (unlikely(ret <= 0))
return ret;
@@ -1052,6 +1073,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
size_t offset = req->offset;
+ u32 h2cdata_left = req->h2cdata_left;
int ret;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
@@ -1069,7 +1091,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
return ret;
if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
- nvme_tcp_done_send_req(queue);
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
return 1;
}
@@ -1261,6 +1286,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
struct msghdr msg = {};
struct kvec iov;
bool ctrl_hdgst, ctrl_ddgst;
+ u32 maxh2cdata;
int ret;
icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
@@ -1344,6 +1370,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
+ maxh2cdata = le32_to_cpu(icresp->maxdata);
+ if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
+ pr_err("queue %d: invalid maxh2cdata returned %u\n",
+ nvme_tcp_queue_id(queue), maxh2cdata);
+ goto free_icresp;
+ }
+ queue->maxh2cdata = maxh2cdata;
+
ret = 0;
free_icresp:
kfree(icresp);
@@ -2329,6 +2363,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_sent = 0;
req->pdu_len = 0;
req->pdu_sent = 0;
+ req->h2cdata_left = 0;
req->data_len = blk_rq_nr_phys_segments(rq) ?
blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
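
Note: the core of this change is that instead of answering an R2T with one H2CData PDU of the full r2t_length, the request now tracks h2cdata_left/h2cdata_offset and emits a chain of PDUs, each capped at the MAXH2CDATA the controller advertised in the ICResp, with F_DATA_LAST set only on the final one. A self-contained model of that chunking:

#include <stdio.h>

/* answer an R2T of len bytes at off with PDUs of at most maxh2cdata each */
static void send_h2c_chain(unsigned int off, unsigned int len,
			   unsigned int maxh2cdata)
{
	while (len) {
		unsigned int pdu = len < maxh2cdata ? len : maxh2cdata;

		len -= pdu;
		printf("H2CData offset=%u length=%u%s\n", off, pdu,
		       len ? "" : " F_DATA_LAST");
		off += pdu;
	}
}

int main(void)
{
	send_h2c_chain(0, 70000, 32768);	/* 32768 + 32768 + 4464 (LAST) */
	return 0;
}
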
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 23a38dcf0fc4..9fd1602b539d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (config->wp_gpio)
nvmem->wp_gpio = config->wp_gpio;
- else
+ else if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ad85ff6474ff..ec315b060cd5 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -648,8 +648,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
}
fdt_scan_reserved_mem();
- fdt_init_reserved_mem();
fdt_reserve_elfcorehdr();
+ fdt_init_reserved_mem();
}
/**
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 70992103c07d..2c2fb161b572 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -513,24 +513,24 @@ static void __init of_unittest_parse_phandle_with_args(void)
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells");
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -670,12 +670,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
"phandle", 1, &args);
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -1257,12 +1257,12 @@ static void __init of_unittest_platform_populate(void)
unittest(pdev, "device 2 creation failed\n");
EXPECT_BEGIN(KERN_INFO,
- "platform testcase-data:testcase-device2: IRQ index 0 not found");
+ "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
irq = platform_get_irq(pdev, 0);
EXPECT_END(KERN_INFO,
- "platform testcase-data:testcase-device2: IRQ index 0 not found");
+ "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
unittest(irq < 0 && irq != -EPROBE_DEFER,
"device parsing error failed - %d\n", irq);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 059566f54429..9be007c9420f 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usg_calls++;
#endif
- while(sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ccio_unmap_page(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e60690d38d67..374b9199878d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
- while (sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
#endif
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
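
Note: both parisc IOMMU fixes reorder the loop condition so the remaining-entries count is tested before the current entry is read; the old `while (sg_dma_len(sglist) && nents--)` could inspect one element past the mapped list and left nents decremented even when the length test ended the loop. A miniature version of the corrected walk:

#include <stdio.h>

int main(void)
{
	unsigned int dma_len[] = { 4096, 8192, 0, 0 };	/* mapped lengths */
	unsigned int *sg = dma_len;
	int nents = 2;					/* entries to unmap */

	while (nents && *sg) {		/* count first, then dereference */
		printf("unmap %u bytes\n", *sg);
		++sg;
		nents--;
	}
	return 0;
}
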
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 20ea2ee330b8..ae0bc2fee4ca 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2155,8 +2155,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
if (!hv_dev)
continue;
- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+ hv_dev->desc.virtual_numa_node < num_possible_nodes())
+ /*
+ * The kernel may boot with some NUMA nodes offline
+ * (e.g. in a KDUMP kernel) or with NUMA disabled via
+ * "numa=off". In those cases, adjust the host provided
+ * NUMA node to a valid NUMA node used by the kernel.
+ */
+ set_dev_node(&dev->dev,
+ numa_map_to_online_node(
+ hv_dev->desc.virtual_numa_node));
put_pcichild(hv_dev);
}
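
Note: the host-supplied virtual_numa_node is now treated as untrusted input — bounds-checked against num_possible_nodes() and then mapped onto an online node, so a kdump boot or "numa=off" no longer pins a device to a nonexistent node. A sketch of the clamp, with a trivial stand-in for numa_map_to_online_node():

/* map a possibly-offline node id onto a node the kernel actually uses */
static int map_to_online_node(int node, const int *online, int n_online)
{
	int i;

	for (i = 0; i < n_online; i++)
		if (online[i] == node)
			return node;
	return online[0];	/* the real helper picks the nearest online node */
}

static int pick_dev_node(int host_node, int nr_possible,
			 const int *online, int n_online)
{
	if (host_node < 0 || host_node >= nr_possible)
		return -1;	/* ignore out-of-range input entirely */
	return map_to_online_node(host_node, online, n_online);
}

int main(void)
{
	const int online[] = { 0 };

	return pick_dev_node(3, 4, online, 1);	/* 0: clamped to online set */
}
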
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 71258ea3d35f..f8e82c5e2d87 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1329,7 +1329,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
* indirectly via kernel emulated PCI bridge driver.
*/
mvebu_pcie_setup_hw(port);
- mvebu_pcie_set_local_dev_nr(port, 0);
+ mvebu_pcie_set_local_dev_nr(port, 1);
+ mvebu_pcie_set_local_bus_nr(port, 0);
}
pcie->nports = i;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index cc166c683638..eb05cceab964 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -99,11 +99,13 @@ struct vmd_irq {
* @srcu: SRCU struct for local synchronization.
* @count: number of child IRQs assigned to this vector; used to track
* sharing.
+ * @virq: The underlying VMD Linux interrupt number
*/
struct vmd_irq_list {
struct list_head irq_list;
struct srcu_struct srcu;
unsigned int count;
+ unsigned int virq;
};
struct vmd_dev {
@@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
struct msi_desc *desc = arg->desc;
struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
- unsigned int index, vector;
if (!vmdirq)
return -ENOMEM;
@@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
INIT_LIST_HEAD(&vmdirq->node);
vmdirq->irq = vmd_next_irq(vmd, desc);
vmdirq->virq = virq;
- index = index_from_irqs(vmd, vmdirq->irq);
- vector = pci_irq_vector(vmd->dev, index);
- irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+ irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
handle_untracked_irq, vmd, NULL);
return 0;
}
@@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
return err;
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
- err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+ vmd->irqs[i].virq = pci_irq_vector(dev, i);
+ err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
vmd_irq, IRQF_NO_THREAD,
vmd->name, &vmd->irqs[i]);
if (err)
@@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev)
int i;
for (i = 0; i < vmd->msix_count; i++)
- devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+ devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);
return 0;
}
@@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev)
int err, i;
for (i = 0; i < vmd->msix_count; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ err = devm_request_irq(dev, vmd->irqs[i].virq,
vmd_irq, IRQF_NO_THREAD,
vmd->name, &vmd->irqs[i]);
if (err)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d2dd6a6cda60..65f7f6b0576c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5344,11 +5344,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
*/
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
- (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
- (pdev->device == 0x7341 && pdev->revision != 0x00))
- return;
-
if (pdev->device == 0x15d8) {
if (pdev->revision == 0xcf &&
pdev->subsystem_vendor == 0xea50 &&
@@ -5370,10 +5365,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
/* AMD Iceland dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
/* AMD Navi10 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
/* AMD Raven platform iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 0bcd19597e4a..3ddaeffc0415 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -749,7 +749,6 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
{ "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
{ "INT34C6", (kernel_ulong_t)&tglh_soc_data },
{ "INTC1055", (kernel_ulong_t)&tgllp_soc_data },
- { "INTC1057", (kernel_ulong_t)&tgllp_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 49e32684dbb2..ecab6bf63dc6 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua)
{
int i;
- for (i = K210_PC_DRIVE_MAX; i; i--) {
+ for (i = K210_PC_DRIVE_MAX; i >= 0; i--) {
if (k210_pinconf_drive_strength[i] <= max_strength_ua)
return i;
}
@@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_UP:
if (!arg)
return -EINVAL;
- val |= K210_PC_PD;
+ val |= K210_PC_PU;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg *= 1000;
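
Note: two fixes in one hunk — the pull-up path was setting the pull-down bit, and the descending drive-strength search stopped at index 1, so the weakest setting (index 0) was unreachable. A runnable version of the corrected search; the table values are made up:

#include <stdio.h>

static const unsigned int drive_ua[] = { 11200, 16800, 22400, 28000, 33600 };
#define DRIVE_MAX ((int)(sizeof(drive_ua) / sizeof(drive_ua[0])) - 1)

/* strongest setting not exceeding the budget; `for (i = MAX; i; i--)`
 * could never return index 0 */
static int pick_drive(unsigned int max_ua)
{
	int i;

	for (i = DRIVE_MAX; i >= 0; i--)
		if (drive_ua[i] <= max_ua)
			return i;
	return -1;	/* -EINVAL in the driver */
}

int main(void)
{
	printf("%d\n", pick_drive(12000));	/* 0, reachable only after the fix */
	return 0;
}
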
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 0b912152a405..266da41a6162 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -1164,6 +1164,7 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger)
}
static struct irq_chip starfive_irq_chip = {
+ .name = "StarFive GPIO",
.irq_ack = starfive_irq_ack,
.irq_mask = starfive_irq_mask,
.irq_mask_ack = starfive_irq_mask_ack,
@@ -1308,7 +1309,6 @@ static int starfive_probe(struct platform_device *pdev)
sfp->gc.ngpio = NR_GPIOS;
starfive_irq_chip.parent_device = dev;
- starfive_irq_chip.name = sfp->gc.label;
sfp->gc.irq.chip = &starfive_irq_chip;
sfp->gc.irq.parent_handler = starfive_gpio_irq_handler;
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index abac3eec565e..444ec81ba02d 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
}
bix->last_full_charg_capacity = ret;
- /* get serial number */
+ /*
+	 * Get the serial number. On some devices (with an unofficial
+	 * replacement battery?) reading any of the serial number range
+	 * addresses gets nacked; in that case just leave the serial number
+	 * empty.
+ */
ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
sizeof(buf), buf);
- if (ret != sizeof(buf)) {
+ if (ret == -EREMOTEIO) {
+ /* no serial number available */
+ } else if (ret != sizeof(buf)) {
dev_err(&client->dev, "Error reading serial no: %d\n", ret);
return ret;
+ } else {
+ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
}
- snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
/* get cycle count */
ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
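
Note: instead of treating every short read as fatal, the code now singles out -EREMOTEIO (the NACK case) and leaves the serial number empty. A sketch of that three-way error policy; the helper is a stand-in for the SMBus block read:

#include <stdio.h>
#include <string.h>

#define EREMOTEIO 121

/* stand-in for i2c_smbus_read_i2c_block_data(): simulate a NACK */
static int read_serial_block(char *buf, int len)
{
	(void)buf; (void)len;
	return -EREMOTEIO;
}

static int get_serial(char *serial, size_t n)
{
	char buf[10];
	int ret = read_serial_block(buf, sizeof(buf));

	if (ret == -EREMOTEIO) {
		serial[0] = '\0';	/* no serial number available */
	} else if (ret != (int)sizeof(buf)) {
		return ret;		/* a real transfer error */
	} else {
		snprintf(serial, n, "%.9s", buf);
	}
	return 0;
}

int main(void)
{
	char serial[16];

	return get_serial(serial, sizeof(serial));
}
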
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 4c72ba68b315..b1103f85a85a 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
@@ -85,6 +86,9 @@
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
+/* QoS request that lets CPUs enter idle states, but not the deepest ones */
+#define AMD_PMC_MAX_IDLE_STATE_LATENCY 3
+
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
@@ -131,6 +135,7 @@ struct amd_pmc_dev {
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
+ struct pm_qos_request amd_pmc_pm_qos_req;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
@@ -521,6 +526,14 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
rc = rtc_alarm_irq_enable(rtc_device, 0);
dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
+ /*
+	 * Prevent CPUs from entering deep idle states while OS_HINT is being
+	 * sent; the hint is only safe to send while at least one CPU is not
+	 * in a deep idle state.
+ */
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY);
+ wake_up_all_idle_cpus();
+
return rc;
}
@@ -538,24 +551,31 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
/* Activate CZN specific RTC functionality */
if (pdev->cpu_id == AMD_CPU_ID_CZN) {
rc = amd_pmc_verify_czn_rtc(pdev, &arg);
- if (rc < 0)
- return rc;
+ if (rc)
+ goto fail;
}
/* Dump the IdleMask before we send hint to SMU */
amd_pmc_idlemask_read(pdev, dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
- if (rc)
+ if (rc) {
dev_err(pdev->dev, "suspend failed\n");
+ goto fail;
+ }
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF);
- if (rc) {
+ if (rc) {
dev_err(pdev->dev, "error writing to STB\n");
- return rc;
+ goto fail;
}
+ return 0;
+fail:
+ if (pdev->cpu_id == AMD_CPU_ID_CZN)
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
return rc;
}
@@ -579,12 +599,15 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
/* Write data incremented by 1 to distinguish in stb_read */
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
- if (rc) {
+ if (rc)
dev_err(pdev->dev, "error writing to STB\n");
- return rc;
- }
- return 0;
+	/* Restore the QoS request to defaults if it was set */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN)
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+
+ return rc;
}
static const struct dev_pm_ops amd_pmc_pm_ops = {
@@ -722,6 +745,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
+ cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
err_pci_dev_put:
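
Note: the CPU-latency QoS request follows a strict lifecycle — added once at probe with the default (no-constraint) value, tightened just before OS_HINT is sent, and restored to the default on resume and on every suspend error path. A small model of that pattern; values and names are illustrative:

enum { QOS_DEFAULT = -1, QOS_SHALLOW_IDLE_ONLY = 3 };	/* latency in us */

static int qos = QOS_DEFAULT;		/* stands in for a pm_qos_request */

static void qos_update(int v) { qos = v; }

static int suspend_path(int os_hint_fails)
{
	qos_update(QOS_SHALLOW_IDLE_ONLY);	/* keep CPUs out of deep idle */
	if (os_hint_fails)
		goto fail;
	return 0;				/* resume_path() will restore */
fail:
	qos_update(QOS_DEFAULT);		/* undo on every error exit */
	return -1;
}

static void resume_path(void) { qos_update(QOS_DEFAULT); }

int main(void)
{
	if (suspend_path(0) == 0)
		resume_path();
	return qos == QOS_DEFAULT ? 0 : 1;
}
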
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a3b83b22a3b1..2104a2621e50 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2223,7 +2223,7 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- if (err == -ENODEV)
+ if (err == -ENODEV || err == -ENODATA)
return 0;
return err;
}
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index f93d437fd192..525f09a3b5ff 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -100,7 +100,8 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = {
.dev_id = "i2c-INT347A:00",
.table = {
GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW)
+ GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW),
+ { }
}
};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e1626cda6b67..7d0947b827e2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8703,6 +8703,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4th gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
+ TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
};
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index b274942dc46a..01ad84fd147c 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1523,6 +1523,9 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
BQ256XX_WDT_BIT_SHIFT);
ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret == -ENOMEM)
+ return ret;
+
if (ret) {
dev_warn(bq->dev, "battery info missing, default values will be applied\n");
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 0c87ad0dbf71..728e2a6cc9c3 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -689,7 +689,7 @@ static int cw_bat_probe(struct i2c_client *client)
if (ret) {
/* Allocate an empty battery */
cw_bat->battery = devm_kzalloc(&client->dev,
- sizeof(cw_bat->battery),
+ sizeof(*cw_bat->battery),
GFP_KERNEL);
if (!cw_bat->battery)
return -ENOMEM;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 86aa4141efa9..d2553970a67b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -6014,9 +6014,8 @@ core_initcall(regulator_init);
static int regulator_late_cleanup(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const struct regulator_ops *ops = rdev->desc->ops;
struct regulation_constraints *c = rdev->constraints;
- int enabled, ret;
+ int ret;
if (c && c->always_on)
return 0;
@@ -6029,14 +6028,8 @@ static int regulator_late_cleanup(struct device *dev, void *data)
if (rdev->use_count)
goto unlock;
- /* If we can't read the status assume it's always on. */
- if (ops->is_enabled)
- enabled = ops->is_enabled(rdev);
- else
- enabled = 1;
-
- /* But if reading the status failed, assume that it's off. */
- if (enabled <= 0)
+ /* If reading the status failed, assume that it's off. */
+ if (_regulator_is_enabled(rdev) <= 0)
goto unlock;
if (have_full_constraints()) {
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 6f21223a488e..eb9df485bd8a 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -87,16 +87,16 @@ static struct da9121_range da9121_3A_1phase_current = {
};
static struct da9121_range da914x_40A_4phase_current = {
- .val_min = 14000000,
- .val_max = 80000000,
- .val_stp = 2000000,
+ .val_min = 26000000,
+ .val_max = 78000000,
+ .val_stp = 4000000,
.reg_min = 1,
.reg_max = 14,
};
static struct da9121_range da914x_20A_2phase_current = {
- .val_min = 7000000,
- .val_max = 40000000,
+ .val_min = 13000000,
+ .val_max = 39000000,
.val_stp = 2000000,
.reg_min = 1,
.reg_max = 14,
@@ -561,7 +561,7 @@ static const struct regulator_desc da9217_reg = {
};
#define DA914X_MIN_MV 500
-#define DA914X_MAX_MV 1000
+#define DA914X_MAX_MV 1300
#define DA914X_STEP_MV 10
#define DA914X_MIN_SEL (DA914X_MIN_MV / DA914X_STEP_MV)
#define DA914X_N_VOLTAGES (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \
@@ -585,10 +585,6 @@ static const struct regulator_desc da9141_reg = {
.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
.enable_reg = DA9121_REG_BUCK_BUCK1_0,
.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
- /* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */
- .ramp_delay = 20000,
- /* tBUCK_EN */
- .enable_time = 20,
};
static const struct regulator_desc da9142_reg = {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a1e0a106c132..98cabe09c040 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -592,6 +592,7 @@ struct lpfc_vport {
#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index bac78fbce8d6..fa8415259cb8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
+ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index db5ccae1b63d..f936833c9909 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi:
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+ FC_PT2PT_NO_NVME);
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
@@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Added for Vendor specific support
* Just keep retrying for these Rsn / Exp codes
*/
+ if ((vport->fc_flag & FC_PT2PT) &&
+ cmd == ELS_CMD_NVMEPRLI) {
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ case LSRJT_INVALID_CMD:
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_CMD_UNSUPPORTED:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0168 NVME PRLI LS_RJT "
+ "reason %x port doesn't "
+ "support NVME, disabling NVME\n",
+ stat.un.b.lsRjtRsnCode);
+ retry = 0;
+ vport->fc_flag |= FC_PT2PT_NO_NVME;
+ goto out_retry;
+ }
+ }
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
/* The driver has a VALID PLOGI but the rport has
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 7d717a4ac14d..fdf5e777bf11 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
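
Note: the new FC_PT2PT_NO_NVME flag makes a peer's rejection sticky — once a point-to-point port LS_RJTs the NVMe PRLI, later logins skip NVMe entirely until a LIP clears the flag. A boiled-down model of the flag's lifetime:

#define FC_PT2PT		0x1
#define FC_PT2PT_NO_NVME	0x2

static unsigned int fc_flag = FC_PT2PT;

static void on_nvme_prli_ls_rjt(void)	/* the retry path in lpfc_els_retry() */
{
	if (fc_flag & FC_PT2PT)
		fc_flag |= FC_PT2PT_NO_NVME;	/* stop retrying NVMe PRLI */
}

static int want_nvme_prli(int cfg_enable_nvme)
{
	return cfg_enable_nvme && !(fc_flag & FC_PT2PT_NO_NVME);
}

static void on_lip(void)		/* lpfc_issue_lip() re-enables NVMe */
{
	fc_flag &= ~FC_PT2PT_NO_NVME;
}

int main(void)
{
	on_nvme_prli_ls_rjt();
	if (want_nvme_prli(1))
		return 1;		/* must be skipped now */
	on_lip();
	return want_nvme_prli(1) ? 0 : 1;
}
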
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 5916ed7662d5..4eb89aa4a39d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->list_tmf_work = NULL;
}
}
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
- if (!found) {
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ if (!found)
goto check_cleanup_reqs;
- }
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->state = CLEANUP_RECV;
unlock:
spin_unlock_bh(&conn->session->back_lock);
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
wake_up_interruptible(&qedi_conn->wait_queue);
return;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 50b12d60dc1b..9349557b8a01 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2681,7 +2681,7 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
break;
case HCTX_TYPE_READ:
map->nr_queues = 0;
- break;
+ continue;
default:
WARN_ON_ONCE(true);
}
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 670cc82d17dc..ca75b14931ec 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -411,17 +411,12 @@ out:
return ret;
}
-static int init_clks(struct platform_device *pdev, struct clk **clk)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
{
int i;
- for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
- if (IS_ERR(clk[i]))
- return PTR_ERR(clk[i]);
- }
-
- return 0;
}
static struct scp *init_scp(struct platform_device *pdev,
@@ -431,7 +426,7 @@ static struct scp *init_scp(struct platform_device *pdev,
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j, ret;
+ int i, j;
struct scp *scp;
struct clk *clk[CLK_MAX];
@@ -486,9 +481,7 @@ static struct scp *init_scp(struct platform_device *pdev,
pd_data->num_domains = num;
- ret = init_clks(pdev, clk);
- if (ret)
- return ERR_PTR(ret);
+ init_clks(pdev, clk);
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
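The scpsys hunk above works because devm_clk_get() may legitimately return an error pointer for clocks a given SoC simply does not have; the driver now defers the IS_ERR() check to the point where a domain actually consumes the clock. A minimal sketch of that pattern, using illustrative names (fetch_clks, enable_clk) rather than the driver's real symbols:

	#include <linux/clk.h>
	#include <linux/err.h>

	static void fetch_clks(struct device *dev, struct clk **clk,
			       const char * const *names, int n)
	{
		int i;

		/* Never fail here: missing clocks stay as ERR_PTR(). */
		for (i = 0; i < n; i++)
			clk[i] = devm_clk_get(dev, names[i]);
	}

	static int enable_clk(struct clk **clk, int idx)
	{
		/* Validate only at the point of use. */
		if (IS_ERR(clk[idx]))
			return PTR_ERR(clk[idx]);
		return clk_prepare_enable(clk[idx]);
	}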
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 553b6b9d0222..c6a1bb09be05 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -585,6 +585,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ if (atomic_read(&rs->state) & RXDMA)
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ if (atomic_read(&rs->state) & TXDMA)
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ atomic_set(&rs->state, 0);
+ spi_enable_chip(rs, false);
rs->slave_abort = true;
spi_finalize_current_transfer(ctlr);
@@ -654,7 +660,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct resource *mem;
struct device_node *np = pdev->dev.of_node;
- u32 rsd_nsecs;
+ u32 rsd_nsecs, num_cs;
bool slave_mode;
slave_mode = of_property_read_bool(np, "spi-slave");
@@ -764,8 +770,9 @@ static int rockchip_spi_probe(struct platform_device *pdev)
* rk spi0 has two native cs, spi1..5 one cs only
* if num-cs is missing in the dts, default to 1
*/
- if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect))
- ctlr->num_chipselect = 1;
+ if (of_property_read_u32(np, "num-cs", &num_cs))
+ num_cs = 1;
+ ctlr->num_chipselect = num_cs;
ctlr->use_gpio_descriptors = true;
}
ctlr->dev.of_node = pdev->dev.of_node;
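The num-cs fix above hinges on DT cell width: "num-cs" is stored as a 32-bit cell, so reading it with of_property_read_u16() fails on well-formed device trees, and the value must land in a u32 before being assigned to the narrower num_chipselect field. A hedged sketch of the corrected read:

	#include <linux/of.h>
	#include <linux/spi/spi.h>

	static void read_num_cs(struct device_node *np,
				struct spi_controller *ctlr)
	{
		u32 num_cs;

		/* Property is a 32-bit cell; default to one native CS. */
		if (of_property_read_u32(np, "num-cs", &num_cs))
			num_cs = 1;
		ctlr->num_chipselect = num_cs;
	}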
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index cfa222c9bd5e..78f31b61a2aa 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
if (op->dummy.nbytes) {
tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
memset(tmpbuf, 0xff, op->dummy.nbytes);
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = tmpbuf;
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index abe9395a0aef..861a154144e6 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par)
{
int rc;
+ par->fbtftops.reset(par);
+
rc = init_tearing_effect_line(par);
if (rc)
return rc;
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index f2684d2d6851..4a35347b3020 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -654,6 +654,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbops->fb_blank = fbtft_fb_blank;
fbdefio->delay = HZ / fps;
+ fbdefio->sort_pagelist = true;
fbdefio->deferred_io = fbtft_deferred_io;
fb_deferred_io_init(info);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 72acb1f61849..4f478812cb51 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -404,6 +404,10 @@ static void int3400_notify(acpi_handle handle,
thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
thermal_prop[4] = NULL;
kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
+ kfree(thermal_prop[0]);
+ kfree(thermal_prop[1]);
+ kfree(thermal_prop[2]);
+ kfree(thermal_prop[3]);
}
static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
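The int3400 hunk closes a leak that follows directly from the uevent API contract: kobject_uevent_env() copies the environment strings, so every kasprintf() buffer remains owned by the caller and must be freed afterwards. A minimal sketch with hypothetical names:

	#include <linux/kobject.h>
	#include <linux/slab.h>

	static void demo_emit_event(struct kobject *kobj, int event)
	{
		char *envp[3];
		int i;

		envp[0] = kasprintf(GFP_KERNEL, "EVENT=%d", event);
		envp[1] = kasprintf(GFP_KERNEL, "SOURCE=demo");
		envp[2] = NULL;
		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
		/* The env strings were copied; free our buffers. */
		for (i = 0; i < 2; i++)
			kfree(envp[i]);
	}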
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0b1808e3a912..fa92f727fdf8 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -439,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
modembits |= MDM_RTR;
if (dlci->modem_tx & TIOCM_RI)
modembits |= MDM_IC;
- if (dlci->modem_tx & TIOCM_CD)
+ if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
modembits |= MDM_DV;
return modembits;
}
@@ -448,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
- * @cr: C/R bit from the frame
+ * @cr: C/R bit seen as initiator
* @control: control including PF bit
* @data: following data bytes
* @dlen: length of data
@@ -548,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
* gsm_send - send a control frame
* @gsm: our GSM mux
* @addr: address for control frame
- * @cr: command/response bit
+ * @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
* Format up and transmit a control frame. These do not go via the
@@ -563,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
int len;
u8 cbuf[10];
u8 ibuf[3];
+ int ocr;
+
+ /* toggle C/R coding if not initiator */
+ ocr = cr ^ (gsm->initiator ? 0 : 1);
switch (gsm->encoding) {
case 0:
cbuf[0] = GSM0_SOF;
- cbuf[1] = (addr << 2) | (cr << 1) | EA;
+ cbuf[1] = (addr << 2) | (ocr << 1) | EA;
cbuf[2] = control;
cbuf[3] = EA; /* Length of data = 0 */
cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
@@ -577,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
case 1:
case 2:
/* Control frame + packing (but not frame stuffing) in mode 1 */
- ibuf[0] = (addr << 2) | (cr << 1) | EA;
+ ibuf[0] = (addr << 2) | (ocr << 1) | EA;
ibuf[1] = control;
ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
/* Stuffing may double the size worst case */
@@ -611,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
{
- gsm_send(gsm, addr, 1, control);
+ gsm_send(gsm, addr, 0, control);
}
/**
@@ -1017,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
* @tty: virtual tty bound to the DLCI
* @dlci: DLCI to affect
* @modem: modem bits (full EA)
- * @clen: command length
+ * @slen: number of signal octets
*
* Used when a modem control message, or line state sent inline in
* adaptation layer 2, is processed. Sort out the local modem state and throttles
*/
static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
- u32 modem, int clen)
+ u32 modem, int slen)
{
int mlines = 0;
u8 brk = 0;
int fc;
- /* The modem status command can either contain one octet (v.24 signals)
- or two octets (v.24 signals + break signals). The length field will
- either be 2 or 3 respectively. This is specified in section
- 5.4.6.3.7 of the 27.010 mux spec. */
+ /* The modem status command can either contain one octet (V.24 signals)
+ * or two octets (V.24 signals + break signals). This is specified in
+ * section 5.4.6.3.7 of the 07.10 mux spec.
+ */
- if (clen == 2)
+ if (slen == 1)
modem = modem & 0x7f;
else {
brk = modem & 0x7f;
@@ -1092,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
+ int slen;
const u8 *dp = data;
struct tty_struct *tty;
@@ -1111,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
return;
dlci = gsm->dlci[addr];
+ slen = len;
while (gsm_read_ea(&modem, *dp++) == 0) {
len--;
if (len == 0)
@@ -1127,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
modem |= (brk & 0x7f);
}
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem, clen);
+ gsm_process_modem(tty, dlci, modem, slen);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
@@ -1451,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
kfifo_reset(&dlci->fifo);
+ /* Ensure that gsmtty_open() can return. */
+ tty_port_set_initialized(&dlci->port, 0);
+ wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
/* Unregister gsmtty driver, report gsmtty dev remove uevent for user */
@@ -1514,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t)
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
- gsm_dlci_close(dlci);
+ gsm_dlci_begin_close(dlci); /* prevent half open link */
}
break;
@@ -1593,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
struct tty_struct *tty;
unsigned int modem = 0;
int len = clen;
+ int slen = 0;
if (debug & 16)
pr_debug("%d bytes for tty\n", len);
@@ -1605,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
case 2: /* Asynchronous serial with line state in each frame */
while (gsm_read_ea(&modem, *data++) == 0) {
len--;
+ slen++;
if (len == 0)
return;
}
+ slen++;
tty = tty_port_tty_get(port);
if (tty) {
- gsm_process_modem(tty, dlci, modem, clen);
+ gsm_process_modem(tty, dlci, modem, slen);
tty_kref_put(tty);
}
fallthrough;
@@ -1748,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
- tty_hangup(tty);
+ /* We cannot use tty_hangup() because in tty_kref_put() the tty
+ * driver assumes that the hangup queue is free and reuses it to
+ * queue release_one_tty() -> NULL pointer panic in
+ * process_one_work().
+ */
+ tty_vhangup(tty);
tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
@@ -1800,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm)
goto invalid;
cr = gsm->address & 1; /* C/R bit */
+ cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */
gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
- cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
dlci = gsm->dlci[address];
switch (gsm->control) {
@@ -3234,9 +3251,9 @@ static void gsmtty_throttle(struct tty_struct *tty)
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
- dlci->modem_tx &= ~TIOCM_DTR;
+ dlci->modem_tx &= ~TIOCM_RTS;
dlci->throttled = true;
- /* Send an MSC with DTR cleared */
+ /* Send an MSC with RTS cleared */
gsmtty_modem_update(dlci, 0);
}
@@ -3246,9 +3263,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
- dlci->modem_tx |= TIOCM_DTR;
+ dlci->modem_tx |= TIOCM_RTS;
dlci->throttled = false;
- /* Send an MSC with DTR set */
+ /* Send an MSC with RTS set */
gsmtty_modem_update(dlci, 0);
}
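The n_gsm changes above all follow from one 07.10 convention: the on-wire C/R bit depends on which side initiated the mux, while the driver wants to reason internally with "1 = command" regardless of role. Flipping the bit once at the encode boundary and once at the decode boundary keeps both directions consistent; a sketch, assuming a bool initiator flag as in the driver:

	static inline int gsm_cr_to_wire(int cr, bool initiator)
	{
		/* Responders invert the logical C/R before sending. */
		return cr ^ (initiator ? 0 : 1);
	}

	static inline int gsm_cr_from_wire(int wire_cr, bool initiator)
	{
		/* Symmetric: the same flip restores the logical value. */
		return wire_cr ^ (initiator ? 0 : 1);
	}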
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5e988e514653..efc72104c840 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1926,7 +1926,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
return false;
canon_head = smp_load_acquire(&ldata->canon_head);
- n = min(*nr + 1, canon_head - ldata->read_tail);
+ n = min(*nr, canon_head - ldata->read_tail);
tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
@@ -1948,10 +1948,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
n += N_TTY_BUF_SIZE;
c = n + found;
- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
- c = min(*nr, c);
+ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
- }
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
__func__, eol, found, n, c, tail, more);
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 673cda3d011d..948d0a1c6ae8 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
unsigned long address;
int err;
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
if (!dev->irq && (dev->id.sversion == 0xad))
dev->irq = iosapic_serial_irq(dev);
#endif
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 64e7e6c8145f..38d1c0748533 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
static void sc16is7xx_tx_proc(struct kthread_work *ws)
{
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
if ((port->rs485.flags & SER_RS485_ENABLED) &&
(port->rs485.delay_rts_before_send > 0))
msleep(port->rs485.delay_rts_before_send);
+ mutex_lock(&s->efr_lock);
sc16is7xx_handle_tx(port);
+ mutex_unlock(&s->efr_lock);
}
static void sc16is7xx_reconf_rs485(struct uart_port *port)
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 8a63da3ab39d..88c337bf564f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1418,6 +1418,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
#define dwc2_is_device_connected(hsotg) (hsotg->connected)
+#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
@@ -1454,6 +1455,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
int testmode)
{ return 0; }
#define dwc2_is_device_connected(hsotg) (0)
+#define dwc2_is_device_enabled(hsotg) (0)
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index 1b39c4776369..d8d6493bc457 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -130,8 +130,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
already = dwc2_ovr_bvalid(hsotg, true);
- /* This clear DCTL.SFTDISCON bit */
- dwc2_hsotg_core_connect(hsotg);
+ if (dwc2_is_device_enabled(hsotg)) {
+ /* This clears the DCTL.SFTDISCON bit */
+ dwc2_hsotg_core_connect(hsotg);
+ }
} else {
if (dwc2_is_device_mode(hsotg)) {
if (!dwc2_ovr_bvalid(hsotg, false))
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7ff8fc8f79a9..06d0e88ec8af 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -43,6 +43,7 @@
#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -85,8 +86,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
static struct gpiod_lookup_table platform_bytcr_gpios = {
.dev_id = "0000:00:16.0",
.table = {
- GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
{}
},
};
@@ -119,6 +120,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
{}
};
+static const struct property_entry dwc3_pci_intel_byt_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
@@ -161,6 +169,10 @@ static const struct software_node dwc3_pci_intel_swnode = {
.properties = dwc3_pci_intel_properties,
};
+static const struct software_node dwc3_pci_intel_byt_swnode = {
+ .properties = dwc3_pci_intel_byt_properties,
+};
+
static const struct software_node dwc3_pci_intel_mrfld_swnode = {
.properties = dwc3_pci_mrfld_properties,
};
@@ -344,7 +356,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
- (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
(kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
@@ -409,6 +421,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 183b90923f51..a0c883f19a41 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4160,9 +4160,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
+ local_bh_disable();
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_process_event_buf(evt);
spin_unlock_irqrestore(&dwc->lock, flags);
+ local_bh_enable();
return ret;
}
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index b7ccf1803656..00b3f6b3bb31 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -922,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
params->resp_avail = resp_avail;
params->v = v;
INIT_LIST_HEAD(&params->resp_queue);
+ spin_lock_init(&params->resp_lock);
pr_debug("%s: configNr = %d\n", __func__, i);
return params;
@@ -1015,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
{
rndis_resp_t *r, *n;
+ spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (r->buf == buf) {
list_del(&r->list);
kfree(r);
}
}
+ spin_unlock(&params->resp_lock);
}
EXPORT_SYMBOL_GPL(rndis_free_response);
@@ -1030,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
if (!length) return NULL;
+ spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (!r->send) {
r->send = 1;
*length = r->length;
+ spin_unlock(&params->resp_lock);
return r->buf;
}
}
+ spin_unlock(&params->resp_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(rndis_get_next_response);
@@ -1054,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
r->length = length;
r->send = 0;
+ spin_lock(&params->resp_lock);
list_add_tail(&r->list, &params->resp_queue);
+ spin_unlock(&params->resp_lock);
return r;
}
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index f6167f7fea82..6206b8b7490f 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -174,6 +174,7 @@ typedef struct rndis_params {
void (*resp_avail)(void *v);
void *v;
struct list_head resp_queue;
+ spinlock_t resp_lock;
} rndis_params;
/* RNDIS Message parser and other useless functions */
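The rndis hunks race-proof resp_queue by putting every traversal and mutation under the new resp_lock, which must itself be initialized before first use (hence the spin_lock_init() in rndis_register()). A reduced sketch of the pattern, with illustrative demo_* types:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_resp {
		struct list_head list;
	};

	struct demo_params {
		struct list_head resp_queue;
		spinlock_t resp_lock;
	};

	static void demo_init(struct demo_params *p)
	{
		INIT_LIST_HEAD(&p->resp_queue);
		spin_lock_init(&p->resp_lock);	/* before any add/free */
	}

	static void demo_add(struct demo_params *p, struct demo_resp *r)
	{
		spin_lock(&p->resp_lock);
		list_add_tail(&r->list, &p->resp_queue);
		spin_unlock(&p->resp_lock);
	}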
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 6ce886fb7bfe..2907fad04e2c 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
break;
case USB_RECIP_ENDPOINT:
epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum >= XUSB_MAX_ENDPOINTS)
+ goto stall;
target_ep = &udc->ep[epnum];
epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
@@ -1682,6 +1684,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
case USB_RECIP_ENDPOINT:
if (!udc->setup.wValue) {
endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (endpoint >= XUSB_MAX_ENDPOINTS) {
+ xudc_ep0_stall(udc);
+ return;
+ }
target_ep = &udc->ep[endpoint];
outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
outinbit = outinbit >> 7;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dc357cabb265..2d378543bc3a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
+ bool reinit_xhc = false;
if (!hcd->state)
return 0;
@@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
- if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
- hibernated = true;
- if (!hibernated) {
+ if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+ reinit_xhc = true;
+
+ if (!reinit_xhc) {
/*
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
- temp = readl(&xhci->op_regs->status);
}
- /* If restore operation fails, re-initialize the HC during resume */
- if ((temp & STS_SRE) || hibernated) {
+ temp = readl(&xhci->op_regs->status);
+ /* re-initialize the HC on Restore Error, or Host Controller Error */
+ if (temp & (STS_SRE | STS_HCE)) {
+ reinit_xhc = true;
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ }
+
+ if (reinit_xhc) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct urb_priv *urb_priv;
int num_tds;
- if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
- true, true, __func__) <= 0)
+ if (!urb)
return -EINVAL;
+ ret = xhci_check_args(hcd, urb->dev, urb->ep,
+ true, true, __func__);
+ if (ret <= 0)
+ return ret ? ret : -EINVAL;
slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
@@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
return -EINVAL;
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
- return -EINVAL;
+ return ret ? ret : -EINVAL;
if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 58cba8ee0277..2798fca71261 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -81,7 +81,6 @@
#define CH341_QUIRK_SIMULATE_BREAK BIT(1)
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x1a86, 0x5512) },
{ USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 962e9943fc20..e7755d9cfc61 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5821E 0x81d7
#define DELL_PRODUCT_5821E_ESIM 0x81e0
+#define DELL_PRODUCT_5829E_ESIM 0x81e4
+#define DELL_PRODUCT_5829E 0x81e6
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
+ .driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
+ .driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
.driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
+ .driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
.driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 6d27a5b5e3ca..7ffcda94d323 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client)
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
- return ret;
+ goto err_clear_mask;
trace_tps6598x_status(status);
ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
if (ret < 0)
- return ret;
+ goto err_clear_mask;
/*
* This fwnode has a "compatible" property, but is never populated as a
@@ -855,7 +855,8 @@ err_role_put:
usb_role_switch_put(tps->role_sw);
err_fwnode_put:
fwnode_handle_put(fwnode);
-
+err_clear_mask:
+ tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
return ret;
}
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index d6ca1c7ad513..37f0b4274113 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -629,16 +629,18 @@ err:
return ret;
}
-static int vhost_vsock_stop(struct vhost_vsock *vsock)
+static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
size_t i;
- int ret;
+ int ret = 0;
mutex_lock(&vsock->dev.mutex);
- ret = vhost_dev_check_owner(&vsock->dev);
- if (ret)
- goto err;
+ if (check_owner) {
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+ }
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
struct vhost_virtqueue *vq = &vsock->vqs[i];
@@ -753,7 +755,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
* inefficient. Room for improvement here. */
vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
- vhost_vsock_stop(vsock);
+ /* Don't check the owner: we are in the release path, so we
+ * need to stop the vsock device in any case.
+ * vhost_vsock_stop() cannot fail in this case, so we don't need to
+ * check the return code.
+ */
+ vhost_vsock_stop(vsock, false);
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);
@@ -868,7 +875,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (start)
return vhost_vsock_start(vsock);
else
- return vhost_vsock_stop(vsock);
+ return vhost_vsock_stop(vsock, true);
case VHOST_GET_FEATURES:
features = VHOST_VSOCK_FEATURES;
if (copy_to_user(argp, &features, sizeof(features)))
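The vhost_vsock_stop() signature change separates two callers with different requirements: the ioctl path must reject callers that do not own the device, while the release path has no owner left to check and must always stop the queues. A hedged sketch with hypothetical demo_* types and helpers:

	#include <linux/mutex.h>

	struct demo_dev {
		struct mutex mutex;
	};

	static int demo_check_owner(struct demo_dev *dev);
	static void demo_disable_vqs(struct demo_dev *dev);

	static int demo_stop(struct demo_dev *dev, bool check_owner)
	{
		int ret = 0;

		mutex_lock(&dev->mutex);
		if (check_owner) {
			/* ioctl path only; release path skips this. */
			ret = demo_check_owner(dev);
			if (ret)
				goto out;
		}
		demo_disable_vqs(dev);
	out:
		mutex_unlock(&dev->mutex);
		return ret;
	}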
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index fd66f4d4a621..b9054f658838 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -1059,6 +1059,7 @@ static const struct fb_ops broadsheetfb_ops = {
static struct fb_deferred_io broadsheetfb_defio = {
.delay = HZ/4,
+ .sort_pagelist = true,
.deferred_io = broadsheetfb_dpy_deferred_io,
};
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index a591d291b231..98b0f23bf5e2 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -59,6 +59,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
printk(KERN_ERR "no mapping available\n");
BUG_ON(!page->mapping);
+ INIT_LIST_HEAD(&page->lru);
page->index = vmf->pgoff;
vmf->page = page;
@@ -95,7 +96,7 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
struct page *page = vmf->page;
struct fb_info *info = vmf->vma->vm_private_data;
struct fb_deferred_io *fbdefio = info->fbdefio;
- struct page *cur;
+ struct list_head *pos = &fbdefio->pagelist;
/* this is a callback we get when userspace first tries to
write to the page. we schedule a workqueue. that workqueue
@@ -122,21 +123,38 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
*/
lock_page(page);
- /* we loop through the pagelist before adding in order
- to keep the pagelist sorted */
- list_for_each_entry(cur, &fbdefio->pagelist, lru) {
- /* this check is to catch the case where a new
- process could start writing to the same page
- through a new pte. this new access can cause the
- mkwrite even when the original ps's pte is marked
- writable */
- if (unlikely(cur == page))
- goto page_already_added;
- else if (cur->index > page->index)
- break;
+ /*
+ * This check is to catch the case where a new process could start
+ * writing to the same page through a new PTE. This new access
+ * can cause a call to .page_mkwrite even if the original process'
+ * PTE is marked writable.
+ *
+ * TODO: The lru field is owned by the page cache; hence the name.
+ * We dequeue in fb_deferred_io_work() after flushing the
+ * page's content into video memory. Instead of lru, fbdefio
+ * should have its own field.
+ */
+ if (!list_empty(&page->lru))
+ goto page_already_added;
+
+ if (unlikely(fbdefio->sort_pagelist)) {
+ /*
+ * We loop through the pagelist before adding in order to
+ * keep the pagelist sorted. This has significant overhead
+ * of O(n^2) with n being the number of written pages. If
+ * possible, drivers should try to work with unsorted page
+ * lists instead.
+ */
+ struct page *cur;
+
+ list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+ if (cur->index > page->index)
+ break;
+ }
+ pos = &cur->lru;
}
- list_add_tail(&page->lru, &cur->lru);
+ list_add_tail(&page->lru, pos);
page_already_added:
mutex_unlock(&fbdefio->lock);
@@ -194,7 +212,7 @@ static void fb_deferred_io_work(struct work_struct *work)
/* clear the list */
list_for_each_safe(node, next, &fbdefio->pagelist) {
- list_del(node);
+ list_del_init(node);
}
mutex_unlock(&fbdefio->lock);
}
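The fb_defio rework keys duplicate detection off the page's own list head: pages are dequeued with list_del_init(), so list_empty(&page->lru) reliably means "not queued yet", and the O(n^2) sorted insert runs only for drivers that opt in via sort_pagelist. A condensed sketch of that insertion logic:

	#include <linux/list.h>
	#include <linux/mm_types.h>

	static void demo_queue_page(struct list_head *pagelist,
				    struct page *page, bool sorted)
	{
		struct list_head *pos = pagelist;

		if (!list_empty(&page->lru))
			return;	/* already queued via another PTE */

		if (sorted) {
			struct page *cur;

			/* Stop at the first page with a larger index. */
			list_for_each_entry(cur, pagelist, lru) {
				if (cur->index > page->index)
					break;
			}
			pos = &cur->lru;
		}
		list_add_tail(&page->lru, pos);
	}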
diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index 952826557a0c..af858dd23ea6 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -568,6 +568,7 @@ static const struct fb_ops metronomefb_ops = {
static struct fb_deferred_io metronomefb_defio = {
.delay = HZ,
+ .sort_pagelist = true,
.deferred_io = metronomefb_dpy_deferred_io,
};
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index b9cdd02c1000..184bb8433b78 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -980,6 +980,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
+ fbdefio->sort_pagelist = true;
fbdefio->deferred_io = dlfb_dpy_deferred_io;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8992e0096163..947f04789389 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3291,7 +3291,7 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
int __init btrfs_auto_defrag_init(void);
void __cold btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode);
+ struct btrfs_inode *inode, u32 extent_thresh);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 5a36add21305..c28ceddefae4 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -261,6 +261,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
+ set_bit(EXTENT_FLAG_MERGED, &em->flags);
rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
@@ -278,6 +279,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
RB_CLEAR_NODE(&merge->rb_node);
em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
em->generation = max(em->generation, merge->generation);
+ set_bit(EXTENT_FLAG_MERGED, &em->flags);
free_extent_map(merge);
}
}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 8e217337dff9..d2fa32ffe304 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -25,6 +25,8 @@ enum {
EXTENT_FLAG_FILLING,
/* filesystem extent mapping type */
EXTENT_FLAG_FS_MAPPING,
+ /* This em is merged from two or more physically adjacent ems */
+ EXTENT_FLAG_MERGED,
};
struct extent_map {
@@ -40,6 +42,12 @@ struct extent_map {
u64 ram_bytes;
u64 block_start;
u64 block_len;
+
+ /*
+ * Generation of the extent map, for merged em it's the highest
+ * generation of all merged ems.
+ * For non-merged extents, it's from btrfs_file_extent_item::generation.
+ */
u64 generation;
unsigned long flags;
/* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 11204dbbe053..a0179cc62913 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -50,11 +50,14 @@ struct inode_defrag {
/* root objectid */
u64 root;
- /* last offset we were able to defrag */
- u64 last_offset;
-
- /* if we've wrapped around back to zero once already */
- int cycled;
+ /*
+ * The extent size threshold for autodefrag.
+ *
+ * This value is different for compressed/non-compressed extents,
+ * thus needs to be passed from higher layer.
+ * (aka, inode_should_defrag())
+ */
+ u32 extent_thresh;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
@@ -107,8 +110,8 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
*/
if (defrag->transid < entry->transid)
entry->transid = defrag->transid;
- if (defrag->last_offset > entry->last_offset)
- entry->last_offset = defrag->last_offset;
+ entry->extent_thresh = min(defrag->extent_thresh,
+ entry->extent_thresh);
return -EEXIST;
}
}
@@ -134,7 +137,7 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
* enabled
*/
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode)
+ struct btrfs_inode *inode, u32 extent_thresh)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -160,6 +163,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->ino = btrfs_ino(inode);
defrag->transid = transid;
defrag->root = root->root_key.objectid;
+ defrag->extent_thresh = extent_thresh;
spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
@@ -179,34 +183,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
}
/*
- * Requeue the defrag object. If there is a defrag object that points to
- * the same inode in the tree, we will merge them together (by
- * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
- */
-static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
- struct inode_defrag *defrag)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- int ret;
-
- if (!__need_auto_defrag(fs_info))
- goto out;
-
- /*
- * Here we don't check the IN_DEFRAG flag, because we need merge
- * them together.
- */
- spin_lock(&fs_info->defrag_inodes_lock);
- ret = __btrfs_add_inode_defrag(inode, defrag);
- spin_unlock(&fs_info->defrag_inodes_lock);
- if (ret)
- goto out;
- return;
-out:
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-}
-
-/*
* pick the defragable inode that we want, if it doesn't exist, we will get
* the next one.
*/
@@ -278,8 +254,14 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
struct btrfs_root *inode_root;
struct inode *inode;
struct btrfs_ioctl_defrag_range_args range;
- int num_defrag;
- int ret;
+ int ret = 0;
+ u64 cur = 0;
+
+again:
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+ goto cleanup;
+ if (!__need_auto_defrag(fs_info))
+ goto cleanup;
/* get the inode */
inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
@@ -295,39 +277,30 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
goto cleanup;
}
+ if (cur >= i_size_read(inode)) {
+ iput(inode);
+ goto cleanup;
+ }
+
/* do a chunk of defrag */
clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
memset(&range, 0, sizeof(range));
range.len = (u64)-1;
- range.start = defrag->last_offset;
+ range.start = cur;
+ range.extent_thresh = defrag->extent_thresh;
sb_start_write(fs_info->sb);
- num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+ ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
BTRFS_DEFRAG_BATCH);
sb_end_write(fs_info->sb);
- /*
- * if we filled the whole defrag batch, there
- * must be more work to do. Queue this defrag
- * again
- */
- if (num_defrag == BTRFS_DEFRAG_BATCH) {
- defrag->last_offset = range.start;
- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
- } else if (defrag->last_offset && !defrag->cycled) {
- /*
- * we didn't fill our defrag batch, but
- * we didn't start at zero. Make sure we loop
- * around to the start of the file.
- */
- defrag->last_offset = 0;
- defrag->cycled = 1;
- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
- } else {
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- }
-
iput(inode);
- return 0;
+
+ if (ret < 0)
+ goto cleanup;
+
+ cur = max(cur + fs_info->sectorsize, range.start);
+ goto again;
+
cleanup:
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
return ret;
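The autodefrag rewrite above drops the requeue dance entirely: instead of re-inserting the inode_defrag record after each batch, the worker keeps a cursor and loops in place, bailing out on error, EOF, remount, or a disabled mount option. The control flow, sketched with hypothetical helpers:

	#include <linux/minmax.h>
	#include <linux/types.h>

	struct demo_inode;
	static bool demo_should_stop(void);	/* remount / disabled */
	static int demo_defrag_batch(struct demo_inode *inode,
				     u64 cur, u64 *next);

	static int demo_run_defrag(struct demo_inode *inode, u64 isize,
				   u32 sectorsize)
	{
		u64 cur = 0;
		int ret = 0;

		while (cur < isize) {
			u64 next = cur;

			if (demo_should_stop())
				break;
			ret = demo_defrag_batch(inode, cur, &next);
			if (ret < 0)
				break;
			/* Guarantee forward progress on empty batches. */
			cur = max(cur + sectorsize, next);
		}
		return ret;
	}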
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3b2403b6127f..76e530f76e3c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -560,12 +560,12 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
}
static inline void inode_should_defrag(struct btrfs_inode *inode,
- u64 start, u64 end, u64 num_bytes, u64 small_write)
+ u64 start, u64 end, u64 num_bytes, u32 small_write)
{
/* If this is a small write inside eof, kick off a defrag */
if (num_bytes < small_write &&
(start > 0 || end + 1 < inode->disk_i_size))
- btrfs_add_inode_defrag(NULL, inode);
+ btrfs_add_inode_defrag(NULL, inode, small_write);
}
/*
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 33eda39df685..8d47ec5fc4f4 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1012,8 +1012,155 @@ out:
return ret;
}
+/*
+ * Defrag specific helper to get an extent map.
+ *
+ * Differences between this and btrfs_get_extent() are:
+ *
+ * - No extent_map will be added to inode->extent_tree
+ * To reduce memory usage in the long run.
+ *
+ * - Extra optimization to skip file extents older than @newer_than
+ * By using btrfs_search_forward() we can skip entire file ranges that
+ * have extents created in past transactions, because btrfs_search_forward()
+ * will not visit leaves and nodes with a generation smaller than given
+ * minimal generation threshold (@newer_than).
+ *
+ * Return valid em if we find a file extent matching the requirement.
+ * Return NULL if we cannot find a file extent matching the requirement.
+ *
+ * Return ERR_PTR() for error.
+ */
+static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
+ u64 start, u64 newer_than)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_path path = { 0 };
+ struct extent_map *em;
+ struct btrfs_key key;
+ u64 ino = btrfs_ino(inode);
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+ if (newer_than) {
+ ret = btrfs_search_forward(root, &key, &path, newer_than);
+ if (ret < 0)
+ goto err;
+ /* Can't find anything newer */
+ if (ret > 0)
+ goto not_found;
+ } else {
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
+ if (ret < 0)
+ goto err;
+ }
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ /*
+ * If btrfs_search_slot() makes the path point beyond nritems,
+ * we should not have an empty leaf, as this inode must at
+ * least have its INODE_ITEM.
+ */
+ ASSERT(btrfs_header_nritems(path.nodes[0]));
+ path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
+ }
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+ /* Perfect match, no need to go one slot back */
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
+ key.offset == start)
+ goto iterate;
+
+ /* We didn't find a perfect match, need to go one slot back */
+ if (path.slots[0] > 0) {
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
+ path.slots[0]--;
+ }
+
+iterate:
+ /* Iterate through the path to find a file extent covering @start */
+ while (true) {
+ u64 extent_end;
+
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
+ goto next;
+
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+
+ /*
+ * We may go one slot back to INODE_REF/XATTR item, then
+ * need to go forward until we reach an EXTENT_DATA.
+ * But we should still have the correct ino as key.objectid.
+ */
+ if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
+ goto next;
+
+ /* It's beyond our target range, definitely no extent found */
+ if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
+ goto not_found;
+
+ /*
+ * | |<- File extent ->|
+ * \- start
+ *
+ * This means there is a hole between start and key.offset.
+ */
+ if (key.offset > start) {
+ em->start = start;
+ em->orig_start = start;
+ em->block_start = EXTENT_MAP_HOLE;
+ em->len = key.offset - start;
+ break;
+ }
+
+ fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
+ struct btrfs_file_extent_item);
+ extent_end = btrfs_file_extent_end(&path);
+
+ /*
+ * |<- file extent ->| |
+ * \- start
+ *
+ * We haven't reached start, search next slot.
+ */
+ if (extent_end <= start)
+ goto next;
+
+ /* Now this extent covers @start, convert it to em */
+ btrfs_extent_item_to_extent_map(inode, &path, fi, false, em);
+ break;
+next:
+ ret = btrfs_next_item(root, &path);
+ if (ret < 0)
+ goto err;
+ if (ret > 0)
+ goto not_found;
+ }
+ btrfs_release_path(&path);
+ return em;
+
+not_found:
+ btrfs_release_path(&path);
+ free_extent_map(em);
+ return NULL;
+
+err:
+ btrfs_release_path(&path);
+ free_extent_map(em);
+ return ERR_PTR(ret);
+}
+
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
- bool locked)
+ u64 newer_than, bool locked)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -1028,6 +1175,20 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
em = lookup_extent_mapping(em_tree, start, sectorsize);
read_unlock(&em_tree->lock);
+ /*
+ * We can get a merged extent; in that case we need to re-search
+ * the tree to get the original em for defrag.
+ *
+ * If @newer_than is 0 or em::generation < newer_than, we can trust
+ * this em, as either we don't care about the generation, or the
+ * merged extent map will be rejected anyway.
+ */
+ if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) &&
+ newer_than && em->generation >= newer_than) {
+ free_extent_map(em);
+ em = NULL;
+ }
+
if (!em) {
struct extent_state *cached = NULL;
u64 end = start + sectorsize - 1;
@@ -1035,7 +1196,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
/* get the big lock and read metadata off disk */
if (!locked)
lock_extent_bits(io_tree, start, end, &cached);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
+ em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
if (!locked)
unlock_extent_cached(io_tree, start, end, &cached);
@@ -1046,23 +1207,42 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
return em;
}
+static u32 get_extent_max_capacity(const struct extent_map *em)
+{
+ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+ return BTRFS_MAX_COMPRESSED;
+ return BTRFS_MAX_EXTENT_SIZE;
+}
+
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
bool locked)
{
struct extent_map *next;
- bool ret = true;
+ bool ret = false;
/* this is the last extent */
if (em->start + em->len >= i_size_read(inode))
return false;
- next = defrag_lookup_extent(inode, em->start + em->len, locked);
+ /*
+ * We want to check if the next extent can be merged with the current
+ * one, which can be an extent created in a past generation, so we pass
+ * a minimum generation of 0 to defrag_lookup_extent().
+ */
+ next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
+ /* No more em or hole */
if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
- ret = false;
- else if ((em->block_start + em->block_len == next->block_start) &&
- (em->block_len > SZ_128K && next->block_len > SZ_128K))
- ret = false;
-
+ goto out;
+ if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
+ goto out;
+ /*
+ * If the next extent is at its max capacity, defragging current extent
+ * makes no sense, as the total number of extents won't change.
+ */
+ if (next->len >= get_extent_max_capacity(em))
+ goto out;
+ ret = true;
+out:
free_extent_map(next);
return ret;
}
@@ -1186,8 +1366,10 @@ struct defrag_target_range {
static int defrag_collect_targets(struct btrfs_inode *inode,
u64 start, u64 len, u32 extent_thresh,
u64 newer_than, bool do_compress,
- bool locked, struct list_head *target_list)
+ bool locked, struct list_head *target_list,
+ u64 *last_scanned_ret)
{
+ bool last_is_target = false;
u64 cur = start;
int ret = 0;
@@ -1197,7 +1379,9 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
bool next_mergeable = true;
u64 range_len;
- em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
+ last_is_target = false;
+ em = defrag_lookup_extent(&inode->vfs_inode, cur,
+ newer_than, locked);
if (!em)
break;
@@ -1210,6 +1394,10 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
if (em->generation < newer_than)
goto next;
+ /* This em is under writeback, no need to defrag */
+ if (em->generation == (u64)-1)
+ goto next;
+
/*
* Our start offset might be in the middle of an existing extent
* map, so take that into account.
@@ -1250,6 +1438,13 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
if (range_len >= extent_thresh)
goto next;
+ /*
+ * Skip extents already at their max capacity; this is mostly for
+ * compressed extents, whose max capacity is only 128K.
+ */
+ if (em->len >= get_extent_max_capacity(em))
+ goto next;
+
next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
locked);
if (!next_mergeable) {
@@ -1268,6 +1463,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
}
add:
+ last_is_target = true;
range_len = min(extent_map_end(em), start + len) - cur;
/*
* This one is a good target, check if it can be merged into
@@ -1311,6 +1507,17 @@ next:
kfree(entry);
}
}
+ if (!ret && last_scanned_ret) {
+ /*
+ * If the last extent is not a target, the caller can skip to
+ * the end of that extent.
+ * Otherwise, we can only go to the end of the specified range.
+ */
+ if (!last_is_target)
+ *last_scanned_ret = max(cur, *last_scanned_ret);
+ else
+ *last_scanned_ret = max(start + len, *last_scanned_ret);
+ }
return ret;
}
@@ -1369,7 +1576,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
}
static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
- u32 extent_thresh, u64 newer_than, bool do_compress)
+ u32 extent_thresh, u64 newer_than, bool do_compress,
+ u64 *last_scanned_ret)
{
struct extent_state *cached_state = NULL;
struct defrag_target_range *entry;
@@ -1415,7 +1623,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
*/
ret = defrag_collect_targets(inode, start, len, extent_thresh,
newer_than, do_compress, true,
- &target_list);
+ &target_list, last_scanned_ret);
if (ret < 0)
goto unlock_extent;
@@ -1450,7 +1658,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
u64 start, u32 len, u32 extent_thresh,
u64 newer_than, bool do_compress,
unsigned long *sectors_defragged,
- unsigned long max_sectors)
+ unsigned long max_sectors,
+ u64 *last_scanned_ret)
{
const u32 sectorsize = inode->root->fs_info->sectorsize;
struct defrag_target_range *entry;
@@ -1461,7 +1670,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
ret = defrag_collect_targets(inode, start, len, extent_thresh,
newer_than, do_compress, false,
- &target_list);
+ &target_list, NULL);
if (ret < 0)
goto out;
@@ -1478,6 +1687,15 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
range_len = min_t(u32, range_len,
(max_sectors - *sectors_defragged) * sectorsize);
+ /*
+ * If defrag_one_range() has updated last_scanned_ret,
+ * our range may already be invalid (e.g. hole punched).
+ * Skip if our range is before last_scanned_ret, as there is
+ * no need to defrag the range anymore.
+ */
+ if (entry->start + range_len <= *last_scanned_ret)
+ continue;
+
if (ra)
page_cache_sync_readahead(inode->vfs_inode.i_mapping,
ra, NULL, entry->start >> PAGE_SHIFT,
@@ -1490,7 +1708,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
* accounting.
*/
ret = defrag_one_range(inode, entry->start, range_len,
- extent_thresh, newer_than, do_compress);
+ extent_thresh, newer_than, do_compress,
+ last_scanned_ret);
if (ret < 0)
break;
*sectors_defragged += range_len >>
@@ -1501,6 +1720,8 @@ out:
list_del_init(&entry->list);
kfree(entry);
}
+ if (ret >= 0)
+ *last_scanned_ret = max(*last_scanned_ret, start + len);
return ret;
}
@@ -1586,6 +1807,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
while (cur < last_byte) {
const unsigned long prev_sectors_defragged = sectors_defragged;
+ u64 last_scanned = cur;
u64 cluster_end;
/* The cluster size 256K should always be page aligned */
@@ -1615,8 +1837,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
BTRFS_I(inode)->defrag_compress = compress_type;
ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
cluster_end + 1 - cur, extent_thresh,
- newer_than, do_compress,
- &sectors_defragged, max_to_defrag);
+ newer_than, do_compress, &sectors_defragged,
+ max_to_defrag, &last_scanned);
if (sectors_defragged > prev_sectors_defragged)
balance_dirty_pages_ratelimited(inode->i_mapping);
@@ -1624,11 +1846,12 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
btrfs_inode_unlock(inode, 0);
if (ret < 0)
break;
- cur = cluster_end + 1;
+ cur = max(cluster_end + 1, last_scanned);
if (ret > 0) {
ret = 0;
break;
}
+ cond_resched();
}
if (ra_allocated)
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 0fb90cbe7669..e6e28a9c7987 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -380,6 +380,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
kunmap(cur_page);
cur_in += LZO_LEN;
+ if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) {
+ /*
+ * seg_len shouldn't be larger than what we have allocated
+ * for workspace->cbuf
+ */
+ btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
+ seg_len);
+ ret = -EIO;
+ goto out;
+ }
+
/* Copy the compressed segment payload into workspace */
copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
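The lzo hunk treats the per-segment length as what it is, untrusted on-disk input: it must be validated against the destination buffer before any copy takes place. As a one-check sketch:

	#include <linux/errno.h>
	#include <linux/types.h>

	static int demo_check_seg_len(u32 seg_len, size_t cbuf_size)
	{
		/* Corrupt or hostile metadata: refuse before copying. */
		if (seg_len > cbuf_size)
			return -EIO;
		return 0;
	}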
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index d8ccb62aa7d2..201eb2628aea 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4999,6 +4999,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
+ btrfs_err(fs_info,
+ "send: IO error at offset %llu for inode %llu root %llu",
+ page_offset(page), sctx->cur_ino,
+ sctx->send_root->root_key.objectid);
put_page(page);
ret = -EIO;
break;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c43bbc7f623e..c3cfdfd8de9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1981,16 +1981,24 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
/*
- * We use writeback_inodes_sb here because if we used
+ * We use try_to_writeback_inodes_sb() here because if we used
* btrfs_start_delalloc_roots we would deadlock with fs freeze.
* Currently we are holding the fs freeze lock; if we do an async flush
* we'll do btrfs_join_transaction() and deadlock because we need to
* wait for the fs freeze lock. Using the direct flushing we benefit
* from already being in a transaction and our join_transaction doesn't
* have to re-take the fs freeze lock.
+ *
+ * Note that try_to_writeback_inodes_sb() will only trigger writeback
+ * if it can read lock sb->s_umount. It will always be able to lock it,
+ * except when the filesystem is being unmounted or being frozen, but in
+ * those cases sync_filesystem() is called, which results in calling
+ * writeback_inodes_sb() while holding a write lock on sb->s_umount.
+ * Note that we don't call writeback_inodes_sb() directly, because it
+ * will emit a warning if sb->s_umount is not locked.
*/
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+ try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
return 0;
}
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index ee3aab3dd4ac..bf861fef2f0c 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -949,6 +949,9 @@ static void populate_new_aces(char *nacl_base,
pnntace = (struct cifs_ace *) (nacl_base + nsize);
nsize += setup_special_mode_ACE(pnntace, nmode);
num_aces++;
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += setup_authusers_ACE(pnntace);
+ num_aces++;
goto set_size;
}
@@ -1297,7 +1300,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
if (uid_valid(uid)) { /* chown */
uid_t id;
- nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+ nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!nowner_sid_ptr) {
rc = -ENOMEM;
@@ -1326,7 +1329,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
}
if (gid_valid(gid)) { /* chgrp */
gid_t id;
- ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+ ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!ngroup_sid_ptr) {
rc = -ENOMEM;
@@ -1613,7 +1616,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
nsecdesclen = secdesclen;
if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
if (mode_from_sid)
- nsecdesclen += sizeof(struct cifs_ace);
+ nsecdesclen += 2 * sizeof(struct cifs_ace);
else /* cifsacl */
nsecdesclen += 5 * sizeof(struct cifs_ace);
} else { /* chown */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 199edac0cb59..082c21478686 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -919,6 +919,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
out_super:
deactivate_locked_super(sb);
+ return root;
out:
if (cifs_sb) {
kfree(cifs_sb->prepath);
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 7ec35f3f0a5f..a92e9eec521f 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -149,7 +149,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_u32("echo_interval", Opt_echo_interval),
fsparam_u32("max_credits", Opt_max_credits),
fsparam_u32("handletimeout", Opt_handletimeout),
- fsparam_u32("snapshot", Opt_snapshot),
+ fsparam_u64("snapshot", Opt_snapshot),
fsparam_u32("max_channels", Opt_max_channels),
/* Mount options which take string value */
@@ -1078,7 +1078,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->echo_interval = result.uint_32;
break;
case Opt_snapshot:
- ctx->snapshot_time = result.uint_32;
+ ctx->snapshot_time = result.uint_64;
break;
case Opt_max_credits:
if (result.uint_32 < 20 || result.uint_32 > 60000) {
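The smb3 snapshot fix is a width bug in the mount-option table: the token carries a 64-bit timestamp, so both the parameter spec and the result field must be 64-bit, or the value is silently truncated. Sketched against the fs_parser API (the option enum here is illustrative):

	#include <linux/fs_parser.h>

	enum { Opt_demo_snapshot };

	static const struct fs_parameter_spec demo_params[] = {
		fsparam_u64("snapshot", Opt_demo_snapshot),
		{}
	};

	/* In the parse callback, read the matching 64-bit member: */
	/*	ctx->snapshot_time = result.uint_64;	*/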
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 5723d50340e5..32f478c7a66d 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -127,11 +127,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
struct cifs_server_iface *ifaces = NULL;
size_t iface_count;
- if (ses->server->dialect < SMB30_PROT_ID) {
- cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
- return 0;
- }
-
spin_lock(&ses->chan_lock);
new_chan_count = old_chan_count = ses->chan_count;
@@ -145,6 +140,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
return 0;
}
+ if (ses->server->dialect < SMB30_PROT_ID) {
+ spin_unlock(&ses->chan_lock);
+ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ return 0;
+ }
+
if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
ses->chan_max = 1;
spin_unlock(&ses->chan_lock);
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 7d8b72d67c80..9d486fbbfbbd 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -175,11 +175,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
switch (handler->flags) {
case XATTR_CIFS_NTSD_FULL:
aclflags = (CIFS_ACL_OWNER |
+ CIFS_ACL_GROUP |
CIFS_ACL_DACL |
CIFS_ACL_SACL);
break;
case XATTR_CIFS_NTSD:
aclflags = (CIFS_ACL_OWNER |
+ CIFS_ACL_GROUP |
CIFS_ACL_DACL);
break;
case XATTR_CIFS_ACL:
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index d3cd2a94d1e8..d1f9d2632202 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -34,6 +34,14 @@
*/
DEFINE_SPINLOCK(configfs_dirent_lock);
+/*
+ * All of link_obj/unlink_obj/link_group/unlink_group require that
+ * subsys->su_mutex is held.
+ * But the parent configfs_subsystem is NULL when the config_item is the
+ * root. Use this mutex when operating on the root config_item.

+ */
+static DEFINE_MUTEX(configfs_subsystem_mutex);
+
static void configfs_d_iput(struct dentry * dentry,
struct inode * inode)
{
@@ -1859,7 +1867,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
sd = root->d_fsdata;
+ mutex_lock(&configfs_subsystem_mutex);
link_group(to_config_group(sd->s_element), group);
+ mutex_unlock(&configfs_subsystem_mutex);
inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
@@ -1884,7 +1894,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
inode_unlock(d_inode(root));
if (err) {
+ mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
+ mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
put_fragment(frag);
@@ -1931,7 +1943,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
dput(dentry);
+ mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
+ mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
diff --git a/fs/file_table.c b/fs/file_table.c
index 4969021fa676..7d2e692b66a9 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -27,6 +27,7 @@
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
+#include <linux/kmemleak.h>
#include <linux/atomic.h>
@@ -119,8 +120,11 @@ static struct ctl_table fs_stat_sysctls[] = {
static int __init init_fs_stat_sysctls(void)
{
register_sysctl_init("fs", fs_stat_sysctls);
- if (IS_ENABLED(CONFIG_BINFMT_MISC))
- register_sysctl_mount_point("fs/binfmt_misc");
+ if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
+ struct ctl_table_header *hdr;
+ hdr = register_sysctl_mount_point("fs/binfmt_misc");
+ kmemleak_not_leak(hdr);
+ }
return 0;
}
fs_initcall(init_fs_stat_sysctls);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 77b9c7e4793b..4715980e9015 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4567,6 +4567,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
} else {
list_add_tail(&buf->list, &(*head)->list);
}
+ cond_resched();
}
return i ? i : -ENOMEM;
@@ -7693,7 +7694,7 @@ static int io_run_task_work_sig(void)
/* when returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
- signed long *timeout)
+ ktime_t timeout)
{
int ret;
@@ -7705,8 +7706,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
if (test_bit(0, &ctx->check_cq_overflow))
return 1;
- *timeout = schedule_timeout(*timeout);
- return !*timeout ? -ETIME : 1;
+ if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+ return -ETIME;
+ return 1;
}
/*
@@ -7719,7 +7721,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
{
struct io_wait_queue iowq;
struct io_rings *rings = ctx->rings;
- signed long timeout = MAX_SCHEDULE_TIMEOUT;
+ ktime_t timeout = KTIME_MAX;
int ret;
do {
@@ -7735,7 +7737,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (get_timespec64(&ts, uts))
return -EFAULT;
- timeout = timespec64_to_jiffies(&ts);
+ timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
}
if (sig) {
@@ -7767,7 +7769,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
}
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE);
- ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+ ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
finish_wait(&ctx->cq_wait, &iowq.wq);
cond_resched();
} while (ret > 0);
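The conversion above turns the relative jiffies timeout into a single absolute deadline, so nothing has to be rewritten between loop iterations. A hedged sketch of the pattern (ts stands in for the user-supplied struct timespec64):

	/* sketch: absolute deadline = now + requested interval */
	ktime_t timeout = ktime_add_ns(timespec64_to_ktime(ts),
				       ktime_get_ns());

	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
		return -ETIME;	/* deadline already passed */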
@@ -7924,7 +7926,15 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
ret = wait_for_completion_interruptible(&data->done);
if (!ret) {
mutex_lock(&ctx->uring_lock);
- break;
+ if (atomic_read(&data->refs) > 0) {
+ /*
+ * it has been revived by another thread while
+ * we were unlocked
+ */
+ mutex_unlock(&ctx->uring_lock);
+ } else {
+ break;
+ }
}
atomic_inc(&data->refs);
diff --git a/fs/namespace.c b/fs/namespace.c
index 40b994a29e90..de6fae84f1a1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -469,6 +469,24 @@ void mnt_drop_write_file(struct file *file)
}
EXPORT_SYMBOL(mnt_drop_write_file);
+/**
+ * mnt_hold_writers - prevent write access to the given mount
+ * @mnt: mnt to prevent write access to
+ *
+ * Prevents write access to @mnt if there are no active writers for @mnt.
+ * This function needs to be called and return successfully before changing
+ * properties of @mnt that need to remain stable for callers with write access
+ * to @mnt.
+ *
+ * After this function has been called successfully, callers must pair it with
+ * a call to mnt_unhold_writers() in order to stop preventing write access to
+ * @mnt.
+ *
+ * Context: This function expects lock_mount_hash() to be held, serializing
+ * the setting of MNT_WRITE_HOLD.
+ * Return: On success 0 is returned.
+ * On error, -EBUSY is returned.
+ */
static inline int mnt_hold_writers(struct mount *mnt)
{
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -500,6 +518,18 @@ static inline int mnt_hold_writers(struct mount *mnt)
return 0;
}
+/**
+ * mnt_unhold_writers - stop preventing write access to the given mount
+ * @mnt: mnt to stop preventing write access to
+ *
+ * Stop preventing write access to @mnt, allowing callers to gain write access
+ * to @mnt again.
+ *
+ * This function can only be called after a successful call to
+ * mnt_hold_writers().
+ *
+ * Context: This function expects lock_mount_hash() to be held.
+ */
static inline void mnt_unhold_writers(struct mount *mnt)
{
/*
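The two helpers documented above are meant to bracket flag updates; a minimal sketch of the expected pairing (hypothetical caller, with lock_mount_hash() held as the kernel-doc requires):

	lock_mount_hash();
	if (!mnt_hold_writers(mnt)) {
		/* no active writers: flags may be changed safely */
		mnt->mnt.mnt_flags |= MNT_READONLY;
		mnt_unhold_writers(mnt);
	}
	unlock_mount_hash();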
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7bc7cf6b26f0..75cb1cbe4cde 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2010,14 +2010,14 @@ no_open:
if (!res) {
inode = d_inode(dentry);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
- !S_ISDIR(inode->i_mode))
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
res = ERR_PTR(-ENOTDIR);
else if (inode && S_ISREG(inode->i_mode))
res = ERR_PTR(-EOPENSTALE);
} else if (!IS_ERR(res)) {
inode = d_inode(res);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
- !S_ISDIR(inode->i_mode)) {
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
dput(res);
res = ERR_PTR(-ENOTDIR);
} else if (inode && S_ISREG(inode->i_mode)) {
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index a918c3a834b6..d96baa4450e3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -853,12 +853,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
}
/* Flush out writes to the server in order to update c/mtime. */
- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
- S_ISREG(inode->i_mode)) {
- err = filemap_write_and_wait(inode->i_mapping);
- if (err)
- goto out;
- }
+ if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
+ S_ISREG(inode->i_mode))
+ filemap_write_and_wait(inode->i_mapping);
/*
* We may force a getattr if the user cares about atime.
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f5020828ab65..0e0db6c27619 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1229,8 +1229,7 @@ nfs4_update_changeattr_locked(struct inode *inode,
NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
- NFS_INO_REVAL_PAGECACHE;
+ NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
}
nfsi->attrtimeo_timestamp = jiffies;
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index bafc02bf8220..de7252715b12 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -264,7 +264,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
if (!gid_valid(gid))
return -EINVAL;
opts->gid = gid;
- set_gid(tracefs_mount->mnt_root, gid);
break;
case Opt_mode:
if (match_octal(&args[0], &option))
@@ -291,7 +290,9 @@ static int tracefs_apply_options(struct super_block *sb)
inode->i_mode |= opts->mode;
inode->i_uid = opts->uid;
- inode->i_gid = opts->gid;
+
+ /* Set all the group ids to the mount option */
+ set_gid(sb->s_root, opts->gid);
return 0;
}
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 4c0dee78b2f8..d84714e4e46a 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1753,6 +1753,11 @@ xfs_remount_ro(
};
int error;
+ /* Flush all the dirty data to disk. */
+ error = sync_filesystem(mp->m_super);
+ if (error)
+ return error;
+
/*
* Cancel background eofb scanning so it cannot race with the final
* log force+buftarg wait and deadlock the remount.
@@ -1831,8 +1836,6 @@ xfs_fs_reconfigure(
if (error)
return error;
- sync_filesystem(mp->m_super);
-
/* inode32 -> inode64 */
if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h
index 98d020835b49..69487bd8ed56 100644
--- a/include/drm/dp/drm_dp_helper.h
+++ b/include/drm/dp/drm_dp_helper.h
@@ -560,6 +560,7 @@ struct drm_panel;
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
# define DP_TRAINING_PATTERN_MASK 0x3
@@ -738,11 +739,13 @@ struct drm_panel;
DP_LANE_CHANNEL_EQ_DONE | \
DP_LANE_SYMBOL_LOCKED)
-#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
-
-#define DP_INTERLANE_ALIGN_DONE (1 << 0)
-#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
-#define DP_LINK_STATUS_UPDATED (1 << 7)
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
+#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
+#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
@@ -1112,6 +1115,7 @@ struct drm_panel;
# define DP_UHBR13_5 (1 << 2)
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7)
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01
@@ -1347,6 +1351,7 @@ struct drm_panel;
# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
+#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */
enum drm_dp_phy {
DP_PHY_DPRX,
@@ -1549,6 +1554,15 @@ void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux);
+bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]);
+
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 061d87313fac..f27b4060faa2 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -649,6 +649,13 @@ struct drm_bridge_funcs {
* the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
*/
void (*hpd_disable)(struct drm_bridge *bridge);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows bridges to create bridge-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_bridge *bridge, struct dentry *root);
};
/**
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index f524db152413..572077ff8ae7 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -22,6 +22,9 @@
start__ >= max__ || size__ > max__ - start__; \
})
+#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
+
struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
@@ -132,11 +135,17 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
void drm_buddy_fini(struct drm_buddy *mm);
struct drm_buddy_block *
-drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order);
+drm_get_buddy(struct drm_buddy_block *block);
+
+int drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags);
-int drm_buddy_alloc_range(struct drm_buddy *mm,
- struct list_head *blocks,
- u64 start, u64 size);
+int drm_buddy_block_trim(struct drm_buddy *mm,
+ u64 new_size,
+ struct list_head *blocks);
void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
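A hedged sketch of the reworked entry point (start, end and size are placeholder variables; the flag comes from the new defines above):

	LIST_HEAD(blocks);
	int err;

	/* sketch: allocate size bytes restricted to [start, end) */
	err = drm_buddy_alloc_blocks(&mm, start, end, size,
				     mm.chunk_size, &blocks,
				     DRM_BUDDY_RANGE_ALLOCATION);
	if (err)
		return err;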
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 64cf5f88c05b..5166186146f4 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -592,13 +592,13 @@ struct drm_display_info {
bool rgb_quant_range_selectable;
/**
- * @edid_hdmi_dc_rgb444_modes: Mask of supported hdmi deep color modes
+ * @edid_hdmi_rgb444_dc_modes: Mask of supported hdmi deep color modes
* in RGB 4:4:4. Even more stuff redundant with @bus_formats.
*/
u8 edid_hdmi_rgb444_dc_modes;
/**
- * @edid_hdmi_dc_ycbcr444_modes: Mask of supported hdmi deep color
+ * @edid_hdmi_ycbcr444_dc_modes: Mask of supported hdmi deep color
* modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats.
*/
u8 edid_hdmi_ycbcr444_dc_modes;
@@ -1142,6 +1142,13 @@ struct drm_connector_funcs {
* has been received from a source outside the display driver / device.
*/
void (*oob_hotplug_event)(struct drm_connector *connector);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows connectors to create connector-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_connector *connector, struct dentry *root);
};
/**
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index b30ed5de0a33..0b0937c0b2f6 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -43,4 +43,8 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
const void *vmap, const struct drm_framebuffer *fb,
const struct drm_rect *rect);
+void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 68347b63fc71..d0a57853c188 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -137,6 +137,8 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent);
+extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+
/*
* GEM object functions
*/
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 91ca575a78de..6b5e01295348 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -918,20 +918,14 @@ struct drm_mode_config {
bool async_page_flip;
/**
- * @allow_fb_modifiers:
+ * @fb_modifiers_not_supported:
*
- * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
- * Note that drivers should not set this directly, it is automatically
- * set in drm_universal_plane_init().
- *
- * IMPORTANT:
- *
- * If this is set the driver must fill out the full implicit modifier
- * information in their &drm_mode_config_funcs.fb_create hook for legacy
- * userspace which does not set modifiers. Otherwise the GETFB2 ioctl is
- * broken for modifier aware userspace.
+ * When this flag is set, the DRM device will not expose modifier
+ * support to userspace. This is only used by legacy drivers that infer
+ * the buffer layout through heuristics without using modifiers. New
+ * drivers shall not set this flag.
*/
- bool allow_fb_modifiers;
+ bool fb_modifiers_not_supported;
/**
* @normalize_zpos:
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 4602f833eb51..1ba2d424a53f 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -29,6 +29,7 @@
#include <linux/list.h>
struct backlight_device;
+struct dentry;
struct device_node;
struct drm_connector;
struct drm_device;
@@ -125,6 +126,13 @@ struct drm_panel_funcs {
*/
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows panels to create panels-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
};
/**
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 06759badf99f..2628c7cde2da 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -803,6 +803,9 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev,
*
* The @drm_plane_funcs.destroy hook must be NULL.
*
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
* Returns:
* Pointer to new plane, or ERR_PTR on failure.
*/
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index bbc22fad8d80..944f83ef9f2e 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -457,13 +457,14 @@ struct drm_gpu_scheduler {
atomic_t _score;
bool ready;
bool free_guilty;
+ struct device *dev;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit,
long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name);
+ atomic_t *score, const char *name, struct device *dev);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 4fd727b52da1..0f1d55262c68 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -130,10 +130,15 @@ struct ttm_resource_manager {
struct dma_fence *move;
/*
- * Protected by the global->lru_lock.
+ * Protected by the bdev->lru_lock.
*/
-
struct list_head lru[TTM_MAX_BO_PRIORITY];
+
+ /**
+ * @usage: How much of the resources are used, protected by the
+ * bdev->lru_lock.
+ */
+ uint64_t usage;
};
/**
@@ -278,11 +283,12 @@ void ttm_resource_set_bo(struct ttm_resource *res,
void ttm_resource_manager_init(struct ttm_resource_manager *man,
struct ttm_device *bdev,
- unsigned long p_size);
+ uint64_t size);
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man);
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f35aea98bc35..16b47035e4b0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -748,7 +748,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
+
+void blk_mark_disk_dead(struct gendisk *disk);
#ifdef CONFIG_BLOCK
/*
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index fa517ae604ad..d0ad379d1e62 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -209,11 +209,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map)
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
if (unlikely(map_value_has_spin_lock(map)))
- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
- (struct bpf_spin_lock){};
+ memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
if (unlikely(map_value_has_timer(map)))
- *(struct bpf_timer *)(dst + map->timer_off) =
- (struct bpf_timer){};
+ memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}
/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
@@ -224,7 +222,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
if (unlikely(map_value_has_spin_lock(map))) {
s_off = map->spin_lock_off;
s_sz = sizeof(struct bpf_spin_lock);
- } else if (unlikely(map_value_has_timer(map))) {
+ }
+ if (unlikely(map_value_has_timer(map))) {
t_off = map->timer_off;
t_sz = sizeof(struct bpf_timer);
}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1ab29e61b078..3522a272b74d 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -382,6 +382,9 @@ struct cpufreq_driver {
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
+ /* Will be called after the driver is fully initialized */
+ void (*ready)(struct cpufreq_policy *policy);
+
struct freq_attr **attr;
/* platform specific boost support code */
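A sketch of a driver hooking the new callback; the foo_* names are hypothetical, and a typical use is registering a cooling device only once the policy is fully set up:

static void foo_cpufreq_ready(struct cpufreq_policy *policy)
{
	/* runs after the core has completely initialized the policy */
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name	= "foo",
	.ready	= foo_cpufreq_ready,
	/* .init, .target_index, ... as usual */
};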
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 303dd712220f..fec374f69e12 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -45,19 +45,6 @@ struct dma_fence_array {
struct irq_work work;
};
-extern const struct dma_fence_ops dma_fence_array_ops;
-
-/**
- * dma_fence_is_array - check if a fence is from the array subsclass
- * @fence: fence to test
- *
- * Return true if it is a dma_fence_array and false otherwise.
- */
-static inline bool dma_fence_is_array(struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_array_ops;
-}
-
/**
* to_dma_fence_array - cast a fence to a dma_fence_array
* @fence: fence to cast to a dma_fence_array
@@ -68,7 +55,7 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
- if (fence->ops != &dma_fence_array_ops)
+ if (!fence || !dma_fence_is_array(fence))
return NULL;
return container_of(fence, struct dma_fence_array, base);
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 54fe3443fd2c..10d51bcdf7b7 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -49,7 +49,6 @@ struct dma_fence_chain {
spinlock_t lock;
};
-extern const struct dma_fence_ops dma_fence_chain_ops;
/**
* to_dma_fence_chain - cast a fence to a dma_fence_chain
@@ -61,13 +60,28 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
static inline struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{
- if (!fence || fence->ops != &dma_fence_chain_ops)
+ if (!fence || !dma_fence_is_chain(fence))
return NULL;
return container_of(fence, struct dma_fence_chain, base);
}
/**
+ * dma_fence_chain_contained - return the contained fence
+ * @fence: the fence to test
+ *
+ * If the fence is a dma_fence_chain the function returns the fence contained
+ * inside the chain object, otherwise it returns the fence itself.
+ */
+static inline struct dma_fence *
+dma_fence_chain_contained(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ return chain ? chain->fence : fence;
+}
+
+/**
* dma_fence_chain_alloc
*
* Returns a new struct dma_fence_chain object or NULL on failure.
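A short sketch of dma_fence_chain_contained() in use (fence is a hypothetical struct dma_fence pointer that may or may not be a chain node):

	/* sketch: look through a possible chain node to its payload */
	struct dma_fence *inner = dma_fence_chain_contained(fence);

	if (dma_fence_is_signaled(inner))
		return true;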
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 1ea691753bd3..775cdc0b4f24 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -587,4 +587,42 @@ struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);
+extern const struct dma_fence_ops dma_fence_array_ops;
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * dma_fence_is_chain - check if a fence is from the chain subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_chain and false otherwise.
+ */
+static inline bool dma_fence_is_chain(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+/**
+ * dma_fence_is_container - check if a fence is a container for other fences
+ * @fence: the fence to test
+ *
+ * Return true if this fence is a container for other fences, false otherwise.
+ * This is important since we can't build up large fence structures; otherwise
+ * we run into recursion during operations on those fences.
+ */
+static inline bool dma_fence_is_container(struct dma_fence *fence)
+{
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+}
+
#endif /* __LINUX_DMA_FENCE_H */
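Given the recursion concern spelled out above, the typical consumer is a guard like this hedged sketch:

	/* sketch: refuse to nest one fence container inside another */
	if (dma_fence_is_container(fence))
		return -EINVAL;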
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dca2b1355bb1..6150d11a607e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -62,6 +62,14 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
+ * This is a hint to the DMA-mapping subsystem that the device is expected
+ * to overwrite the entire mapped size, thus the caller does not require any
+ * of the previous buffer contents to be preserved. This allows
+ * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers.
+ */
+#define DMA_ATTR_OVERWRITE (1UL << 10)
+
+/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
* be given to a device to use as a DMA source or target. It is specific to a
* given device and there may be a translation between the CPU physical address
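A hedged sketch of a receive-path mapping that can use the new attribute (dev, buf and len are hypothetical):

	/* the device overwrites the whole buffer, so a bounce buffer
	 * need not preserve its previous contents */
	dma_addr_t addr = dma_map_single_attrs(dev, buf, len,
					       DMA_FROM_DEVICE,
					       DMA_ATTR_OVERWRITE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;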
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index a715df97b31a..afdfdfac729f 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -153,6 +153,13 @@ struct dma_resv {
* struct dma_resv_iter - current position into the dma_resv fences
*
* Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
*/
struct dma_resv_iter {
/** @obj: The dma_resv object we iterate over */
@@ -243,7 +250,11 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
* &dma_resv.lock and using RCU instead. The cursor needs to be initialized
* with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
* the iterator a reference to the dma_fence is held and the RCU lock dropped.
- * When the dma_resv is modified the iteration starts over again.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
+ * locked iterator dma_resv_for_each_fence() whenever possible.
*/
#define dma_resv_for_each_fence_unlocked(cursor, fence) \
for (fence = dma_resv_iter_first_unlocked(cursor); \
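A restart-aware accumulation loop of the kind the new documentation asks for; a hedged sketch (obj is a hypothetical struct dma_resv pointer, all fences iterated):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;	/* fences changed, redo the tally */
		count++;
	}
	dma_resv_iter_end(&cursor);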
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3d7306c9a706..9a77ab615c36 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -204,6 +204,7 @@ struct fb_pixmap {
struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
+ bool sort_pagelist; /* sort pagelist by offset */
struct mutex lock; /* mutex that protects the page list */
struct list_head pagelist; /* list of touched pages */
/* callback */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f565a8938836..fe2e0179ed51 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1262,6 +1262,7 @@ struct hv_device {
struct vmbus_channel *channel;
struct kset *channels_kset;
struct device_dma_parameters dma_parms;
+ u64 dma_mask;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
index f4186f91caa6..e69a002d5aa4 100644
--- a/include/linux/iosys-map.h
+++ b/include/linux/iosys-map.h
@@ -121,6 +121,45 @@ struct iosys_map {
}
/**
+ * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map
+ * @map_: The dma-buf mapping structure to copy from
+ * @offset_: Offset to add to the other mapping
+ *
+ * Initializes a new iosys_map struct based on another passed as argument. It
+ * does a shallow copy of the struct so it's possible to update the backing storage
+ * without changing where the original map points to. It is the equivalent of
+ * doing:
+ *
+ * .. code-block:: c
+ *
+ *	struct iosys_map map = other_map;
+ *	iosys_map_incr(&map, offset);
+ *
+ * Example usage:
+ *
+ * .. code-block:: c
+ *
+ * void foo(struct device *dev, struct iosys_map *base_map)
+ * {
+ * ...
+ * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET);
+ * ...
+ * }
+ *
+ * The advantage of using the initializer over just increasing the offset with
+ * iosys_map_incr() like above is that the new map will always point to the
+ * right place of the buffer during its scope. It reduces the risk of updating
+ * the wrong part of the buffer and having no compiler warning about that. If
+ * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn
+ * about the use of an uninitialized variable.
+ */
+#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
+ struct iosys_map copy = *map_; \
+ iosys_map_incr(&copy, offset_); \
+ copy; \
+})
+
+/**
* iosys_map_set_vaddr - Sets a iosys mapping structure to an address in system memory
* @map: The iosys_map structure
* @vaddr: A system-memory address
@@ -220,22 +259,43 @@ static inline void iosys_map_clear(struct iosys_map *map)
}
/**
- * iosys_map_memcpy_to - Memcpy into iosys mapping
+ * iosys_map_memcpy_to - Memcpy into offset of iosys_map
* @dst: The iosys_map structure
+ * @dst_offset: The offset from which to copy
* @src: The source buffer
* @len: The number of byte in src
*
- * Copies data into a iosys mapping. The source buffer is in system
- * memory. Depending on the buffer's location, the helper picks the correct
- * method of accessing the memory.
+ * Copies data into an iosys_map with an offset. The source buffer is in
+ * system memory. Depending on the buffer's location, the helper picks the
+ * correct method of accessing the memory.
*/
-static inline void iosys_map_memcpy_to(struct iosys_map *dst, const void *src,
- size_t len)
+static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset,
+ const void *src, size_t len)
{
if (dst->is_iomem)
- memcpy_toio(dst->vaddr_iomem, src, len);
+ memcpy_toio(dst->vaddr_iomem + dst_offset, src, len);
+ else
+ memcpy(dst->vaddr + dst_offset, src, len);
+}
+
+/**
+ * iosys_map_memcpy_from - Memcpy from iosys_map into system memory
+ * @dst: Destination in system memory
+ * @src: The iosys_map structure
+ * @src_offset: The offset from which to copy
+ * @len: The number of bytes in src
+ *
+ * Copies data from an iosys_map with an offset. The destination buffer is in
+ * system memory. Depending on the mapping location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src,
+ size_t src_offset, size_t len)
+{
+ if (src->is_iomem)
+ memcpy_fromio(dst, src->vaddr_iomem + src_offset, len);
else
- memcpy(dst->vaddr, src, len);
+ memcpy(dst, src->vaddr + src_offset, len);
}
/**
@@ -254,4 +314,146 @@ static inline void iosys_map_incr(struct iosys_map *map, size_t incr)
map->vaddr += incr;
}
+/**
+ * iosys_map_memset - Memset iosys_map
+ * @dst: The iosys_map structure
+ * @offset: Offset from dst where to start setting value
+ * @value: The value to set
+ * @len: The number of bytes to set in dst
+ *
+ * Set value in iosys_map. Depending on the buffer's location, the helper
+ * picks the correct method of accessing the memory.
+ */
+static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
+ int value, size_t len)
+{
+ if (dst->is_iomem)
+ memset_io(dst->vaddr_iomem + offset, value, len);
+ else
+ memset(dst->vaddr + offset, value, len);
+}
+
+/**
+ * iosys_map_rd - Read a C-type value from the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from which to read
+ * @type__: Type of the value being read
+ *
+ * Read a C-type value from the iosys_map, handling possible un-aligned accesses to
+ * the mapping.
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val; \
+ iosys_map_memcpy_from(&val, map__, offset__, sizeof(val)); \
+ val; \
+})
+
+/**
+ * iosys_map_wr - Write a C-type value to the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from the mapping to write to
+ * @type__: Type of the value being written
+ * @val__: Value to write
+ *
+ * Write a C-type value to the iosys_map, handling possible un-aligned accesses
+ * to the mapping.
+ */
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val = (val__); \
+ iosys_map_memcpy_to(map__, offset__, &val, sizeof(val)); \
+})
+
+/**
+ * iosys_map_rd_field - Read a member from a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to read
+ *
+ * Read a value from iosys_map considering its layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and the
+ * value is read, handling possible un-aligned memory accesses. For example: suppose
+ * there is a @struct foo defined as below and the value ``foo.field2.inner2``
+ * needs to be read from the iosys_map:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * int field1;
+ * struct {
+ * int inner1;
+ * int inner2;
+ * } field2;
+ * int field3;
+ * } __packed;
+ *
+ * This is the expected memory layout of a buffer using iosys_map_rd_field():
+ *
+ * +------------------------------+--------------------------+
+ * | Address | Content |
+ * +==============================+==========================+
+ * | buffer + 0000 | start of mmapped buffer |
+ * | | pointed by iosys_map |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + ``struct_offset__`` | start of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + wwww | ``foo.field2.inner2`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + yyyy | end of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + zzzz | end of mmapped buffer |
+ * +------------------------------+--------------------------+
+ *
+ * Values automatically calculated by this macro or not needed are denoted by
+ * wwww, yyyy and zzzz. This is the code to read that value:
+ *
+ * .. code-block:: c
+ *
+ * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2);
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
+ struct_type__ *s; \
+ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s->field__)); \
+})
+
+/**
+ * iosys_map_wr_field - Write to a member of a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to write to
+ * @val__: Value to write
+ *
+ * Write a value to the iosys_map considering its layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and the
+ * @val__ is written, handling possible un-aligned memory accesses. Refer to
+ * iosys_map_rd_field() for expected usage and memory layout.
+ */
+#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
+ struct_type__ *s; \
+ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s->field__), val__); \
+})
+
#endif /* __IOSYS_MAP_H__ */
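Putting the new accessors together, a hedged sketch that reuses the struct foo layout from the kernel-doc example above:

	int x;

	/* sketch: write a packed field through the map, then read it back */
	iosys_map_wr_field(&map, 0, struct foo, field2.inner2, 42);
	x = iosys_map_rd_field(&map, 0, struct foo, field2.inner2);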
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e490b84732d1..8b5a314db167 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2158,7 +2158,7 @@ struct net_device {
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd9a913..75470159a194 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -12,6 +12,7 @@
#define NVME_TCP_DISC_PORT 8009
#define NVME_TCP_ADMIN_CCSZ SZ_8K
#define NVME_TCP_DIGEST_LENGTH 4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 98efb7b5660d..c9a3ac9efeaa 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -70,7 +70,8 @@ struct nvmem_keepout {
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
* @priv: User context passed to read/write callbacks.
- * @wp-gpio: Write protect pin
+ * @wp-gpio: Write protect pin
+ * @ignore_wp: Write Protect pin is managed by the provider.
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
@@ -92,6 +93,7 @@ struct nvmem_config {
enum nvmem_type type;
bool read_only;
bool root_only;
+ bool ignore_wp;
struct device_node *of_node;
bool no_of_node;
nvmem_reg_read_t reg_read;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index b9198a1b3a84..e84e54d1b490 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -54,8 +54,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p,
- struct kernel_clone_args *kargs);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 37bde99b74af..5b6193fd8bd9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -660,8 +660,7 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
- __alloc_size(1);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 7a22921c9db7..4d72258d42fd 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -106,4 +106,24 @@ void kfree_strarray(char **array, size_t n);
char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n);
+static inline const char *str_yes_no(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+static inline const char *str_on_off(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *str_enable_disable(bool v)
+{
+ return v ? "enable" : "disable";
+}
+
+static inline const char *str_enabled_disabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
#endif
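These helpers replace the open-coded ternaries scattered across drivers; a sketch of a hypothetical status print (has_dc and psr_active are placeholder flags):

	pr_info("DC: %s, PSR: %s\n",
		str_yes_no(has_dc), str_enabled_disabled(psr_active));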
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 70c069aef02c..dcea51fb60e2 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -699,6 +699,8 @@ event_triggers_post_call(struct trace_event_file *file,
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
+bool __trace_trigger_soft_disabled(struct trace_event_file *file);
+
/**
* trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
@@ -708,20 +710,20 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
* triggers that require testing the fields, it will return true,
* otherwise false.
*/
-static inline bool
+static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
- if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- event_triggers_call(file, NULL, NULL, NULL);
- if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
- return true;
- if (eflags & EVENT_FILE_FL_PID_FILTER)
- return trace_event_ignore_this_pid(file);
- }
- return false;
+ if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
+ EVENT_FILE_FL_SOFT_DISABLED |
+ EVENT_FILE_FL_PID_FILTER))))
+ return false;
+
+ if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
+ return false;
+
+ return __trace_trigger_soft_disabled(file);
}
#ifdef CONFIG_BPF_EVENTS
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index e7ce719838b5..59940e230b78 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -109,8 +109,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index 38785d48baff..184105d68294 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -262,7 +262,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
struct bond_3ad_stats stats;
- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
+ atomic_t agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
u16 aggregator_identifier;
};
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 5218041e5c8f..79c67f14c448 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -22,7 +22,7 @@
#include <asm/checksum.h>
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-static inline
+static __always_inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
int len)
{
@@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst,
#endif
#ifndef HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user
+static __always_inline __wsum csum_and_copy_to_user
(const void *src, void __user *dst, int len)
{
__wsum sum = csum_partial(src, len, ~0U);
@@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user
#endif
#ifndef _HAVE_ARCH_CSUM_AND_COPY
-static inline __wsum
+static __always_inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
memcpy(dst, src, len);
@@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len)
#endif
#ifndef HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
u32 res = (__force u32)csum;
res += (__force u32)addend;
@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
}
#endif
-static inline __wsum csum_sub(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
return csum_add(csum, ~addend);
}
-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
u16 res = (__force u16)csum;
@@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
return (__force __sum16)(res + (res < (__force u16)addend));
}
-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
return csum16_add(csum, ~addend);
}
-static inline __wsum csum_shift(__wsum sum, int offset)
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
/* rotate sum to align it with a 16b boundary */
if (offset & 1)
@@ -88,42 +88,43 @@ static inline __wsum csum_shift(__wsum sum, int offset)
return sum;
}
-static inline __wsum
+static __always_inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
return csum_add(csum, csum_shift(csum2, offset));
}
-static inline __wsum
+static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
return csum_block_add(csum, csum2, offset);
}
-static inline __wsum
+static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
return csum_block_add(csum, ~csum2, offset);
}
-static inline __wsum csum_unfold(__sum16 n)
+static __always_inline __wsum csum_unfold(__sum16 n)
{
return (__force __wsum)n;
}
-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
return csum_partial(buff, len, sum);
}
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
@@ -136,11 +137,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
* m : old value of a 16bit field
* m' : new value of a 16bit field
*/
-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
+static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
+{
+ *csum = csum_add(csum_sub(*csum, old), new);
+}
+
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
@@ -150,16 +156,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
__wsum diff, bool pseudohdr);
-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
- __be16 from, __be16 to,
- bool pseudohdr)
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+ __be16 from, __be16 to, bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, (__force __be32)from,
(__force __be32)to, pseudohdr);
}
-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
- int start, int offset)
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+ int start, int offset)
{
__sum16 *psum = (__sum16 *)(ptr + offset);
__wsum delta;
@@ -175,12 +181,12 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
return delta;
}
-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}
-static inline __wsum wsum_negate(__wsum val)
+static __always_inline __wsum wsum_negate(__wsum val)
{
return (__force __wsum)-((__force u32)val);
}
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 57b3e4e7413b..85a5ba3772f5 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -1187,6 +1187,7 @@ void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 40ae8f1b18e5..2048bc8748cb 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -190,14 +190,16 @@ struct fib6_info {
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
+
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
+
u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
fib6_destroying:1,
- offload:1,
- trap:1,
- offload_failed:1,
- unused:1;
+ unused:4;
struct rcu_head rcu;
struct nexthop *nh;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 3afcb128e064..92eec13d1693 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -393,17 +393,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
kfree_rcu(opt, rcu);
}
+#if IS_ENABLED(CONFIG_IPV6)
struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
__be32 label)
{
- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
return NULL;
}
+#endif
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index eaf55da9a205..c4c0861deac1 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -905,9 +905,9 @@ struct nft_expr_ops {
int (*offload)(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr);
+ bool (*offload_action)(const struct nft_expr *expr);
void (*offload_stats)(struct nft_expr *expr,
const struct flow_stats *stats);
- u32 offload_flags;
const struct nft_expr_type *type;
void *data;
};
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index f9d95ff82df8..797147843958 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -67,8 +67,6 @@ struct nft_flow_rule {
struct flow_rule *rule;
};
-#define NFT_OFFLOAD_F_ACTION (1 << 0)
-
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
enum flow_dissector_key_id addr_type);
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index a4b550380316..6bd7e5a85ce7 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -77,9 +77,10 @@ struct netns_ipv6 {
spinlock_t fib6_gc_lock;
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
+ unsigned char flowlabel_has_excl;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
#ifdef CONFIG_IPV6_SUBTREES
unsigned int fib6_routes_require_src;
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index ff9b508d9c5f..50aecd28b355 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -507,7 +507,7 @@ struct sock {
#endif
u16 sk_tsflags;
u8 sk_shutdown;
- u32 sk_tskey;
+ atomic_t sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
@@ -2667,7 +2667,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
- *tskey = sk->sk_tskey++;
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
}
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index e1e351682872..0a0d56a6158e 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -663,41 +663,73 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+/**
+ * struct drm_mode_fb_cmd2 - Frame-buffer metadata.
+ *
+ * This struct holds frame-buffer metadata. There are two ways to use it:
+ *
+ * - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2
+ * ioctl to register a new frame-buffer. The new frame-buffer object ID will
+ * be set by the kernel in @fb_id.
+ * - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to
+ * fetch metadata about an existing frame-buffer.
+ *
+ * In case of planar formats, this struct allows up to 4 buffer objects with
+ * offsets and pitches per plane. The pitch and offset order is dictated by the
+ * format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed by an
+ * interleaved U/V plane containing 8 bit 2x2 subsampled colour difference
+ * samples.
+ *
+ * So it would consist of a Y plane at ``offsets[0]`` and a UV plane at
+ * ``offsets[1]``.
+ *
+ * To accommodate tiled, compressed, etc formats, a modifier can be specified.
+ * For more information see the "Format Modifiers" section. Note that even
+ * though it looks like we have a modifier per-plane, we in fact do not. The
+ * modifier for each plane must be identical. Thus all combinations of
+ * different data layouts for multi-plane formats must be enumerated as
+ * separate modifiers.
+ *
+ * All of the entries in @handles, @pitches, @offsets and @modifier must be
+ * zero when unused. Warning, for @offsets and @modifier zero can't be used to
+ * figure out whether the entry is used or not since it's a valid value (a zero
+ * offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR).
+ */
struct drm_mode_fb_cmd2 {
+ /** @fb_id: Object ID of the frame-buffer. */
__u32 fb_id;
+ /** @width: Width of the frame-buffer. */
__u32 width;
+ /** @height: Height of the frame-buffer. */
__u32 height;
- __u32 pixel_format; /* fourcc code from drm_fourcc.h */
- __u32 flags; /* see above flags */
+ /**
+ * @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in
+ * ``drm_fourcc.h``.
+ */
+ __u32 pixel_format;
+ /**
+ * @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and
+ * &DRM_MODE_FB_MODIFIERS).
+ */
+ __u32 flags;
- /*
- * In case of planar formats, this ioctl allows up to 4
- * buffer objects with offsets and pitches per plane.
- * The pitch and offset order is dictated by the fourcc,
- * e.g. NV12 (https://fourcc.org/yuv.php#NV12) is described as:
- *
- * YUV 4:2:0 image with a plane of 8 bit Y samples
- * followed by an interleaved U/V plane containing
- * 8 bit 2x2 subsampled colour difference samples.
- *
- * So it would consist of Y as offsets[0] and UV as
- * offsets[1]. Note that offsets[0] will generally
- * be 0 (but this is not required).
- *
- * To accommodate tiled, compressed, etc formats, a
- * modifier can be specified. The default value of zero
- * indicates "native" format as specified by the fourcc.
- * Vendor specific modifier token. Note that even though
- * it looks like we have a modifier per-plane, we in fact
- * do not. The modifier for each plane must be identical.
- * Thus all combinations of different data layouts for
- * multi plane formats must be enumerated as separate
- * modifiers.
+ /**
+ * @handles: GEM buffer handle, one per plane. Set to 0 if the plane is
+ * unused. The same handle can be used for multiple planes.
*/
__u32 handles[4];
- __u32 pitches[4]; /* pitch for each plane */
- __u32 offsets[4]; /* offset of each plane */
- __u64 modifier[4]; /* ie, tiling, compress */
+ /** @pitches: Pitch (aka. stride) in bytes, one per plane. */
+ __u32 pitches[4];
+ /** @offsets: Offset into the buffer in bytes, one per plane. */
+ __u32 offsets[4];
+ /**
+ * @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*``
+ * constants in ``drm_fourcc.h``. All planes must use the same
+ * modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags.
+ */
+ __u64 modifier[4];
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
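
As a user-space illustration of the kernel-doc added above, a minimal sketch that registers a linear NV12 frame-buffer via DRM_IOCTL_MODE_ADDFB2. The GEM handle, dimensions and include paths are assumptions, and error handling is elided:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_fourcc.h>

static int add_nv12_fb(int drm_fd, uint32_t handle, uint32_t w, uint32_t h,
		       uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 cmd;

	memset(&cmd, 0, sizeof(cmd));		/* unused entries must be 0 */
	cmd.width = w;
	cmd.height = h;
	cmd.pixel_format = DRM_FORMAT_NV12;
	cmd.handles[0] = handle;		/* Y plane */
	cmd.handles[1] = handle;		/* UV plane, same BO */
	cmd.pitches[0] = w;			/* 8-bit Y samples */
	cmd.pitches[1] = w;			/* interleaved U/V pairs */
	cmd.offsets[0] = 0;
	cmd.offsets[1] = (uint32_t)w * h;	/* UV plane follows Y */

	if (ioctl(drm_fd, DRM_IOCTL_MODE_ADDFB2, &cmd))
		return -1;
	*fb_id = cmd.fb_id;			/* filled in by the kernel */
	return 0;
}

Since no modifier is used here, DRM_MODE_FB_MODIFIERS stays clear and the modifier[] entries remain zero, which matches the "unused entries must be zero" rule documented above.
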
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 5191b57e1562..507ee1f2aa96 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_VM_GPA_BITS 207
#define KVM_CAP_XSAVE2 208
#define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index e16dafeb2450..3e23b3fa79ff 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5688,7 +5688,8 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
}
if (check_ptr_off_reg(env, reg, regno))
return -EINVAL;
- } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || reg2btf_ids[reg->type])) {
+ } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
+ (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
const struct btf_type *reg_ref_t;
const struct btf *reg_btf;
const char *reg_ref_tname;
@@ -5706,7 +5707,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
reg_ref_id = reg->btf_id;
} else {
reg_btf = btf_vmlinux;
- reg_ref_id = *reg2btf_ids[reg->type];
+ reg_ref_id = *reg2btf_ids[base_type(reg->type)];
}
reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 01cfdf40c838..55c084251fab 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
@@ -1075,6 +1076,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
void *key;
u32 idx;
+ BTF_TYPE_EMIT(struct bpf_timer);
callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
if (!callback_fn)
goto out;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index fa4505f9b611..ca70fe6fba38 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1355,6 +1355,7 @@ int generic_map_delete_batch(struct bpf_map *map,
maybe_wait_bpf_programs(map);
if (err)
break;
+ cond_resched();
}
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
err = -EFAULT;
@@ -1412,6 +1413,7 @@ int generic_map_update_batch(struct bpf_map *map,
if (err)
break;
+ cond_resched();
}
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
@@ -1509,6 +1511,7 @@ int generic_map_lookup_batch(struct bpf_map *map,
swap(prev_key, key);
retry = MAP_LOOKUP_RETRIES;
cp++;
+ cond_resched();
}
if (err == -EFAULT)
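
The cond_resched() calls added to the three batch loops above give the scheduler a chance to run between elements when user space submits very large batches; without them, a huge batch.count could monopolize the CPU on non-preemptible kernels. The pattern, generically (names illustrative):

#include <linux/sched.h>

/* Bound scheduling latency in a long, user-sized kernel loop. */
static void process_batch(unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* ... per-element work (may copy from/to user) ... */
		cond_resched();
	}
}
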
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 0e877dbcfeea..afc6c0e9c966 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -546,6 +546,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cgroup *cgrp;
+ struct cgroup_file_ctx *ctx;
BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
@@ -553,8 +554,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
* Release agent gets called with all capabilities,
* require capabilities to set release agent.
*/
- if ((of->file->f_cred->user_ns != &init_user_ns) ||
- !capable(CAP_SYS_ADMIN))
+ ctx = of->priv;
+ if ((ctx->ns->user_ns != &init_user_ns) ||
+ !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
return -EPERM;
cgrp = cgroup_kn_lock_live(of->kn, false);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 9d05c3ca2d5e..a557eea7166f 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6166,6 +6166,20 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
if (ret)
goto err;
+ /*
+ * Spawning a task directly into a cgroup works by passing a file
+ * descriptor to the target cgroup directory. This can even be an O_PATH
+ * file descriptor. But it can never be a cgroup.procs file descriptor.
+ * This was done on purpose so spawning into a cgroup could be
+ * conceptualized as an atomic
+ *
+ * fd = openat(dfd_cgroup, "cgroup.procs", ...);
+ * write(fd, <child-pid>, ...);
+ *
+ * sequence, i.e. it's a shorthand for the caller opening and writing
+ * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us
+ * to always use the caller's credentials.
+ */
ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
!(kargs->flags & CLONE_THREAD),
current->nsproxy->cgroup_ns);
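
The comment above describes CLONE_INTO_CGROUP as shorthand for opening and writing cgroup.procs with the caller's credentials. A minimal user-space sketch under that assumption (the cgroup path and error handling are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>	/* struct clone_args, CLONE_INTO_CGROUP */

/* Returns the child's pid in the parent, 0 in the child, -1 on error. */
static pid_t spawn_into_cgroup(const char *cgroup_dir)
{
	struct clone_args args;
	int cgfd = open(cgroup_dir, O_PATH | O_DIRECTORY | O_CLOEXEC);

	if (cgfd < 0)
		return -1;
	memset(&args, 0, sizeof(args));
	args.flags = CLONE_INTO_CGROUP;
	args.exit_signal = SIGCHLD;
	args.cgroup = cgfd;	/* fd to the cgroup dir, O_PATH is fine */
	return syscall(SYS_clone3, &args, sizeof(args));
}
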
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 4c7254e8f49a..5de18448016c 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2289,6 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
+ cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2342,6 +2343,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
+ cpus_read_unlock();
}
/* The various types of files and directories in a cpuset file system */
@@ -3522,8 +3524,8 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
return cs;
}
-/**
- * cpuset_node_allowed - Can we allocate on a memory node?
+/*
+ * __cpuset_node_allowed - Can we allocate on a memory node?
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
@@ -3694,8 +3696,8 @@ void cpuset_print_current_mems_allowed(void)
int cpuset_memory_pressure_enabled __read_mostly;
-/**
- * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
+/*
+ * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
*
* Keep a running average of the rate of synchronous (direct)
* page reclaim efforts initiated by tasks in each cpuset.
@@ -3710,7 +3712,7 @@ int cpuset_memory_pressure_enabled __read_mostly;
* "memory_pressure". Value displayed is an integer
* representing the recent rate of entry into the synchronous
* (direct) page reclaim by any task attached to the cpuset.
- **/
+ */
void __cpuset_memory_pressure_bump(void)
{
diff --git a/kernel/cred.c b/kernel/cred.c
index 473d17c431f3..933155c96922 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -665,21 +665,16 @@ EXPORT_SYMBOL(cred_fscmp);
int set_cred_ucounts(struct cred *new)
{
- struct task_struct *task = current;
- const struct cred *old = task->real_cred;
struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
- if (new->user == old->user && new->user_ns == old->user_ns)
- return 0;
-
/*
* This optimization is needed because alloc_ucounts() uses locks
* for table lookups.
*/
- if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
+ if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid))
return 0;
- if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
+ if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
return -EAGAIN;
new->ucounts = new_ucounts;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index f1e7ea160b43..bfc56cb21705 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -628,7 +628,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
tlb_addr = slot_addr(mem->start, index) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+ (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
+ dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
return tlb_addr;
}
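
The swiotlb hunk above means the original buffer is still bounced into the tlb on map unless the caller passes the new DMA_ATTR_OVERWRITE attribute, which promises the device will overwrite the entire mapping before any of it is read back. A hedged usage sketch (driver-side names assumed):

#include <linux/dma-mapping.h>

/* Map a receive buffer that the device is known to fully overwrite,
 * letting swiotlb skip the initial CPU-to-bounce-buffer copy. */
static dma_addr_t map_rx_buf(struct device *dev, void *buf, size_t len)
{
	return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
				    DMA_ATTR_OVERWRITE);
}
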
diff --git a/kernel/fork.c b/kernel/fork.c
index d75a528f7b21..a024bf6254df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2021,18 +2021,18 @@ static __latent_entropy struct task_struct *copy_process(
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
+ retval = copy_creds(p, clone_flags);
+ if (retval < 0)
+ goto bad_fork_free;
+
retval = -EAGAIN;
if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
if (p->real_cred->user != INIT_USER &&
!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
- goto bad_fork_free;
+ goto bad_fork_cleanup_count;
}
current->flags &= ~PF_NPROC_EXCEEDED;
- retval = copy_creds(p, clone_flags);
- if (retval < 0)
- goto bad_fork_free;
-
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
@@ -2267,6 +2267,17 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_put_pidfd;
/*
+ * Now that the cgroups are pinned, re-clone the parent cgroup and put
+ * the new task on the correct runqueue. All this *before* the task
+ * becomes visible.
+ *
+ * This isn't part of ->can_fork() because while the re-cloning is
+ * cgroup specific, it unconditionally needs to place the task on a
+ * runqueue.
+ */
+ sched_cgroup_fork(p, args);
+
+ /*
* From this point on we must avoid any synchronous user-space
* communication until we take the tasklist-lock. In particular, we do
* not want user-space to be able to predict the process start-time by
@@ -2323,10 +2334,6 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cancel_cgroup;
}
- /* past the last point of failure */
- if (pidfile)
- fd_install(pidfd, pidfile);
-
init_task_pid_links(p);
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -2375,8 +2382,11 @@ static __latent_entropy struct task_struct *copy_process(
syscall_tracepoint_update(p);
write_unlock_irq(&tasklist_lock);
+ if (pidfile)
+ fd_install(pidfd, pidfile);
+
proc_fork_connector(p);
- sched_post_fork(p, args);
+ sched_post_fork(p);
cgroup_post_fork(p, args);
perf_event_fork(p);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4a882f83aeb9..f8a0212189ca 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3462,7 +3462,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
u16 chain_hlock = chain_hlocks[chain->base + i];
unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
- return lock_classes + class_idx - 1;
+ return lock_classes + class_idx;
}
/*
@@ -3530,7 +3530,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
hlock_id = chain_hlocks[chain->base + i];
chain_key = print_chain_key_iteration(hlock_id, chain_key);
- print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
+ print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
printk("\n");
}
}
diff --git a/kernel/module_decompress.c b/kernel/module_decompress.c
index b01c69c2ff99..ffef98a20320 100644
--- a/kernel/module_decompress.c
+++ b/kernel/module_decompress.c
@@ -250,6 +250,7 @@ void module_decompress_cleanup(struct load_info *info)
info->max_pages = info->used_pages = 0;
}
+#ifdef CONFIG_SYSFS
static ssize_t compression_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -269,3 +270,4 @@ static int __init module_decompress_sysfs_init(void)
return 0;
}
late_initcall(module_decompress_sysfs_init);
+#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fcf0c180617c..9745613d531c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1214,9 +1214,8 @@ int tg_nop(struct task_group *tg, void *data)
}
#endif
-static void set_load_weight(struct task_struct *p)
+static void set_load_weight(struct task_struct *p, bool update_load)
{
- bool update_load = !(READ_ONCE(p->__state) & TASK_NEW);
int prio = p->static_prio - MAX_RT_PRIO;
struct load_weight *load = &p->se.load;
@@ -4407,7 +4406,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->static_prio = NICE_TO_PRIO(0);
p->prio = p->normal_prio = p->static_prio;
- set_load_weight(p);
+ set_load_weight(p, false);
/*
* We don't need the reset flag anymore after the fork. It has
@@ -4425,6 +4424,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
init_entity_runnable_average(&p->se);
+
#ifdef CONFIG_SCHED_INFO
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -4440,18 +4440,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
return 0;
}
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
- struct task_group *tg;
-#endif
+ /*
+ * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+ * required yet, but lockdep gets upset if rules are violated.
+ */
raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_CGROUP_SCHED
- tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
- struct task_group, css);
- p->sched_task_group = autogroup_task_group(p, tg);
+ if (1) {
+ struct task_group *tg;
+ tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+ struct task_group, css);
+ tg = autogroup_task_group(p, tg);
+ p->sched_task_group = tg;
+ }
#endif
rseq_migrate(p);
/*
@@ -4462,7 +4467,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+void sched_post_fork(struct task_struct *p)
+{
uclamp_post_fork(p);
}
@@ -6922,7 +6930,7 @@ void set_user_nice(struct task_struct *p, long nice)
put_prev_task(rq, p);
p->static_prio = NICE_TO_PRIO(nice);
- set_load_weight(p);
+ set_load_weight(p, true);
old_prio = p->prio;
p->prio = effective_prio(p);
@@ -7213,7 +7221,7 @@ static void __setscheduler_params(struct task_struct *p,
*/
p->rt_priority = attr->sched_priority;
p->normal_prio = normal_prio(p);
- set_load_weight(p);
+ set_load_weight(p, true);
}
/*
@@ -9446,7 +9454,7 @@ void __init sched_init(void)
#endif
}
- set_load_weight(&init_task);
+ set_load_weight(&init_task, false);
/*
* The boot idle thread does lazy MMU switching as well:
diff --git a/kernel/sys.c b/kernel/sys.c
index ecc4cf019242..97dc9e5d6bf9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -472,6 +472,16 @@ static int set_user(struct cred *new)
if (!new_user)
return -EAGAIN;
+ free_uid(new->user);
+ new->user = new_user;
+ return 0;
+}
+
+static void flag_nproc_exceeded(struct cred *new)
+{
+ if (new->ucounts == current_ucounts())
+ return;
+
/*
* We don't fail in case of NPROC limit excess here because too many
* poorly written programs don't check set*uid() return code, assuming
@@ -480,15 +490,10 @@ static int set_user(struct cred *new)
* failure to the execve() stage.
*/
if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
- new_user != INIT_USER &&
- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
+ new->user != INIT_USER)
current->flags |= PF_NPROC_EXCEEDED;
else
current->flags &= ~PF_NPROC_EXCEEDED;
-
- free_uid(new->user);
- new->user = new_user;
- return 0;
}
/*
@@ -563,6 +568,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -625,6 +631,7 @@ long __sys_setuid(uid_t uid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -704,6 +711,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f9feb197b2da..a4b462b6f944 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -7191,7 +7191,6 @@ static int __init ftrace_nodyn_init(void)
core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
-static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
# define ftrace_startup_sysctl() do { } while (0)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7c2578efde26..3050892d1812 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1474,10 +1474,12 @@ static int __init set_buf_size(char *str)
if (!str)
return 0;
buf_size = memparse(str, &str);
- /* nr_entries can not be zero */
- if (buf_size == 0)
- return 0;
- trace_buf_size = buf_size;
+ /*
+ * nr_entries can not be zero and the startup
+ * tests require some buffer space. Therefore
+ * ensure we have at least 4096 bytes of buffer.
+ */
+ trace_buf_size = max(4096UL, buf_size);
return 1;
}
__setup("trace_buf_size=", set_buf_size);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d038ddbf1bea..c5b09c31e077 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -136,7 +136,6 @@ struct kprobe_trace_entry_head {
struct eprobe_trace_entry_head {
struct trace_entry ent;
- unsigned int type;
};
struct kretprobe_trace_entry_head {
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index 191db32dec46..541aa13581b9 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -242,7 +242,6 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
static int eprobe_event_define_fields(struct trace_event_call *event_call)
{
- int ret;
struct eprobe_trace_entry_head field;
struct trace_probe *tp;
@@ -250,8 +249,6 @@ static int eprobe_event_define_fields(struct trace_event_call *event_call)
if (WARN_ON_ONCE(!tp))
return -ENOENT;
- DEFINE_FIELD(unsigned int, type, FIELD_STRING_TYPE, 0);
-
return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
@@ -270,7 +267,9 @@ print_eprobe_event(struct trace_iterator *iter, int flags,
struct trace_event_call *pevent;
struct trace_event *probed_event;
struct trace_seq *s = &iter->seq;
+ struct trace_eprobe *ep;
struct trace_probe *tp;
+ unsigned int type;
field = (struct eprobe_trace_entry_head *)iter->ent;
tp = trace_probe_primary_from_call(
@@ -278,15 +277,18 @@ print_eprobe_event(struct trace_iterator *iter, int flags,
if (WARN_ON_ONCE(!tp))
goto out;
+ ep = container_of(tp, struct trace_eprobe, tp);
+ type = ep->event->event.type;
+
trace_seq_printf(s, "%s: (", trace_probe_name(tp));
- probed_event = ftrace_find_event(field->type);
+ probed_event = ftrace_find_event(type);
if (probed_event) {
pevent = container_of(probed_event, struct trace_event_call, event);
trace_seq_printf(s, "%s.%s", pevent->class->system,
trace_event_name(pevent));
} else {
- trace_seq_printf(s, "%u", field->type);
+ trace_seq_printf(s, "%u", type);
}
trace_seq_putc(s, ')');
@@ -498,10 +500,6 @@ __eprobe_trace_func(struct eprobe_data *edata, void *rec)
return;
entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
- if (edata->ep->event)
- entry->type = edata->ep->event->event.type;
- else
- entry->type = 0;
store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);
trace_event_buffer_commit(&fbuffer);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index d00fee705f9c..7eb9d04f1c2e 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -84,6 +84,20 @@ event_triggers_call(struct trace_event_file *file,
}
EXPORT_SYMBOL_GPL(event_triggers_call);
+bool __trace_trigger_soft_disabled(struct trace_event_file *file)
+{
+ unsigned long eflags = file->flags;
+
+ if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ event_triggers_call(file, NULL, NULL, NULL);
+ if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ return true;
+ if (eflags & EVENT_FILE_FL_PID_FILTER)
+ return trace_event_ignore_this_pid(file);
+ return false;
+}
+EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
+
/**
* event_triggers_post_call - Call 'post_triggers' for a trace event
* @file: The trace_event_file associated with the event
@@ -1295,6 +1309,16 @@ traceon_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (tracer_tracing_is_on(file->tr))
+ return;
+
+ tracer_tracing_on(file->tr);
+ return;
+ }
+
if (tracing_is_on())
return;
@@ -1306,8 +1330,15 @@ traceon_count_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- if (tracing_is_on())
- return;
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (tracer_tracing_is_on(file->tr))
+ return;
+ } else {
+ if (tracing_is_on())
+ return;
+ }
if (!data->count)
return;
@@ -1315,7 +1346,10 @@ traceon_count_trigger(struct event_trigger_data *data,
if (data->count != -1)
(data->count)--;
- tracing_on();
+ if (file)
+ tracer_tracing_on(file->tr);
+ else
+ tracing_on();
}
static void
@@ -1323,6 +1357,16 @@ traceoff_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (!tracer_tracing_is_on(file->tr))
+ return;
+
+ tracer_tracing_off(file->tr);
+ return;
+ }
+
if (!tracing_is_on())
return;
@@ -1334,8 +1378,15 @@ traceoff_count_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- if (!tracing_is_on())
- return;
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (!tracer_tracing_is_on(file->tr))
+ return;
+ } else {
+ if (!tracing_is_on())
+ return;
+ }
if (!data->count)
return;
@@ -1343,7 +1394,10 @@ traceoff_count_trigger(struct event_trigger_data *data,
if (data->count != -1)
(data->count)--;
- tracing_off();
+ if (file)
+ tracer_tracing_off(file->tr);
+ else
+ tracing_off();
}
static int
@@ -1540,7 +1594,12 @@ stacktrace_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- trace_dump_stack(STACK_SKIP);
+ struct trace_event_file *file = data->private_data;
+
+ if (file)
+ __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
+ else
+ trace_dump_stack(STACK_SKIP);
}
static void
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 870a08da5b48..cfddb30e65ab 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1437,6 +1437,37 @@ static struct cpumask osnoise_cpumask;
static struct cpumask save_cpumask;
/*
+ * osnoise_sleep - sleep until the next period
+ */
+static void osnoise_sleep(void)
+{
+ u64 interval;
+ ktime_t wake_time;
+
+ mutex_lock(&interface_lock);
+ interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
+ mutex_unlock(&interface_lock);
+
+ /*
+ * unlike hwlat_detector, the osnoise tracer can run
+ * without a pause because preemption is on.
+ */
+ if (!interval) {
+ /* Let synchronize_rcu_tasks() make progress */
+ cond_resched_tasks_rcu_qs();
+ return;
+ }
+
+ wake_time = ktime_add_us(ktime_get(), interval);
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ while (schedule_hrtimeout_range(&wake_time, 0, HRTIMER_MODE_ABS)) {
+ if (kthread_should_stop())
+ break;
+ }
+}
+
+/*
* osnoise_main - The osnoise detection kernel thread
*
* Calls run_osnoise() function to measure the osnoise for the configured runtime,
@@ -1444,30 +1475,10 @@ static struct cpumask save_cpumask;
*/
static int osnoise_main(void *data)
{
- u64 interval;
while (!kthread_should_stop()) {
-
run_osnoise();
-
- mutex_lock(&interface_lock);
- interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
- mutex_unlock(&interface_lock);
-
- do_div(interval, USEC_PER_MSEC);
-
- /*
- * differently from hwlat_detector, the osnoise tracer can run
- * without a pause because preemption is on.
- */
- if (interval < 1) {
- /* Let synchronize_rcu_tasks() make progress */
- cond_resched_tasks_rcu_qs();
- continue;
- }
-
- if (msleep_interruptible(interval))
- break;
+ osnoise_sleep();
}
return 0;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 73d90179b51b..80863c6508e5 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -871,15 +871,15 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
switch (ptype) {
case PROBE_PRINT_NORMAL:
fmt = "(%lx)";
- arg = "REC->" FIELD_STRING_IP;
+ arg = ", REC->" FIELD_STRING_IP;
break;
case PROBE_PRINT_RETURN:
fmt = "(%lx <- %lx)";
- arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
+ arg = ", REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
break;
case PROBE_PRINT_EVENT:
- fmt = "(%u)";
- arg = "REC->" FIELD_STRING_TYPE;
+ fmt = "";
+ arg = "";
break;
default:
WARN_ON_ONCE(1);
@@ -903,7 +903,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
parg->type->fmt);
}
- pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", arg);
for (i = 0; i < tp->nr_args; i++) {
parg = tp->args + i;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 99e7a5df025e..92cc149af0fd 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -38,7 +38,6 @@
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"
-#define FIELD_STRING_TYPE "__probe_type"
#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed) \
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index afd937a46496..abcadbe933bb 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -784,9 +784,7 @@ static struct fgraph_ops fgraph_ops __initdata = {
.retfunc = &trace_graph_return,
};
-#if defined(CONFIG_DYNAMIC_FTRACE) && \
- defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
-#define TEST_DIRECT_TRAMP
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
noinline __noclone static void trace_direct_tramp(void) { }
#endif
@@ -849,7 +847,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
goto out;
}
-#ifdef TEST_DIRECT_TRAMP
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
tracing_reset_online_cpus(&tr->array_buffer);
set_graph_array(tr);
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 65b597431c86..06ea04d44685 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -350,7 +350,8 @@ bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsign
if (rlimit > LONG_MAX)
max = LONG_MAX;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- if (get_ucounts_value(iter, type) > max)
+ long val = get_ucounts_value(iter, type);
+ if (val < 0 || val > max)
return true;
max = READ_ONCE(iter->ns->ucount_max[type]);
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b0e0acdf96c1..6dd5330f7a99 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -414,6 +414,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
return 0;
buf->ops = &page_cache_pipe_buf_ops;
+ buf->flags = 0;
get_page(page);
buf->page = page;
buf->offset = offset;
@@ -577,6 +578,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
break;
buf->ops = &default_pipe_buf_ops;
+ buf->flags = 0;
buf->page = page;
buf->offset = 0;
buf->len = min_t(ssize_t, left, PAGE_SIZE);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 26a5c9007653..3b413f8c8a71 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -869,11 +869,14 @@ static void kmem_cache_invalid_free(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void empty_cache_ctor(void *object) { }
+
static void kmem_cache_double_destroy(struct kunit *test)
{
struct kmem_cache *cache;
- cache = kmem_cache_create("test_cache", 200, 0, 0, NULL);
+ /* Provide a constructor to prevent cache merging. */
+ cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
kmem_cache_destroy(cache);
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 61895cc01d09..f294db835f4b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4159,10 +4159,10 @@ static int __init hugepages_setup(char *s)
pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
return 0;
}
+ if (tmp >= nr_online_nodes)
+ goto invalid;
node = tmp;
p += count + 1;
- if (node < 0 || node >= nr_online_nodes)
- goto invalid;
/* Parse hugepages */
if (sscanf(p, "%lu%n", &tmp, &count) != 1)
goto invalid;
@@ -4851,14 +4851,13 @@ again:
}
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pte_t *src_pte)
+ unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
{
struct hstate *h = hstate_vma(vma);
struct mm_struct *mm = vma->vm_mm;
- pte_t *dst_pte, pte;
spinlock_t *src_ptl, *dst_ptl;
+ pte_t pte;
- dst_pte = huge_pte_offset(mm, new_addr, huge_page_size(h));
dst_ptl = huge_pte_lock(h, mm, dst_pte);
src_ptl = huge_pte_lockptr(h, mm, src_pte);
@@ -4917,7 +4916,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
if (!dst_pte)
break;
- move_huge_pte(vma, old_addr, new_addr, src_pte);
+ move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
}
flush_tlb_range(vma, old_end - len, old_end);
mmu_notifier_invalidate_range_end(&range);
diff --git a/mm/memblock.c b/mm/memblock.c
index 1018e50566f3..b12a364f2766 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -366,14 +366,20 @@ void __init memblock_discard(void)
addr = __pa(memblock.reserved.regions);
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.reserved.max);
- memblock_free_late(addr, size);
+ if (memblock_reserved_in_slab)
+ kfree(memblock.reserved.regions);
+ else
+ memblock_free_late(addr, size);
}
if (memblock.memory.regions != memblock_memory_init_regions) {
addr = __pa(memblock.memory.regions);
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.memory.max);
- memblock_free_late(addr, size);
+ if (memblock_memory_in_slab)
+ kfree(memblock.memory.regions);
+ else
+ memblock_free_late(addr, size);
}
memblock_memory = NULL;
diff --git a/mm/mmap.c b/mm/mmap.c
index 1e8fdb0b51ed..d445c1b9d606 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3186,6 +3186,7 @@ void exit_mmap(struct mm_struct *mm)
vma = remove_vma(vma);
cond_resched();
}
+ mm->mmap = NULL;
mmap_write_unlock(mm);
vm_unacct_memory(nr_accounted);
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 0138dfcdb1d8..5ca3fbcb1495 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -94,7 +94,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
/* Also skip shared copy-on-write pages */
if (is_cow_mapping(vma->vm_flags) &&
- page_mapcount(page) != 1)
+ page_count(page) != 1)
continue;
/*
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index de2409889489..db4f2641d1cd 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -82,6 +82,9 @@ static void br_multicast_find_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
+static int br_mc_disabled_update(struct net_device *dev, bool value,
+ struct netlink_ext_ack *extack);
+
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
struct net_bridge_port_group_sg_key *sg_p)
@@ -1156,6 +1159,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
return mp;
if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
+ br_mc_disabled_update(br->dev, false, NULL);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
return ERR_PTR(-E2BIG);
}
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index a271688780a2..307ee1174a6e 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -2006,7 +2006,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
/* set the end-packet for broadcast */
session->pkt.last = session->pkt.total;
- skcb->tskey = session->sk->sk_tskey++;
+ skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1;
session->tskey = skcb->tskey;
return session;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7b288a121a41..d5dc6be2522c 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -283,13 +283,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
rcu_read_lock();
list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
+ struct net_device *dev;
+
/*
* only add a note to our monitor buffer if:
* 1) this is the dev we received on
* 2) it's after the last_rx delta
* 3) our rx_dropped count has gone up
*/
- if ((new_stat->dev == napi->dev) &&
+ /* Paired with WRITE_ONCE() in dropmon_net_event() */
+ dev = READ_ONCE(new_stat->dev);
+ if ((dev == napi->dev) &&
(time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
(napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
trace_drop_common(NULL, NULL);
@@ -1576,7 +1580,10 @@ static int dropmon_net_event(struct notifier_block *ev_block,
mutex_lock(&net_dm_mutex);
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
if (new_stat->dev == dev) {
- new_stat->dev = NULL;
+
+ /* Paired with READ_ONCE() in trace_napi_poll_hit() */
+ WRITE_ONCE(new_stat->dev, NULL);
+
if (trace_state == TRACE_OFF) {
list_del_rcu(&new_stat->list);
kfree_rcu(new_stat, rcu);
diff --git a/net/core/filter.c b/net/core/filter.c
index 4603b7cd3cd1..9eb785842258 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2710,6 +2710,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
if (unlikely(flags))
return -EINVAL;
+ if (unlikely(len == 0))
+ return 0;
+
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 53ea262ecafd..fbddf966206b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -213,7 +213,7 @@ static ssize_t speed_show(struct device *dev,
if (!rtnl_trylock())
return restart_syscall();
- if (netif_running(netdev)) {
+ if (netif_running(netdev) && netif_device_present(netdev)) {
struct ethtool_link_ksettings cmd;
if (!__ethtool_get_link_ksettings(netdev, &cmd))
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 710da8a36729..2fb8eb6791e8 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1699,6 +1699,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
+ struct Qdisc *qdisc;
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -1716,6 +1717,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
goto nla_put_failure;
+ qdisc = rtnl_dereference(dev->qdisc);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
nla_put_u8(skb, IFLA_OPERSTATE,
@@ -1735,8 +1737,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
#endif
put_master_ifindex(skb, dev) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
- (dev->qdisc &&
- nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+ (qdisc &&
+ nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
nla_put_ifalias(skb, dev) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
atomic_read(&dev->carrier_up_count) +
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9d0388bed0c1..b8138c372535 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2276,7 +2276,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Free pulled out fragments. */
while ((list = skb_shinfo(skb)->frag_list) != insp) {
skb_shinfo(skb)->frag_list = list->next;
- kfree_skb(list);
+ consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {
@@ -4730,7 +4730,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk_is_tcp(sk))
- serr->ee.ee_data -= sk->sk_tskey;
+ serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
}
err = sock_queue_err_skb(sk, skb);
@@ -6105,7 +6105,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
/* Free pulled out fragments. */
while ((list = shinfo->frag_list) != insp) {
shinfo->frag_list = list->next;
- kfree_skb(list);
+ consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ff806d71921..6eb174805bf0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -879,9 +879,9 @@ int sock_set_timestamping(struct sock *sk, int optname,
if ((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))
return -EINVAL;
- sk->sk_tskey = tcp_sk(sk)->snd_una;
+ atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
} else {
- sk->sk_tskey = 0;
+ atomic_set(&sk->sk_tskey, 0);
}
}
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index d9d0d227092c..c43f7446a75d 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -349,6 +349,7 @@ void dsa_flush_workqueue(void)
{
flush_workqueue(dsa_owq);
}
+EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 760306f0012f..23c79e91ac67 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -147,7 +147,6 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
-void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 2199104ca7df..880f910b23a9 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -260,11 +260,16 @@ static void dsa_netdev_ops_set(struct net_device *dev,
dev->dsa_ptr->netdev_ops = ops;
}
+/* Keep the master always promiscuous if the tagging protocol requires that
+ * (garbles MAC DA) or if it doesn't support unicast filtering, in which case
+ * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
+ * anyway.
+ */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
- if (!ops->promisc_on_master)
+ if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
return;
ASSERT_RTNL();
diff --git a/net/dsa/port.c b/net/dsa/port.c
index bd78192e0e47..1a40c52f5a42 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -395,10 +395,17 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
.tree_index = dp->ds->dst->index,
.sw_index = dp->ds->index,
.port = dp->index,
- .bridge = *dp->bridge,
};
int err;
+ /* If the port could not be offloaded to begin with, then
+ * there is nothing to do.
+ */
+ if (!dp->bridge)
+ return;
+
+ info.bridge = *dp->bridge;
+
/* Here the port is already unbridged. Reflect the current configuration
* so that drivers can program their chips accordingly.
*/
@@ -781,9 +788,15 @@ int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
struct dsa_port *cpu_dp = dp->cpu_dp;
int err;
- err = dev_uc_add(cpu_dp->master, addr);
- if (err)
- return err;
+ /* Avoid a call to __dev_set_promiscuity() on the master, which
+ * requires rtnl_lock(), since we can't guarantee that is held here,
+ * and we can't take it either.
+ */
+ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_add(cpu_dp->master, addr);
+ if (err)
+ return err;
+ }
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}
@@ -800,9 +813,11 @@ int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
struct dsa_port *cpu_dp = dp->cpu_dp;
int err;
- err = dev_uc_del(cpu_dp->master, addr);
- if (err)
- return err;
+ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_del(cpu_dp->master, addr);
+ if (err)
+ return err;
+ }
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index cb548188f813..98d7d7120bab 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -77,7 +77,6 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
{
- __be16 *lan9303_tag;
u16 lan9303_tag1;
unsigned int source_port;
@@ -87,14 +86,15 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
return NULL;
}
- lan9303_tag = dsa_etype_header_pos_rx(skb);
-
- if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
- dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
- return NULL;
+ if (skb_vlan_tag_present(skb)) {
+ lan9303_tag1 = skb_vlan_tag_get(skb);
+ __vlan_hwaccel_clear_tag(skb);
+ } else {
+ skb_push_rcsum(skb, ETH_HLEN);
+ __skb_vlan_pop(skb, &lan9303_tag1);
+ skb_pull_rcsum(skb, ETH_HLEN);
}
- lan9303_tag1 = ntohs(lan9303_tag[1]);
source_port = lan9303_tag1 & 0x3;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
@@ -103,13 +103,6 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
return NULL;
}
- /* remove the special VLAN tag between the MAC addresses
- * and the current ethertype field.
- */
- skb_pull_rcsum(skb, 2 + 2);
-
- dsa_strip_etype_header(skb, LAN9303_TAG_LEN);
-
if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
dsa_default_offload_fwd_mark(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9c465bac1eb0..72fde2888ad2 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1376,8 +1376,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
}
ops = rcu_dereference(inet_offloads[proto]);
- if (likely(ops && ops->callbacks.gso_segment))
+ if (likely(ops && ops->callbacks.gso_segment)) {
segs = ops->callbacks.gso_segment(skb, features);
+ if (!segs)
+ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+ }
if (IS_ERR_OR_NULL(segs))
goto out;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4d61ddd8a0ec..85117b45216d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -436,6 +436,9 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
if (net->ipv4.fib_has_custom_local_routes ||
fib4_has_custom_rules(net))
goto full_check;
+ /* A source address local to this net namespace is regarded as a
+ * martian source; the same address owned by a different container
+ * on the same host is not.
+ */
if (inet_lookup_ifaddr_rcu(net, src))
return -EINVAL;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index e184bcb19943..78e40ea42e58 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -16,10 +16,9 @@ struct fib_alias {
u8 fa_slen;
u32 tb_id;
s16 fa_default;
- u8 offload:1,
- trap:1,
- offload_failed:1,
- unused:5;
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
struct rcu_head rcu;
};
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b4589861b84c..2dd375f7407b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -525,9 +525,9 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
fri.dst_len = dst_len;
fri.tos = fa->fa_tos;
fri.type = fa->fa_type;
- fri.offload = fa->offload;
- fri.trap = fa->trap;
- fri.offload_failed = fa->offload_failed;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
+ fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 8060524f4256..f7f74d5c14da 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1047,19 +1047,23 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
if (!fa_match)
goto out;
- if (fa_match->offload == fri->offload && fa_match->trap == fri->trap &&
- fa_match->offload_failed == fri->offload_failed)
+ /* These are paired with the WRITE_ONCE() happening in this function.
+ * The reason is that we are only protected by RCU at this point.
+ */
+ if (READ_ONCE(fa_match->offload) == fri->offload &&
+ READ_ONCE(fa_match->trap) == fri->trap &&
+ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
- fa_match->offload = fri->offload;
- fa_match->trap = fri->trap;
+ WRITE_ONCE(fa_match->offload, fri->offload);
+ WRITE_ONCE(fa_match->trap, fri->trap);
/* 2 means send notifications only if offload_failed was changed. */
if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
- fa_match->offload_failed == fri->offload_failed)
+ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
- fa_match->offload_failed = fri->offload_failed;
+ WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
if (!net->ipv4.sysctl_fib_notify_on_flag_change)
goto out;
@@ -2297,9 +2301,9 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
fri.dst_len = KEYLENGTH - fa->fa_slen;
fri.tos = fa->fa_tos;
fri.type = fa->fa_type;
- fri.offload = fa->offload;
- fri.trap = fa->trap;
- fri.offload_failed = fa->offload_failed;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
+ fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
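
The fib_alias changes above follow the standard pattern for fields written under a lock but read by lockless RCU readers: the bitfields are widened to whole bytes (READ_ONCE()/WRITE_ONCE() need addressable storage) and every access is converted to a paired READ_ONCE()/WRITE_ONCE() so the compiler can neither tear nor refetch them. The pattern in isolation (names illustrative):

#include <linux/compiler.h>
#include <linux/types.h>

struct hw_flags {
	u8 offload;	/* written under a lock, read under RCU */
};

/* Writer: holds the lock, but readers may run concurrently. */
static void hw_flags_set(struct hw_flags *f, u8 val)
{
	WRITE_ONCE(f->offload, val);
}

/* Reader: RCU protection only, no lock taken. */
static u8 hw_flags_get(const struct hw_flags *f)
{
	return READ_ONCE(f->offload);
}
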
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 139cec29ed06..7911916a480b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -991,7 +991,7 @@ static int __ip_append_data(struct sock *sk,
if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
- tskey = sk->sk_tskey++;
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index bcf7bc71cb56..3ee947557b88 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -172,16 +172,22 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
struct sock *sk = NULL;
struct inet_sock *isk;
struct hlist_nulls_node *hnode;
- int dif = skb->dev->ifindex;
+ int dif, sdif;
if (skb->protocol == htons(ETH_P_IP)) {
+ dif = inet_iif(skb);
+ sdif = inet_sdif(skb);
pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
(int)ident, &ip_hdr(skb)->daddr, dif);
#if IS_ENABLED(CONFIG_IPV6)
} else if (skb->protocol == htons(ETH_P_IPV6)) {
+ dif = inet6_iif(skb);
+ sdif = inet6_sdif(skb);
pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
(int)ident, &ipv6_hdr(skb)->daddr, dif);
#endif
+ } else {
+ return NULL;
}
read_lock_bh(&ping_table.lock);
@@ -221,7 +227,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
}
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
- sk->sk_bound_dev_if != inet_sdif(skb))
+ sk->sk_bound_dev_if != sdif)
continue;
sock_hold(sk);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ff6f91cdb6c4..f33ad1f383b6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3395,8 +3395,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fa->fa_tos == fri.tos &&
fa->fa_info == res.fi &&
fa->fa_type == fri.type) {
- fri.offload = fa->offload;
- fri.trap = fa->trap;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
break;
}
}
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index b91003538d87..bc3a043a5d5c 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
list_for_each_entry(node, &info->shared->devices, list)
if (node->dev == dev)
break;
- if (node->dev != dev)
+ if (list_entry_is_head(node, &info->shared->devices, list))
return;
list_del(&node->list);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f927c199a93c..6c8ab3e6e6fe 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1839,8 +1839,8 @@ out:
}
EXPORT_SYMBOL(ipv6_dev_get_saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags)
+static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+ u32 banned_flags)
{
struct inet6_ifaddr *ifp;
int err = -EADDRNOTAVAIL;
@@ -4998,6 +4998,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto error;
+ spin_lock_bh(&ifa->lock);
if (!((ifa->flags&IFA_F_PERMANENT) &&
(ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
@@ -5019,6 +5020,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
+ spin_unlock_bh(&ifa->lock);
if (!ipv6_addr_any(&ifa->peer_addr)) {
if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index aa673a6a7e43..ceb85c67ce39 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -EINVAL;
goto done;
}
- if (fl_shared_exclusive(fl) || fl->opt)
+ if (fl_shared_exclusive(fl) || fl->opt) {
+ WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1);
static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
+ }
return fl;
done:
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index b29e9ba5e113..5f577e21459b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -114,6 +114,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (likely(ops && ops->callbacks.gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->callbacks.gso_segment(skb, features);
+ if (!segs)
+ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
}
if (IS_ERR_OR_NULL(segs))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2995f8d89e7e..304a295de84f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1465,7 +1465,7 @@ static int __ip6_append_data(struct sock *sk,
if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
- tskey = sk->sk_tskey++;
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bed8155508c8..a8861db52c18 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1759,7 +1759,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
skb_reserve(skb, hlen);
skb_tailroom_reserve(skb, mtu, tlen);
- if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
* use unspecified address as the source address
* when a valid link-local address is not available.
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index f4884cda13b9..ea1cf414a92e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5753,11 +5753,11 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
}
if (!dst) {
- if (rt->offload)
+ if (READ_ONCE(rt->offload))
rtm->rtm_flags |= RTM_F_OFFLOAD;
- if (rt->trap)
+ if (READ_ONCE(rt->trap))
rtm->rtm_flags |= RTM_F_TRAP;
- if (rt->offload_failed)
+ if (READ_ONCE(rt->offload_failed))
rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
}
@@ -6215,19 +6215,20 @@ void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
struct sk_buff *skb;
int err;
- if (f6i->offload == offload && f6i->trap == trap &&
- f6i->offload_failed == offload_failed)
+ if (READ_ONCE(f6i->offload) == offload &&
+ READ_ONCE(f6i->trap) == trap &&
+ READ_ONCE(f6i->offload_failed) == offload_failed)
return;
- f6i->offload = offload;
- f6i->trap = trap;
+ WRITE_ONCE(f6i->offload, offload);
+ WRITE_ONCE(f6i->trap, trap);
/* 2 means send notifications only if offload_failed was changed. */
if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
- f6i->offload_failed == offload_failed)
+ READ_ONCE(f6i->offload_failed) == offload_failed)
return;
- f6i->offload_failed = offload_failed;
+ WRITE_ONCE(f6i->offload_failed, offload_failed);
if (!rcu_access_pointer(f6i->fib6_node))
/* The route was removed from the tree, do not send
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1eeabdf10052..e5ccf17618ab 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -666,7 +666,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
ieee80211_ie_build_he_6ghz_cap(sdata, skb);
}
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -686,6 +686,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
const struct ieee80211_sband_iftype_data *iftd;
struct ieee80211_prep_tx_info info = {};
+ int ret;
/* we know it's writable, cast away the const */
if (assoc_data->ie_len)
@@ -699,7 +700,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
- return;
+ return -EINVAL;
}
chan = chanctx_conf->def.chan;
rcu_read_unlock();
@@ -750,7 +751,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
(iftd ? iftd->vendor_elems.len : 0),
GFP_KERNEL);
if (!skb)
- return;
+ return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -1031,15 +1032,22 @@ skip_rates:
skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
}
- if (assoc_data->fils_kek_len &&
- fils_encrypt_assoc_req(skb, assoc_data) < 0) {
- dev_kfree_skb(skb);
- return;
+ if (assoc_data->fils_kek_len) {
+ ret = fils_encrypt_assoc_req(skb, assoc_data);
+ if (ret < 0) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
}
pos = skb_tail_pointer(skb);
kfree(ifmgd->assoc_req_ies);
ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
+ if (!ifmgd->assoc_req_ies) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
ifmgd->assoc_req_ies_len = pos - ie_start;
drv_mgd_prepare_tx(local, sdata, &info);
@@ -1049,6 +1057,8 @@ skip_rates:
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_tx_skb(sdata, skb);
+
+ return 0;
}
void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -4497,6 +4507,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
struct ieee80211_local *local = sdata->local;
+ int ret;
sdata_assert_lock(sdata);
@@ -4517,7 +4528,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata, "associate with %pM (try %d/%d)\n",
assoc_data->bss->bssid, assoc_data->tries,
IEEE80211_ASSOC_MAX_TRIES);
- ieee80211_send_assoc(sdata);
+ ret = ieee80211_send_assoc(sdata);
+ if (ret)
+ return ret;
if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 8d9f4ff3e285..e52cef750500 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -412,13 +412,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* this function.
*/
rc = mctp_key_add(key, msk);
- if (rc)
+ if (rc) {
kfree(key);
+ } else {
+ trace_mctp_key_acquire(key);
- trace_mctp_key_acquire(key);
-
- /* we don't need to release key->lock on exit */
- mctp_key_unref(key);
+ /* we don't need to release key->lock on exit */
+ mctp_key_unref(key);
+ }
key = NULL;
} else {
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 3240b72271a7..7558802a1435 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -35,12 +35,14 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD),
+ SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP),
SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX),
SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX),
SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX),
SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX),
SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX),
SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
+ SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP),
SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index ecd3d8b117e0..2966fcb6548b 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -28,12 +28,14 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */
MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */
MPTCP_MIB_PORTADD, /* Received ADD_ADDR with a port-number */
+ MPTCP_MIB_ADDADDRDROP, /* Dropped incoming ADD_ADDR */
MPTCP_MIB_JOINPORTSYNRX, /* Received a SYN MP_JOIN with a different port-number */
MPTCP_MIB_JOINPORTSYNACKRX, /* Received a SYNACK MP_JOIN with a different port-number */
MPTCP_MIB_JOINPORTACKRX, /* Received an ACK MP_JOIN with a different port-number */
MPTCP_MIB_MISMATCHPORTSYNRX, /* Received a SYN MP_JOIN with a mismatched port-number */
MPTCP_MIB_MISMATCHPORTACKRX, /* Received an ACK MP_JOIN with a mismatched port-number */
MPTCP_MIB_RMADDR, /* Received RM_ADDR */
+ MPTCP_MIB_RMADDRDROP, /* Dropped incoming RM_ADDR */
MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */
MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 696b2c4613a7..7bea318ac5f2 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -213,6 +213,8 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->remote = *addr;
+ } else {
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
}
spin_unlock_bh(&pm->lock);
@@ -253,8 +255,10 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
mptcp_event_addr_removed(msk, rm_list->ids[i]);
spin_lock_bh(&pm->lock);
- mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
- pm->rm_list_rx = *rm_list;
+ if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
+ pm->rm_list_rx = *rm_list;
+ else
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
spin_unlock_bh(&pm->lock);
}
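The two pm.c hunks above pair every path where the PM worker could not be scheduled with one of the new AddAddrDrop/RmAddrDrop counters, so events that were previously lost in silence now show up in the MPTCP MIB. A small standalone illustration of the accounting pattern (schedule_work() here is a stand-in, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the per-netns MPTCP_MIB_ADDADDRDROP / RMADDRDROP */
static unsigned long add_addr_drop, rm_addr_drop;

static bool schedule_work(const char *what)
{
	printf("schedule %s: worker already pending\n", what);
	return false;		/* simulate the racy 'busy' case */
}

int main(void)
{
	if (!schedule_work("ADD_ADDR"))
		add_addr_drop++;	/* dropped, but now accounted */
	if (!schedule_work("RM_ADDR"))
		rm_addr_drop++;
	printf("drops: add=%lu rm=%lu\n", add_addr_drop, rm_addr_drop);
	return 0;
}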
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 356f596e2032..4b5d795383cd 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -546,6 +546,16 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
if (msk->pm.add_addr_signaled < add_addr_signal_max) {
local = select_signal_address(pernet, msk);
+ /* due to racing events on both ends, we can reach here while a
+ * previous add-address operation is still in flight: invoking
+ * mptcp_pm_announce_addr() now would fail and leave the
+ * corresponding id marked as used.
+ * Instead, let the PM machinery reschedule us once the current
+ * address announcement completes.
+ */
+ if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
+ return;
+
if (local) {
if (mptcp_pm_alloc_anno_list(msk, local)) {
__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
@@ -650,6 +660,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
unsigned int subflows_max;
+ bool reset_port = false;
int i, nr;
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -659,15 +670,19 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
msk->pm.add_addr_accepted, add_addr_accept_max,
msk->pm.remote.family);
- if (lookup_subflow_by_daddr(&msk->conn_list, &msk->pm.remote))
+ remote = msk->pm.remote;
+ if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
goto add_addr_echo;
+ /* if the ADD_ADDR carried no port, fall back to the initial (id 0) subflow's destination port */
+ if (!remote.port) {
+ reset_port = true;
+ remote.port = sk->sk_dport;
+ }
+
/* connect to the specified remote address, using whatever
* local address the routing configuration will pick.
*/
- remote = msk->pm.remote;
- if (!remote.port)
- remote.port = sk->sk_dport;
nr = fill_local_addresses_vec(msk, addrs);
msk->pm.add_addr_accepted++;
@@ -680,8 +695,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
__mptcp_subflow_connect(sk, &addrs[i], &remote);
spin_lock_bh(&msk->pm.lock);
+ /* be sure to echo exactly the received address */
+ if (reset_port)
+ remote.port = 0;
+
add_addr_echo:
- mptcp_pm_announce_addr(msk, &msk->pm.remote, true);
+ mptcp_pm_announce_addr(msk, &remote, true);
mptcp_pm_nl_addr_send_ack(msk);
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5fa16990da95..9cd1d7a62804 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -6551,12 +6551,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
{
struct nft_object *newobj;
struct nft_trans *trans;
- int err;
+ int err = -ENOMEM;
+
+ if (!try_module_get(type->owner))
+ return -ENOENT;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
sizeof(struct nft_trans_obj));
if (!trans)
- return -ENOMEM;
+ goto err_trans;
newobj = nft_obj_init(ctx, type, attr);
if (IS_ERR(newobj)) {
@@ -6573,6 +6576,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
err_free_trans:
kfree(trans);
+err_trans:
+ module_put(type->owner);
return err;
}
@@ -8185,7 +8190,7 @@ static void nft_obj_commit_update(struct nft_trans *trans)
if (obj->ops->update)
obj->ops->update(obj, newobj);
- kfree(newobj);
+ nft_obj_destroy(&trans->ctx, newobj);
}
static void nft_commit_release(struct nft_trans *trans)
@@ -8976,7 +8981,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
- kfree(nft_trans_obj_newobj(trans));
+ nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
nft_trans_destroy(trans);
} else {
trans->ctx.table->use--;
@@ -9636,10 +9641,13 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain);
static void __nft_release_hook(struct net *net, struct nft_table *table)
{
+ struct nft_flowtable *flowtable;
struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list)
nf_tables_unregister_hook(net, table, chain);
+ list_for_each_entry(flowtable, &table->flowtables, list)
+ nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
}
static void __nft_release_hooks(struct net *net)
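The nf_tables_updobj() hunk takes a reference on the object type's module before allocating the transaction and drops it on every error path; together with the kfree() to nft_obj_destroy() changes in the commit and abort paths, the module stays pinned for as long as the pending object can still be dereferenced. The acquire/unwind discipline, reduced to a standalone sketch (get_ref()/put_ref() stand in for try_module_get()/module_put()):

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static bool get_ref(void) { return true; }	/* try_module_get() stand-in */
static void put_ref(void) { }			/* module_put() stand-in */

static int update_object(void)
{
	int err = -ENOMEM;
	void *trans;

	if (!get_ref())
		return -ENOENT;

	trans = malloc(64);
	if (!trans)
		goto err_put;	/* the reference is dropped on *every* exit */

	/* ... later init steps would unwind through free() + put_ref() ... */
	free(trans);
	return 0;

err_put:
	put_ref();
	return err;
}

int main(void) { return update_object() ? 1 : 0; }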
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 9656c1646222..2d36952b1392 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -94,7 +94,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
expr = nft_expr_first(rule);
while (nft_expr_more(rule, expr)) {
- if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
+ if (expr->ops->offload_action &&
+ expr->ops->offload_action(expr))
num_actions++;
expr = nft_expr_next(expr);
diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
index bbf3fcba3df4..5b5c607fbf83 100644
--- a/net/netfilter/nft_dup_netdev.c
+++ b/net/netfilter/nft_dup_netdev.c
@@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx,
return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif);
}
+static bool nft_dup_netdev_offload_action(const struct nft_expr *expr)
+{
+ return true;
+}
+
static struct nft_expr_type nft_dup_netdev_type;
static const struct nft_expr_ops nft_dup_netdev_ops = {
.type = &nft_dup_netdev_type,
@@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = {
.init = nft_dup_netdev_init,
.dump = nft_dup_netdev_dump,
.offload = nft_dup_netdev_offload,
+ .offload_action = nft_dup_netdev_offload_action,
};
static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index fa9301ca6033..619e394a91de 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -79,6 +79,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
}
+static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
+{
+ return true;
+}
+
struct nft_fwd_neigh {
u8 sreg_dev;
u8 sreg_addr;
@@ -222,6 +227,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
.dump = nft_fwd_netdev_dump,
.validate = nft_fwd_validate,
.offload = nft_fwd_netdev_offload,
+ .offload_action = nft_fwd_netdev_offload_action,
};
static const struct nft_expr_ops *
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 90c64d27ae53..d0f67d325bdf 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -213,6 +213,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx,
return 0;
}
+static bool nft_immediate_offload_action(const struct nft_expr *expr)
+{
+ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+ if (priv->dreg == NFT_REG_VERDICT)
+ return true;
+
+ return false;
+}
+
static const struct nft_expr_ops nft_imm_ops = {
.type = &nft_imm_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
@@ -224,7 +234,7 @@ static const struct nft_expr_ops nft_imm_ops = {
.dump = nft_immediate_dump,
.validate = nft_immediate_validate,
.offload = nft_immediate_offload,
- .offload_flags = NFT_OFFLOAD_F_ACTION,
+ .offload_action = nft_immediate_offload_action,
};
struct nft_expr_type nft_imm_type __read_mostly = {
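These offload hunks replace the type-wide NFT_OFFLOAD_F_ACTION flag with an offload_action() callback so each expression instance can decide whether it contributes a flow action: nft_immediate, for example, only does so when it writes the verdict register. A compact sketch of trading a static flag for a per-object predicate (structure and names are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct expr;

struct expr_ops {
	/* was: unsigned int offload_flags, one answer per type */
	bool (*offload_action)(const struct expr *expr);
};

struct expr {
	const struct expr_ops *ops;
	int dreg;			/* destination register */
};

enum { REG_VERDICT = 0 };

static bool imm_offload_action(const struct expr *e)
{
	return e->dreg == REG_VERDICT;	/* per-instance decision */
}

static const struct expr_ops imm_ops = {
	.offload_action = imm_offload_action,
};

static size_t count_actions(const struct expr *e, size_t n)
{
	size_t actions = 0;

	for (size_t i = 0; i < n; i++, e++)
		if (e->ops->offload_action && e->ops->offload_action(e))
			actions++;
	return actions;
}

int main(void)
{
	struct expr rule[] = { { &imm_ops, REG_VERDICT }, { &imm_ops, 1 } };

	return (int)count_actions(rule, 2);	/* 1: only the verdict load */
}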
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index c4f308460dd1..a726b623963d 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -340,11 +340,20 @@ static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
}
+static void nft_limit_obj_pkts_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
+
+ nft_limit_destroy(ctx, &priv->limit);
+}
+
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_pkts_ops = {
.type = &nft_limit_obj_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
.init = nft_limit_obj_pkts_init,
+ .destroy = nft_limit_obj_pkts_destroy,
.eval = nft_limit_obj_pkts_eval,
.dump = nft_limit_obj_pkts_dump,
};
@@ -378,11 +387,20 @@ static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
}
+static void nft_limit_obj_bytes_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ struct nft_limit_priv *priv = nft_obj_data(obj);
+
+ nft_limit_destroy(ctx, priv);
+}
+
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_bytes_ops = {
.type = &nft_limit_obj_type,
.size = sizeof(struct nft_limit_priv),
.init = nft_limit_obj_bytes_init,
+ .destroy = nft_limit_obj_bytes_destroy,
.eval = nft_limit_obj_bytes_eval,
.dump = nft_limit_obj_bytes_dump,
};
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index a0109fa1e92d..1133e06f3c40 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -191,8 +191,10 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
if (err)
goto nf_ct_failure;
err = nf_synproxy_ipv6_init(snet, ctx->net);
- if (err)
+ if (err) {
+ nf_synproxy_ipv4_fini(snet, ctx->net);
goto nf_ct_failure;
+ }
break;
}
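The synproxy hunk closes a partial-initialization leak: if nf_synproxy_ipv6_init() fails after the IPv4 side already succeeded, the IPv4 state must be torn down before taking the shared error label. The unwind ordering in isolation:

#include <errno.h>

static int init_v4(void) { return 0; }
static void fini_v4(void) { }
static int init_v6(void) { return -ENOMEM; }	/* simulate the failure */

static int init_both(void)
{
	int err = init_v4();

	if (err)
		return err;

	err = init_v6();
	if (err) {
		fini_v4();	/* undo the half that did succeed */
		return err;
	}
	return 0;
}

int main(void) { return init_both() ? 1 : 0; }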
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 5e6459e11605..7013f55f05d1 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -220,8 +220,10 @@ static void socket_mt_destroy(const struct xt_mtdtor_param *par)
{
if (par->family == NFPROTO_IPV4)
nf_defrag_ipv4_disable(par->net);
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
- nf_defrag_ipv4_disable(par->net);
+ nf_defrag_ipv6_disable(par->net);
+#endif
}
static struct xt_match socket_mt_reg[] __read_mostly = {
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 076774034bb9..780d9e2246f3 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -423,12 +423,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
memcpy(addr, new_addr, sizeof(__be32[4]));
}
-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
+static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
+ u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
+
+ ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
+ (__force __wsum)(ipv6_tclass << 12));
+
+ ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
+}
+
+static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
+{
+ u32 ofl;
+
+ ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
+ fl = OVS_MASKED(ofl, fl, mask);
+
/* Bits 21-24 are always unmasked, so this retains their values. */
- OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
- OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
- OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+ nh->flow_lbl[0] = (u8)(fl >> 16);
+ nh->flow_lbl[1] = (u8)(fl >> 8);
+ nh->flow_lbl[2] = (u8)fl;
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
+}
+
+static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
+{
+ new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
+ (__force __wsum)(new_ttl << 8));
+ nh->hop_limit = new_ttl;
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
@@ -546,18 +577,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
}
}
if (mask->ipv6_tclass) {
- ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+ set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
flow_key->ip.tos = ipv6_get_dsfield(nh);
}
if (mask->ipv6_label) {
- set_ipv6_fl(nh, ntohl(key->ipv6_label),
+ set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
ntohl(mask->ipv6_label));
flow_key->ipv6.label =
*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
}
if (mask->ipv6_hlimit) {
- OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
- mask->ipv6_hlimit);
+ set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
flow_key->ip.ttl = nh->hop_limit;
}
return 0;
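All three new openvswitch helpers keep skb->csum coherent when a header field is rewritten on a CHECKSUM_COMPLETE packet: the old value is subtracted from the one's-complement sum and the new one added, which is the idea behind csum_replace(). A tiny userspace illustration of that arithmetic (this mirrors the kernel's csum_add()/csum_sub() folding, not its types):

#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t sum, uint32_t addend)
{
	sum += addend;
	return sum + (sum < addend);	/* end-around carry */
}

static uint32_t csum_sub(uint32_t sum, uint32_t subtrahend)
{
	return csum_add(sum, ~subtrahend);
}

int main(void)
{
	uint32_t sum = 0x11112222;	/* running CHECKSUM_COMPLETE value */
	uint32_t old_val = 0x00001234;
	uint32_t new_val = 0x00005678;

	/* replace old_val with new_val without re-summing the packet */
	sum = csum_add(csum_sub(sum, old_val), new_val);
	printf("updated sum: 0x%08x\n", sum);
	return 0;
}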
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 32563cef85bf..ca03e7284254 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -274,7 +274,7 @@ static int tcf_action_offload_add_ex(struct tc_action *action,
err = tc_setup_action(&fl_action->action, actions);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Failed to setup tc actions for offload\n");
+ "Failed to setup tc actions for offload");
goto fl_err;
}
@@ -1037,6 +1037,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
restart_act_graph:
for (i = 0; i < nr_actions; i++) {
const struct tc_action *a = actions[i];
+ int repeat_ttl;
if (jmp_prgcnt > 0) {
jmp_prgcnt -= 1;
@@ -1045,11 +1046,17 @@ restart_act_graph:
if (tc_act_skip_sw(a->tcfa_flags))
continue;
+
+ repeat_ttl = 32;
repeat:
ret = a->ops->act(skb, a, res);
- if (ret == TC_ACT_REPEAT)
- goto repeat; /* we need a ttl - JHS */
-
+ if (unlikely(ret == TC_ACT_REPEAT)) {
+ if (--repeat_ttl != 0)
+ goto repeat;
+ /* suspicious opcode, stop pipeline */
+ net_warn_ratelimited("TC_ACT_REPEAT abuse?\n");
+ return TC_ACT_OK;
+ }
if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
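The act_api.c hunk bounds TC_ACT_REPEAT: a buggy or malicious action that keeps returning the repeat verdict can no longer spin the softirq forever, because after 32 iterations the pipeline warns (rate-limited) and falls back to TC_ACT_OK. The control flow, reduced to a runnable skeleton:

#include <stdio.h>

#define ACT_OK		0
#define ACT_REPEAT	1

static int act(void) { return ACT_REPEAT; }	/* worst case: always repeats */

int main(void)
{
	int repeat_ttl = 32;	/* same budget as the hunk above */
	int ret;

repeat:
	ret = act();
	if (ret == ACT_REPEAT) {
		if (--repeat_ttl != 0)
			goto repeat;
		fprintf(stderr, "repeat budget exhausted, stopping pipeline\n");
		ret = ACT_OK;	/* fail safe instead of looping forever */
	}
	printf("final verdict: %d\n", ret);
	return 0;
}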
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index f99247fc6468..33e70d60f0bf 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -533,11 +533,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
struct nf_conn *ct;
u8 dir;
- /* Previously seen or loopback */
- ct = nf_ct_get(skb, &ctinfo);
- if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
- return false;
-
switch (family) {
case NFPROTO_IPV4:
if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5f0f346b576f..5ce1208a6ea3 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1044,7 +1044,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
/* Find qdisc */
if (!*parent) {
- *q = dev->qdisc;
+ *q = rcu_dereference(dev->qdisc);
*parent = (*q)->handle;
} else {
*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
@@ -2587,7 +2587,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
parent = tcm->tcm_parent;
if (!parent)
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
@@ -2962,7 +2962,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
if (!tcm->tcm_parent)
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 179825a3b2fd..e3c0e8ea2dbb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
if (q)
goto out;
@@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
if (q)
goto out;
@@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
skip:
if (!ingress) {
notify_and_destroy(net, skb, n, classid,
- dev->qdisc, new);
+ rtnl_dereference(dev->qdisc), new);
if (new && !new->ops->attach)
qdisc_refcount_inc(new);
- dev->qdisc = new ? : &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
if (new && new->ops->attach)
new->ops->attach(new);
@@ -1451,7 +1451,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
if (!q) {
NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
@@ -1540,7 +1540,7 @@ replay:
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
/* It may be default qdisc, ignore it */
@@ -1762,7 +1762,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_q_idx = 0;
q_idx = 0;
- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+ if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
+ skb, cb, &q_idx, s_q_idx,
true, tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
@@ -2033,7 +2034,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
} else if (qid1) {
qid = qid1;
} else if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
/* Now qid is genuine qdisc handle consistent
* both with parent and child.
@@ -2044,7 +2045,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
portid = TC_H_MAKE(qid, portid);
} else {
if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
}
/* OK. Locate qdisc */
@@ -2205,7 +2206,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
t = 0;
- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
+ if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
+ skb, tcm, cb, &t, s_t, true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f893d9a81b01..5bab9f8b8f45 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1164,30 +1164,33 @@ static void attach_default_qdiscs(struct net_device *dev)
if (!netif_is_multiqueue(dev) ||
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- dev->qdisc = txq->qdisc_sleeping;
- qdisc_refcount_inc(dev->qdisc);
+ qdisc = txq->qdisc_sleeping;
+ rcu_assign_pointer(dev->qdisc, qdisc);
+ qdisc_refcount_inc(qdisc);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
if (qdisc) {
- dev->qdisc = qdisc;
+ rcu_assign_pointer(dev->qdisc, qdisc);
qdisc->ops->attach(qdisc);
}
}
+ qdisc = rtnl_dereference(dev->qdisc);
/* Detect default qdisc setup/init failed and fallback to "noqueue" */
- if (dev->qdisc == &noop_qdisc) {
+ if (qdisc == &noop_qdisc) {
netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
default_qdisc_ops->id, noqueue_qdisc_ops.id);
dev->priv_flags |= IFF_NO_QUEUE;
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- dev->qdisc = txq->qdisc_sleeping;
- qdisc_refcount_inc(dev->qdisc);
+ qdisc = txq->qdisc_sleeping;
+ rcu_assign_pointer(dev->qdisc, qdisc);
+ qdisc_refcount_inc(qdisc);
dev->priv_flags ^= IFF_NO_QUEUE;
}
#ifdef CONFIG_NET_SCHED
- if (dev->qdisc != &noop_qdisc)
- qdisc_hash_add(dev->qdisc, false);
+ if (qdisc != &noop_qdisc)
+ qdisc_hash_add(qdisc, false);
#endif
}
@@ -1217,7 +1220,7 @@ void dev_activate(struct net_device *dev)
* and noqueue_qdisc for virtual interfaces
*/
- if (dev->qdisc == &noop_qdisc)
+ if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
@@ -1383,7 +1386,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev,
void dev_qdisc_change_real_num_tx(struct net_device *dev,
unsigned int new_real_tx)
{
- struct Qdisc *qdisc = dev->qdisc;
+ struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
if (qdisc->ops->change_real_num_tx)
qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
@@ -1447,7 +1450,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
void dev_init_scheduler(struct net_device *dev)
{
- dev->qdisc = &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
@@ -1475,8 +1478,8 @@ void dev_shutdown(struct net_device *dev)
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
- qdisc_put(dev->qdisc);
- dev->qdisc = &noop_qdisc;
+ qdisc_put(rtnl_dereference(dev->qdisc));
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
WARN_ON(timer_pending(&dev->watchdog_timer));
}
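The sch_api.c and sch_generic.c hunks annotate dev->qdisc for RCU: writers publish with rcu_assign_pointer(), RTNL-protected readers use rtnl_dereference(), and the lockless lookup path (qdisc_lookup_rcu()) uses rcu_dereference() under rcu_read_lock(). A userspace analogue of the publish/consume discipline using C11 atomics (rcu_assign_pointer() is essentially a release store; rcu_dereference() is a dependency-ordered load, shown here as acquire):

#include <stdatomic.h>
#include <stdio.h>

struct qdisc { const char *id; };

static _Atomic(struct qdisc *) dev_qdisc;

static void publish(struct qdisc *q)		/* ~ rcu_assign_pointer() */
{
	atomic_store_explicit(&dev_qdisc, q, memory_order_release);
}

static struct qdisc *read_lockless(void)	/* ~ rcu_dereference() */
{
	return atomic_load_explicit(&dev_qdisc, memory_order_acquire);
}

int main(void)
{
	static struct qdisc noop = { "noop" };

	publish(&noop);
	printf("root qdisc: %s\n", read_lockless()->id);
	return 0;
}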
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 8c89d0b0ca18..306d9e8cd1dd 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -667,14 +667,17 @@ static void smc_fback_error_report(struct sock *clcsk)
static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
struct sock *clcsk;
+ int rc = 0;
mutex_lock(&smc->clcsock_release_lock);
if (!smc->clcsock) {
- mutex_unlock(&smc->clcsock_release_lock);
- return -EBADF;
+ rc = -EBADF;
+ goto out;
}
clcsk = smc->clcsock->sk;
+ if (smc->use_fallback)
+ goto out;
smc->use_fallback = true;
smc->fallback_rsn = reason_code;
smc_stat_fallback(smc);
@@ -702,8 +705,9 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
smc->clcsock->sk->sk_user_data =
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
}
+out:
mutex_unlock(&smc->clcsock_release_lock);
- return 0;
+ return rc;
}
/* fall back during connect */
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 0599246c0376..29f0a559d884 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -113,7 +113,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
pnettable = &sn->pnettable;
/* remove table entry */
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist,
list) {
if (!pnet_name ||
@@ -131,7 +131,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
rc = 0;
}
}
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
/* if this is not the initial namespace, stop here */
if (net != &init_net)
@@ -192,7 +192,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
sn = net_generic(net, smc_net_id);
pnettable = &sn->pnettable;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
!strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
@@ -206,7 +206,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
break;
}
}
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -224,7 +224,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
sn = net_generic(net, smc_net_id);
pnettable = &sn->pnettable;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
@@ -237,7 +237,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
break;
}
}
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -370,7 +370,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
rc = -EEXIST;
new_netdev = true;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_ETH &&
!strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) {
@@ -385,9 +385,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
GFP_ATOMIC);
}
list_add_tail(&new_pe->list, &pnettable->pnetlist);
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
} else {
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
kfree(new_pe);
goto out_put;
}
@@ -448,7 +448,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
new_pe->ib_port = ib_port;
new_ibdev = true;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_IB &&
!strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
@@ -458,9 +458,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
}
if (new_ibdev) {
list_add_tail(&new_pe->list, &pnettable->pnetlist);
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
} else {
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
kfree(new_pe);
}
return (new_ibdev) ? 0 : -EEXIST;
@@ -605,7 +605,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
pnettable = &sn->pnettable;
/* dump pnettable entries */
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid))
continue;
@@ -620,7 +620,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return idx;
}
@@ -864,7 +864,7 @@ int smc_pnet_net_init(struct net *net)
struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev;
INIT_LIST_HEAD(&pnettable->pnetlist);
- rwlock_init(&pnettable->lock);
+ mutex_init(&pnettable->lock);
INIT_LIST_HEAD(&pnetids_ndev->list);
rwlock_init(&pnetids_ndev->lock);
@@ -944,7 +944,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
sn = net_generic(net, smc_net_id);
pnettable = &sn->pnettable;
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) {
/* get pnetid of netdev device */
@@ -953,7 +953,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -1156,7 +1156,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
sn = net_generic(&init_net, smc_net_id);
pnettable = &sn->pnettable;
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_IB &&
!strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) &&
@@ -1166,7 +1166,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -1185,7 +1185,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
sn = net_generic(&init_net, smc_net_id);
pnettable = &sn->pnettable;
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_IB &&
!strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
@@ -1194,7 +1194,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
index 14039272f7e4..80a88eea4949 100644
--- a/net/smc/smc_pnet.h
+++ b/net/smc/smc_pnet.h
@@ -29,7 +29,7 @@ struct smc_link_group;
* @pnetlist: List of PNETIDs
*/
struct smc_pnettable {
- rwlock_t lock;
+ struct mutex lock;
struct list_head pnetlist;
};
diff --git a/net/socket.c b/net/socket.c
index 50cf75730fd7..982eecad464c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3448,7 +3448,7 @@ EXPORT_SYMBOL(kernel_connect);
* @addr: address holder
*
* Fills the @addr pointer with the address to which the socket is bound.
- * Returns 0 or an error code.
+ * Returns the length of the address in bytes or an error code.
*/
int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
@@ -3463,7 +3463,7 @@ EXPORT_SYMBOL(kernel_getsockname);
* @addr: address holder
*
* Fills the @addr pointer with the address to which the socket is connected.
- * Returns 0 or an error code.
+ * Returns the length of the address in bytes or an error code.
*/
int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 9325479295b8..f09316a9035f 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -2276,7 +2276,7 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
struct tipc_aead_key *skey = NULL;
u16 key_gen = msg_key_gen(hdr);
- u16 size = msg_data_sz(hdr);
+ u32 size = msg_data_sz(hdr);
u8 *data = msg_data(hdr);
unsigned int keylen;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 01396dd1c899..1d8ba233d047 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -967,7 +967,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
list_for_each_entry(p, &sr->all_publ, all_publ)
if (p->key == *last_key)
break;
- if (p->key != *last_key)
+ if (list_entry_is_head(p, &sr->all_publ, all_publ))
return -EPIPE;
} else {
p = list_first_entry(&sr->all_publ,
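Both TIPC hunks fix the same termination bug: when list_for_each_entry() completes without a break, the cursor aliases the list head itself, so a field comparison like p->key != *last_key reads memory that is not a real entry. list_entry_is_head() tests the cursor's identity instead. The idiom in miniature, with a hand-rolled circular list:

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next; int key; };

/* after a full traversal p == head (the sentinel), so checking p->key
 * would read garbage; compare pointers, as list_entry_is_head() does
 */
static bool found_key(struct node *head, int last_key)
{
	struct node *p;

	for (p = head->next; p != head; p = p->next)
		if (p->key == last_key)
			break;

	return p != head;
}

int main(void)
{
	struct node head, a = { &head, 1 };

	head.next = &a;
	printf("1: %d, 2: %d\n", found_key(&head, 1), found_key(&head, 2));
	return 0;
}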
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9947b7dfe1d2..6ef95ce565bd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -403,7 +403,7 @@ static void tipc_node_write_unlock(struct tipc_node *n)
u32 flags = n->action_flags;
struct list_head *publ_list;
struct tipc_uaddr ua;
- u32 bearer_id;
+ u32 bearer_id, node;
if (likely(!flags)) {
write_unlock_bh(&n->lock);
@@ -413,7 +413,8 @@ static void tipc_node_write_unlock(struct tipc_node *n)
tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
TIPC_LINK_STATE, n->addr, n->addr);
sk.ref = n->link_id;
- sk.node = n->addr;
+ sk.node = tipc_own_addr(net);
+ node = n->addr;
bearer_id = n->link_id & 0xffff;
publ_list = &n->publ_list;
@@ -423,17 +424,17 @@ static void tipc_node_write_unlock(struct tipc_node *n)
write_unlock_bh(&n->lock);
if (flags & TIPC_NOTIFY_NODE_DOWN)
- tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
+ tipc_publ_notify(net, publ_list, node, n->capabilities);
if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(net, sk.node, n->capabilities);
+ tipc_named_node_up(net, node, n->capabilities);
if (flags & TIPC_NOTIFY_LINK_UP) {
- tipc_mon_peer_up(net, sk.node, bearer_id);
+ tipc_mon_peer_up(net, node, bearer_id);
tipc_nametbl_publish(net, &ua, &sk, sk.ref);
}
if (flags & TIPC_NOTIFY_LINK_DOWN) {
- tipc_mon_peer_down(net, sk.node, bearer_id);
+ tipc_mon_peer_down(net, node, bearer_id);
tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
}
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3e63c83e641c..7545321c3440 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3749,7 +3749,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
if (p->key == *last_publ)
break;
}
- if (p->key != *last_publ) {
+ if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
/* We never set seq or call nl_dump_check_consistent()
* this means that setting prev_seq here will cause the
* consistency check to fail in the netlink callback
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 3235261f138d..38baeb189d4e 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1401,6 +1401,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
+ vsock_remove_connected(vsk);
goto out_wait;
} else if (timeout == 0) {
err = -ETIMEDOUT;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3a54c8e6b6c6..f08d4b3bb148 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -332,29 +332,20 @@ static void cfg80211_event_work(struct work_struct *work)
void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev, *tmp;
- bool found = false;
ASSERT_RTNL();
- list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
if (wdev->nl_owner_dead) {
if (wdev->netdev)
dev_close(wdev->netdev);
- found = true;
- }
- }
-
- if (!found)
- return;
- wiphy_lock(&rdev->wiphy);
- list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
- if (wdev->nl_owner_dead) {
+ wiphy_lock(&rdev->wiphy);
cfg80211_leave(rdev, wdev);
rdev_del_virtual_intf(rdev, wdev);
+ wiphy_unlock(&rdev->wiphy);
}
}
- wiphy_unlock(&rdev->wiphy);
}
static void cfg80211_destroy_iface_wk(struct work_struct *work)
diff --git a/security/selinux/ima.c b/security/selinux/ima.c
index 727c4e43219d..ff7aea6b3774 100644
--- a/security/selinux/ima.c
+++ b/security/selinux/ima.c
@@ -77,7 +77,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
size_t policy_len;
int rc = 0;
- WARN_ON(!mutex_is_locked(&state->policy_mutex));
+ lockdep_assert_held(&state->policy_mutex);
state_str = selinux_ima_collect_state(state);
if (!state_str) {
@@ -117,7 +117,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
*/
void selinux_ima_measure_state(struct selinux_state *state)
{
- WARN_ON(mutex_is_locked(&state->policy_mutex));
+ lockdep_assert_not_held(&state->policy_mutex);
mutex_lock(&state->policy_mutex);
selinux_ima_measure_state_locked(state);
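The selinux/ima.c hunks replace WARN_ON(mutex_is_locked()) with lockdep assertions: mutex_is_locked() only reports that *some* task holds the mutex, so the old checks could both miss violations and fire spuriously, while lockdep_assert_held()/lockdep_assert_not_held() verify what the *current* task holds and compile away when lockdep is off. pthreads has no portable "held by me?" query for normal mutexes, which is roughly why the kernel leans on lockdep here; an error-checking mutex is the closest userspace analogue:

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t policy_mutex;

int main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&policy_mutex, &attr);

	pthread_mutex_lock(&policy_mutex);
	/* ~ lockdep_assert_held(): re-locking an errorcheck mutex we
	 * already own fails with EDEADLK instead of deadlocking
	 */
	assert(pthread_mutex_lock(&policy_mutex) == EDEADLK);
	pthread_mutex_unlock(&policy_mutex);
	return 0;
}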
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index d79bf07e16be..023bedd9dfa3 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -166,7 +166,7 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
"#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s granted=%s (global-pid=%u) task={ pid=%u ppid=%u uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
stamp.year, stamp.month, stamp.day, stamp.hour,
stamp.min, stamp.sec, r->profile, tomoyo_mode[r->mode],
- tomoyo_yesno(r->granted), gpid, tomoyo_sys_getpid(),
+ str_yes_no(r->granted), gpid, tomoyo_sys_getpid(),
tomoyo_sys_getppid(),
from_kuid(&init_user_ns, current_uid()),
from_kgid(&init_user_ns, current_gid()),
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 5c64927bf2b3..ff17abc96e5c 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/security.h>
+#include <linux/string_helpers.h>
#include "common.h"
/* String table for operation mode. */
@@ -175,16 +176,6 @@ static bool tomoyo_manage_by_non_root;
/* Utility functions. */
/**
- * tomoyo_yesno - Return "yes" or "no".
- *
- * @value: Bool value.
- */
-const char *tomoyo_yesno(const unsigned int value)
-{
- return value ? "yes" : "no";
-}
-
-/**
* tomoyo_addprintf - strncat()-like-snprintf().
*
* @buffer: Buffer to write to. Must be '\0'-terminated.
@@ -730,8 +721,8 @@ static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config)
{
tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n",
tomoyo_mode[config & 3],
- tomoyo_yesno(config & TOMOYO_CONFIG_WANT_GRANT_LOG),
- tomoyo_yesno(config & TOMOYO_CONFIG_WANT_REJECT_LOG));
+ str_yes_no(config & TOMOYO_CONFIG_WANT_GRANT_LOG),
+ str_yes_no(config & TOMOYO_CONFIG_WANT_REJECT_LOG));
}
/**
@@ -1354,8 +1345,8 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
case 3:
if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
tomoyo_io_printf(head, " grant_log=%s",
- tomoyo_yesno(cond->grant_log ==
- TOMOYO_GRANTLOG_YES));
+ str_yes_no(cond->grant_log ==
+ TOMOYO_GRANTLOG_YES));
tomoyo_set_lf(head);
return true;
}
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 85246b9df7ca..ca285f362705 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -959,7 +959,6 @@ char *tomoyo_read_token(struct tomoyo_acl_param *param);
char *tomoyo_realpath_from_path(const struct path *path);
char *tomoyo_realpath_nofollow(const char *pathname);
const char *tomoyo_get_exe(void);
-const char *tomoyo_yesno(const unsigned int value);
const struct tomoyo_path_info *tomoyo_compare_name_union
(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr);
const struct tomoyo_path_info *tomoyo_get_domainname
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index d1fcd1d5adae..6fd763d4d15b 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -511,7 +511,8 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
DEFAULT_GFP, 0);
if (!sgt)
return NULL;
- dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
+ sg_dma_address(sgt->sgl));
p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
if (p)
dmab->private_data = sgt;
@@ -540,9 +541,9 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
if (mode == SNDRV_DMA_SYNC_CPU) {
if (dmab->dev.dir == DMA_TO_DEVICE)
return;
+ invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
dmab->dev.dir);
- invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
} else {
if (dmab->dev.dir == DMA_FROM_DEVICE)
return;
@@ -671,9 +672,13 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
*/
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
- dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
- return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
- dmab->dev.dir, DEFAULT_GFP);
+ void *p;
+
+ p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+ dmab->dev.dir, DEFAULT_GFP);
+ if (p)
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
+ return p;
}
static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
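Both memalloc.c hunks correct the same API misuse: dma_need_sync() takes a DMA address as its second argument, not a dma_data_direction, and for the noncoherent case it can only be consulted once the allocation has actually produced that address. A kernel-style sketch of the corrected ordering (not a standalone program; assumes the usual DMA-mapping API):

#include <linux/dma-mapping.h>

/* allocate first, then ask whether the returned address needs explicit
 * syncs; passing a direction enum, as the old code did, only compiled
 * because both arguments are integer-like types
 */
static void *alloc_noncoherent(struct device *dev, size_t size,
			       dma_addr_t *addr,
			       enum dma_data_direction dir, bool *need_sync)
{
	void *p = dma_alloc_noncoherent(dev, size, addr, dir, GFP_KERNEL);

	if (p)			/* *addr is valid only on success */
		*need_sync = dma_need_sync(dev, *addr);
	return p;
}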
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4b0338c4c543..572ff0d1fafe 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1615,6 +1615,7 @@ static const struct snd_pci_quirk probe_mask_list[] = {
/* forced codec slots */
SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103),
SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103),
+ SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105),
/* WinFast VP200 H (Teradici) user reported broken communication */
SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101),
{}
@@ -1798,8 +1799,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
assign_position_fix(chip, check_position_fix(chip, position_fix[dev]));
- check_probe_mask(chip, dev);
-
if (single_cmd < 0) /* allow fallback to single_cmd at errors */
chip->fallback_to_single_cmd = 1;
else /* explicitly set to single_cmd or not */
@@ -1825,6 +1824,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
chip->bus.core.needs_damn_long_delay = 1;
}
+ check_probe_mask(chip, dev);
+
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
if (err < 0) {
dev_err(card->dev, "Error creating device [card]!\n");
@@ -1940,6 +1941,7 @@ static int azx_first_init(struct azx *chip)
dma_bits = 32;
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(dma_bits)))
dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(&pci->dev, UINT_MAX);
/* read number of streams from GCAP register instead of using
* hardcoded value
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8315bf7d4c38..3a42457984e9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -138,6 +138,22 @@ struct alc_spec {
* COEF access helper functions
*/
+static void coef_mutex_lock(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ snd_hda_power_up_pm(codec);
+ mutex_lock(&spec->coef_mutex);
+}
+
+static void coef_mutex_unlock(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ mutex_unlock(&spec->coef_mutex);
+ snd_hda_power_down_pm(codec);
+}
+
static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
unsigned int coef_idx)
{
@@ -151,12 +167,11 @@ static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
unsigned int coef_idx)
{
- struct alc_spec *spec = codec->spec;
unsigned int val;
- mutex_lock(&spec->coef_mutex);
+ coef_mutex_lock(codec);
val = __alc_read_coefex_idx(codec, nid, coef_idx);
- mutex_unlock(&spec->coef_mutex);
+ coef_mutex_unlock(codec);
return val;
}
@@ -173,11 +188,9 @@ static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
unsigned int coef_idx, unsigned int coef_val)
{
- struct alc_spec *spec = codec->spec;
-
- mutex_lock(&spec->coef_mutex);
+ coef_mutex_lock(codec);
__alc_write_coefex_idx(codec, nid, coef_idx, coef_val);
- mutex_unlock(&spec->coef_mutex);
+ coef_mutex_unlock(codec);
}
#define alc_write_coef_idx(codec, coef_idx, coef_val) \
@@ -198,11 +211,9 @@ static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
unsigned int coef_idx, unsigned int mask,
unsigned int bits_set)
{
- struct alc_spec *spec = codec->spec;
-
- mutex_lock(&spec->coef_mutex);
+ coef_mutex_lock(codec);
__alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set);
- mutex_unlock(&spec->coef_mutex);
+ coef_mutex_unlock(codec);
}
#define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \
@@ -235,9 +246,7 @@ struct coef_fw {
static void alc_process_coef_fw(struct hda_codec *codec,
const struct coef_fw *fw)
{
- struct alc_spec *spec = codec->spec;
-
- mutex_lock(&spec->coef_mutex);
+ coef_mutex_lock(codec);
for (; fw->nid; fw++) {
if (fw->mask == (unsigned short)-1)
__alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
@@ -245,7 +254,7 @@ static void alc_process_coef_fw(struct hda_codec *codec,
__alc_update_coefex_idx(codec, fw->nid, fw->idx,
fw->mask, fw->val);
}
- mutex_unlock(&spec->coef_mutex);
+ coef_mutex_unlock(codec);
}
/*
@@ -9170,6 +9179,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
SND_PCI_QUIRK(0x17aa, 0x3847, "Legion 7 16ACHG6", ALC287_FIXUP_LEGION_16ACHG6),
SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
diff --git a/sound/soc/amd/acp/acp-mach.h b/sound/soc/amd/acp/acp-mach.h
index fd6299844ebe..c855f50d6b34 100644
--- a/sound/soc/amd/acp/acp-mach.h
+++ b/sound/soc/amd/acp/acp-mach.h
@@ -21,7 +21,6 @@
#include <linux/gpio/consumer.h>
#define EN_SPKR_GPIO_GB 0x11F
-#define EN_SPKR_GPIO_NK 0x146
#define EN_SPKR_GPIO_NONE -EINVAL
enum be_id {
diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
index 07de46142655..4cc431e54fe1 100644
--- a/sound/soc/amd/acp/acp-sof-mach.c
+++ b/sound/soc/amd/acp/acp-sof-mach.c
@@ -37,7 +37,7 @@ static struct acp_card_drvdata sof_rt5682_max_data = {
.hs_codec_id = RT5682,
.amp_codec_id = MAX98360A,
.dmic_codec_id = DMIC,
- .gpio_spkr_en = EN_SPKR_GPIO_NK,
+ .gpio_spkr_en = EN_SPKR_GPIO_NONE,
};
static struct acp_card_drvdata sof_rt5682s_max_data = {
@@ -47,7 +47,7 @@ static struct acp_card_drvdata sof_rt5682s_max_data = {
.hs_codec_id = RT5682S,
.amp_codec_id = MAX98360A,
.dmic_codec_id = DMIC,
- .gpio_spkr_en = EN_SPKR_GPIO_NK,
+ .gpio_spkr_en = EN_SPKR_GPIO_NONE,
};
static const struct snd_kcontrol_new acp_controls[] = {
diff --git a/sound/soc/codecs/rt5668.c b/sound/soc/codecs/rt5668.c
index fb09715bf932..5b12cbf2ba21 100644
--- a/sound/soc/codecs/rt5668.c
+++ b/sound/soc/codecs/rt5668.c
@@ -1022,11 +1022,13 @@ static void rt5668_jack_detect_handler(struct work_struct *work)
container_of(work, struct rt5668_priv, jack_detect_work.work);
int val, btn_type;
- while (!rt5668->component)
- usleep_range(10000, 15000);
-
- while (!rt5668->component->card->instantiated)
- usleep_range(10000, 15000);
+ if (!rt5668->component || !rt5668->component->card ||
+ !rt5668->component->card->instantiated) {
+ /* card not yet ready, try later */
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5668->jack_detect_work, msecs_to_jiffies(15));
+ return;
+ }
mutex_lock(&rt5668->calibrate_mutex);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 0a0ec4a021e1..be68d573a490 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -1092,11 +1092,13 @@ void rt5682_jack_detect_handler(struct work_struct *work)
struct snd_soc_dapm_context *dapm;
int val, btn_type;
- while (!rt5682->component)
- usleep_range(10000, 15000);
-
- while (!rt5682->component->card->instantiated)
- usleep_range(10000, 15000);
+ if (!rt5682->component || !rt5682->component->card ||
+ !rt5682->component->card->instantiated) {
+ /* card not yet ready, try later */
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(15));
+ return;
+ }
dapm = snd_soc_component_get_dapm(rt5682->component);
diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
index efa1016831dd..1e662d1be2b3 100644
--- a/sound/soc/codecs/rt5682s.c
+++ b/sound/soc/codecs/rt5682s.c
@@ -824,11 +824,13 @@ static void rt5682s_jack_detect_handler(struct work_struct *work)
container_of(work, struct rt5682s_priv, jack_detect_work.work);
int val, btn_type;
- while (!rt5682s->component)
- usleep_range(10000, 15000);
-
- while (!rt5682s->component->card->instantiated)
- usleep_range(10000, 15000);
+ if (!rt5682s->component || !rt5682s->component->card ||
+ !rt5682s->component->card->instantiated) {
+ /* card not yet ready, try later */
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682s->jack_detect_work, msecs_to_jiffies(15));
+ return;
+ }
mutex_lock(&rt5682s->jdet_mutex);
mutex_lock(&rt5682s->calibrate_mutex);
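The same fix lands in all three rt5682-family codecs above: instead of busy-sleeping in the workqueue until the card is instantiated, which can block the worker indefinitely if the card never probes, the handler re-arms its own delayed work and returns. The re-arm pattern in kernel style (a trimmed sketch; struct priv abbreviates the three drivers' private structs):

#include <linux/workqueue.h>
#include <sound/soc.h>

struct priv {
	struct snd_soc_component *component;
	struct delayed_work jack_detect_work;
};

static void jack_detect_handler(struct work_struct *work)
{
	struct priv *p = container_of(work, struct priv,
				      jack_detect_work.work);

	if (!p->component || !p->component->card ||
	    !p->component->card->instantiated) {
		/* card not ready yet: retry in 15 ms without sleeping */
		mod_delayed_work(system_power_efficient_wq,
				 &p->jack_detect_work,
				 msecs_to_jiffies(15));
		return;
	}

	/* ... actual jack detection runs only once the card is live ... */
}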
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 6549e7fef3e3..c5ea3b115966 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
msleep(20);
gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
+ usleep_range(1000, 2000);
}
snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
TAS2770_RST);
+ usleep_range(1000, 2000);
}
static int tas2770_set_bias_level(struct snd_soc_component *component,
@@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
if (tas2770->sdz_gpio) {
gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
+ usleep_range(1000, 2000);
} else {
ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
TAS2770_PWR_CTRL_MASK,
@@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
tas2770->component = component;
- if (tas2770->sdz_gpio)
+ if (tas2770->sdz_gpio) {
gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
+ usleep_range(1000, 2000);
+ }
tas2770_reset(tas2770);
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f3672e3d1703..0582585236a2 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1441,7 +1441,8 @@ static int wm_adsp_buffer_parse_coeff(struct cs_dsp_coeff_ctl *cs_ctl)
int ret, i;
for (i = 0; i < 5; ++i) {
- ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1, sizeof(coeff_v1));
+ ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1,
+ min(cs_ctl->len, sizeof(coeff_v1)));
if (ret < 0)
return ret;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 148ddf4cace0..aeca58246fc7 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -952,6 +952,7 @@ static int skl_first_init(struct hdac_bus *bus)
/* allow 64bit DMA address if supported by H/W */
if (dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(64)))
dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(bus->dev, UINT_MAX);
/* initialize streams */
snd_hdac_ext_stream_init_all
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index a59e9d20cb46..4b1773c1fb95 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -524,7 +524,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
return -EINVAL;
}
- ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr);
+ ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
if (ret) {
dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
return ret;
@@ -665,7 +665,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
return -EINVAL;
}
if (interrupts & LPAIF_IRQ_PER(chan)) {
- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
+ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
@@ -676,7 +676,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
}
if (interrupts & LPAIF_IRQ_XRUN(chan)) {
- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
+ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
@@ -688,7 +688,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
}
if (interrupts & LPAIF_IRQ_ERR(chan)) {
- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
+ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 9833611b83d1..03ea9591fb16 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -308,7 +308,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
unsigned int sign_bit = mc->sign_bit;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- int err;
+ int err, ret;
bool type_2r = false;
unsigned int val2 = 0;
unsigned int val, val_mask;
@@ -350,12 +350,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
err = snd_soc_component_update_bits(component, reg, val_mask, val);
if (err < 0)
return err;
+ ret = err;
- if (type_2r)
+ if (type_2r) {
err = snd_soc_component_update_bits(component, reg2, val_mask,
- val2);
+ val2);
+ /* Don't discard any error code or drop change flag */
+ if (ret == 0 || err < 0) {
+ ret = err;
+ }
+ }
- return err;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
@@ -421,6 +427,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
int min = mc->min;
unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
int err = 0;
+ int ret;
unsigned int val, val_mask;
if (ucontrol->value.integer.value[0] < 0)
@@ -437,6 +444,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
err = snd_soc_component_update_bits(component, reg, val_mask, val);
if (err < 0)
return err;
+ ret = err;
if (snd_soc_volsw_is_stereo(mc)) {
unsigned int val2;
@@ -447,6 +455,11 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
err = snd_soc_component_update_bits(component, reg2, val_mask,
val2);
+
+ /* Don't discard any error code or drop change flag */
+ if (ret == 0 || err < 0) {
+ ret = err;
+ }
}
- return err;
+ return ret;
}
@@ -506,7 +519,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned int val, val_mask;
- int ret;
+ int err, ret;
if (invert)
val = (max - ucontrol->value.integer.value[0]) & mask;
@@ -515,9 +528,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
val_mask = mask << shift;
val = val << shift;
- ret = snd_soc_component_update_bits(component, reg, val_mask, val);
- if (ret < 0)
- return ret;
+ err = snd_soc_component_update_bits(component, reg, val_mask, val);
+ if (err < 0)
+ return err;
+ ret = err;
if (snd_soc_volsw_is_stereo(mc)) {
if (invert)
@@ -527,8 +541,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
val_mask = mask << shift;
val = val << shift;
- ret = snd_soc_component_update_bits(component, rreg, val_mask,
+ err = snd_soc_component_update_bits(component, rreg, val_mask,
val);
+ /* Don't discard any error code or drop change flag */
+ if (ret == 0 || err < 0) {
+ ret = err;
+ }
}
return ret;
@@ -877,6 +895,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
unsigned long mask = (1UL<<mc->nbits)-1;
long max = mc->max;
long val = ucontrol->value.integer.value[0];
+ int ret = 0;
unsigned int i;
if (val < mc->min || val > mc->max)
@@ -891,9 +910,11 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
regmask, regval);
if (err < 0)
return err;
+ if (err > 0)
+ ret = err;
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
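The four soc-ops.c hunks all enforce the ALSA kcontrol contract for put() handlers: return 1 when the value changed, 0 when it did not, and a negative errno on failure; returning 0 after a successful change would suppress the control-change notification to user space. The aggregation rule for a stereo (two-register) update, isolated and testable:

#include <stdio.h>

/* merge two register-update results under the put() contract:
 * <0 error, 0 no change, >0 changed
 */
static int combine(int ret, int err)
{
	/* don't discard an error code and don't drop a change flag */
	if (ret == 0 || err < 0)
		ret = err;
	return ret;
}

int main(void)
{
	printf("%d\n", combine(1, 0));	/* left changed only   ->  1 */
	printf("%d\n", combine(0, 1));	/* right changed only  ->  1 */
	printf("%d\n", combine(1, -5));	/* error dominates     -> -5 */
	return 0;
}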
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index c8fb082209ce..1385695d7745 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -956,6 +956,7 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
}
+ dma_set_max_seg_size(&pci->dev, UINT_MAX);
/* init streams */
ret = hda_dsp_stream_init(sdev);
diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
index 70319c822c10..2d444ec74202 100644
--- a/sound/usb/implicit.c
+++ b/sound/usb/implicit.c
@@ -47,13 +47,13 @@ struct snd_usb_implicit_fb_match {
static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
/* Generic matching */
IMPLICIT_FB_GENERIC_DEV(0x0499, 0x1509), /* Steinberg UR22 */
- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2080), /* M-Audio FastTrack Ultra */
- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2081), /* M-Audio FastTrack Ultra */
IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2030), /* M-Audio Fast Track C400 */
IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2031), /* M-Audio Fast Track C600 */
/* Fixed EP */
/* FIXME: check the availability of generic matching */
+ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */
+ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */
IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */
IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0001, 0x81, 2), /* Solid State Logic SSL2 */
IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 630766ba259f..a5641956ef10 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -3678,17 +3678,14 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
cval->cache_val[idx]);
if (err < 0)
- return err;
+ break;
}
idx++;
}
} else {
/* master */
- if (cval->cached) {
- err = snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
- if (err < 0)
- return err;
- }
+ if (cval->cached)
+ snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
}
return 0;
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 3faf0f97edb1..a4a39c3e0f19 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -476,6 +476,7 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
index 1600b17dbb8a..1d3a90d93fe2 100644
--- a/tools/cgroup/memcg_slabinfo.py
+++ b/tools/cgroup/memcg_slabinfo.py
@@ -11,7 +11,7 @@ from drgn.helpers.linux import list_for_each_entry, list_empty
from drgn.helpers.linux import for_each_page
from drgn.helpers.linux.cpumask import for_each_online_cpu
from drgn.helpers.linux.percpu import per_cpu_ptr
-from drgn import container_of, FaultError, Object
+from drgn import container_of, FaultError, Object, cast
DESC = """
@@ -69,15 +69,15 @@ def oo_objects(s):
def count_partial(n, fn):
- nr_pages = 0
- for page in list_for_each_entry('struct page', n.partial.address_of_(),
- 'lru'):
- nr_pages += fn(page)
- return nr_pages
+ nr_objs = 0
+ for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
+ 'slab_list'):
+ nr_objs += fn(slab)
+ return nr_objs
-def count_free(page):
- return page.objects - page.inuse
+def count_free(slab):
+ return slab.objects - slab.inuse
def slub_get_slabinfo(s, cfg):
@@ -145,14 +145,14 @@ def detect_kernel_config():
return cfg
-def for_each_slab_page(prog):
+def for_each_slab(prog):
PGSlab = 1 << prog.constant('PG_slab')
PGHead = 1 << prog.constant('PG_head')
for page in for_each_page(prog):
try:
if page.flags.value_() & PGSlab:
- yield page
+ yield cast('struct slab *', page)
except FaultError:
pass
@@ -190,13 +190,13 @@ def main():
'list'):
obj_cgroups.add(ptr.value_())
- # look over all slab pages, belonging to non-root memcgs
- # and look for objects belonging to the given memory cgroup
- for page in for_each_slab_page(prog):
- objcg_vec_raw = page.memcg_data.value_()
+ # iterate over all slab folios and look for objects belonging
+ # to the given memory cgroup
+ for slab in for_each_slab(prog):
+ objcg_vec_raw = slab.memcg_data.value_()
if objcg_vec_raw == 0:
continue
- cache = page.slab_cache
+ cache = slab.slab_cache
if not cache:
continue
addr = cache.value_()
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 5191b57e1562..507ee1f2aa96 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_VM_GPA_BITS 207
#define KVM_CAP_XSAVE2 208
#define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 1b65042ab1db..82858b697c05 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -465,6 +465,8 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
* siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+ * Note that siginfo_t::si_perf_data is long-sized, so sig_data will be
+ * truncated accordingly on 32-bit architectures.
*/
__u64 sig_data;
};
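
The new comment matters to consumers of sigtrap events: the kernel copies sig_data into the long-sized siginfo_t::si_perf_data field. A hedged userspace sketch (it assumes kernel and libc headers recent enough to expose si_perf_data):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    static volatile sig_atomic_t got_trap;
    static volatile unsigned long cookie;

    static void on_trap(int sig, siginfo_t *info, void *ctx)
    {
            (void)sig; (void)ctx;
            /* long-sized: a 64-bit sig_data is truncated here on 32-bit */
            cookie = info->si_perf_data;
            got_trap = 1;
    }

    int main(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_flags = SA_SIGINFO;
            sa.sa_sigaction = on_trap;
            sigaction(SIGTRAP, &sa, NULL);
            /* ... open a perf event with attr.sigtrap = 1 and attr.sig_data
             * set, then trigger it; the handler stores the cookie ... */
            if (got_trap)
                    printf("cookie: %#lx\n", cookie);
            return 0;
    }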
diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h
index 581f9ffb4237..1973a18c096b 100644
--- a/tools/lib/perf/include/internal/cpumap.h
+++ b/tools/lib/perf/include/internal/cpumap.h
@@ -3,11 +3,7 @@
#define __LIBPERF_INTERNAL_CPUMAP_H
#include <linux/refcount.h>
-
-/** A wrapper around a CPU to avoid confusing a CPU number with an index into the perf_cpu_map's map array. */
-struct perf_cpu {
- int cpu;
-};
+#include <perf/cpumap.h>
/**
* A sized, reference counted, sorted array of integers representing CPU
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 15b8faafd615..4a2edbdb5e2b 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -7,6 +7,11 @@
#include <stdio.h>
#include <stdbool.h>
+/** A wrapper around a CPU to avoid confusing a CPU number with an index into the perf_cpu_map's map array. */
+struct perf_cpu {
+ int cpu;
+};
+
LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 93696affda2e..6fa0d651576b 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -2,6 +2,7 @@ LIBPERF_0.0.1 {
global:
libperf_init;
perf_cpu_map__dummy_new;
+ perf_cpu_map__default_new;
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index d39378eaf897..87b0510a556f 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -14,6 +14,8 @@ static int libperf_print(enum libperf_print_level level,
int test_cpumap(int argc, char **argv)
{
struct perf_cpu_map *cpus;
+ struct perf_cpu cpu;
+ int idx;
__T_START;
@@ -27,6 +29,15 @@ int test_cpumap(int argc, char **argv)
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
+ cpus = perf_cpu_map__default_new();
+ if (!cpus)
+ return -1;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ __T("wrong cpu number", cpu.cpu != -1);
+
+ perf_cpu_map__put(cpus);
+
__T_END;
return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index b3479dfa9a1c..fa854c83b7e7 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <inttypes.h>
#include <sched.h>
#include <stdio.h>
#include <stdarg.h>
@@ -526,12 +527,12 @@ static int test_stat_multiplexing(void)
min = counts[0].val;
for (i = 0; i < EVENT_NUM; i++) {
- __T_VERBOSE("Event %2d -- Raw count = %lu, run = %lu, enable = %lu\n",
+ __T_VERBOSE("Event %2d -- Raw count = %" PRIu64 ", run = %" PRIu64 ", enable = %" PRIu64 "\n",
i, counts[i].val, counts[i].run, counts[i].ena);
perf_counts_values__scale(&counts[i], true, &scaled);
if (scaled == 1) {
- __T_VERBOSE("\t Scaled count = %lu (%.2lf%%, %lu/%lu)\n",
+ __T_VERBOSE("\t Scaled count = %" PRIu64 " (%.2lf%%, %" PRIu64 "/%" PRIu64 ")\n",
counts[i].val,
(double)counts[i].run / (double)counts[i].ena * 100.0,
counts[i].run, counts[i].ena);
diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
index 794a375dad36..b2aec04fce8f 100644
--- a/tools/lib/subcmd/subcmd-util.h
+++ b/tools/lib/subcmd/subcmd-util.h
@@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...)
static inline void *xrealloc(void *ptr, size_t size)
{
void *ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret) {
- ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret)
- die("Out of memory, realloc failed");
- }
+ if (!ret)
+ die("Out of memory, realloc failed");
return ret;
}
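
One caveat to the simplified wrapper: realloc(ptr, 0) may legitimately return NULL on some C libraries, and the version above would misreport that as out-of-memory. A defensive variant (a sketch, not the library's code):

    #include <stdio.h>
    #include <stdlib.h>

    /* die()-on-failure realloc that sidesteps the ambiguous size-0 case by
     * always requesting at least one byte. */
    static void *xrealloc_safe(void *ptr, size_t size)
    {
            void *ret = realloc(ptr, size ? size : 1);

            if (!ret) {
                    fprintf(stderr, "Out of memory, realloc failed\n");
                    exit(1);
            }
            return ret;
    }

    int main(void)
    {
            char *p = xrealloc_safe(NULL, 0);       /* never NULL */

            free(p);
            return 0;
    }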
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index abae8184e171..fa478ddcd18a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -463,7 +463,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
return -EINVAL;
if (PRINT_FIELD(WEIGHT) &&
- evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(SYM) &&
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 32844d8a0ea5..52b137a184a6 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1536,13 +1536,20 @@ static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
return fprintf(fp, " ? ");
}
+static pid_t workload_pid = -1;
static bool done = false;
static bool interrupted = false;
-static void sig_handler(int sig)
+static void sighandler_interrupt(int sig __maybe_unused)
{
- done = true;
- interrupted = sig == SIGINT;
+ done = interrupted = true;
+}
+
+static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
+ void *context __maybe_unused)
+{
+ if (info->si_pid == workload_pid)
+ done = true;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
@@ -3938,7 +3945,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
bool draining = false;
trace->live = true;
- signal(SIGCHLD, sig_handler);
if (!trace->raw_augmented_syscalls) {
if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
@@ -4018,6 +4024,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
fprintf(trace->output, "Couldn't run the workload!\n");
goto out_delete_evlist;
}
+ workload_pid = evlist->workload.pid;
}
err = evlist__open(evlist);
@@ -4887,10 +4894,16 @@ int cmd_trace(int argc, const char **argv)
const char * const trace_subcommands[] = { "record", NULL };
int err = -1;
char bf[BUFSIZ];
+ struct sigaction sigchld_act;
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
- signal(SIGINT, sig_handler);
+ signal(SIGINT, sighandler_interrupt);
+
+ memset(&sigchld_act, 0, sizeof(sigchld_act));
+ sigchld_act.sa_flags = SA_SIGINFO;
+ sigchld_act.sa_sigaction = sighandler_chld;
+ sigaction(SIGCHLD, &sigchld_act, NULL);
trace.evlist = evlist__new();
trace.sctbl = syscalltbl__new();
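
The builtin-trace change works because SA_SIGINFO hands the handler a siginfo_t, so it can compare si_pid against the workload and ignore SIGCHLD from unrelated children; plain signal() offers no sender information. A standalone sketch of the same shape:

    #include <signal.h>
    #include <string.h>
    #include <sys/types.h>

    static volatile pid_t workload_pid = -1;
    static volatile sig_atomic_t done;

    /* Stop only when the workload itself exits. */
    static void on_chld(int sig, siginfo_t *info, void *ctx)
    {
            (void)sig; (void)ctx;
            if (info->si_pid == workload_pid)
                    done = 1;
    }

    int main(void)
    {
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_flags = SA_SIGINFO;
            act.sa_sigaction = on_chld;
            sigaction(SIGCHLD, &act, NULL);
            /* ... fork the workload and store its pid in workload_pid ... */
            return 0;
    }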
diff --git a/tools/perf/tests/attr/README b/tools/perf/tests/attr/README
index a36f49fb4dbe..1116fc6bf2ac 100644
--- a/tools/perf/tests/attr/README
+++ b/tools/perf/tests/attr/README
@@ -45,8 +45,10 @@ Following tests are defined (with perf commands):
perf record -d kill (test-record-data)
perf record -F 100 kill (test-record-freq)
perf record -g kill (test-record-graph-default)
+ perf record -g kill (test-record-graph-default-aarch64)
perf record --call-graph dwarf kill (test-record-graph-dwarf)
perf record --call-graph fp kill (test-record-graph-fp)
+ perf record --call-graph fp kill (test-record-graph-fp-aarch64)
perf record --group -e cycles,instructions kill (test-record-group)
perf record -e '{cycles,instructions}' kill (test-record-group1)
perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default
index 5d8234d50845..f0a18b4ea4f5 100644
--- a/tools/perf/tests/attr/test-record-graph-default
+++ b/tools/perf/tests/attr/test-record-graph-default
@@ -2,6 +2,8 @@
command = record
args = --no-bpf-event -g kill >/dev/null 2>&1
ret = 1
+# arm64 enables registers in the default mode (fp)
+arch = !aarch64
[event:base-record]
sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-default-aarch64 b/tools/perf/tests/attr/test-record-graph-default-aarch64
new file mode 100644
index 000000000000..e98d62efb6f7
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-default-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -g kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp
index 5630521c0b0f..a6e60e839205 100644
--- a/tools/perf/tests/attr/test-record-graph-fp
+++ b/tools/perf/tests/attr/test-record-graph-fp
@@ -2,6 +2,8 @@
command = record
args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
ret = 1
+# arm64 enables registers in fp mode
+arch = !aarch64
[event:base-record]
sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-fp-aarch64 b/tools/perf/tests/attr/test-record-graph-fp-aarch64
new file mode 100644
index 000000000000..cbeea9971285
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-fp-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c
index 1f147fe6595f..e32ece90e164 100644
--- a/tools/perf/tests/sigtrap.c
+++ b/tools/perf/tests/sigtrap.c
@@ -22,19 +22,6 @@
#include "tests.h"
#include "../perf-sys.h"
-/*
- * PowerPC and S390 do not support creation of instruction breakpoints using the
- * perf_event interface.
- *
- * Just disable the test for these architectures until these issues are
- * resolved.
- */
-#if defined(__powerpc__) || defined(__s390x__)
-#define BP_ACCOUNT_IS_SUPPORTED 0
-#else
-#define BP_ACCOUNT_IS_SUPPORTED 1
-#endif
-
#define NUM_THREADS 5
static struct {
@@ -135,7 +122,7 @@ static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __m
char sbuf[STRERR_BUFSIZE];
int i, fd, ret = TEST_FAIL;
- if (!BP_ACCOUNT_IS_SUPPORTED) {
+ if (!BP_SIGNAL_IS_SUPPORTED) {
pr_debug("Test not supported on this architecture");
return TEST_SKIP;
}
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 7ecfaac7536a..16ec605a9fe4 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1220,9 +1220,10 @@ bpf__obj_config_map(struct bpf_object *obj,
pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
- free(map_name);
if (!err)
*key_scan_pos += strlen(map_opt);
+
+ free(map_name);
return err;
}
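
The bpf-loader reordering looks cosmetic but is not: map_opt points into the map_name allocation (as the surrounding code suggests), so calling strlen(map_opt) after free(map_name) read freed memory. A minimal reproduction of the pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *name = strdup("value=42");
            char *opt = strchr(name, '=') + 1;      /* points into 'name' */
            size_t n;

            /* Wrong order would be: free(name); n = strlen(opt);
             * a use-after-free. Correct order, as in the fix: use, then free. */
            n = strlen(opt);
            free(name);

            printf("consumed %zu option bytes\n", n);
            return 0;
    }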
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 4f672f7d008c..8b95fb3c4d7b 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -50,8 +50,6 @@ struct cs_etm_auxtrace {
u8 timeless_decoding;
u8 snapshot_mode;
u8 data_queued;
- u8 sample_branches;
- u8 sample_instructions;
int num_cpu;
u64 latest_kernel_timestamp;
@@ -410,8 +408,8 @@ static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
{
struct cs_etm_packet *tmp;
- if (etm->sample_branches || etm->synth_opts.last_branch ||
- etm->sample_instructions) {
+ if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
+ etm->synth_opts.instructions) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
@@ -1365,7 +1363,6 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
- etm->sample_branches = true;
etm->branches_sample_type = attr.sample_type;
etm->branches_id = id;
id += 1;
@@ -1389,7 +1386,6 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
- etm->sample_instructions = true;
etm->instructions_sample_type = attr.sample_type;
etm->instructions_id = id;
id += 1;
@@ -1420,7 +1416,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
tidq->prev_packet->last_instr_taken_branch)
cs_etm__update_last_branch_rb(etmq, tidq);
- if (etm->sample_instructions &&
+ if (etm->synth_opts.instructions &&
tidq->period_instructions >= etm->instructions_sample_period) {
/*
* Emit instruction sample periodically
@@ -1503,7 +1499,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
}
}
- if (etm->sample_branches) {
+ if (etm->synth_opts.branches) {
bool generate_sample = false;
/* Generate sample for tracing on packet */
@@ -1557,6 +1553,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
goto swap_packet;
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
@@ -1582,7 +1579,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
}
- if (etm->sample_branches &&
+ if (etm->synth_opts.branches &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
err = cs_etm__synth_branch_sample(etmq, tidq);
if (err)
@@ -1614,6 +1611,7 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq,
* the trace.
*/
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index f5d260b1df4d..15a4547d608e 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr)
if (!files)
return -ENOMEM;
- data->dir.version = PERF_DIR_VERSION;
- data->dir.files = files;
- data->dir.nr = nr;
-
for (i = 0; i < nr; i++) {
struct perf_data_file *file = &files[i];
@@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr)
file->fd = ret;
}
+ data->dir.version = PERF_DIR_VERSION;
+ data->dir.files = files;
+ data->dir.nr = nr;
return 0;
out_err:
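
The data.c hunk moves the assignments so that dir.version, dir.files and dir.nr are published only after every per-file fd was created; the error path then never sees a half-initialized struct. A sketch of the publish-on-success idiom:

    #include <stdlib.h>

    struct dir_state {
            void **files;
            int nr;
    };

    /* Fill a private array first; publish into 'out' only on full success. */
    static int create_dir(struct dir_state *out, int nr)
    {
            void **files = calloc(nr, sizeof(*files));
            int i;

            if (!files)
                    return -1;
            for (i = 0; i < nr; i++) {
                    files[i] = malloc(16);
                    if (!files[i])
                            goto out_err;
            }
            out->files = files;
            out->nr = nr;
            return 0;
    out_err:
            while (i--)
                    free(files[i]);
            free(files);
            return -1;
    }

    int main(void)
    {
            struct dir_state d;

            return create_dir(&d, 4);
    }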
diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
index 7f234215147d..57f02beef023 100644
--- a/tools/perf/util/evlist-hybrid.c
+++ b/tools/perf/util/evlist-hybrid.c
@@ -154,8 +154,8 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
perf_cpu_map__put(matched_cpus);
perf_cpu_map__put(unmatched_cpus);
}
-
- ret = (unmatched_count == events_nr) ? -1 : 0;
+ if (events_nr)
+ ret = (unmatched_count == events_nr) ? -1 : 0;
out:
perf_cpu_map__put(cpus);
return ret;
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
new file mode 100644
index 000000000000..f74b82305da8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "timer_crash.skel.h"
+
+enum {
+ MODE_ARRAY,
+ MODE_HASH,
+};
+
+static void test_timer_crash_mode(int mode)
+{
+ struct timer_crash *skel;
+
+ skel = timer_crash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
+ return;
+ skel->bss->pid = getpid();
+ skel->bss->crash_map = mode;
+ if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach"))
+ goto end;
+ usleep(1);
+end:
+ timer_crash__destroy(skel);
+}
+
+void test_timer_crash(void)
+{
+ if (test__start_subtest("array"))
+ test_timer_crash_mode(MODE_ARRAY);
+ if (test__start_subtest("hash"))
+ test_timer_crash_mode(MODE_HASH);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
index 2966564b8497..6c85b00f27b2 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
@@ -235,7 +235,7 @@ SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
- int *start, *end, *start_push, *end_push, *start_pop, *pop;
+ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
@@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg)
{
int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
+ int err = 0;
__u64 flags = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg)
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
@@ -338,7 +345,7 @@ SEC("sk_msg5")
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_PASS;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
diff --git a/tools/testing/selftests/bpf/progs/timer_crash.c b/tools/testing/selftests/bpf/progs/timer_crash.c
new file mode 100644
index 000000000000..f8f7944e70da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_crash.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_elem {
+ struct bpf_timer timer;
+ struct bpf_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} amap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} hmap SEC(".maps");
+
+int pid = 0;
+int crash_map = 0; /* 0 for amap, 1 for hmap */
+
+SEC("fentry/do_nanosleep")
+int sys_enter(void *ctx)
+{
+ struct map_elem *e, value = {};
+ void *map = crash_map ? (void *)&hmap : (void *)&amap;
+
+ if (bpf_get_current_task_btf()->tgid != pid)
+ return 0;
+
+ *(void **)&value = (void *)0xdeadcaf3;
+
+ bpf_map_update_elem(map, &(int){0}, &value, 0);
+ /* For the array map, bpf_map_update_elem() calls
+ * check_and_free_timer_in_array(), which triggers the crash if the
+ * timer pointer was overwritten; for the hash map we need to use
+ * bpf_timer_cancel() instead.
+ */
+ if (crash_map == 1) {
+ e = bpf_map_lookup_elem(map, &(int){0});
+ if (!e)
+ return 0;
+ bpf_timer_cancel(&e->timer);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index 12c5e27d32c1..2d7fca446c7f 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -3,8 +3,8 @@ CFLAGS = -Wall
CFLAGS += -Wno-nonnull
CFLAGS += -D_GNU_SOURCE
-TEST_PROGS := binfmt_script non-regular
-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216
+TEST_PROGS := binfmt_script
+TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
# Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
index e96e279e0533..25432b8cd5bd 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
@@ -19,7 +19,7 @@ fail() { # mesg
FILTER=set_ftrace_filter
FUNC1="schedule"
-FUNC2="do_softirq"
+FUNC2="scheduler_tick"
ALL_FUNCS="#### all functions enabled ####"
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0e4926bc9a58..17c3f0749f05 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -82,7 +82,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 192a2899bae8..94df2692e6e4 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -455,6 +455,7 @@ static void mfd_fail_write(int fd)
printf("mmap()+mprotect() didn't fail as expected\n");
abort();
}
+ munmap(p, mfd_def_size);
}
/* verify PUNCH_HOLE fails */
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index f31205f04ee0..8c5fea68ae67 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -1236,7 +1236,7 @@ static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long
}
/**
- * Validate that an attached mount in our mount namespace can be idmapped.
+ * Validate that an attached mount in our mount namespace cannot be idmapped.
* (The kernel enforces that the mount's mount namespace and the caller's mount
* namespace match.)
*/
@@ -1259,7 +1259,7 @@ TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
- ASSERT_EQ(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 2674ba20d524..ff821025d309 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -71,6 +71,36 @@ chk_msk_remote_key_nr()
__chk_nr "grep -c remote_key" $*
}
+# $1: ns, $2: port
+wait_local_port_listen()
+{
+ local listener_ns="${1}"
+ local port="${2}"
+
+ local port_hex i
+
+ port_hex="$(printf "%04X" "${port}")"
+ for i in $(seq 10); do
+ ip netns exec "${listener_ns}" cat /proc/net/tcp | \
+ awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
+ break
+ sleep 0.1
+ done
+}
+
+wait_connected()
+{
+ local listener_ns="${1}"
+ local port="${2}"
+
+ local port_hex i
+
+ port_hex="$(printf "%04X" "${port}")"
+ for i in $(seq 10); do
+ ip netns exec ${listener_ns} grep -q " 0100007F:${port_hex} " /proc/net/tcp && break
+ sleep 0.1
+ done
+}
trap cleanup EXIT
ip netns add $ns
@@ -81,15 +111,15 @@ echo "a" | \
ip netns exec $ns \
./mptcp_connect -p 10000 -l -t ${timeout_poll} \
0.0.0.0 >/dev/null &
-sleep 0.1
+wait_local_port_listen $ns 10000
chk_msk_nr 0 "no msk on netns creation"
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
+ ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
127.0.0.1 >/dev/null &
-sleep 0.1
+wait_connected $ns 10000
chk_msk_nr 2 "after MPC handshake "
chk_msk_remote_key_nr 2 "....chk remote_key"
chk_msk_fallback_nr 0 "....chk no fallback"
@@ -101,13 +131,13 @@ echo "a" | \
ip netns exec $ns \
./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
0.0.0.0 >/dev/null &
-sleep 0.1
+wait_local_port_listen $ns 10001
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
+ ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
127.0.0.1 >/dev/null &
-sleep 0.1
+wait_connected $ns 10001
chk_msk_fallback_nr 1 "check fallback"
flush_pids
@@ -119,7 +149,7 @@ for I in `seq 1 $NR_CLIENTS`; do
./mptcp_connect -p $((I+10001)) -l -w 10 \
-t ${timeout_poll} 0.0.0.0 >/dev/null &
done
-sleep 0.1
+wait_local_port_listen $ns $((NR_CLIENTS + 10001))
for I in `seq 1 $NR_CLIENTS`; do
echo "b" | \
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index c0801df15f54..0c8a2a20b96c 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -660,6 +660,7 @@ chk_join_nr()
local ack_nr=$4
local count
local dump_stats
+ local with_cookie
printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn"
count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'`
@@ -673,12 +674,20 @@ chk_join_nr()
fi
echo -n " - synack"
+ with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies`
count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'`
[ -z "$count" ] && count=0
if [ "$count" != "$syn_ack_nr" ]; then
- echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
- ret=1
- dump_stats=1
+ # simultaneous connections exceeding the limit with syncookies enabled
+ # can get as far as SYN-ACK validation, as the connection limit can be
+ # enforced reliably only after subflow creation
+ if [ "$with_cookie" = 2 ] && [ "$count" -gt "$syn_ack_nr" ] && [ "$count" -le "$syn_nr" ]; then
+ echo -n "[ ok ]"
+ else
+ echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
+ ret=1
+ dump_stats=1
+ fi
else
echo -n "[ ok ]"
fi
@@ -752,11 +761,17 @@ chk_add_nr()
local mis_ack_nr=${8:-0}
local count
local dump_stats
+ local timeout
+
+ timeout=`ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout`
printf "%-39s %s" " " "add"
- count=`ip netns exec $ns2 nstat -as | grep MPTcpExtAddAddr | awk '{print $2}'`
+ count=`ip netns exec $ns2 nstat -as MPTcpExtAddAddr | grep MPTcpExtAddAddr | awk '{print $2}'`
[ -z "$count" ] && count=0
- if [ "$count" != "$add_nr" ]; then
+
+ # if the test configured a short timeout, tolerate more ADD_ADDR options
+ # than expected, due to retransmissions
+ if [ "$count" != "$add_nr" ] && [ "$timeout" -gt 1 -o "$count" -lt "$add_nr" ]; then
echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
ret=1
dump_stats=1
@@ -961,7 +976,7 @@ wait_for_tw()
local ns=$1
while [ $time -lt $timeout_ms ]; do
- local cnt=$(ip netns exec $ns ss -t state time-wait |wc -l)
+ local cnt=$(ip netns exec $ns nstat -as TcpAttemptFails | grep TcpAttemptFails | awk '{print $2}')
[ "$cnt" = 1 ] && return 1
time=$((time + 100))
@@ -1158,7 +1173,10 @@ signal_address_tests()
ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags signal
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags signal
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags signal
- run_tests $ns1 $ns2 10.0.1.1
+
+ # the peer could miss some address notifications; allow retransmission
+ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
chk_join_nr "signal addresses race test" 3 3 3
# the server will not signal the address terminating
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index ffca314897c4..e4f845dd942b 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -6,7 +6,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
- conntrack_vrf.sh
+ conntrack_vrf.sh nft_synproxy.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
index df322e47a54f..b35010cc7f6a 100755
--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
@@ -1601,4 +1601,4 @@ for name in ${TESTS}; do
done
done
-[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP}
+[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
index 6caf6ac8c285..695a1958723f 100755
--- a/tools/testing/selftests/netfilter/nft_fib.sh
+++ b/tools/testing/selftests/netfilter/nft_fib.sh
@@ -174,6 +174,7 @@ test_ping() {
ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null
sleep 3
diff --git a/tools/testing/selftests/netfilter/nft_synproxy.sh b/tools/testing/selftests/netfilter/nft_synproxy.sh
new file mode 100755
index 000000000000..b62933b680d6
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_synproxy.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+rnd=$(mktemp -u XXXXXXXX)
+nsr="nsr-$rnd" # synproxy machine
+ns1="ns1-$rnd" # iperf client
+ns2="ns2-$rnd" # iperf server
+
+checktool (){
+ if ! $1 > /dev/null 2>&1; then
+ echo "SKIP: Could not $2"
+ exit $ksft_skip
+ fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "iperf3 --version" "run test without iperf3"
+checktool "ip netns add $nsr" "create net namespace"
+
+modprobe -q nf_conntrack
+
+ip netns add $ns1
+ip netns add $ns2
+
+cleanup() {
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+ ip netns pids $ns2 | xargs kill 2>/dev/null
+ ip netns del $ns1
+ ip netns del $ns2
+
+ ip netns del $nsr
+}
+
+trap cleanup EXIT
+
+ip link add veth0 netns $nsr type veth peer name eth0 netns $ns1
+ip link add veth1 netns $nsr type veth peer name eth0 netns $ns2
+
+for dev in lo veth0 veth1; do
+ip -net $nsr link set $dev up
+done
+
+ip -net $nsr addr add 10.0.1.1/24 dev veth0
+ip -net $nsr addr add 10.0.2.1/24 dev veth1
+
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth0.forwarding=1
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth1.forwarding=1
+ip netns exec $nsr sysctl -q net.netfilter.nf_conntrack_tcp_loose=0
+
+for n in $ns1 $ns2; do
+ ip -net $n link set lo up
+ ip -net $n link set eth0 up
+done
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns2 addr add 10.0.2.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns2 route add default via 10.0.2.1
+
+# test basic connectivity
+if ! ip netns exec $ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
+ echo "ERROR: $ns1 cannot reach $ns2" 1>&2
+ exit 1
+fi
+
+if ! ip netns exec $ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
+ echo "ERROR: $ns2 cannot reach $ns1" 1>&2
+ exit 1
+fi
+
+ip netns exec $ns2 iperf3 -s > /dev/null 2>&1 &
+# ip netns exec $nsr tcpdump -vvv -n -i veth1 tcp | head -n 10 &
+
+sleep 1
+
+ip netns exec $nsr nft -f - <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority -300; policy accept;
+ meta iif veth0 tcp flags syn counter notrack
+ }
+
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+
+ ct state new,established counter accept
+
+ meta iif veth0 meta l4proto tcp ct state untracked,invalid synproxy mss 1460 sack-perm timestamp
+
+ ct state invalid counter drop
+
+ # make ns2 unreachable without TCP synproxy
+ tcp flags syn counter drop
+ }
+}
+EOF
+if [ $? -ne 0 ]; then
+ echo "SKIP: Cannot add nft synproxy"
+ exit $ksft_skip
+fi
+
+ip netns exec $ns1 timeout 5 iperf3 -c 10.0.2.99 -n $((1 * 1024 * 1024)) > /dev/null
+# capture iperf3's exit status right away, before echo/nft can overwrite $?
+ret=$?
+
+if [ $ret -ne 0 ]; then
+ echo "FAIL: iperf3 returned an error" 1>&2
+ ip netns exec $nsr nft list ruleset
+else
+ echo "PASS: synproxy connection successful"
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index 0ebfe8b0e147..585f7a0c10cb 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -Wl,-no-as-needed -Wall
+CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/
LDFLAGS += -lpthread
TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark
diff --git a/tools/testing/selftests/vm/map_fixed_noreplace.c b/tools/testing/selftests/vm/map_fixed_noreplace.c
index d91bde511268..eed44322d1a6 100644
--- a/tools/testing/selftests/vm/map_fixed_noreplace.c
+++ b/tools/testing/selftests/vm/map_fixed_noreplace.c
@@ -17,9 +17,6 @@
#define MAP_FIXED_NOREPLACE 0x100000
#endif
-#define BASE_ADDRESS (256ul * 1024 * 1024)
-
-
static void dump_maps(void)
{
char cmd[32];
@@ -28,18 +25,46 @@ static void dump_maps(void)
system(cmd);
}
+static unsigned long find_base_addr(unsigned long size)
+{
+ void *addr;
+ unsigned long flags;
+
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ addr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
+ if (addr == MAP_FAILED) {
+ printf("Error: couldn't map the space we need for the test\n");
+ return 0;
+ }
+
+ if (munmap(addr, size) != 0) {
+ printf("Error: couldn't map the space we need for the test\n");
+ return 0;
+ }
+ return (unsigned long)addr;
+}
+
int main(void)
{
+ unsigned long base_addr;
unsigned long flags, addr, size, page_size;
char *p;
page_size = sysconf(_SC_PAGE_SIZE);
+ // let's find a base addr that is free before we start the tests
+ size = 5 * page_size;
+ base_addr = find_base_addr(size);
+ if (!base_addr) {
+ printf("Error: couldn't map the space we need for the test\n");
+ return 1;
+ }
+
flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE;
// Check we can map all the areas we need below
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 5 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
@@ -60,7 +85,7 @@ int main(void)
printf("unmap() successful\n");
errno = 0;
- addr = BASE_ADDRESS + page_size;
+ addr = base_addr + page_size;
size = 3 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -80,7 +105,7 @@ int main(void)
* +4 | free | new
*/
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 5 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -101,7 +126,7 @@ int main(void)
* +4 | free |
*/
errno = 0;
- addr = BASE_ADDRESS + (2 * page_size);
+ addr = base_addr + (2 * page_size);
size = page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -121,7 +146,7 @@ int main(void)
* +4 | free | new
*/
errno = 0;
- addr = BASE_ADDRESS + (3 * page_size);
+ addr = base_addr + (3 * page_size);
size = 2 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -141,7 +166,7 @@ int main(void)
* +4 | free |
*/
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 2 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -161,7 +186,7 @@ int main(void)
* +4 | free |
*/
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -181,7 +206,7 @@ int main(void)
* +4 | free | new
*/
errno = 0;
- addr = BASE_ADDRESS + (4 * page_size);
+ addr = base_addr + (4 * page_size);
size = page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -192,7 +217,7 @@ int main(void)
return 1;
}
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 5 * page_size;
if (munmap((void *)addr, size) != 0) {
dump_maps();
diff --git a/tools/tracing/rtla/src/osnoise.c b/tools/tracing/rtla/src/osnoise.c
index 5648f9252e58..e60f1862bad0 100644
--- a/tools/tracing/rtla/src/osnoise.c
+++ b/tools/tracing/rtla/src/osnoise.c
@@ -810,7 +810,7 @@ struct osnoise_tool *osnoise_init_trace_tool(char *tracer)
retval = enable_tracer_by_name(trace->trace.inst, tracer);
if (retval) {
- err_msg("Could not enable osnoiser tracer for tracing\n");
+ err_msg("Could not enable %s tracer for tracing\n", tracer);
goto out_err;
}
diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
index 1f0b7fce55cf..52c053cc1789 100644
--- a/tools/tracing/rtla/src/osnoise_hist.c
+++ b/tools/tracing/rtla/src/osnoise_hist.c
@@ -426,7 +426,7 @@ static void osnoise_hist_usage(char *usage)
static const char * const msg[] = {
"",
" usage: rtla osnoise hist [-h] [-D] [-d s] [-p us] [-r us] [-s us] [-S us] [-t[=file]] \\",
- " [-c cpu-list] [-P priority] [-b N] [-e N] [--no-header] [--no-summary] \\",
+ " [-c cpu-list] [-P priority] [-b N] [-E N] [--no-header] [--no-summary] \\",
" [--no-index] [--with-zeros]",
"",
" -h/--help: print this menu",
@@ -439,7 +439,7 @@ static void osnoise_hist_usage(char *usage)
" -D/--debug: print debug info",
" -t/--trace[=file]: save the stopped trace to [file|osnoise_trace.txt]",
" -b/--bucket-size N: set the histogram bucket size (default 1)",
- " -e/--entries N: set the number of entries of the histogram (default 256)",
+ " -E/--entries N: set the number of entries of the histogram (default 256)",
" --no-header: do not print header",
" --no-summary: do not print summary",
" --no-index: do not print index",
@@ -486,7 +486,7 @@ static struct osnoise_hist_params
while (1) {
static struct option long_options[] = {
{"bucket-size", required_argument, 0, 'b'},
- {"entries", required_argument, 0, 'e'},
+ {"entries", required_argument, 0, 'E'},
{"cpus", required_argument, 0, 'c'},
{"debug", no_argument, 0, 'D'},
{"duration", required_argument, 0, 'd'},
@@ -507,7 +507,7 @@ static struct osnoise_hist_params
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "c:b:d:e:Dhp:P:r:s:S:t::0123",
+ c = getopt_long(argc, argv, "c:b:d:E:Dhp:P:r:s:S:t::0123",
long_options, &option_index);
/* detect the end of the options. */
@@ -534,7 +534,7 @@ static struct osnoise_hist_params
if (!params->duration)
osnoise_hist_usage("Invalid -D duration\n");
break;
- case 'e':
+ case 'E':
params->entries = get_llong_from_str(optarg);
if ((params->entries < 10) || (params->entries > 9999999))
osnoise_hist_usage("Entries must be > 10 and < 9999999\n");
diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
index c67dc28ef716..7af769b9c0de 100644
--- a/tools/tracing/rtla/src/osnoise_top.c
+++ b/tools/tracing/rtla/src/osnoise_top.c
@@ -573,6 +573,7 @@ out_top:
osnoise_free_top(tool->data);
osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
+ free(params);
out_exit:
exit(return_value);
}
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index 436a799f9adf..237e1735afa7 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -429,7 +429,7 @@ static void timerlat_hist_usage(char *usage)
char *msg[] = {
"",
" usage: [rtla] timerlat hist [-h] [-q] [-d s] [-D] [-n] [-p us] [-i us] [-T us] [-s us] [-t[=file]] \\",
- " [-c cpu-list] [-P priority] [-e N] [-b N] [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
+ " [-c cpu-list] [-P priority] [-E N] [-b N] [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
" [--no-index] [--with-zeros]",
"",
" -h/--help: print this menu",
@@ -443,7 +443,7 @@ static void timerlat_hist_usage(char *usage)
" -T/--trace[=file]: save the stopped trace to [file|timerlat_trace.txt]",
" -n/--nano: display data in nanoseconds",
" -b/--bucket-size N: set the histogram bucket size (default 1)",
- " -e/--entries N: set the number of entries of the histogram (default 256)",
+ " -E/--entries N: set the number of entries of the histogram (default 256)",
" --no-irq: ignore IRQ latencies",
" --no-thread: ignore thread latencies",
" --no-header: do not print header",
@@ -494,7 +494,7 @@ static struct timerlat_hist_params
{"cpus", required_argument, 0, 'c'},
{"bucket-size", required_argument, 0, 'b'},
{"debug", no_argument, 0, 'D'},
- {"entries", required_argument, 0, 'e'},
+ {"entries", required_argument, 0, 'E'},
{"duration", required_argument, 0, 'd'},
{"help", no_argument, 0, 'h'},
{"irq", required_argument, 0, 'i'},
@@ -516,7 +516,7 @@ static struct timerlat_hist_params
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "c:b:d:e:Dhi:np:P:s:t::T:012345",
+ c = getopt_long(argc, argv, "c:b:d:E:Dhi:np:P:s:t::T:012345",
long_options, &option_index);
/* detect the end of the options. */
@@ -543,7 +543,7 @@ static struct timerlat_hist_params
if (!params->duration)
timerlat_hist_usage("Invalid -D duration\n");
break;
- case 'e':
+ case 'E':
params->entries = get_llong_from_str(optarg);
if ((params->entries < 10) || (params->entries > 9999999))
timerlat_hist_usage("Entries must be > 10 and < 9999999\n");
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 58d31da8a2f7..0afc016cc54d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5528,9 +5528,7 @@ static int kvm_suspend(void)
static void kvm_resume(void)
{
if (kvm_usage_count) {
-#ifdef CONFIG_LOCKDEP
- WARN_ON(lockdep_is_held(&kvm_count_lock));
-#endif
+ lockdep_assert_not_held(&kvm_count_lock);
hardware_enable_nolock(NULL);
}
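
lockdep_assert_not_held() compiles away when CONFIG_LOCKDEP is off, which is what lets the kvm_main.c hunk drop the #ifdef around the check. A kernel-style sketch of the idiom (illustrative only; names are hypothetical):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    /* No #ifdef needed: the assert is a no-op without CONFIG_LOCKDEP. */
    static void example_resume(void)
    {
            lockdep_assert_not_held(&example_lock);
            /* ... re-enable hardware ... */
    }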
}