380 files changed, 7281 insertions, 3826 deletions
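[Editor's illustration, not part of the commit.] The new Documentation/gpu/amdgpu/flashing.rst added below documents a write-to-stage, read-to-trigger, poll-for-status flow over sysfs for IFWI flashing. A minimal userspace sketch of that flow follows; the `card0` sysfs path, the staging-buffer size, and the assumption that `psp_vbflash_status` is a human-readable string are illustrative choices, not defined by the patch, and short-write/error handling is elided.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define SYSFS_DIR "/sys/class/drm/card0/device"	/* assumed card path */

static unsigned char ifwi[16 * 1024 * 1024];	/* oversized staging buffer */

int main(int argc, char **argv)
{
	char status[64];
	ssize_t len, n;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ifwi-image>\n", argv[0]);
		return 1;
	}

	/* 1. Load the IFWI image intended for this dGPU. */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	len = read(fd, ifwi, sizeof(ifwi));
	close(fd);
	if (len <= 0)
		return 1;

	/* 2. "Write" the image to psp_vbflash to stage it in memory. */
	fd = open(SYSFS_DIR "/psp_vbflash", O_WRONLY);
	if (fd < 0)
		return 1;
	n = write(fd, ifwi, len);	/* short writes ignored in this sketch */
	close(fd);
	if (n != len)
		return 1;

	/* 3. "Read" from psp_vbflash to initiate the flash process. */
	fd = open(SYSFS_DIR "/psp_vbflash", O_RDONLY);
	if (fd < 0)
		return 1;
	(void)read(fd, status, 1);
	close(fd);

	/* 4. Poll psp_vbflash_status; the terminal value is not defined in
	 *    the document, so just report whatever the driver returns.
	 */
	fd = open(SYSFS_DIR "/psp_vbflash_status", O_RDONLY);
	if (fd < 0)
		return 1;
	len = read(fd, status, sizeof(status) - 1);
	close(fd);
	if (len > 0) {
		status[len] = '\0';
		printf("flash status: %s\n", status);
	}
	return 0;
}

The USB-C PD path described in the same file is simpler and needs no staging: reading `usbc_pd_fw` reports the current firmware version, and writing the name of a payload stored under /lib/firmware/amdgpu starts that flash.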
diff --git a/Documentation/gpu/amdgpu/flashing.rst b/Documentation/gpu/amdgpu/flashing.rst new file mode 100644 index 000000000000..bd745c42a538 --- /dev/null +++ b/Documentation/gpu/amdgpu/flashing.rst @@ -0,0 +1,33 @@ +======================= + dGPU firmware flashing +======================= + +IFWI +---- +Flashing the dGPU integrated firmware image (IFWI) is supported by GPUs that +use the PSP to orchestrate the update (Navi3x or newer GPUs). +For supported GPUs, `amdgpu` will export a series of sysfs files that can be +used for the flash process. + +The IFWI flash process is: + +1. Ensure the IFWI image is intended for the dGPU on the system. +2. "Write" the IFWI image to the sysfs file `psp_vbflash`. This will stage the IFWI in memory. +3. "Read" from the `psp_vbflash` sysfs file to initiate the flash process. +4. Poll the `psp_vbflash_status` sysfs file to determine when the flash process completes. + +USB-C PD F/W +------------ +On GPUs that support flashing an updated USB-C PD firmware image, the process +is done using the `usbc_pd_fw` sysfs file. + +* Reading the file will provide the current firmware version. +* Writing the name of a firmware payload stored in `/lib/firmware/amdgpu` to the sysfs file will initiate the flash process. + +The firmware payload stored in `/lib/firmware/amdgpu` can be named any name +as long as it doesn't conflict with other existing binaries that are used by +`amdgpu`. + +sysfs files +----------- +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c diff --git a/Documentation/gpu/amdgpu/index.rst b/Documentation/gpu/amdgpu/index.rst index 03c2966cae79..912e699fd373 100644 --- a/Documentation/gpu/amdgpu/index.rst +++ b/Documentation/gpu/amdgpu/index.rst @@ -10,6 +10,7 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures. module-parameters driver-core display/index + flashing xgmi ras thermal diff --git a/Documentation/gpu/rfc/i915_scheduler.rst b/Documentation/gpu/rfc/i915_scheduler.rst index d630f15ab795..ec086e7a43ff 100644 --- a/Documentation/gpu/rfc/i915_scheduler.rst +++ b/Documentation/gpu/rfc/i915_scheduler.rst @@ -135,9 +135,13 @@ Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and drm_i915_context_engines_parallel_submit to the uAPI to implement this extension. +.. c:namespace-push:: rfc + .. kernel-doc:: include/uapi/drm/i915_drm.h :functions: i915_context_engines_parallel_submit +.. 
c:namespace-pop:: + Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL ------------------------------------------------------------------- Contexts that have been configured with the 'set_parallel' extension can only diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 8d16f280b695..29325981778a 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -62,7 +62,7 @@ subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror amdgpu-y := amdgpu_drv.o # add KMS driver -amdgpu-y += amdgpu_device.o amdgpu_kms.o \ +amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \ atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \ amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \ @@ -98,7 +98,7 @@ amdgpu-y += \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \ - nbio_v7_9.o aqua_vanjaram_reg_init.o + nbio_v7_9.o aqua_vanjaram.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8f2255b3a38a..035437d7d73a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1033,7 +1033,6 @@ struct amdgpu_device { bool has_pr3; bool ucode_sysfs_en; - bool psp_sysfs_en; /* Chip product information */ char product_number[20]; @@ -1128,7 +1127,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, u64 reg_addr, u32 reg_data); void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, - uint32_t reg, uint32_t v); + uint32_t reg, uint32_t v, uint32_t xcc_id); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); @@ -1506,4 +1505,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) int amdgpu_in_reset(struct amdgpu_device *adev); +extern const struct attribute_group amdgpu_vram_mgr_attr_group; +extern const struct attribute_group amdgpu_gtt_mgr_attr_group; +extern const struct attribute_group amdgpu_flash_attr_group; + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 385c6acb5728..a5a2b06c6588 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -868,7 +868,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm) if (!numa_info) { struct sysinfo info; - numa_info = kzalloc(sizeof *numa_info, GFP_KERNEL); + numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL); if (!numa_info) return NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index b4fcad0e62f7..629ca1ad75a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -830,3 +830,53 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id) return adev->gmc.real_vram_size; } } + +int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, + u32 inst) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + struct amdgpu_ring_funcs *ring_funcs; + struct amdgpu_ring *ring; + int r = 0; + + if (!kiq->pmf || 
!kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL); + if (!ring_funcs) + return -ENOMEM; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + r = -ENOMEM; + goto free_ring_funcs; + } + + ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE; + ring->doorbell_index = doorbell_off; + ring->funcs = ring_funcs; + + spin_lock(&kiq->ring_lock); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock(&kiq->ring_lock); + r = -ENOMEM; + goto free_ring; + } + + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0); + + if (kiq_ring->sched.ready && !adev->job_hang) + r = amdgpu_ring_test_helper(kiq_ring); + + spin_unlock(&kiq->ring_lock); + +free_ring: + kfree(ring); + +free_ring_funcs: + kfree(ring_funcs); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 1e4cc1fe88fc..082c9f4cfd34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -251,6 +251,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min); int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, uint32_t *payload); +int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, + u32 inst); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index 60f9e027fb66..e2fed6edbdd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -23,6 +23,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd_arcturus.h" #include "amdgpu_amdkfd_gfx_v9.h" +#include "amdgpu_amdkfd_aldebaran.h" #include "gc/gc_9_4_2_offset.h" #include "gc/gc_9_4_2_sh_mask.h" #include <uapi/linux/kfd_ioctl.h> @@ -36,7 +37,7 @@ * initialize the debug mode registers after it has disabled GFX off during the * debug session. */ -static uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev, +uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev, bool restore_dbg_registers, uint32_t vmid) { @@ -107,7 +108,7 @@ static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device return data; } -static uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, +uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid) { @@ -125,7 +126,8 @@ static uint32_t kgd_gfx_aldebaran_set_address_watch( uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t inst) { uint32_t watch_address_high; uint32_t watch_address_low; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h new file mode 100644 index 000000000000..a7bdaf8d82dd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid); +uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 5b4b7f8b92a5..490c8f5ddb60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -22,6 +22,7 @@ #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd_gfx_v9.h" +#include "amdgpu_amdkfd_aldebaran.h" #include "gc/gc_9_4_3_offset.h" #include "gc/gc_9_4_3_sh_mask.h" #include "athub/athub_1_8_0_offset.h" @@ -32,6 +33,7 @@ #include "soc15.h" #include "sdma/sdma_4_4_2_offset.h" #include "sdma/sdma_4_4_2_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) { @@ -361,6 +363,156 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, return 0; } +/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */ +static uint32_t kgd_gfx_v9_4_3_disable_debug_trap(struct amdgpu_device *adev, + bool keep_trap_enabled, + uint32_t vmid) +{ + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); + + return data; +} + +static int kgd_gfx_v9_4_3_validate_trap_override_request( + struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported) +{ + *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID | + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL | + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_FP_OVERFLOW | + KFD_DBG_TRAP_MASK_FP_UNDERFLOW | + KFD_DBG_TRAP_MASK_FP_INEXACT | + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION | + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START | + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END; + + if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR && + trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE) + return -EPERM; + + return 0; +} + +static uint32_t trap_mask_map_sw_to_hw(uint32_t mask) +{ + uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0; + uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 
1 : 0; + uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID | + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL | + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_FP_OVERFLOW | + KFD_DBG_TRAP_MASK_FP_UNDERFLOW | + KFD_DBG_TRAP_MASK_FP_INEXACT | + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION); + uint32_t ret; + + ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en); + ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start); + ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end); + + return ret; +} + +static uint32_t trap_mask_map_hw_to_sw(uint32_t mask) +{ + uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN); + + if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START)) + ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START; + + if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END)) + ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END; + + return ret; +} + +/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */ +static uint32_t kgd_gfx_v9_4_3_set_wave_launch_trap_override( + struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_trap_cntl_prev) + +{ + uint32_t data = 0; + + *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev); + + data = (trap_mask_bits & trap_mask_request) | + (*trap_mask_prev & ~trap_mask_request); + data = trap_mask_map_sw_to_hw(data); + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override); + + return data; +} + +#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H) +static uint32_t kgd_gfx_v9_4_3_set_address_watch( + struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid, + uint32_t inst) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 7); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + regTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + regTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + return watch_address_cntl; +} + +static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + return 0; +} + const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping, @@ -379,6 +531,19 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = - kgd_gfx_v9_program_trap_handler_settings + kgd_gfx_v9_program_trap_handler_settings, + 
.build_grace_period_packet_info = + kgd_gfx_v9_build_grace_period_packet_info, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .enable_debug_trap = kgd_aldebaran_enable_debug_trap, + .disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap, + .validate_trap_override_request = + kgd_gfx_v9_4_3_validate_trap_override_request, + .set_wave_launch_trap_override = + kgd_gfx_v9_4_3_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v9_4_3_set_address_watch, + .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 8ad7a7779e14..f1f2c24de081 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -886,7 +886,8 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t inst) { uint32_t watch_address_high; uint32_t watch_address_low; @@ -968,7 +969,8 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, * deq_retry_wait_time -- Wait Count for Global Wave Syncs. */ void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, - uint32_t *wait_times) + uint32_t *wait_times, + uint32_t inst) { *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); @@ -978,7 +980,8 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data) + uint32_t *reg_data, + uint32_t inst) { *reg_data = wait_times; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index e6b70196071a..ecaead24e8c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -44,12 +44,16 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid); + uint32_t debug_vmid, + uint32_t inst); uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, uint32_t watch_id); -void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times, + uint32_t inst); void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data); + uint32_t *reg_data, + uint32_t inst); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 91c3574ebed3..d67d003bada2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -637,7 +637,7 @@ static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev, { uint32_t data = 0; - data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); @@ -743,7 +743,8 @@ static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - 
uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t inst) { uint32_t watch_address_high; uint32_t watch_address_low; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 51d93fb13ea3..28963726bc97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -822,7 +822,8 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t inst) { uint32_t watch_address_high; uint32_t watch_address_low; @@ -903,10 +904,12 @@ uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, * deq_retry_wait_time -- Wait Count for Global Wave Syncs. */ void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, - uint32_t *wait_times) + uint32_t *wait_times, + uint32_t inst) { - *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); + *wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + mmCP_IQ_WAIT_TIME2)); } void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, @@ -1100,12 +1103,13 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data) + uint32_t *reg_data, + uint32_t inst) { *reg_data = wait_times; /* - * The CP cannont handle a 0 grace period input and will result in + * The CP cannot handle a 0 grace period input and will result in * an infinite grace period being set so set to 1 to prevent this. */ if (grace_period == 0) @@ -1116,7 +1120,8 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, SCH_WAVE, grace_period); - *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); + *reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + mmCP_IQ_WAIT_TIME2); } void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 5f54bff0db49..936e501908ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -89,12 +89,16 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid); + uint32_t debug_vmid, + uint32_t inst); uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, uint32_t watch_id); -void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times, + uint32_t inst); void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data); + uint32_t *reg_data, + uint32_t inst); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index edb35a88cb78..a136fba9f29b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1662,7 +1662,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } - xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id; + xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? 
+ 0 : fpriv->xcp_id; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index f4e3c133a16c..dce9e7d5e4ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1776,7 +1776,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); struct atom_context *ctx = adev->mode_info.atom_context; - return sysfs_emit(buf, "%s\n", ctx->vbios_version); + return sysfs_emit(buf, "%s\n", ctx->vbios_pn); } static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index d6d986be906a..6f241c574665 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -74,24 +74,29 @@ struct atpx_mux { u16 mux; } __packed; -bool amdgpu_has_atpx(void) { +bool amdgpu_has_atpx(void) +{ return amdgpu_atpx_priv.atpx_detected; } -bool amdgpu_has_atpx_dgpu_power_cntl(void) { +bool amdgpu_has_atpx_dgpu_power_cntl(void) +{ return amdgpu_atpx_priv.atpx.functions.power_cntl; } -bool amdgpu_is_atpx_hybrid(void) { +bool amdgpu_is_atpx_hybrid(void) +{ return amdgpu_atpx_priv.atpx.is_hybrid; } -bool amdgpu_atpx_dgpu_req_power_for_displays(void) { +bool amdgpu_atpx_dgpu_req_power_for_displays(void) +{ return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; } #if defined(CONFIG_ACPI) -void *amdgpu_atpx_get_dhandle(void) { +void *amdgpu_atpx_get_dhandle(void) +{ return amdgpu_atpx_priv.dhandle; } #endif @@ -113,6 +118,8 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function, union acpi_object atpx_arg_elements[2]; struct acpi_object_list atpx_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_device *adev = container_of(handle, struct acpi_device, handle); + struct device *dev = &adev->dev; atpx_arg.count = 2; atpx_arg.pointer = &atpx_arg_elements[0]; @@ -134,8 +141,8 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function, /* Fail only if calling the method fails and ATPX is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk("failed to evaluate ATPX got %s\n", - acpi_format_exception(status)); + dev_err(dev, "failed to evaluate ATPX got %s\n", + acpi_format_exception(status)); kfree(buffer.pointer); return NULL; } @@ -176,6 +183,8 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) { u32 valid_bits = 0; + struct acpi_device *adev = container_of(atpx->handle, struct acpi_device, handle); + struct device *dev = &adev->dev; if (atpx->functions.px_params) { union acpi_object *info; @@ -190,7 +199,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) size = *(u16 *) info->buffer.pointer; if (size < 10) { - printk("ATPX buffer is too small: %zu\n", size); + dev_err(dev, "ATPX buffer is too small: %zu\n", size); kfree(info); return -EINVAL; } @@ -223,11 +232,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) { - printk("ATPX Hybrid Graphics, forcing to ATPX\n"); + dev_info(dev, "ATPX Hybrid Graphics, forcing to ATPX\n"); 
atpx->functions.power_cntl = true; atpx->is_hybrid = false; } else { - printk("ATPX Hybrid Graphics\n"); + dev_info(dev, "ATPX Hybrid Graphics\n"); /* * Disable legacy PM methods only when pcie port PM is usable, * otherwise the device might fail to power off or power on. @@ -260,6 +269,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx) struct atpx_verify_interface output; size_t size; int err = 0; + struct acpi_device *adev = container_of(atpx->handle, struct acpi_device, handle); + struct device *dev = &adev->dev; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) @@ -278,8 +289,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx) memcpy(&output, info->buffer.pointer, size); /* TODO: check version? */ - printk("ATPX version %u, functions 0x%08x\n", - output.version, output.function_bits); + dev_info(dev, "ATPX version %u, functions 0x%08x\n", + output.version, output.function_bits); amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 56e89e76ff17..00ab0b3c8277 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -154,7 +154,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } else { r = get_user(value, (uint32_t *)buf); if (!r) - amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value); + amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0); } if (r) { result = r; @@ -283,7 +283,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } else { r = get_user(value, (uint32_t *)buf); if (!r) - amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value); + amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id); } if (r) { result = r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a2cdde0ca0a7..dc0e5227119b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -159,7 +159,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, return sysfs_emit(buf, "%llu\n", cnt); } -static DEVICE_ATTR(pcie_replay_count, S_IRUGO, +static DEVICE_ATTR(pcie_replay_count, 0444, amdgpu_device_get_pcie_replay_count, NULL); static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); @@ -183,7 +183,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev, return sysfs_emit(buf, "%s\n", adev->product_name); } -static DEVICE_ATTR(product_name, S_IRUGO, +static DEVICE_ATTR(product_name, 0444, amdgpu_device_get_product_name, NULL); /** @@ -205,7 +205,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev, return sysfs_emit(buf, "%s\n", adev->product_number); } -static DEVICE_ATTR(product_number, S_IRUGO, +static DEVICE_ATTR(product_number, 0444, amdgpu_device_get_product_number, NULL); /** @@ -227,7 +227,7 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev, return sysfs_emit(buf, "%s\n", adev->serial); } -static DEVICE_ATTR(serial_number, S_IRUGO, +static DEVICE_ATTR(serial_number, 0444, amdgpu_device_get_serial_number, NULL); /** @@ -481,8 +481,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, /* * MMIO register read with bytes helper functions * @offset:bytes offset from MMIO start - * -*/ + */ /** * amdgpu_mm_rreg8 - read a memory mapped IO register @@ -506,8 +505,8 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, 
uint32_t offset) * MMIO register write with bytes helper functions * @offset:bytes offset from MMIO start * @value: the value want to be written to the register - * -*/ + */ + /** * amdgpu_mm_wreg8 - read a memory mapped IO register * @@ -571,7 +570,8 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, * this function is invoked only for the debugfs register access */ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, - uint32_t reg, uint32_t v) + uint32_t reg, uint32_t v, + uint32_t xcc_id) { if (amdgpu_device_skip_hw_access(adev)) return; @@ -580,7 +580,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->is_rlcg_access_range) { if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) - return amdgpu_sriov_wreg(adev, reg, v, 0, 0); + return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id); } else if ((reg * 4) >= adev->rmmio_size) { adev->pcie_wreg(adev, reg * 4, v); } else { @@ -589,94 +589,6 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, } /** - * amdgpu_mm_rdoorbell - read a doorbell dword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * - * Returns the value in the doorbell aperture at the - * requested doorbell index (CIK). - */ -u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) -{ - if (amdgpu_device_skip_hw_access(adev)) - return 0; - - if (index < adev->doorbell.num_kernel_doorbells) { - return readl(adev->doorbell.ptr + index); - } else { - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); - return 0; - } -} - -/** - * amdgpu_mm_wdoorbell - write a doorbell dword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * @v: value to write - * - * Writes @v to the doorbell aperture at the - * requested doorbell index (CIK). - */ -void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) -{ - if (amdgpu_device_skip_hw_access(adev)) - return; - - if (index < adev->doorbell.num_kernel_doorbells) { - writel(v, adev->doorbell.ptr + index); - } else { - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); - } -} - -/** - * amdgpu_mm_rdoorbell64 - read a doorbell Qword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * - * Returns the value in the doorbell aperture at the - * requested doorbell index (VEGA10+). - */ -u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) -{ - if (amdgpu_device_skip_hw_access(adev)) - return 0; - - if (index < adev->doorbell.num_kernel_doorbells) { - return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); - } else { - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); - return 0; - } -} - -/** - * amdgpu_mm_wdoorbell64 - write a doorbell Qword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * @v: value to write - * - * Writes @v to the doorbell aperture at the - * requested doorbell index (VEGA10+). 
- */ -void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) -{ - if (amdgpu_device_skip_hw_access(adev)) - return; - - if (index < adev->doorbell.num_kernel_doorbells) { - atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); - } else { - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); - } -} - -/** * amdgpu_device_indirect_rreg - read an indirect register * * @adev: amdgpu_device pointer @@ -1078,7 +990,7 @@ static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev) * @registers: pointer to the register array * @array_size: size of the register array * - * Programs an array or registers with and and or masks. + * Programs an array or registers with and or masks. * This is a helper for setting golden registers. */ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, @@ -1136,83 +1048,6 @@ int amdgpu_device_pci_reset(struct amdgpu_device *adev) } /* - * GPU doorbell aperture helpers function. - */ -/** - * amdgpu_device_doorbell_init - Init doorbell driver information. - * - * @adev: amdgpu_device pointer - * - * Init doorbell driver information (CIK) - * Returns 0 on success, error on failure. - */ -static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) -{ - - /* No doorbell on SI hardware generation */ - if (adev->asic_type < CHIP_BONAIRE) { - adev->doorbell.base = 0; - adev->doorbell.size = 0; - adev->doorbell.num_kernel_doorbells = 0; - adev->doorbell.ptr = NULL; - return 0; - } - - if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) - return -EINVAL; - - amdgpu_asic_init_doorbell_index(adev); - - /* doorbell bar mapping */ - adev->doorbell.base = pci_resource_start(adev->pdev, 2); - adev->doorbell.size = pci_resource_len(adev->pdev, 2); - - if (adev->enable_mes) { - adev->doorbell.num_kernel_doorbells = - adev->doorbell.size / sizeof(u32); - } else { - adev->doorbell.num_kernel_doorbells = - min_t(u32, adev->doorbell.size / sizeof(u32), - adev->doorbell_index.max_assignment+1); - if (adev->doorbell.num_kernel_doorbells == 0) - return -EINVAL; - - /* For Vega, reserve and map two pages on doorbell BAR since SDMA - * paging queue doorbell use the second page. The - * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the - * doorbells are in the first page. So with paging queue enabled, - * the max num_kernel_doorbells should + 1 page (0x400 in dword) - */ - if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) && - adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) - adev->doorbell.num_kernel_doorbells += 0x400; - } - - adev->doorbell.ptr = ioremap(adev->doorbell.base, - adev->doorbell.num_kernel_doorbells * - sizeof(u32)); - if (adev->doorbell.ptr == NULL) - return -ENOMEM; - - return 0; -} - -/** - * amdgpu_device_doorbell_fini - Tear down doorbell driver information. - * - * @adev: amdgpu_device pointer - * - * Tear down doorbell driver information (CIK) - */ -static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) -{ - iounmap(adev->doorbell.ptr); - adev->doorbell.ptr = NULL; -} - - - -/* * amdgpu_device_wb_*() * Writeback is the method by which the GPU updates special pages in memory * with the status of certain GPU events (fences, ring pointers,etc.). 
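[Editor's aside, not part of the commit.] The hunk above ends at the amdgpu_device_wb_*() comment, which explains that writeback is how the GPU reports event status (fences, ring pointers, etc.) into special pages. For readers unfamiliar with the mechanism, here is a minimal in-driver sketch assuming the long-standing amdgpu_device_wb_get()/amdgpu_device_wb_free() helpers, which this series does not change:

/* Reviewer's sketch only: reserve one writeback slot, note the address
 * the GPU would be programmed to write into, read the CPU-side view,
 * then release the slot.
 */
static int wb_demo(struct amdgpu_device *adev)
{
	u32 wb;
	u64 gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);	/* allocate a slot */
	if (r)
		return r;

	/* GPU-visible address of the slot; a ring or fence driver would
	 * point the hardware at this dword.
	 */
	gpu_addr = adev->wb.gpu_addr + (wb * 4);

	/* The CPU reads the same dword out of the writeback page. */
	dev_info(adev->dev, "wb slot %u = 0x%08x (gpu 0x%llx)\n",
		 wb, adev->wb.wb[wb], gpu_addr);

	amdgpu_device_wb_free(adev, wb);	/* release the slot */
	return 0;
}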
@@ -1321,10 +1156,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); struct pci_bus *root; struct resource *res; - unsigned i; + unsigned int i; u16 cmd; int r; + if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) + return 0; + /* Bypass for VF */ if (amdgpu_sriov_vf(adev)) return 0; @@ -1359,7 +1197,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) cmd & ~PCI_COMMAND_MEMORY); /* Free the VRAM and doorbell BAR, we most likely need to move both. */ - amdgpu_device_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); if (adev->asic_type >= CHIP_BONAIRE) pci_release_resource(adev->pdev, 2); @@ -1376,7 +1214,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) /* When the doorbell or fb BAR isn't available we have no chance of * using the device. */ - r = amdgpu_device_doorbell_init(adev); + r = amdgpu_doorbell_init(adev); if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) return -ENODEV; @@ -1387,9 +1225,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) static bool amdgpu_device_read_bios(struct amdgpu_device *adev) { - if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) { + if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) return false; - } return true; } @@ -1425,6 +1262,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) if (adev->asic_type == CHIP_FIJI) { int err; uint32_t fw_ver; + err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); /* force vPost if error occured */ if (err) @@ -1527,6 +1365,7 @@ static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev, bool state) { struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev)); + amdgpu_asic_set_vga_state(adev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -1549,7 +1388,8 @@ static void amdgpu_device_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, * a page is 4KB so we have 12 bits offset, minimum 9 bits in the - * page table and the remaining bits are in the page directory */ + * page table and the remaining bits are in the page directory + */ if (amdgpu_vm_block_size == -1) return; @@ -1781,7 +1621,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - /* + /* * FIXME: open_count is protected by drm_global_mutex but that would lead to * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. @@ -3426,7 +3266,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) * * Main resume function for hardware IPs. The hardware IPs * are split into two resume functions because they are - * are also used in in recovering from a GPU reset and some additional + * also used in recovering from a GPU reset and some additional * steps need to be take between them. In this case (S3/S4) they are * run sequentially. * Returns 0 on success, negative error code on failure. 
@@ -3528,8 +3368,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) #else default: if (amdgpu_dc > 0) - DRM_INFO_ONCE("Display Core has been requested via kernel parameter " - "but isn't supported by ASIC, ignoring\n"); + DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); return false; #endif } @@ -3777,7 +3616,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); /* mutex initialization are all done here so we - * can recall function without having locking issues */ + * can recall function without having locking issues + */ mutex_init(&adev->firmware.mutex); mutex_init(&adev->pm.mutex); mutex_init(&adev->gfx.gpu_clock_mutex); @@ -3854,11 +3694,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); - if (adev->rmmio == NULL) { + if (!adev->rmmio) return -ENOMEM; - } + DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); - DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); + DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); /* * Reset domain needs to be present early, before XGMI hive discovered @@ -3926,7 +3766,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, dev_info(adev->dev, "PCIE atomic ops is not supported\n"); /* doorbell bar mapping and doorbell index init*/ - amdgpu_device_doorbell_init(adev); + amdgpu_doorbell_init(adev); if (amdgpu_emu_mode == 1) { /* post the asic on emulation mode */ @@ -4069,14 +3909,6 @@ fence_driver_init: } else adev->ucode_sysfs_en = true; - r = amdgpu_psp_sysfs_init(adev); - if (r) { - adev->psp_sysfs_en = false; - if (!amdgpu_sriov_vf(adev)) - DRM_ERROR("Creating psp sysfs failed\n"); - } else - adev->psp_sysfs_en = true; - /* * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 
* Otherwise the mgpu fan boost feature will be skipped due to the @@ -4120,7 +3952,8 @@ fence_driver_init: /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ /* this will fail for cards that aren't VGA class devices, just - * ignore it */ + * ignore it + */ if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); @@ -4172,7 +4005,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); /* Unmap all mapped bars - Doorbell, registers and VRAM */ - amdgpu_device_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); iounmap(adev->rmmio); adev->rmmio = NULL; @@ -4203,7 +4036,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* make sure IB test finished before entering exclusive mode * to avoid preemption on IB test - * */ + */ if (amdgpu_sriov_vf(adev)) { amdgpu_virt_request_full_gpu(adev, false); amdgpu_virt_fini_data_exchange(adev); @@ -4226,8 +4059,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_pm_sysfs_fini(adev); if (adev->ucode_sysfs_en) amdgpu_ucode_sysfs_fini(adev); - if (adev->psp_sysfs_en) - amdgpu_psp_sysfs_fini(adev); sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); /* disable ras feature must before hw fini */ @@ -4286,7 +4117,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_device_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); drm_dev_exit(idx); } @@ -4746,6 +4577,9 @@ retry: if (r) return r; + /* some sw clean up VF needs to do before recover */ + amdgpu_virt_post_reset(adev); + /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) @@ -4939,8 +4773,9 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!ring || !ring->sched.thread) continue; - /*clear job fence from fence drv to avoid force_completion - *leave NULL and vm flush fence in fence drv */ + /* Clear job fence from fence drv to avoid force_completion + * leave NULL and vm flush fence in fence drv + */ amdgpu_fence_driver_clear_job_fences(ring); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ @@ -4954,7 +4789,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, r = amdgpu_reset_prepare_hwcontext(adev, reset_context); /* If reset handler not implemented, continue; otherwise return */ - if (r == -ENOSYS) + if (r == -EOPNOTSUPP) r = 0; else return r; @@ -5072,7 +4907,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, reset_context->reset_device_list = device_list_handle; r = amdgpu_reset_perform_reset(tmp_adev, reset_context); /* If reset handler not implemented, continue; otherwise return */ - if (r == -ENOSYS) + if (r == -EOPNOTSUPP) r = 0; else return r; @@ -5561,9 +5396,8 @@ skip_hw_reset: if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) amdgpu_mes_self_test(tmp_adev); - if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { + if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); - } if (tmp_adev->asic_reset_res) r = tmp_adev->asic_reset_res; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 8e1cfc87122d..c21140da9d9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1750,6 +1750,7 @@ static int 
amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 10): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); break; case IP_VERSION(13, 0, 4): diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b702f499f5fb..d20dd3f852fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -124,7 +124,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work) struct drm_crtc *crtc = &amdgpu_crtc->base; unsigned long flags; - unsigned i; + unsigned int i; int vpos, hpos; for (i = 0; i < work->shared_count; ++i) @@ -201,7 +201,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, u64 tiling_flags; int i, r; - work = kzalloc(sizeof *work, GFP_KERNEL); + work = kzalloc(sizeof(*work), GFP_KERNEL); if (work == NULL) return -ENOMEM; @@ -332,13 +332,15 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, adev = drm_to_adev(dev); /* if we have active crtcs and we don't have a power ref, - take the current one */ + * take the current one + */ if (active && !adev->have_disp_power_ref) { adev->have_disp_power_ref = true; return ret; } /* if we have no active crtcs, then drop the power ref - we got before */ + * we got before + */ if (!active && adev->have_disp_power_ref) { pm_runtime_put_autosuspend(dev->dev); adev->have_disp_power_ref = false; @@ -507,11 +509,10 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector, if (amdgpu_connector->router.ddc_valid) amdgpu_i2c_router_select_ddc_port(amdgpu_connector); - if (use_aux) { + if (use_aux) ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2); - } else { + else ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2); - } if (ret != 2) /* Couldn't find an accessible DDC on this connector */ @@ -520,10 +521,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector, * EDID header starts with: * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. * Only the first 6 bytes must be valid as - * drm_edid_block_valid() can fix the last 2 bytes */ + * drm_edid_block_valid() can fix the last 2 bytes + */ if (drm_edid_header_is_valid(buf) < 6) { /* Couldn't find an accessible EDID on this - * connector */ + * connector + */ return false; } return true; @@ -1216,8 +1219,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (obj == NULL) { - drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, " - "can't create framebuffer\n", mode_cmd->handles[0]); + drm_dbg_kms(dev, + "No GEM object associated to handle 0x%08X, can't create framebuffer\n", + mode_cmd->handles[0]); + return ERR_PTR(-ENOENT); } @@ -1410,6 +1415,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, } if (amdgpu_crtc->rmx_type != RMX_OFF) { fixed20_12 a, b; + a.full = dfixed_const(src_v); b.full = dfixed_const(dst_v); amdgpu_crtc->vsc.full = dfixed_div(a, b); @@ -1429,7 +1435,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, * * \param dev Device to query. * \param pipe Crtc to query. - * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). + * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). 
* For driver internal use only also supports these flags: * * USE_REAL_VBLANKSTART to use the real start of vblank instead @@ -1504,8 +1510,8 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, /* Called from driver internal vblank counter query code? */ if (flags & GET_DISTANCE_TO_VBLANKSTART) { - /* Caller wants distance from real vbl_start in *hpos */ - *hpos = *vpos - vbl_start; + /* Caller wants distance from real vbl_start in *hpos */ + *hpos = *vpos - vbl_start; } /* Fudge vblank to start a few scanlines earlier to handle the @@ -1527,7 +1533,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, /* In vblank? */ if (in_vbl) - ret |= DRM_SCANOUTPOS_IN_VBLANK; + ret |= DRM_SCANOUTPOS_IN_VBLANK; /* Called from driver internal vblank counter query code? */ if (flags & GET_DISTANCE_TO_VBLANKSTART) { @@ -1635,6 +1641,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); if (r == 0) { amdgpu_bo_unpin(aobj); @@ -1642,9 +1649,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) } } - if (fb == NULL || fb->obj[0] == NULL) { + if (!fb || !fb->obj[0]) continue; - } + robj = gem_to_amdgpu_bo(fb->obj[0]); if (!amdgpu_display_robj_is_fb(adev, robj)) { r = amdgpu_bo_reserve(robj, true); @@ -1671,6 +1678,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev) if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); if (r == 0) { r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index f637574644c0..0e593cfeb570 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -90,8 +90,7 @@ struct amdgpu_doorbell_index { uint32_t xcc_doorbell_range; }; -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT -{ +enum AMDGPU_DOORBELL_ASSIGNMENT { AMDGPU_DOORBELL_KIQ = 0x000, AMDGPU_DOORBELL_HIQ = 0x001, AMDGPU_DOORBELL_DIQ = 0x002, @@ -109,10 +108,10 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT AMDGPU_DOORBELL_IH = 0x1E8, AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, AMDGPU_DOORBELL_INVALID = 0xFFFF -} AMDGPU_DOORBELL_ASSIGNMENT; +}; + +enum AMDGPU_VEGA20_DOORBELL_ASSIGNMENT { -typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT -{ /* Compute + GFX: 0~255 */ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000, AMDGPU_VEGA20_DOORBELL_HIQ = 0x001, @@ -176,10 +175,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1F7, AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF -} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT; +}; + +enum AMDGPU_NAVI10_DOORBELL_ASSIGNMENT { -typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT -{ /* Compute + GFX: 0~255 */ AMDGPU_NAVI10_DOORBELL_KIQ = 0x000, AMDGPU_NAVI10_DOORBELL_HIQ = 0x001, @@ -227,13 +226,12 @@ typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT = 0x18F, AMDGPU_NAVI10_DOORBELL_INVALID = 0xFFFF -} AMDGPU_NAVI10_DOORBELL_ASSIGNMENT; +}; /* * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space */ -typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT -{ +enum AMDGPU_DOORBELL64_ASSIGNMENT { /* * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can 
cover this range. @@ -309,9 +307,10 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, AMDGPU_DOORBELL64_INVALID = 0xFFFF -} AMDGPU_DOORBELL64_ASSIGNMENT; +}; + +enum AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { /* XCC0: 0x00 ~20, XCC1: 20 ~ 2F ... */ /* KIQ/HIQ/DIQ */ @@ -339,13 +338,19 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1D4, AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF -} AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1; +}; u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); +/* + * GPU doorbell aperture helpers function. + */ +int amdgpu_doorbell_init(struct amdgpu_device *adev); +void amdgpu_doorbell_fini(struct amdgpu_device *adev); + #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) #define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c new file mode 100644 index 000000000000..31db526d4921 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" + +/** + * amdgpu_mm_rdoorbell - read a doorbell dword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * + * Returns the value in the doorbell aperture at the + * requested doorbell index (CIK). + */ +u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) +{ + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if (index < adev->doorbell.num_kernel_doorbells) + return readl(adev->doorbell.ptr + index); + + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + return 0; +} + +/** + * amdgpu_mm_wdoorbell - write a doorbell dword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * @v: value to write + * + * Writes @v to the doorbell aperture at the + * requested doorbell index (CIK). 
+ */ +void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) +{ + if (amdgpu_device_skip_hw_access(adev)) + return; + + if (index < adev->doorbell.num_kernel_doorbells) + writel(v, adev->doorbell.ptr + index); + else + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); +} + +/** + * amdgpu_mm_rdoorbell64 - read a doorbell Qword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * + * Returns the value in the doorbell aperture at the + * requested doorbell index (VEGA10+). + */ +u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) +{ + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if (index < adev->doorbell.num_kernel_doorbells) + return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); + + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + return 0; +} + +/** + * amdgpu_mm_wdoorbell64 - write a doorbell Qword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * @v: value to write + * + * Writes @v to the doorbell aperture at the + * requested doorbell index (VEGA10+). + */ +void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) +{ + if (amdgpu_device_skip_hw_access(adev)) + return; + + if (index < adev->doorbell.num_kernel_doorbells) + atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); + else + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); +} + +/* + * GPU doorbell aperture helpers function. + */ +/** + * amdgpu_doorbell_init - Init doorbell driver information. + * + * @adev: amdgpu_device pointer + * + * Init doorbell driver information (CIK) + * Returns 0 on success, error on failure. + */ +int amdgpu_doorbell_init(struct amdgpu_device *adev) +{ + + /* No doorbell on SI hardware generation */ + if (adev->asic_type < CHIP_BONAIRE) { + adev->doorbell.base = 0; + adev->doorbell.size = 0; + adev->doorbell.num_kernel_doorbells = 0; + adev->doorbell.ptr = NULL; + return 0; + } + + if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) + return -EINVAL; + + amdgpu_asic_init_doorbell_index(adev); + + /* doorbell bar mapping */ + adev->doorbell.base = pci_resource_start(adev->pdev, 2); + adev->doorbell.size = pci_resource_len(adev->pdev, 2); + + if (adev->enable_mes) { + adev->doorbell.num_kernel_doorbells = + adev->doorbell.size / sizeof(u32); + } else { + adev->doorbell.num_kernel_doorbells = + min_t(u32, adev->doorbell.size / sizeof(u32), + adev->doorbell_index.max_assignment+1); + if (adev->doorbell.num_kernel_doorbells == 0) + return -EINVAL; + + /* For Vega, reserve and map two pages on doorbell BAR since SDMA + * paging queue doorbell use the second page. The + * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the + * doorbells are in the first page. So with paging queue enabled, + * the max num_kernel_doorbells should + 1 page (0x400 in dword) + */ + if (adev->asic_type >= CHIP_VEGA10) + adev->doorbell.num_kernel_doorbells += 0x400; + } + + adev->doorbell.ptr = ioremap(adev->doorbell.base, + adev->doorbell.num_kernel_doorbells * + sizeof(u32)); + if (adev->doorbell.ptr == NULL) + return -ENOMEM; + + return 0; +} + +/** + * amdgpu_doorbell_fini - Tear down doorbell driver information. 
+ * + * @adev: amdgpu_device pointer + * + * Tear down doorbell driver information (CIK) + */ +void amdgpu_doorbell_fini(struct amdgpu_device *adev) +{ + iounmap(adev->doorbell.ptr); + adev->doorbell.ptr = NULL; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e90f730eb715..d6439d56dcd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -313,9 +313,7 @@ module_param_named(msi, amdgpu_msi, int, 0444); * jobs is 10000. The timeout for compute is 60000. */ MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; " - "for passthrough or sriov, 10000 for all jobs." - " 0: keep default value. negative: infinity timeout), " - "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); @@ -584,7 +582,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644); */ #ifdef CONFIG_DRM_AMDGPU_SI -#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) +#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) int amdgpu_si_support = 0; MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))"); #else @@ -603,7 +601,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444); */ #ifdef CONFIG_DRM_AMDGPU_CIK -#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) +#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) int amdgpu_cik_support = 0; MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))"); #else @@ -620,8 +618,7 @@ module_param_named(cik_support, amdgpu_cik_support, int, 0444); * E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled). 
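Worked examples of the lockup_timeout format described above:

/* Examples:
 *   modprobe amdgpu lockup_timeout=10000
 *       bare metal, one value applied to non-compute jobs
 *   modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 *       per-engine values in [GFX,Compute,SDMA,Video] order
 */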
*/ MODULE_PARM_DESC(smu_memory_pool_size, - "reserve gtt for smu debug usage, 0 = disable," - "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte"); + "reserve gtt for smu debug usage, 0 = disable,0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte"); module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444); /** @@ -791,9 +788,9 @@ module_param(hws_gws_support, bool, 0444); MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)"); /** - * DOC: queue_preemption_timeout_ms (int) - * queue preemption timeout in ms (1 = Minimum, 9000 = default) - */ + * DOC: queue_preemption_timeout_ms (int) + * queue preemption timeout in ms (1 = Minimum, 9000 = default) + */ int queue_preemption_timeout_ms = 9000; module_param(queue_preemption_timeout_ms, int, 0644); MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)"); @@ -2417,7 +2414,6 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) amdgpu_amdkfd_device_init(adev); amdgpu_ttm_set_buffer_funcs_status(adev, true); } - return; } static int amdgpu_pmops_prepare(struct device *dev) @@ -2614,6 +2610,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) /* wait for all rings to drain before suspending */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) { ret = amdgpu_fence_wait_empty(ring); if (ret) @@ -2738,6 +2735,7 @@ long amdgpu_drm_ioctl(struct file *filp, struct drm_file *file_priv = filp->private_data; struct drm_device *dev; long ret; + dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) @@ -2802,9 +2800,8 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) if (!filp) return -EINVAL; - if (filp->f_op != &amdgpu_driver_kms_fops) { + if (filp->f_op != &amdgpu_driver_kms_fops) return -EINVAL; - } file = filp->private_data; *fpriv = file->driver_priv; @@ -2891,16 +2888,13 @@ static struct pci_error_handlers amdgpu_pci_err_handler = { .resume = amdgpu_pci_resume, }; -extern const struct attribute_group amdgpu_vram_mgr_attr_group; -extern const struct attribute_group amdgpu_gtt_mgr_attr_group; - static const struct attribute_group *amdgpu_sysfs_groups[] = { &amdgpu_vram_mgr_attr_group, &amdgpu_gtt_mgr_attr_group, + &amdgpu_flash_attr_group, NULL, }; - static struct pci_driver amdgpu_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c index 7d2a908438e9..e71768661ca8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c @@ -183,6 +183,8 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, { const struct i2c_adapter_quirks *quirks = i2c_adap->quirks; u16 limit; + u16 ps; /* Partial size */ + int res = 0, r; if (!quirks) limit = 0; @@ -200,28 +202,25 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, eeprom_addr, buf_size, read ? "read" : "write", EEPROM_OFFSET_SIZE); return -EINVAL; - } else { - u16 ps; /* Partial size */ - int res = 0, r; - - /* The "limit" includes all data bytes sent/received, - * which would include the EEPROM_OFFSET_SIZE bytes. - * Account for them here. 
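A concrete setting for the pool-size parameter above:

/* Per the scale documented above,
 *   modprobe amdgpu smu_memory_pool_size=0x4
 * reserves 1 GByte of GTT for SMU debug use; 0 (the default) disables
 * the reservation.
 */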
- */ - limit -= EEPROM_OFFSET_SIZE; - for ( ; buf_size > 0; - buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { - ps = min(limit, buf_size); - - r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, - eeprom_buf, ps, read); - if (r < 0) - return r; - res += r; - } + } - return res; + /* The "limit" includes all data bytes sent/received, + * which would include the EEPROM_OFFSET_SIZE bytes. + * Account for them here. + */ + limit -= EEPROM_OFFSET_SIZE; + for ( ; buf_size > 0; + buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { + ps = min(limit, buf_size); + + r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, + eeprom_buf, ps, read); + if (r < 0) + return r; + res += r; } + + return res; } int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 13d7413d4ca3..6038b5021b27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -89,7 +89,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name); drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn); - drm_printf(p, "drm-client-id:\t%Lu\n", vm->immediate.fence_context); + drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context); drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); @@ -109,7 +109,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) if (!usage[hw_ip]) continue; - drm_printf(p, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip], + drm_printf(p, "drm-engine-%s:\t%lld ns\n", amdgpu_ip_name[hw_ip], ktime_to_ns(usage[hw_ip])); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 4620c4712ce3..8c3ee042556a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -60,10 +60,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) switch (adev->asic_type) { case CHIP_VEGA20: /* D161 and D163 are the VG20 server SKUs */ - if (strnstr(atom_ctx->vbios_version, "D161", - sizeof(atom_ctx->vbios_version)) || - strnstr(atom_ctx->vbios_version, "D163", - sizeof(atom_ctx->vbios_version))) { + if (strnstr(atom_ctx->vbios_pn, "D161", + sizeof(atom_ctx->vbios_pn)) || + strnstr(atom_ctx->vbios_pn, "D163", + sizeof(atom_ctx->vbios_pn))) { if (fru_addr) *fru_addr = FRU_EEPROM_MADDR_6; return true; @@ -72,22 +72,23 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) } case CHIP_ALDEBARAN: /* All Aldebaran SKUs have an FRU */ - if (!strnstr(atom_ctx->vbios_version, "D673", - sizeof(atom_ctx->vbios_version))) + if (!strnstr(atom_ctx->vbios_pn, "D673", + sizeof(atom_ctx->vbios_pn))) if (fru_addr) *fru_addr = FRU_EEPROM_MADDR_6; return true; case CHIP_SIENNA_CICHLID: - if (strnstr(atom_ctx->vbios_version, "D603", - sizeof(atom_ctx->vbios_version))) { - if (strnstr(atom_ctx->vbios_version, "D603GLXE", - sizeof(atom_ctx->vbios_version))) { + if (strnstr(atom_ctx->vbios_pn, "D603", + sizeof(atom_ctx->vbios_pn))) { + if (strnstr(atom_ctx->vbios_pn, "D603GLXE", + sizeof(atom_ctx->vbios_pn))) { return false; - } else { - if (fru_addr) - *fru_addr = FRU_EEPROM_MADDR_6; - return true; } + + if (fru_addr) + 
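To make the partial-transfer rule concrete (sizes illustrative, assuming the usual 2-byte EEPROM_OFFSET_SIZE):

/* With an i2c adapter quirk limit of 256 bytes, each transaction
 * carries ps = 256 - 2 = 254 payload bytes, so a 1024-byte buffer
 * completes as four full partials plus one 8-byte tail:
 * 254 * 4 + 8 = 1024, and the loop returns res = 1024.
 */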
*fru_addr = FRU_EEPROM_MADDR_6; + return true; + } else { return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c index 2ca3c329de6d..2d4b67175b55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c @@ -32,17 +32,15 @@ #include "soc15_common.h" #define FW_ATTESTATION_DB_COOKIE 0x143b6a37 -#define FW_ATTESTATION_RECORD_VALID 1 +#define FW_ATTESTATION_RECORD_VALID 1 #define FW_ATTESTATION_MAX_SIZE 4096 -typedef struct FW_ATT_DB_HEADER -{ +struct FW_ATT_DB_HEADER { uint32_t AttDbVersion; /* version of the fwar feature */ uint32_t AttDbCookie; /* cookie as an extra check for corrupt data */ -} FW_ATT_DB_HEADER; +}; -typedef struct FW_ATT_RECORD -{ +struct FW_ATT_RECORD { uint16_t AttFwIdV1; /* Legacy FW Type field */ uint16_t AttFwIdV2; /* V2 FW ID field */ uint32_t AttFWVersion; /* FW Version */ @@ -50,7 +48,7 @@ typedef struct FW_ATT_RECORD uint8_t AttSource; /* FW source indicator */ uint8_t RecordValid; /* Indicates whether the record is a valid entry */ uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */ -} FW_ATT_RECORD; +}; static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, char __user *buf, @@ -60,15 +58,15 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; uint64_t records_addr = 0; uint64_t vram_pos = 0; - FW_ATT_DB_HEADER fw_att_hdr = {0}; - FW_ATT_RECORD fw_att_record = {0}; + struct FW_ATT_DB_HEADER fw_att_hdr = {0}; + struct FW_ATT_RECORD fw_att_record = {0}; - if (size < sizeof(FW_ATT_RECORD)) { + if (size < sizeof(struct FW_ATT_RECORD)) { DRM_WARN("FW attestation input buffer not enough memory"); return -EINVAL; } - if ((*pos + sizeof(FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) { + if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) { DRM_WARN("FW attestation out of bounds"); return 0; } @@ -83,8 +81,8 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, if (*pos == 0) { amdgpu_device_vram_access(adev, vram_pos, - (uint32_t*)&fw_att_hdr, - sizeof(FW_ATT_DB_HEADER), + (uint32_t *)&fw_att_hdr, + sizeof(struct FW_ATT_DB_HEADER), false); if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) { @@ -96,20 +94,20 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, } amdgpu_device_vram_access(adev, - vram_pos + sizeof(FW_ATT_DB_HEADER) + *pos, - (uint32_t*)&fw_att_record, - sizeof(FW_ATT_RECORD), + vram_pos + sizeof(struct FW_ATT_DB_HEADER) + *pos, + (uint32_t *)&fw_att_record, + sizeof(struct FW_ATT_RECORD), false); if (fw_att_record.RecordValid != FW_ATTESTATION_RECORD_VALID) return 0; - if (copy_to_user(buf, (void*)&fw_att_record, sizeof(FW_ATT_RECORD))) + if (copy_to_user(buf, (void *)&fw_att_record, sizeof(struct FW_ATT_RECORD))) return -EINVAL; - *pos += sizeof(FW_ATT_RECORD); + *pos += sizeof(struct FW_ATT_RECORD); - return sizeof(FW_ATT_RECORD); + return sizeof(struct FW_ATT_RECORD); } static const struct file_operations amdgpu_fw_attestation_debugfs_ops = { @@ -136,7 +134,7 @@ void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev) return; debugfs_create_file("amdgpu_fw_attestation", - S_IRUSR, + 0400, adev_to_drm(adev)->primary->debugfs_root, adev, &amdgpu_fw_attestation_debugfs_ops); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7da871972a8e..693b1fd1191a 100644 --- 
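A hypothetical userspace sketch of consuming the debugfs interface above (the dri/0 path is an example; error handling trimmed):

#include <fcntl.h>
#include <unistd.h>

/* Each successful read() returns one struct FW_ATT_RECORD; the handler
 * rejects buffers smaller than a record with -EINVAL and returns 0
 * once RecordValid no longer matches FW_ATTESTATION_RECORD_VALID.
 */
static int dump_fw_attestation(void)
{
	unsigned char rec[64];
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_fw_attestation",
		      O_RDONLY);

	if (fd < 0)
		return -1;
	while (read(fd, rec, sizeof(rec)) > 0)
		; /* one record per read */
	close(fd);
	return 0;
}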
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -182,11 +182,10 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, return r; bo_va = amdgpu_vm_bo_find(vm, abo); - if (!bo_va) { + if (!bo_va) bo_va = amdgpu_vm_bo_add(adev, vm, abo); - } else { + else ++bo_va->ref_count; - } amdgpu_bo_unreserve(abo); return 0; } @@ -461,9 +460,9 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, struct amdgpu_bo *robj; gobj = drm_gem_object_lookup(filp, handle); - if (gobj == NULL) { + if (!gobj) return -ENOENT; - } + robj = gem_to_amdgpu_bo(gobj); if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { @@ -480,6 +479,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_gem_mmap *args = data; uint32_t handle = args->in.handle; + memset(args, 0, sizeof(*args)); return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr); } @@ -506,7 +506,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns) timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout)); /* clamp timeout to avoid unsigned-> signed overflow */ - if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT ) + if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT) return MAX_SCHEDULE_TIMEOUT - 1; return timeout_jiffies; @@ -524,9 +524,9 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, long ret; gobj = drm_gem_object_lookup(filp, handle); - if (gobj == NULL) { + if (!gobj) return -ENOENT; - } + robj = gem_to_amdgpu_bo(gobj); ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, true, timeout); @@ -553,7 +553,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, struct amdgpu_bo *robj; int r = -1; - DRM_DEBUG("%d \n", args->handle); + DRM_DEBUG("%d\n", args->handle); gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; @@ -680,7 +680,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { dev_dbg(dev->dev, - "va_address 0x%LX is in reserved area 0x%LX\n", + "va_address 0x%llx is in reserved area 0x%llx\n", args->va_address, AMDGPU_VA_RESERVED_SIZE); return -EINVAL; } @@ -688,7 +688,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (args->va_address >= AMDGPU_GMC_HOLE_START && args->va_address < AMDGPU_GMC_HOLE_END) { dev_dbg(dev->dev, - "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n", + "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n", args->va_address, AMDGPU_GMC_HOLE_START, AMDGPU_GMC_HOLE_END); return -EINVAL; @@ -808,9 +808,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, int r; gobj = drm_gem_object_lookup(filp, args->handle); - if (gobj == NULL) { + if (!gobj) return -ENOENT; - } + robj = gem_to_amdgpu_bo(gobj); r = amdgpu_bo_reserve(robj, false); @@ -936,9 +936,9 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ drm_gem_object_put(gobj); - if (r) { + if (r) return r; - } + args->handle = handle; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a33d4bc34cee..c76b6bfc4dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -110,9 +110,9 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, * The bitmask of CUs to be disabled in the shader array determined by se and * sh is stored in mask[se * max_sh + sh]. 
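The triplets parsed here come from the amdgpu.disable_cu module argument; for example:

/* Disable CUs 4 and 5 in SE0/SH0 at load time:
 *   modprobe amdgpu disable_cu=0.0.4,0.0.5
 * Each "se.sh.cu" triplet sets bit cu in mask[se * max_sh + sh].
 */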
*/ -void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh) +void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh) { - unsigned se, sh, cu; + unsigned int se, sh, cu; const char *p; memset(mask, 0, sizeof(*mask) * max_se * max_sh); @@ -124,6 +124,7 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s for (;;) { char *next; int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu); + if (ret < 3) { DRM_ERROR("amdgpu: could not parse disable_cu\n"); return; @@ -349,7 +350,7 @@ void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id) } int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, - unsigned hpd_size, int xcc_id) + unsigned int hpd_size, int xcc_id) { int r; u32 *hpd; @@ -376,7 +377,7 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, /* create MQD for each compute/gfx queue */ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, - unsigned mqd_size, int xcc_id) + unsigned int mqd_size, int xcc_id) { int r, i, j; struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; @@ -407,8 +408,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, /* prepare MQD backup */ kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL); - if (!kiq->mqd_backup) - dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + if (!kiq->mqd_backup) { + dev_warn(adev->dev, + "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } } if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { @@ -427,8 +431,10 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.me.mqd_backup[i]) + if (!adev->gfx.me.mqd_backup[i]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } } } } @@ -449,8 +455,10 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.mec.mqd_backup[j]) + if (!adev->gfx.mec.mqd_backup[j]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } } } @@ -1281,11 +1289,11 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, return sysfs_emit(buf, "%s\n", supported_partition); } -static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR, +static DEVICE_ATTR(current_compute_partition, 0644, amdgpu_gfx_get_current_compute_partition, amdgpu_gfx_set_compute_partition); -static DEVICE_ATTR(available_compute_partition, S_IRUGO, +static DEVICE_ATTR(available_compute_partition, 0444, amdgpu_gfx_get_available_compute_partition, NULL); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 56d73fade568..fdc25cd559b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -331,6 +331,8 @@ struct amdgpu_gmc { u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16]; u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16]; u64 MC_VM_MX_L1_TLB_CNTL; + + u64 noretry_flags; }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index ebeddc9a37e9..6aa3b1d845ab 100644 --- 
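For reference on the permission conversions above:

/* 0644 == S_IRUGO | S_IWUSR (rw-r--r--) and 0444 == S_IRUGO
 * (r--r--r--); checkpatch prefers the octal literals now used in the
 * DEVICE_ATTR() declarations.
 */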
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -62,7 +62,7 @@ * Returns 0 on success, error on failure. */ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned size, enum amdgpu_ib_pool_type pool_type, + unsigned int size, enum amdgpu_ib_pool_type pool_type, struct amdgpu_ib *ib) { int r; @@ -123,7 +123,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, * a CONST_IB), it will be put on the ring prior to the DE IB. Prior * to SI there was just a DE IB. */ -int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, +int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, struct amdgpu_ib *ibs, struct amdgpu_job *job, struct dma_fence **f) { @@ -131,16 +131,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_ib *ib = &ibs[0]; struct dma_fence *tmp = NULL; bool need_ctx_switch; - unsigned patch_offset = ~0; + unsigned int patch_offset = ~0; struct amdgpu_vm *vm; uint64_t fence_ctx; uint32_t status = 0, alloc_size; - unsigned fence_flags = 0; + unsigned int fence_flags = 0; bool secure, init_shadow; u64 shadow_va, csa_va, gds_va; int vmid = AMDGPU_JOB_GET_VMID(job); - unsigned i; + unsigned int i; int r = 0; bool need_pipe_sync = false; @@ -282,7 +282,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0); if (ring->funcs->init_cond_exec) { - unsigned ce_offset = ~0; + unsigned int ce_offset = ~0; ce_offset = amdgpu_ring_init_cond_exec(ring); if (ce_offset != ~0 && ring->funcs->patch_cond_exec) @@ -385,7 +385,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) { long tmo_gfx, tmo_mm; int r, ret = 0; - unsigned i; + unsigned int i; tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT; if (amdgpu_sriov_vf(adev)) { @@ -402,7 +402,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) /* for CP & SDMA engines since they are scheduled together so * need to make the timeout width enough to cover the time * cost waiting for it coming back under RUNTIME only - */ + */ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; } else if (adev->gmc.xgmi.hive_id) { tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT; @@ -465,13 +465,13 @@ static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = m->private; - seq_printf(m, "--------------------- DELAYED --------------------- \n"); + seq_puts(m, "--------------------- DELAYED ---------------------\n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED], m); - seq_printf(m, "-------------------- IMMEDIATE -------------------- \n"); + seq_puts(m, "-------------------- IMMEDIATE --------------------\n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE], m); - seq_printf(m, "--------------------- DIRECT ---------------------- \n"); + seq_puts(m, "--------------------- DIRECT ----------------------\n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index fceb3b384955..f3b0aaf3ebc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -138,6 +138,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) /** * amdgpu_ih_ring_write - write IV to the ring buffer * + * @adev: amdgpu_device pointer * @ih: ih ring to write to * @iv: the iv to write * @num_dw: size of the iv in dw @@ -145,8 +146,8 @@ 
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) * Writes an IV to the ring buffer using the CPU and increment the wptr. * Used for testing and delegating IVs to a software ring. */ -void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, - unsigned int num_dw) +void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, + const uint32_t *iv, unsigned int num_dw) { uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2; unsigned int i; @@ -161,6 +162,9 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, if (wptr != READ_ONCE(ih->rptr)) { wmb(); WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr)); + } else if (adev->irq.retry_cam_enabled) { + dev_warn_once(adev->dev, "IH soft ring buffer overflow 0x%X, 0x%X\n", + wptr, ih->rptr); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index dd1c2eded6b9..6c6184f0dbc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -27,6 +27,9 @@ /* Maximum number of IVs processed at once */ #define AMDGPU_IH_MAX_NUM_IVS 32 +#define IH_RING_SIZE (256 * 1024) +#define IH_SW_RING_SIZE (8 * 1024) /* enough for 256 CAM entries */ + struct amdgpu_device; struct amdgpu_iv_entry; @@ -97,8 +100,8 @@ struct amdgpu_ih_funcs { int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); -void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, - unsigned int num_dw); +void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, + const uint32_t *iv, unsigned int num_dw); int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 5273decc5753..fa6d0adcec20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -493,7 +493,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry, unsigned int num_dw) { - amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw); + amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw); schedule_work(&adev->irq.ih_soft_work); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index cca5a495611f..631c5ab3f7dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -557,6 +557,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) crtc = (struct drm_crtc *)minfo->crtcs[i]; if (crtc && crtc->base.id == info->mode_crtc.id) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + ui32 = amdgpu_crtc->crtc_id; found = 1; break; @@ -575,7 +576,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (ret) return ret; - ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip))); + ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip))); return ret ? -EFAULT : 0; } case AMDGPU_INFO_HW_IP_COUNT: { @@ -721,17 +722,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ? 
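A sizing check for the new IH defines above:

/* IH_SW_RING_SIZE = 8 KiB = 2048 dwords; at the usual 8-dword
 * (32-byte) IV entry size that is exactly 2048 / 8 = 256 entries,
 * matching the "256 CAM entries" the comment refers to.
 */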
-EFAULT : 0; } case AMDGPU_INFO_READ_MMR_REG: { - unsigned n, alloc_size; + unsigned int n, alloc_size; uint32_t *regs; - unsigned se_num = (info->read_mmr_reg.instance >> + unsigned int se_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & AMDGPU_INFO_MMR_SE_INDEX_MASK; - unsigned sh_num = (info->read_mmr_reg.instance >> + unsigned int sh_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & AMDGPU_INFO_MMR_SH_INDEX_MASK; /* set full masks if the userspace set all bits - * in the bitfields */ + * in the bitfields + */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; else if (se_num >= AMDGPU_GFX_MAX_SE) @@ -896,7 +898,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return ret; } case AMDGPU_INFO_VCE_CLOCK_TABLE: { - unsigned i; + unsigned int i; struct drm_amdgpu_info_vce_clock_table vce_clk_table = {}; struct amd_vce_state *vce_state; @@ -1102,6 +1104,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct drm_amdgpu_info_video_caps *caps; int r; + if (!adev->asic_funcs->query_video_codecs) + return -EINVAL; + switch (info->video_cap.type) { case AMDGPU_INFO_VIDEO_CAPS_DECODE: r = amdgpu_asic_query_video_codecs(adev, false, &codecs); @@ -1229,13 +1234,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) pasid = 0; } - r = amdgpu_vm_init(adev, &fpriv->vm); + r = amdgpu_xcp_open_device(adev, fpriv, file_priv); if (r) goto error_pasid; - r = amdgpu_xcp_open_device(adev, fpriv, file_priv); + r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id); if (r) - goto error_vm; + goto error_pasid; r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid); if (r) @@ -1719,7 +1724,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); - seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 3e3dadd6d0f3..37f15abf7543 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -643,6 +643,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, unsigned long flags; int r; + memset(&queue_input, 0, sizeof(struct mes_add_queue_input)); + /* allocate the mes queue buffer */ queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL); if (!queue) { @@ -1378,7 +1380,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev) goto error_pasid; } - r = amdgpu_vm_init(adev, vm); + r = amdgpu_vm_init(adev, vm, -1); if (r) { DRM_ERROR("failed to initialize vm\n"); goto error_pasid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 2d6ac30b7135..2053954a235c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -224,6 +224,7 @@ struct mes_add_queue_input { uint32_t is_kfd_process; uint32_t is_aql_queue; uint32_t queue_size; + uint32_t exclusively_scheduled; }; struct mes_remove_queue_input { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index f7905bce0de1..88419927570a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1575,23 +1575,31 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct 
seq_file *m) { struct dma_buf_attachment *attachment; struct dma_buf *dma_buf; - unsigned int domain; const char *placement; unsigned int pin_count; u64 size; - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); - switch (domain) { - case AMDGPU_GEM_DOMAIN_VRAM: - placement = "VRAM"; - break; - case AMDGPU_GEM_DOMAIN_GTT: - placement = " GTT"; - break; - case AMDGPU_GEM_DOMAIN_CPU: - default: - placement = " CPU"; - break; + if (dma_resv_trylock(bo->tbo.base.resv)) { + unsigned int domain; + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + switch (domain) { + case AMDGPU_GEM_DOMAIN_VRAM: + if (amdgpu_bo_in_cpu_visible_vram(bo)) + placement = "VRAM VISIBLE"; + else + placement = "VRAM"; + break; + case AMDGPU_GEM_DOMAIN_GTT: + placement = "GTT"; + break; + case AMDGPU_GEM_DOMAIN_CPU: + default: + placement = "CPU"; + break; + } + dma_resv_unlock(bo->tbo.base.resv); + } else { + placement = "UNKNOWN"; } size = amdgpu_bo_size(bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 6d676bdd1505..4e428060a1fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -45,9 +45,6 @@ #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3) -static int psp_sysfs_init(struct amdgpu_device *adev); -static void psp_sysfs_fini(struct amdgpu_device *adev); - static int psp_load_smu_fw(struct psp_context *psp); static int psp_rap_terminate(struct psp_context *psp); static int psp_securedisplay_terminate(struct psp_context *psp); @@ -180,9 +177,11 @@ static int psp_early_init(void *handle) psp->autoload_supported = false; break; case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 7): + adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev); + fallthrough; case IP_VERSION(11, 0, 5): case IP_VERSION(11, 0, 9): - case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 5, 0): case IP_VERSION(11, 0, 12): @@ -202,8 +201,8 @@ static int psp_early_init(void *handle) case IP_VERSION(13, 0, 3): case IP_VERSION(13, 0, 5): case IP_VERSION(13, 0, 8): - case IP_VERSION(13, 0, 10): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): psp_v13_0_set_psp_funcs(psp); psp->autoload_supported = true; break; @@ -215,8 +214,10 @@ static int psp_early_init(void *handle) break; case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): + case IP_VERSION(13, 0, 10): psp_v13_0_set_psp_funcs(psp); psp->autoload_supported = true; + adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); break; case IP_VERSION(13, 0, 4): psp_v13_0_4_set_psp_funcs(psp); @@ -462,13 +463,6 @@ static int psp_sw_init(void *handle) } } - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) { - ret = psp_sysfs_init(adev); - if (ret) - return ret; - } - ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, amdgpu_sriov_vf(adev) ? 
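A note on the locking pattern introduced in amdgpu_bo_print_info():

/* dma_resv_trylock() either yields a consistent snapshot of the BO's
 * placement or the dump degrades to "UNKNOWN"; the debug path never
 * waits on a contended reservation.
 */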
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, @@ -498,11 +492,11 @@ static int psp_sw_init(void *handle) return 0; failed2: - amdgpu_bo_free_kernel(&psp->fw_pri_bo, - &psp->fw_pri_mc_addr, &psp->fw_pri_buf); -failed1: amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); +failed1: + amdgpu_bo_free_kernel(&psp->fw_pri_bo, + &psp->fw_pri_mc_addr, &psp->fw_pri_buf); return ret; } @@ -520,10 +514,6 @@ static int psp_sw_fini(void *handle) amdgpu_ucode_release(&psp->cap_fw); amdgpu_ucode_release(&psp->toc_fw); - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) - psp_sysfs_fini(adev); - kfree(cmd); cmd = NULL; @@ -2459,8 +2449,8 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, return ret; } -static int psp_execute_non_psp_fw_load(struct psp_context *psp, - struct amdgpu_firmware_info *ucode) +int psp_execute_ip_fw_load(struct psp_context *psp, + struct amdgpu_firmware_info *ucode) { int ret = 0; struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); @@ -2503,7 +2493,7 @@ static int psp_load_smu_fw(struct psp_context *psp) DRM_WARN("Failed to set MP1 state prepare for reload\n"); } - ret = psp_execute_non_psp_fw_load(psp, ucode); + ret = psp_execute_ip_fw_load(psp, ucode); if (ret) DRM_ERROR("PSP load smu failed!\n"); @@ -2545,7 +2535,7 @@ int psp_load_fw_list(struct psp_context *psp, for (i = 0; i < ucode_count; ++i) { ucode = ucode_list[i]; psp_print_fw_hdr(psp, ucode); - ret = psp_execute_non_psp_fw_load(psp, ucode); + ret = psp_execute_ip_fw_load(psp, ucode); if (ret) return ret; } @@ -2592,7 +2582,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp) psp_print_fw_hdr(psp, ucode); - ret = psp_execute_non_psp_fw_load(psp, ucode); + ret = psp_execute_ip_fw_load(psp, ucode); if (ret) return ret; @@ -2931,19 +2921,6 @@ int psp_rlc_autoload_start(struct psp_context *psp) return ret; } -int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, - uint64_t cmd_gpu_addr, int cmd_size) -{ - struct amdgpu_firmware_info ucode = {0}; - - ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM : - AMDGPU_UCODE_ID_VCN0_RAM; - ucode.mc_addr = cmd_gpu_addr; - ucode.ucode_size = cmd_size; - - return psp_execute_non_psp_fw_load(&adev->psp, &ucode); -} - int psp_ring_cmd_submit(struct psp_context *psp, uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, @@ -3584,6 +3561,11 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size drm_dev_exit(idx); } +/** + * DOC: usbc_pd_fw + * Reading from this file will retrieve the USB-C PD firmware version. Writing to + * this file will trigger the update process. 
+ */ static DEVICE_ATTR(usbc_pd_fw, 0644, psp_usbc_pd_fw_sysfs_read, psp_usbc_pd_fw_sysfs_write); @@ -3624,7 +3606,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, adev->psp.vbflash_image_size += count; mutex_unlock(&adev->psp.mutex); - dev_info(adev->dev, "VBIOS flash write PSP done"); + dev_dbg(adev->dev, "IFWI staged for update"); return count; } @@ -3644,7 +3626,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, if (adev->psp.vbflash_image_size == 0) return -EINVAL; - dev_info(adev->dev, "VBIOS flash to PSP started"); + dev_dbg(adev->dev, "PSP IFWI flash process initiated"); ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, AMDGPU_GPU_PAGE_SIZE, @@ -3669,14 +3651,32 @@ rel_buf: adev->psp.vbflash_image_size = 0; if (ret) { - dev_err(adev->dev, "Failed to load VBIOS FW, err = %d", ret); + dev_err(adev->dev, "Failed to load IFWI, err = %d", ret); return ret; } - dev_info(adev->dev, "VBIOS flash to PSP done"); + dev_dbg(adev->dev, "PSP IFWI flash process done"); return 0; } +/** + * DOC: psp_vbflash + * Writing to this file will stage an IFWI for update. Reading from this file + * will trigger the update process. + */ +static struct bin_attribute psp_vbflash_bin_attr = { + .attr = {.name = "psp_vbflash", .mode = 0660}, + .size = 0, + .write = amdgpu_psp_vbflash_write, + .read = amdgpu_psp_vbflash_read, +}; + +/** + * DOC: psp_vbflash_status + * The status of the flash process. + * 0: IFWI flash not complete. + * 1: IFWI flash complete. + */ static ssize_t amdgpu_psp_vbflash_status(struct device *dev, struct device_attribute *attr, char *buf) @@ -3693,39 +3693,49 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev, return sysfs_emit(buf, "0x%x\n", vbflash_status); } +static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); -static const struct bin_attribute psp_vbflash_bin_attr = { - .attr = {.name = "psp_vbflash", .mode = 0660}, - .size = 0, - .write = amdgpu_psp_vbflash_write, - .read = amdgpu_psp_vbflash_read, +static struct bin_attribute *bin_flash_attrs[] = { + &psp_vbflash_bin_attr, + NULL }; -static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); +static struct attribute *flash_attrs[] = { + &dev_attr_psp_vbflash_status.attr, + &dev_attr_usbc_pd_fw.attr, + NULL +}; -int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) +static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { - int ret = 0; + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + if (attr == &dev_attr_usbc_pd_fw.attr) + return adev->psp.sup_pd_fw_up ? 0660 : 0; - switch (adev->ip_versions[MP0_HWIP][0]) { - case IP_VERSION(13, 0, 0): - case IP_VERSION(13, 0, 7): - case IP_VERSION(13, 0, 10): - ret = sysfs_create_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr); - if (ret) - dev_err(adev->dev, "Failed to create device file psp_vbflash"); - ret = device_create_file(adev->dev, &dev_attr_psp_vbflash_status); - if (ret) - dev_err(adev->dev, "Failed to create device file psp_vbflash_status"); - return ret; - default: - return 0; - } + return adev->psp.sup_ifwi_up ? 
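A hypothetical userspace sketch of the update flow these attributes encode (paths and buffer handling simplified):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Stage the IFWI by writing psp_vbflash, trigger the flash with a
 * read, then poll psp_vbflash_status (0 = not complete, 1 = complete).
 */
static int flash_ifwi(const char *dev_dir, const void *img, size_t len)
{
	char path[256];
	char status;
	int fd;

	snprintf(path, sizeof(path), "%s/psp_vbflash", dev_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	write(fd, img, len);		/* stage the image in memory */
	close(fd);

	fd = open(path, O_RDONLY);
	read(fd, &status, 1);		/* a read initiates the flash */
	close(fd);
	return 0;
}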
0440 : 0; } +static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, + struct bin_attribute *attr, + int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return adev->psp.sup_ifwi_up ? 0660 : 0; +} + +const struct attribute_group amdgpu_flash_attr_group = { + .attrs = flash_attrs, + .bin_attrs = bin_flash_attrs, + .is_bin_visible = amdgpu_bin_flash_attr_is_visible, + .is_visible = amdgpu_flash_attr_is_visible, +}; + const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, @@ -3744,27 +3754,6 @@ const struct amd_ip_funcs psp_ip_funcs = { .set_powergating_state = psp_set_powergating_state, }; -static int psp_sysfs_init(struct amdgpu_device *adev) -{ - int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); - - if (ret) - DRM_ERROR("Failed to create USBC PD FW control file!"); - - return ret; -} - -void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev) -{ - sysfs_remove_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr); - device_remove_file(adev->dev, &dev_attr_psp_vbflash_status); -} - -static void psp_sysfs_fini(struct amdgpu_device *adev) -{ - device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); -} - const struct amdgpu_ip_block_version psp_v3_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, .major = 3, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 2cae0b1a0b8a..c3203de4a007 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -309,8 +309,8 @@ struct psp_runtime_scpm_entry { struct psp_context { - struct amdgpu_device *adev; - struct psp_ring km_ring; + struct amdgpu_device *adev; + struct psp_ring km_ring; struct psp_gfx_cmd_resp *cmd; const struct psp_funcs *funcs; @@ -339,7 +339,7 @@ struct psp_context uint64_t tmr_mc_addr; /* asd firmware */ - const struct firmware *asd_fw; + const struct firmware *asd_fw; /* toc firmware */ const struct firmware *toc_fw; @@ -384,9 +384,13 @@ struct psp_context uint32_t boot_cfg_bitmask; - char *vbflash_tmp_buf; - size_t vbflash_image_size; - bool vbflash_done; + /* firmware upgrades supported */ + bool sup_pd_fw_up; + bool sup_ifwi_up; + + char *vbflash_tmp_buf; + size_t vbflash_image_size; + bool vbflash_done; }; struct amdgpu_psp_funcs { @@ -458,9 +462,10 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, uint32_t field_val, uint32_t mask, uint32_t msec_timeout); +int psp_execute_ip_fw_load(struct psp_context *psp, + struct amdgpu_firmware_info *ucode); + int psp_gpu_reset(struct amdgpu_device *adev); -int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, - uint64_t cmd_gpu_addr, int cmd_size); int psp_ta_init_shared_buf(struct psp_context *psp, struct ta_mem_context *mem_ctx); @@ -525,6 +530,4 @@ int psp_spatial_partition(struct psp_context *psp, int mode); int is_psp_fw_valid(struct psp_bin_desc bin); -int amdgpu_psp_sysfs_init(struct amdgpu_device *adev); -void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 8aaa427f8c0f..62011a521833 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1159,7 +1159,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, } /* Calculate XGMI relative offset */ - if 
(adev->gmc.xgmi.num_physical_nodes > 1) { + if (adev->gmc.xgmi.num_physical_nodes > 1 && + info->head.block != AMDGPU_RAS_BLOCK__GFX) { block_info.address = amdgpu_xgmi_get_relative_phy_addr(adev, block_info.address); @@ -2414,6 +2415,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) if (adev->asic_type == CHIP_IP_DISCOVERY) { switch (adev->ip_versions[MP0_HWIP][0]) { case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 10): return true; default: @@ -2440,10 +2442,10 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) if (!ctx) return; - if (strnstr(ctx->vbios_version, "D16406", - sizeof(ctx->vbios_version)) || - strnstr(ctx->vbios_version, "D36002", - sizeof(ctx->vbios_version))) + if (strnstr(ctx->vbios_pn, "D16406", + sizeof(ctx->vbios_pn)) || + strnstr(ctx->vbios_pn, "D36002", + sizeof(ctx->vbios_pn))) adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); } @@ -2515,8 +2517,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) /* hw_supported needs to be aligned with RAS block mask. */ adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; - adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : - adev->ras_hw_enabled & amdgpu_ras_mask; + + /* + * Disable ras feature for aqua vanjaram + * by default on apu platform. + */ + if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) && + adev->gmc.is_app_apu) + adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 : + adev->ras_hw_enabled & amdgpu_ras_mask; + else + adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : + adev->ras_hw_enabled & amdgpu_ras_mask; } static void amdgpu_ras_counte_dw(struct work_struct *work) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 0648dfe559af..4287743e1212 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -194,9 +194,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, /* VEGA20 and ARCTURUS */ if (adev->asic_type == CHIP_VEGA20) control->i2c_address = EEPROM_I2C_MADDR_0; - else if (strnstr(atom_ctx->vbios_version, + else if (strnstr(atom_ctx->vbios_pn, "D342", - sizeof(atom_ctx->vbios_version))) + sizeof(atom_ctx->vbios_pn))) control->i2c_address = EEPROM_I2C_MADDR_0; else control->i2c_address = EEPROM_I2C_MADDR_4; @@ -205,8 +205,8 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, control->i2c_address = EEPROM_I2C_MADDR_0; return true; case IP_VERSION(13, 0, 2): - if (strnstr(atom_ctx->vbios_version, "D673", - sizeof(atom_ctx->vbios_version))) + if (strnstr(atom_ctx->vbios_pn, "D673", + sizeof(atom_ctx->vbios_pn))) control->i2c_address = EEPROM_I2C_MADDR_4; else control->i2c_address = EEPROM_I2C_MADDR_0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index eec41ad30406..5fed06ffcc6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -87,7 +87,7 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); if (!reset_handler) - return -ENOSYS; + return -EOPNOTSUPP; return reset_handler->prepare_hwcontext(adev->reset_cntl, reset_context); @@ -103,7 +103,7 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev, reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); if (!reset_handler) - return -ENOSYS; + return -EOPNOTSUPP; ret = 
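The effect of the ras_enabled hunk above, spelled out:

/* amdgpu_ras_enable: -1 = auto (default), 0 = force off, 1 = force on.
 * On the IP_VERSION(13, 0, 6) APU configuration RAS stays disabled
 * unless explicitly forced on; on all other ASICs it stays enabled
 * unless explicitly forced off.
 */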
reset_handler->perform_reset(adev->reset_cntl, reset_context); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h index b22d4fb2a847..d3186b570b82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h @@ -56,6 +56,15 @@ enum amdgpu_ring_mux_offset_type { AMDGPU_MUX_OFFSET_TYPE_CE, }; +enum ib_complete_status { + /* IB not started/reset value, default value. */ + IB_COMPLETION_STATUS_DEFAULT = 0, + /* IB preempted, started but not completed. */ + IB_COMPLETION_STATUS_PREEMPTED = 1, + /* IB completed. */ + IB_COMPLETION_STATUS_COMPLETED = 2, +}; + struct amdgpu_ring_mux { struct amdgpu_ring *real_ring; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index 80b263646966..b591d33af264 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -26,6 +26,8 @@ #include "clearstate_defs.h" +#define AMDGPU_MAX_RLC_INSTANCES 8 + /* firmware ID used in rlc toc */ typedef enum _FIRMWARE_ID_ { FIRMWARE_ID_INVALID = 0, @@ -201,7 +203,7 @@ struct amdgpu_rlc { u32 cp_table_size; /* safe mode for updating CG/PG state */ - bool in_safe_mode[8]; + bool in_safe_mode[AMDGPU_MAX_RLC_INSTANCES]; const struct amdgpu_rlc_funcs *funcs; /* for firmware data */ @@ -257,7 +259,7 @@ struct amdgpu_rlc { bool rlcg_reg_access_supported; /* registers for rlcg indirect reg access */ - struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl; + struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl[AMDGPU_MAX_RLC_INSTANCES]; }; void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index dacf281d2b21..e2b9392d7f0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -239,9 +239,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, sizeof(struct amdgpu_sdma_instance)); } - if (amdgpu_sriov_vf(adev)) - return 0; - DRM_DEBUG("psp_load == '%s'\n", adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index ae455aab5d29..36b55d2bd51a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1239,3 +1239,18 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) return 0; } + +int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, + enum AMDGPU_UCODE_ID ucode_id) +{ + struct amdgpu_firmware_info ucode = { + .ucode_id = (ucode_id ? ucode_id : + (inst_idx ? 
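On the errno change above:

/* -ENOSYS is reserved for "no such system call"; a reset controller
 * with no matching handler is an unsupported operation on this device,
 * which is exactly what -EOPNOTSUPP communicates to callers.
 */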
AMDGPU_UCODE_ID_VCN1_RAM : + AMDGPU_UCODE_ID_VCN0_RAM)), + .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, + .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr), + }; + + return psp_execute_ip_fw_load(&adev->psp, &ucode); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 92d5534df5f4..a3eed90b6af0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -161,6 +161,7 @@ } while (0) #define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2) +#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4) #define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6) #define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8) #define AMDGPU_VCN_SW_RING_FLAG (1 << 9) @@ -180,6 +181,8 @@ #define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0) #define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1) +#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2 + enum fw_queue_mode { FW_QUEUE_RING_RESET = 1, FW_QUEUE_DPG_HOLD_OFF = 2, @@ -343,6 +346,11 @@ struct amdgpu_fw_shared_rb_setup { uint32_t reserved[6]; }; +struct amdgpu_fw_shared_drm_key_wa { + uint8_t method; + uint8_t reserved[3]; +}; + struct amdgpu_vcn4_fw_shared { uint32_t present_flag_0; uint8_t pad[12]; @@ -352,6 +360,7 @@ struct amdgpu_vcn4_fw_shared { uint8_t pad2[20]; struct amdgpu_fw_shared_rb_setup rb_setup; struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface; + struct amdgpu_fw_shared_drm_key_wa drm_key_wa; }; struct amdgpu_vcn_fwlog { @@ -414,4 +423,7 @@ int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, + enum AMDGPU_UCODE_ID ucode_id); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 41aa853a07d2..ec044f711eb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -835,6 +835,16 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad return mode; } +void amdgpu_virt_post_reset(struct amdgpu_device *adev) +{ + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) { + /* force set to GFXOFF state after reset, + * to avoid some invalid operation before GC enable + */ + adev->gfx.is_poweron = false; + } +} + bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id) { switch (adev->ip_versions[MP0_HWIP][0]) { @@ -845,6 +855,17 @@ bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_i return false; else return true; + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */ + if (ucode_id == AMDGPU_UCODE_ID_RLC_G + || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL + || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM + || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM + || ucode_id == AMDGPU_UCODE_ID_SMC) + return true; + else + return false; case IP_VERSION(13, 0, 10): /* white list */ if (ucode_id == AMDGPU_UCODE_ID_CAP @@ -954,7 +975,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, return ret; } -static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) +static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id) { struct amdgpu_rlcg_reg_access_ctrl 
*reg_access_ctrl; uint32_t timeout = 50000; @@ -972,7 +993,12 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v return 0; } - reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) { + dev_err(adev->dev, "invalid xcc\n"); + return 0; + } + + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id]; scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0; scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1; scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; @@ -1037,13 +1063,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v void amdgpu_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, - u32 acc_flags, u32 hwip) + u32 acc_flags, u32 hwip, u32 xcc_id) { u32 rlcg_flag; if (!amdgpu_sriov_runtime(adev) && amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { - amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag); + amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id); return; } @@ -1054,13 +1080,13 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev, } u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, - u32 offset, u32 acc_flags, u32 hwip) + u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id) { u32 rlcg_flag; if (!amdgpu_sriov_runtime(adev) && amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) - return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag); + return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id); if (acc_flags & AMDGPU_REGS_NO_KIQ) return RREG32_NO_KIQ(offset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 4f7bab52282a..fabb83e9d9ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -355,9 +355,10 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, struct amdgpu_video_codec_info *decode, uint32_t decode_array_size); void amdgpu_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, - u32 acc_flags, u32 hwip); + u32 acc_flags, u32 hwip, u32 xcc_id); u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, - u32 offset, u32 acc_flags, u32 hwip); + u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id); bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id); +void amdgpu_virt_post_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 53ff91fc6cf6..7148a216ae2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) DRM_WARN("%s: vblank timer overrun\n", __func__); ret = drm_crtc_handle_vblank(crtc); + /* Don't queue timer again when vblank is disabled. 
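The new per-XCC validation in amdgpu_virt_rlcg_reg_rw(), worked through:

/* With adev->gfx.xcc_mask = 0x3 (XCC0 and XCC1 present), xcc_id = 2
 * gives ((1 << 2) & 0x3) == 0, so the access is rejected before any
 * scratch-register handshake; xcc_id = 1 selects reg_access_ctrl[1]
 * and proceeds.
 */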
*/ if (!ret) - DRM_ERROR("amdgpu_vkms failure on handling vblank"); + return HRTIMER_NORESTART; return HRTIMER_RESTART; } @@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - hrtimer_cancel(&amdgpu_crtc->vblank_timer); + hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer); } static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, @@ -500,8 +501,6 @@ static int amdgpu_vkms_sw_init(void *handle) adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; - adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; - r = amdgpu_display_modeset_create_props(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d0e6009a985b..f5daadcec865 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -112,9 +112,9 @@ struct amdgpu_prt_cb { }; /** - * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence + * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence */ -struct amdgpu_vm_tlb_seq_cb { +struct amdgpu_vm_tlb_seq_struct { /** * @vm: pointer to the amdgpu_vm structure to set the fence sequence on */ @@ -829,7 +829,7 @@ error: static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { - struct amdgpu_vm_tlb_seq_cb *tlb_cb; + struct amdgpu_vm_tlb_seq_struct *tlb_cb; tlb_cb = container_of(cb, typeof(*tlb_cb), cb); atomic64_inc(&tlb_cb->vm->tlb_seq); @@ -867,7 +867,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence) { struct amdgpu_vm_update_params params; - struct amdgpu_vm_tlb_seq_cb *tlb_cb; + struct amdgpu_vm_tlb_seq_struct *tlb_cb; struct amdgpu_res_cursor cursor; enum amdgpu_sync_mode sync_mode; int r, idx; @@ -2117,13 +2117,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) * * @adev: amdgpu_device pointer * @vm: requested vm + * @xcp_id: GPU partition selection id * * Init @vm fields. * * Returns: * 0 for success, error for failure. */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id) { struct amdgpu_bo *root_bo; struct amdgpu_bo_vm *root; @@ -2173,7 +2174,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->evicting = false; r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, - false, &root); + false, &root, xcp_id); if (r) goto error_free_delayed; root_bo = &root->bo; @@ -2275,16 +2276,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) goto unreserve_bo; vm->update_funcs = &amdgpu_vm_cpu_funcs; + r = amdgpu_vm_pt_map_tables(adev, vm); + if (r) + goto unreserve_bo; + } else { vm->update_funcs = &amdgpu_vm_sdma_funcs; } - /* - * Make sure root PD gets mapped. As vm_update_mode could be changed - * when turning a GFX VM into a compute VM. 
- */ - r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo)); - if (r) - goto unreserve_bo; dma_fence_put(vm->last_update); vm->last_update = dma_fence_get_stub(); @@ -2600,7 +2598,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, /* Intentionally setting invalid PTE flag * combination to force a no-retry-fault */ - flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT; + flags = AMDGPU_VM_NORETRY_FLAGS; value = 0; } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { /* Redirect the access to the dummy page */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 45bcf46c6f59..204ab13184ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -86,7 +86,13 @@ struct amdgpu_mem_stats; /* PDE Block Fragment Size for VEGA10 */ #define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59) +/* Flag combination to set no-retry with TF disabled */ +#define AMDGPU_VM_NORETRY_FLAGS (AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \ + AMDGPU_PTE_TF) +/* Flag combination to set no-retry with TF enabled */ +#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \ + AMDGPU_PTE_PRT) /* For GFX9 */ #define AMDGPU_PTE_MTYPE_VG10(a) ((uint64_t)(a) << 57) #define AMDGPU_PTE_MTYPE_VG10_MASK AMDGPU_PTE_MTYPE_VG10(3ULL) @@ -394,7 +400,7 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid); long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout); -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id); int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); @@ -476,7 +482,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_vm *vmbo, bool immediate); int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int level, bool immediate, struct amdgpu_bo_vm **vmbo); + int level, bool immediate, struct amdgpu_bo_vm **vmbo, + int32_t xcp_id); void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm); bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev, struct amdgpu_vm *vm); @@ -492,6 +499,8 @@ void amdgpu_vm_pt_free_work(struct work_struct *work); void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); #endif +int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm); + /** * amdgpu_vm_tlb_seq - return tlb flush sequence number * @vm: the amdgpu_vm structure to query diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 31913ae86de6..6e31621452de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -31,6 +31,7 @@ */ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table) { + table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; return amdgpu_bo_kmap(&table->bo, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index dea1a64be44d..96d601e209b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -498,11 +498,12 @@ exit: * @level: the page table level * @immediate: use a immediate update * @vmbo: 
pointer to the buffer object pointer + * @xcp_id: GPU partition id */ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int level, bool immediate, struct amdgpu_bo_vm **vmbo) + int level, bool immediate, struct amdgpu_bo_vm **vmbo, + int32_t xcp_id) { - struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm); struct amdgpu_bo_param bp; struct amdgpu_bo *bo; struct dma_resv *resv; @@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.no_wait_gpu = immediate; - bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1; + bp.xcp_id_plus1 = xcp_id + 1; if (vm->root.bo) bp.resv = vm->root.bo->tbo.base.resv; @@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.resv = bo->tbo.base.resv; bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1; + bp.xcp_id_plus1 = xcp_id + 1; r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); @@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev, return 0; amdgpu_vm_eviction_unlock(vm); - r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); + r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt, + vm->root.bo->xcp_id); amdgpu_vm_eviction_lock(vm); if (r) return r; @@ -778,6 +780,27 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params, 1, 0, flags); } +/** + * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags + * + * @adev: amdgpu_device pointer + * @flags: pointer to PTE flags + * + * Update PTE no-retry flags when TF is enabled. + */ +static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev, + uint64_t *flags) +{ + /* + * Update no-retry flags with the corresponding TF + * no-retry combination. + */ + if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) { + *flags &= ~AMDGPU_VM_NORETRY_FLAGS; + *flags |= adev->gmc.noretry_flags; + } +} + /* * amdgpu_vm_pte_update_flags - figure out flags for PTE updates * @@ -804,6 +827,16 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params, flags |= AMDGPU_PTE_EXECUTABLE; } + /* + * Update no-retry flags to use the no-retry flag combination + * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination + * does not work when TF is enabled. So, replace them with + * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for + * all cases. + */ + if (level == AMDGPU_VM_PTB) + amdgpu_vm_pte_update_noretry_flags(adev, &flags); + /* APUs mapping system memory may need different MTYPEs on different * NUMA nodes. Only do this for contiguous ranges that can be assumed * to be on the same NUMA node. @@ -1044,3 +1077,31 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, return 0; } + +/** + * amdgpu_vm_pt_map_tables - have bo of root PD cpu accessible + * @adev: amdgpu device structure + * @vm: amdgpu vm structure + * + * make root page directory and everything below it cpu accessible. 
+ */ +int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm) +{ + struct amdgpu_vm_pt_cursor cursor; + struct amdgpu_vm_bo_base *entry; + + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) { + + struct amdgpu_bo_vm *bo; + int r; + + if (entry->bo) { + bo = to_amdgpu_bo_vm(entry->bo); + r = vm->update_funcs->map_table(bo); + if (r) + return r; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index d175e862f222..9c9cca129498 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, if (!adev->xcp_mgr) return 0; - fpriv->xcp_id = ~0; + fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION; for (i = 0; i < MAX_XCP; ++i) { if (!adev->xcp_mgr->xcp[i].ddev) break; @@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, } } - fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 : + fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 : adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 0f8026d64ea5..9a1036aeec2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -37,6 +37,8 @@ #define AMDGPU_XCP_FL_NONE 0 #define AMDGPU_XCP_FL_LOCKED (1 << 0) +#define AMDGPU_XCP_NO_PARTITION (~0) + struct amdgpu_fpriv; enum AMDGPU_XCP_IP_BLOCK { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 03dc59cbe8aa..7e91b24784e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -500,6 +500,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) hive = kzalloc(sizeof(*hive), GFP_KERNEL); if (!hive) { dev_err(adev->dev, "XGMI: allocation failed\n"); + ret = -ENOMEM; hive = NULL; goto pro_end; } diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 16471b81a1f5..d0fc62784e82 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, enum AMDGPU_XCP_IP_BLOCK ip_blk; uint32_t inst_mask; - ring->xcp_id = ~0; + ring->xcp_id = AMDGPU_XCP_NO_PARTITION; if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) return; @@ -134,7 +134,7 @@ static int aqua_vanjaram_xcp_sched_list_update( for (i = 0; i < AMDGPU_MAX_RINGS; i++) { ring = adev->rings[i]; - if (!ring || !ring->sched.ready) + if (!ring || !ring->sched.ready || ring->no_scheduler) continue; aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); @@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds( u32 sel_xcp_id; int i; - if (fpriv->xcp_id == ~0) { + if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) { u32 least_ref_cnt = ~0; fpriv->xcp_id = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 5f610e9a5f0f..9f63ddb89b75 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1438,6 +1438,8 @@ static void atom_get_vbios_pn(struct atom_context *ctx) ctx->vbios_pn[count] = 0; } + + pr_info("ATOM BIOS: %s\n", ctx->vbios_pn); } static void atom_get_vbios_version(struct atom_context *ctx) @@ -1460,11 +1462,9 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) int base; struct 
atom_context *ctx = kzalloc(sizeof(struct atom_context), GFP_KERNEL); - char *str; struct _ATOM_ROM_HEADER *atom_rom_header; struct _ATOM_MASTER_DATA_TABLE *master_table; struct _ATOM_FIRMWARE_INFO *atom_fw_info; - u16 idx; if (!ctx) return NULL; @@ -1502,16 +1502,6 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) return NULL; } - idx = CU16(ATOM_ROM_PART_NUMBER_PTR); - if (idx == 0) - idx = 0x80; - - str = CSTR(idx); - if (*str != '\0') { - pr_info("ATOM BIOS: %s\n", str); - strscpy(ctx->vbios_version, str, sizeof(ctx->vbios_version)); - } - atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base); if (atom_rom_header->usMasterDataTableOffset != 0) { master_table = (struct _ATOM_MASTER_DATA_TABLE *) diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index 0c1839824520..55bf99d5288d 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -33,7 +33,6 @@ struct drm_device; #define ATOM_ATI_MAGIC_PTR 0x30 #define ATOM_ATI_MAGIC " 761295520" #define ATOM_ROM_TABLE_PTR 0x48 -#define ATOM_ROM_PART_NUMBER_PTR 0x6E #define ATOM_ROM_MAGIC "ATOM" #define ATOM_ROM_MAGIC_PTR 4 @@ -143,7 +142,6 @@ struct atom_context { int io_mode; uint32_t *scratch; int scratch_size_bytes; - char vbios_version[20]; uint8_t name[STRLEN_LONG]; uint8_t vbios_pn[STRLEN_LONG]; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 44af8022b89f..0aee9c8288a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -271,8 +271,7 @@ MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin"); MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin"); -static const struct soc15_reg_golden golden_settings_gc_10_1[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100), @@ -315,13 +314,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000) }; -static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = -{ +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), @@ -1376,8 +1373,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) }; -static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), @@ -1418,8 +1414,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000), }; -static const struct 
soc15_reg_golden golden_settings_gc_10_1_2[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), @@ -1464,13 +1459,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000) }; -static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = -{ +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), @@ -2093,13 +2086,11 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) }; -static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = -{ +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), @@ -3154,8 +3145,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), @@ -3164,7 +3154,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), - SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xff000000, 0xff008080), @@ -3201,13 +3191,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = -{ +static 
const struct soc15_reg_golden golden_settings_gc_10_3_2[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), @@ -3254,8 +3242,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), }; -static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4), @@ -3285,8 +3272,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), }; -static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), @@ -3309,8 +3295,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0x30000000, 0x30000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0x7e000000, 0x7e000100), @@ -3380,7 +3365,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX,0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) }; @@ -3421,8 +3406,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3_6[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_6[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x00000044), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), @@ -3506,6 +3490,8 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev); static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, uint16_t pasid, uint32_t flush_type, bool all_hub, uint8_t dst_sel); +static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, + unsigned int vmid); static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { @@ -3714,8 +3700,8 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) break; case IP_VERSION(10, 3, 4): 
soc15_program_register_sequence(adev, - golden_settings_gc_10_3_4, - (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4)); + golden_settings_gc_10_3_4, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4)); break; case IP_VERSION(10, 3, 5): soc15_program_register_sequence(adev, @@ -3782,7 +3768,7 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); uint32_t tmp = 0; - unsigned i; + unsigned int i; int r; WREG32(scratch, 0xCAFEDEAD); @@ -3820,7 +3806,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; - unsigned index; + unsigned int index; uint64_t gpu_addr; volatile uint32_t *cpu_ptr; long r; @@ -3951,7 +3937,7 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) break; } - return ret ; + return ret; } static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) @@ -4151,7 +4137,7 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) { struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; - reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1); reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2); @@ -4159,14 +4145,14 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL); reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX); switch (adev->ip_versions[GC_HWIP][0]) { - case IP_VERSION(10, 3, 0): - reg_access_ctrl->spare_int = - SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid); - break; - default: - reg_access_ctrl->spare_int = - SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT); - break; + case IP_VERSION(10, 3, 0): + reg_access_ctrl->spare_int = + SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid); + break; + default: + reg_access_ctrl->spare_int = + SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT); + break; } adev->gfx.rlc.rlcg_reg_access_supported = true; } @@ -4187,11 +4173,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return r; } - /* init spm vmid with 0xf */ - if (adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); - - return 0; } @@ -4213,7 +4194,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) int r; u32 *hpd; const __le32 *fw_data = NULL; - unsigned fw_size; + unsigned int fw_size; u32 *fw = NULL; size_t mec_hpd_size; @@ -4295,7 +4276,8 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id { /* in gfx10 the SIMD_ID is specified as part of the INSTANCE * field when performing a select_se_sh so it should be - * zero here */ + * zero here + */ WARN_ON(simd != 0); /* type 2 wave data */ @@ -4474,7 +4456,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) { - unsigned irq_type; + unsigned int irq_type; struct amdgpu_ring *ring; unsigned int hw_prio; @@ -4795,7 +4777,8 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade uint32_t pa_sc_tile_steering_override; /* for ASICs that integrates GFX v10.3 - * 
pa_sc_tile_steering_override should be set to 0 */ + * pa_sc_tile_steering_override should be set to 0 + */ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) return 0; @@ -4871,8 +4854,10 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - access. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. These should be enabled by FW for target VMIDs. + */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); @@ -5108,8 +5093,10 @@ static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, static void gfx_v10_0_rlc_start(struct amdgpu_device *adev) { - /* TODO: enable rlc & smu handshake until smu - * and gfxoff feature works as expected */ + /* + * TODO: enable rlc & smu handshake until smu + * and gfxoff feature works as expected + */ if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) gfx_v10_0_rlc_smu_handshake_cntl(adev, false); @@ -5132,7 +5119,7 @@ static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev) { const struct rlc_firmware_header_v2_0 *hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; if (!adev->gfx.rlc_fw) return -EINVAL; @@ -5169,6 +5156,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) gfx_v10_0_init_csb(adev); + gfx_v10_0_update_spm_vmid_internal(adev, 0xf); + if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); } else { @@ -5199,6 +5188,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) gfx_v10_0_init_csb(adev); + gfx_v10_0_update_spm_vmid_internal(adev, 0xf); + adev->gfx.rlc.funcs->start(adev); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { @@ -5207,6 +5198,7 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) return r; } } + return 0; } @@ -5674,11 +5666,10 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
0 : 1); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); - } else { + else WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); - } if (adev->job_hang && !enable) return 0; @@ -5700,7 +5691,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) int r; const struct gfx_firmware_header_v1_0 *pfp_hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; uint32_t tmp; uint32_t usec_timeout = 50000; /* wait for 50ms */ @@ -5778,7 +5769,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) int r; const struct gfx_firmware_header_v1_0 *ce_hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; uint32_t tmp; uint32_t usec_timeout = 50000; /* wait for 50ms */ @@ -5855,7 +5846,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) int r; const struct gfx_firmware_header_v1_0 *me_hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; uint32_t tmp; uint32_t usec_timeout = 50000; /* wait for 50ms */ @@ -6243,7 +6234,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) { const struct gfx_firmware_header_v1_0 *mec_hdr; const __le32 *fw_data; - unsigned i; + unsigned int i; u32 tmp; u32 usec_timeout = 50000; /* Wait for 50 ms */ @@ -6922,8 +6913,10 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) { uint32_t data, pattern = 0xDEADBEEF; - /* check if mmVGT_ESGS_RING_SIZE_UMD - * has been remapped to mmVGT_ESGS_RING_SIZE */ + /* + * check if mmVGT_ESGS_RING_SIZE_UMD + * has been remapped to mmVGT_ESGS_RING_SIZE + */ switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): @@ -6934,12 +6927,10 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern); if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) == pattern) { - WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD , data); + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data); return true; - } else { - WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data); - return false; } + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data); break; case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): @@ -6954,12 +6945,12 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) { WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data); return true; - } else { - WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data); - return false; } + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data); break; } + + return false; } static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) @@ -6969,8 +6960,10 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return; - /* initialize cam_index to 0 - * index will auto-inc after each data writting */ + /* + * Initialize cam_index to 0 + * index will auto-inc after each data writing + */ WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); switch (adev->ip_versions[GC_HWIP][0]) { @@ -7100,6 +7093,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev) { uint32_t data; + data = RREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG); data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; WREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG, data); 
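[Editor's aside] The gfx_v10_0.c hunks in this file (forward declaration above, definition below) split gfx_v10_0_update_spm_vmid() into a thin public wrapper plus an _internal worker: the wrapper keeps the amdgpu_gfx_off_ctrl() disable/enable bracket, while rlc_resume() now programs the SPM VMID (0xf) through the internal variant directly, replacing the old rlc_init()-time call through the update_spm_vmid function pointer. The gfx_v9_0.c hunks further down apply the same split. A minimal sketch of that wrapper/worker pattern follows; my_dev, gfx_off_ctrl() and the update_spm_vmid* names here are invented stand-ins for illustration, not the driver's real API:

/* Sketch only: my_dev and gfx_off_ctrl() are hypothetical stand-ins. */
#include <stdio.h>

struct my_dev {
        int gfxoff_allowed;
        unsigned int spm_vmid;
};

/* Models amdgpu_gfx_off_ctrl(): allow or forbid the GFX block powering off. */
static void gfx_off_ctrl(struct my_dev *dev, int allow)
{
        dev->gfxoff_allowed = allow;
}

/*
 * Worker: does the register write with no gfxoff handling, so it is
 * safe to call from paths (like RLC resume) where GFX is already up.
 */
static void update_spm_vmid_internal(struct my_dev *dev, unsigned int vmid)
{
        dev->spm_vmid = vmid;   /* the driver writes RLC_SPM_MC_CNTL here */
        printf("SPM VMID <- 0x%x (gfxoff %s)\n", vmid,
               dev->gfxoff_allowed ? "allowed" : "held off");
}

/* Public entry point: keeps the gfxoff bracket around the worker. */
static void update_spm_vmid(struct my_dev *dev, unsigned int vmid)
{
        gfx_off_ctrl(dev, 0);
        update_spm_vmid_internal(dev, vmid);
        gfx_off_ctrl(dev, 1);
}

int main(void)
{
        struct my_dev dev = { .gfxoff_allowed = 1 };

        update_spm_vmid_internal(&dev, 0xf);    /* resume path: GFX known up */
        update_spm_vmid(&dev, 2);               /* normal runtime path */
        return 0;
}

The design point, as far as the hunks show, is that callers which already guarantee the hardware is powered (the RLC resume sequence) can skip the redundant gfxoff off/on cycle, while external callers still get the protective bracket.
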
@@ -7216,7 +7210,7 @@ static bool gfx_v10_0_is_idle(void *handle) static int gfx_v10_0_wait_for_idle(void *handle) { - unsigned i; + unsigned int i; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -7471,7 +7465,7 @@ static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev) static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; - unsigned i; + unsigned int i; data = RLC_SAFE_MODE__CMD_MASK; data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); @@ -7900,12 +7894,11 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } -static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, + unsigned int vmid) { u32 reg, data; - amdgpu_gfx_off_ctrl(adev, false); - /* not for *_SOC15 */ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) @@ -7920,6 +7913,13 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid) +{ + amdgpu_gfx_off_ctrl(adev, false); + + gfx_v10_0_update_spm_vmid_internal(adev, vmid); amdgpu_gfx_off_ctrl(adev, true); } @@ -8297,7 +8297,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib, uint32_t flags) { - unsigned vmid = AMDGPU_JOB_GET_VMID(job); + unsigned int vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; if (ib->flags & AMDGPU_IB_FLAG_CE) @@ -8338,7 +8338,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, uint32_t flags) { - unsigned vmid = AMDGPU_JOB_GET_VMID(job); + unsigned int vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); if (ring->is_mes_queue) @@ -8373,7 +8373,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring, } static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, - u64 seq, unsigned flags) + u64 seq, unsigned int flags) { bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; @@ -8429,7 +8429,7 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, } static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { if (ring->is_mes_queue) gfx_v10_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); @@ -8511,9 +8511,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0); } -static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) +static unsigned int gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) { - unsigned ret; + unsigned int ret; amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); @@ -8525,9 +8525,10 @@ static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) return ret; } -static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) +static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset) { - unsigned cur; + unsigned int cur; + BUG_ON(offset > ring->buf_mask); BUG_ON(ring->ring[offset] != 0x55aa55aa); @@ -8750,7 +8751,7 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct 
amdgpu_ring *ring, } static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) + unsigned int vmid) { struct amdgpu_device *adev = ring->adev; uint32_t value = 0; @@ -8859,7 +8860,7 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { switch (type) { @@ -8956,7 +8957,7 @@ static int gfx_v10_0_eop_irq(struct amdgpu_device *adev, static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { switch (state) { @@ -8975,7 +8976,7 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { switch (state) { @@ -9342,7 +9343,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) { - unsigned total_cu = adev->gfx.config.max_cu_per_sh * + unsigned int total_cu = adev->gfx.config.max_cu_per_sh * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; @@ -9423,7 +9424,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; - unsigned disable_masks[4 * 2]; + unsigned int disable_masks[4 * 2]; if (!adev || !cu_info) return -EINVAL; @@ -9540,8 +9541,7 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev) (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT)); } -const struct amdgpu_ip_block_version gfx_v10_0_ip_block = -{ +const struct amdgpu_ip_block_version gfx_v10_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GFX, .major = 10, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 3a7af59e83ca..4d53d6f13c3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -663,7 +663,7 @@ static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) { struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; - reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1); reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 65577eca58f1..458faf657042 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -762,6 +762,8 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if, uint32_t instance_mask); static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); +static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev, + unsigned int vmid); static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) @@ -1632,7 +1634,7 @@ static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) { struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; - 
reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1); reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2); @@ -1667,22 +1669,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return r; } - switch (adev->ip_versions[GC_HWIP][0]) { - case IP_VERSION(9, 2, 2): - case IP_VERSION(9, 1, 0): - gfx_v9_0_init_lbpw(adev); - break; - case IP_VERSION(9, 4, 0): - gfx_v9_4_init_lbpw(adev); - break; - default: - break; - } - - /* init spm vmid with 0xf */ - if (adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); - return 0; } @@ -2942,12 +2928,14 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 2, 2): case IP_VERSION(9, 1, 0): + gfx_v9_0_init_lbpw(adev); if (amdgpu_lbpw == 0) gfx_v9_0_enable_lbpw(adev, false); else gfx_v9_0_enable_lbpw(adev, true); break; case IP_VERSION(9, 4, 0): + gfx_v9_4_init_lbpw(adev); if (amdgpu_lbpw > 0) gfx_v9_0_enable_lbpw(adev, true); else @@ -2957,6 +2945,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) break; } + gfx_v9_0_update_spm_vmid_internal(adev, 0xf); + adev->gfx.rlc.funcs->start(adev); return 0; @@ -4881,12 +4871,11 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } -static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev, + unsigned int vmid) { u32 reg, data; - amdgpu_gfx_off_ctrl(adev, false); - reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); @@ -4900,6 +4889,13 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid) +{ + amdgpu_gfx_off_ctrl(adev, false); + + gfx_v9_0_update_spm_vmid_internal(adev, vmid); amdgpu_gfx_off_ctrl(adev, true); } @@ -5230,6 +5226,9 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring, de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; } + ((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status = + IB_COMPLETION_STATUS_PREEMPTED; + if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9e3b835bdbb2..d8d6807e7b96 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L #define GOLDEN_GB_ADDR_CONFIG 0x2a114042 +#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301 struct amdgpu_gfx_ras gfx_v9_4_3_ras; @@ -195,14 +196,11 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { dev_inst = GET_INST(GC, i); - if (dev_inst >= 2) - WREG32_SOC15(GC, dev_inst, regGRBM_MCM_ADDR, 0x4); + WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG, + GOLDEN_GB_ADDR_CONFIG); /* Golden settings applied by driver for ASIC with rev_id 0 */ if 
(adev->rev_id == 0) { - WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG, - GOLDEN_GB_ADDR_CONFIG); - WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1, REDUCE_FIFO_DEPTH_BY_2, 2); } @@ -624,7 +622,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, int num_xccs_per_xcp) { int ret, i, num_xcc; - u32 tmp = 0; + u32 tmp = 0, regval; if (adev->psp.funcs) { ret = psp_spatial_partition(&adev->psp, @@ -632,23 +630,24 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, num_xccs_per_xcp); if (ret) return ret; - } else { - num_xcc = NUM_XCC(adev->gfx.xcc_mask); + } - for (i = 0; i < num_xcc; i++) { - tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, - num_xccs_per_xcp); - tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, - i % num_xccs_per_xcp); + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + for (i = 0; i < num_xcc; i++) { + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, + num_xccs_per_xcp); + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, + i % num_xccs_per_xcp); + regval = RREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL); + if (regval != tmp) WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp); - } - ret = 0; } adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; - return ret; + return 0; } static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) @@ -900,6 +899,7 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev, int i; uint32_t sh_mem_config; uint32_t sh_mem_bases; + uint32_t data; /* * Configure apertures: @@ -919,6 +919,11 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev, /* CP and shaders */ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config); WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases); + + /* Enable trap for each kfd vmid. 
*/ + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data); } soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); @@ -1037,32 +1042,6 @@ static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data); } -static void gfx_v9_4_3_xcc_program_xcc_id(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t tmp = 0; - int num_xcc; - - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - switch (num_xcc) { - /* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */ - case 1: - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, 0x8); - break; - case 2: - case 4: - case 6: - case 8: - tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID); - tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP)); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp); - - break; - default: - break; - } -} - static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) { uint32_t rlc_setting; @@ -1101,6 +1080,24 @@ static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data); } +static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) +{ + int xcc_id, num_xcc; + struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)]; + reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0); + reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1); + reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2); + reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3); + reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL); + reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX); + reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT); + } +} + static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) { /* init spm vmid with 0xf */ @@ -1736,7 +1733,7 @@ static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring, WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0); WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0); - WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT); WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0); WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0); @@ -1920,9 +1917,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) return r; } - /* set the virtual and physical id based on partition_mode */ - gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id); - r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); if (r) return r; @@ -2181,6 +2175,9 @@ static int gfx_v9_4_3_early_init(void *handle) gfx_v9_4_3_set_gds_init(adev); gfx_v9_4_3_set_rlc_funcs(adev); + /* init 
rlcg reg access ctrl */ + gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev); + return gfx_v9_4_3_init_microcode(adev); } @@ -2197,6 +2194,10 @@ static int gfx_v9_4_3_late_init(void *handle) if (r) return r; + if (adev->gfx.ras && + adev->gfx.ras->enable_watchdog_timer) + adev->gfx.ras->enable_watchdog_timer(adev); + return 0; } @@ -4043,6 +4044,34 @@ static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id); } +static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, + void *ras_error_status, int xcc_id) +{ + uint32_t i; + uint32_t data; + + data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE, + amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0); + + if (amdgpu_watchdog_timer.timeout_fatal_disable && + (amdgpu_watchdog_timer.period < 1 || + amdgpu_watchdog_timer.period > 0x23)) { + dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n"); + amdgpu_watchdog_timer.period = 0x23; + } + data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL, + amdgpu_watchdog_timer.period); + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data); + } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); +} + static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { @@ -4065,6 +4094,11 @@ static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev) amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status); } +static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) +{ + amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer); +} + static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { .name = "gfx_v9_4_3", .early_init = gfx_v9_4_3_early_init, @@ -4393,4 +4427,5 @@ struct amdgpu_gfx_ras gfx_v9_4_3_ras = { .ras_block = { .hw_ops = &gfx_v9_4_3_ras_ops, }, + .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index d94cc1ec7242..cdc290a474a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -103,7 +103,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); if (adev->apu_flags & AMD_APU_IS_RAVEN2) - /* + /* * Raven2 has a HW issue that it is unable to use the * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. 
* So here is the workaround that increase system @@ -248,7 +248,7 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - unsigned num_level, block_size; + unsigned int num_level, block_size; uint32_t tmp; int i; @@ -308,7 +308,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - unsigned i; + unsigned int i; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, @@ -375,6 +375,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 4dabf910334b..dbdee1a0aefe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -140,7 +140,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); if (adev->apu_flags & AMD_APU_IS_RAVEN2) - /* + /* * Raven2 has a HW issue that it is unable to use the * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. * So here is the workaround that increase system @@ -315,7 +315,7 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev, uint32_t xcc_mask) { struct amdgpu_vmhub *hub; - unsigned num_level, block_size; + unsigned int num_level, block_size; uint32_t tmp; int i, j; @@ -402,18 +402,15 @@ static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev, static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev, uint32_t xcc_mask) { - uint32_t tmp_mask; int i; - tmp_mask = xcc_mask; /* * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are * VF copy registers so vbios post doesn't program them, for * SRIOV driver need to program them */ if (amdgpu_sriov_vf(adev)) { - for_each_inst(i, tmp_mask) { - i = ffs(tmp_mask) - 1; + for_each_inst(i, xcc_mask) { WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, adev->gmc.vram_start >> 24); WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index f173a61c6c15..a041c6c970e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -31,7 +31,7 @@ #include "soc15_common.h" -static const char *gfxhub_client_ids[] = { +static const char * const gfxhub_client_ids[] = { "CB/DB", "Reserved", "GE1", @@ -332,7 +332,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - unsigned i; + unsigned int i; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, @@ -393,6 +393,7 @@ static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index d8fc3e8088cd..7708d5ded7b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -34,7 +34,7 @@ #define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP 0x16f8 #define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX 0 -static const char *gfxhub_client_ids[] = { +static const char * const gfxhub_client_ids[] = { "CB/DB", "Reserved", "GE1", @@ -341,7 +341,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - unsigned i; + unsigned int i; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, @@ -582,6 +582,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev) { int i; + adev->gmc.VM_L2_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL); adev->gmc.VM_L2_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2); adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL); @@ -616,6 +617,7 @@ static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev) static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev) { int i; + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, adev->gmc.VM_L2_CNTL); WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, adev->gmc.VM_L2_CNTL2); WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL, adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL); @@ -679,9 +681,8 @@ static void gfxhub_v2_1_halt(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); } - if (!time) { + if (!time) DRM_WARN("failed to wait for GRBM(EA) idle\n"); - } } const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c index c53147f9c9fc..e1c76c070ba9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c @@ -30,7 +30,7 @@ #include "navi10_enum.h" #include "soc15_common.h" -static const char *gfxhub_client_ids[] = { +static const char * const gfxhub_client_ids[] = { "CB/DB", "Reserved", "GE1", @@ -340,7 +340,7 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - unsigned i; + unsigned int i; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c index ae777487d72e..07f369c7a1ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c @@ -33,7 +33,7 @@ #define regGCVM_L2_CNTL4_DEFAULT 0x000000c1 #define regGCVM_L2_CNTL5_DEFAULT 0x00003fe0 -static const char *gfxhub_client_ids[] = { +static const char * const gfxhub_client_ids[] = { "CB/DB", "Reserved", "GE1", @@ -345,7 +345,7 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - unsigned i; + unsigned int i; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 0c8a47989576..fa87a85e1017 
100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -53,16 +53,9 @@ #include "amdgpu_reset.h" -#if 0 -static const struct soc15_reg_golden golden_settings_navi10_hdp[] = -{ - /* TODO add golden setting for hdp */ -}; -#endif - static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { return 0; @@ -70,7 +63,7 @@ static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev, static int gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, unsigned type, + struct amdgpu_irq_src *src, unsigned int type, enum amdgpu_interrupt_state state) { switch (state) { @@ -109,9 +102,11 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ? + AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0); + struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; bool retry_fault = !!(entry->src_data[1] & 0x80); bool write_fault = !!(entry->src_data[1] & 0x20); - struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; struct amdgpu_task_info task_info; uint32_t status = 0; u64 addr; @@ -164,8 +159,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); dev_err(adev->dev, - "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, " - "for process %s pid %d thread %s pid %d)\n", + "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n", entry->vmid_src ? "mmhub" : "gfxhub", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, @@ -244,7 +238,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); u32 tmp; /* Use register 17 for GART */ - const unsigned eng = 17; + const unsigned int eng = 17; unsigned int i; unsigned char hub_ip = 0; @@ -346,7 +340,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && down_read_trylock(&adev->reset_domain->sem)) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - const unsigned eng = 17; + const unsigned int eng = 17; u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; @@ -477,12 +471,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, } static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); - unsigned eng = ring->vm_inv_eng; + unsigned int eng = ring->vm_inv_eng; /* * It may lose gpuvm invalidate acknowldege state across power-gating @@ -524,8 +518,8 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { struct amdgpu_device 
*adev = ring->adev; uint32_t reg; @@ -645,10 +639,10 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); } -static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); - unsigned size; + unsigned int size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; @@ -751,6 +745,7 @@ static int gmc_v10_0_early_init(void *handle) adev->gmc.private_aperture_start = 0x1000000000000000ULL; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -972,7 +967,7 @@ static int gmc_v10_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); if (r) { - printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); + dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); return r; } @@ -1081,7 +1076,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), + (unsigned int)(adev->gmc.gart_size >> 20), (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); return 0; @@ -1255,8 +1250,7 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = { .get_clockgating_state = gmc_v10_0_get_clockgating_state, }; -const struct amdgpu_ip_block_version gmc_v10_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v10_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 10, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index c571f0d95994..e3b76fd28d15 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -50,7 +50,7 @@ static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { return 0; @@ -58,7 +58,7 @@ static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev, static int gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, unsigned type, + struct amdgpu_irq_src *src, unsigned int type, enum amdgpu_interrupt_state state) { switch (state) { @@ -97,7 +97,9 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; + uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ? + AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0); + struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; uint32_t status = 0; u64 addr; @@ -124,8 +126,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); dev_err(adev->dev, - "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, " - "for process %s pid %d thread %s pid %d)\n", + "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n", entry->vmid_src ? 
"mmhub" : "gfxhub", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, @@ -198,7 +199,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); u32 tmp; /* Use register 17 for GART */ - const unsigned eng = 17; + const unsigned int eng = 17; unsigned int i; unsigned char hub_ip = 0; @@ -296,7 +297,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - const unsigned eng = 17; + const unsigned int eng = 17; u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; @@ -309,7 +310,6 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, mutex_lock(&adev->mman.gtt_window_lock); gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0); mutex_unlock(&adev->mman.gtt_window_lock); - return; } /** @@ -379,12 +379,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, } static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); - unsigned eng = ring->vm_inv_eng; + unsigned int eng = ring->vm_inv_eng; /* * It may lose gpuvm invalidate acknowldege state across power-gating @@ -426,8 +426,8 @@ static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { struct amdgpu_device *adev = ring->adev; uint32_t reg; @@ -547,10 +547,10 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); } -static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL); - unsigned size; + unsigned int size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; @@ -651,6 +651,7 @@ static int gmc_v11_0_early_init(void *handle) adev->gmc.private_aperture_start = 0x1000000000000000ULL; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -727,9 +728,9 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev) adev->gmc.visible_vram_size = adev->gmc.real_vram_size; /* set the gart size */ - if (amdgpu_gart_size == -1) { + if (amdgpu_gart_size == -1) adev->gmc.gart_size = 512ULL << 20; - } else + else adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; gmc_v11_0_vram_gtt_location(adev, &adev->gmc); @@ -823,7 +824,7 @@ static int gmc_v11_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); if (r) { - printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); + dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); return r; } @@ 
-926,7 +927,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), + (unsigned int)(adev->gmc.gart_size >> 20), (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index aa754c95a0b3..5b837a65fad2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -120,7 +120,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) case CHIP_HAINAN: chip_name = "hainan"; break; - default: BUG(); + default: + BUG(); } /* this memory configuration requires special firmware */ @@ -178,9 +179,8 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++)); } /* load the MC ucode */ - for (i = 0; i < ucode_size; i++) { + for (i = 0; i < ucode_size; i++) WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++)); - } /* put the engine back into the active state */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); @@ -208,6 +208,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) { u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; amdgpu_gmc_vram_location(adev, mc, base); @@ -228,9 +229,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v6_0_wait_for_idle((void *)adev)) { + if (gmc_v6_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } if (adev->mode_info.num_crtc) { u32 tmp; @@ -256,9 +256,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (gmc_v6_0_wait_for_idle((void *)adev)) { + if (gmc_v6_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) @@ -269,13 +268,13 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) int r; tmp = RREG32(mmMC_ARB_RAMCFG); - if (tmp & (1 << 11)) { + if (tmp & (1 << 11)) chansize = 16; - } else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) { + else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) chansize = 64; - } else { + else chansize = 32; - } + tmp = RREG32(mmMC_SHARED_CHMAP); switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) { case 0: @@ -352,7 +351,7 @@ static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { uint32_t reg; @@ -405,11 +404,11 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, } /** - + * gmc_v8_0_set_prt - set PRT VM fault - + * - + * @adev: amdgpu_device pointer - + * @enable: enable/disable VM fault handling for PRT - +*/ + * gmc_v8_0_set_prt() - set PRT VM fault + * + * @adev: amdgpu_device pointer + * @enable: enable/disable VM fault handling for PRT + */ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) { u32 tmp; @@ -547,7 +546,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), + (unsigned int)(adev->gmc.gart_size >> 20), (unsigned 
long long)table_addr); return 0; } @@ -787,15 +786,16 @@ static int gmc_v6_0_late_init(void *handle) return 0; } -static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32(mmD1VGA_CONTROL); - unsigned size; + unsigned int size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { u32 viewport = RREG32(mmVIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 4); @@ -814,6 +814,7 @@ static int gmc_v6_0_sw_init(void *handle) adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp); } @@ -964,7 +965,7 @@ static bool gmc_v6_0_is_idle(void *handle) static int gmc_v6_0_wait_for_idle(void *handle) { - unsigned i; + unsigned int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { @@ -995,10 +996,8 @@ static int gmc_v6_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v6_0_mc_stop(adev); - if (gmc_v6_0_wait_for_idle(adev)) { + if (gmc_v6_0_wait_for_idle(adev)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1023,7 +1022,7 @@ static int gmc_v6_0_soft_reset(void *handle) static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { u32 tmp; @@ -1141,8 +1140,7 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs; } -const struct amdgpu_ip_block_version gmc_v6_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v6_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 6, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index acd2b407860f..6a6929ac2748 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -58,16 +58,14 @@ MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); -static const u32 golden_settings_iceland_a11[] = -{ +static const u32 golden_settings_iceland_a11[] = { mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff }; -static const u32 iceland_mgcg_cgcg_init[] = -{ +static const u32 iceland_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; @@ -151,7 +149,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) case CHIP_KABINI: case CHIP_MULLINS: return 0; - default: BUG(); + default: + return -EINVAL; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); @@ -237,6 +236,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) { u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; amdgpu_gmc_vram_location(adev, mc, base); @@ -266,9 +266,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v7_0_wait_for_idle((void *)adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } + if 
(adev->mode_info.num_crtc) { /* Lockout access through VGA aperture*/ tmp = RREG32(mmVGA_HDP_CONTROL); @@ -290,9 +290,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (gmc_v7_0_wait_for_idle((void *)adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -324,11 +323,11 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) /* Get VRAM informations */ tmp = RREG32(mmMC_ARB_RAMCFG); - if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { + if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) chansize = 64; - } else { + else chansize = 32; - } + tmp = RREG32(mmMC_SHARED_CHMAP); switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { case 0: @@ -472,7 +471,7 @@ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { uint32_t reg; @@ -488,8 +487,8 @@ static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); } @@ -700,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), + (unsigned int)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); return 0; } @@ -761,7 +760,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) * Print human readable fault information (CIK). 
*/ static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, - u32 addr, u32 mc_client, unsigned pasid) + u32 addr, u32 mc_client, unsigned int pasid) { u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -942,6 +941,7 @@ static int gmc_v7_0_early_init(void *handle) adev->gmc.shared_aperture_end + 1; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -956,15 +956,16 @@ static int gmc_v7_0_late_init(void *handle) return 0; } -static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32(mmD1VGA_CONTROL); - unsigned size; + unsigned int size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { u32 viewport = RREG32(mmVIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 4); @@ -984,6 +985,7 @@ static int gmc_v7_0_sw_init(void *handle) adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); } @@ -1152,7 +1154,7 @@ static bool gmc_v7_0_is_idle(void *handle) static int gmc_v7_0_wait_for_idle(void *handle) { - unsigned i; + unsigned int i; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1190,10 +1192,8 @@ static int gmc_v7_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v7_0_mc_stop(adev); - if (gmc_v7_0_wait_for_idle((void *)adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } - tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1219,7 +1219,7 @@ static int gmc_v7_0_soft_reset(void *handle) static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { u32 tmp; @@ -1383,8 +1383,7 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs; } -const struct amdgpu_ip_block_version gmc_v7_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v7_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 7, .minor = 0, @@ -1392,8 +1391,7 @@ const struct amdgpu_ip_block_version gmc_v7_0_ip_block = .funcs = &gmc_v7_0_ip_funcs, }; -const struct amdgpu_ip_block_version gmc_v7_4_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v7_4_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 7, .minor = 4, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 85dead2a5702..5af235202513 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -64,8 +64,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin"); -static const u32 golden_settings_tonga_a11[] = -{ +static const u32 golden_settings_tonga_a11[] = { mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000, mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028, mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991, @@ -75,34 +74,29 @@ static const u32 golden_settings_tonga_a11[] = mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, }; 
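/*
 * Editor's note on the recurring cleanups in this series: the repeated
 * "const char *" -> "const char * const" conversions (gfxhub_client_ids
 * and similar tables) make the array of pointers itself read-only, not
 * just the strings it points at, so the whole table can be placed in
 * .rodata. The "= {" brace moves on the golden-settings arrays are the
 * matching checkpatch style fix. A minimal sketch, with hypothetical
 * names, of what the extra const buys:
 */
static const char *ids_writable[]      = { "CB/DB", "Reserved" };
static const char * const ids_rodata[] = { "CB/DB", "Reserved" };

static void const_sketch(void)
{
	ids_writable[0] = "GE1";	/* legal: the pointer slots are mutable */
	/* ids_rodata[0] = "GE1"; */	/* error: assignment of read-only location */
}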
-static const u32 tonga_mgcg_cgcg_init[] = -{ +static const u32 tonga_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_fiji_a10[] = -{ +static const u32 golden_settings_fiji_a10[] = { mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, }; -static const u32 fiji_mgcg_cgcg_init[] = -{ +static const u32 fiji_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_polaris11_a11[] = -{ +static const u32 golden_settings_polaris11_a11[] = { mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff }; -static const u32 golden_settings_polaris10_a11[] = -{ +static const u32 golden_settings_polaris10_a11[] = { mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000, mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, @@ -110,19 +104,16 @@ static const u32 golden_settings_polaris10_a11[] = mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff }; -static const u32 cz_mgcg_cgcg_init[] = -{ +static const u32 cz_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 stoney_mgcg_cgcg_init[] = -{ +static const u32 stoney_mgcg_cgcg_init[] = { mmATC_MISC_CG, 0xffffffff, 0x000c0200, mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_stoney_common[] = -{ +static const u32 golden_settings_stoney_common[] = { mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004, mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000 }; @@ -260,7 +251,8 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_STONEY: case CHIP_VEGAM: return 0; - default: BUG(); + default: + return -EINVAL; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); @@ -448,9 +440,9 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v8_0_wait_for_idle((void *)adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } + if (adev->mode_info.num_crtc) { /* Lockout access through VGA aperture*/ tmp = RREG32(mmVGA_HDP_CONTROL); @@ -483,9 +475,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (gmc_v8_0_wait_for_idle((void *)adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -517,11 +508,11 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) /* Get VRAM informations */ tmp = RREG32(mmMC_ARB_RAMCFG); - if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { + if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) chansize = 64; - } else { + else chansize = 32; - } + tmp = RREG32(mmMC_SHARED_CHMAP); switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { case 0: @@ -671,7 +662,7 @@ static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { uint32_t reg; @@ -687,8 +678,8 @@ static 
uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); } @@ -759,11 +750,11 @@ static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev, } /** - * gmc_v8_0_set_prt - set PRT VM fault + * gmc_v8_0_set_prt() - set PRT VM fault * * @adev: amdgpu_device pointer * @enable: enable/disable VM fault handling for PRT -*/ + */ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) { u32 tmp; @@ -940,7 +931,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), + (unsigned int)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); return 0; } @@ -1001,7 +992,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) * Print human readable fault information (VI). */ static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, - u32 addr, u32 mc_client, unsigned pasid) + u32 addr, u32 mc_client, unsigned int pasid) { u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1056,6 +1047,7 @@ static int gmc_v8_0_early_init(void *handle) adev->gmc.shared_aperture_end + 1; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -1070,15 +1062,16 @@ static int gmc_v8_0_late_init(void *handle) return 0; } -static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32(mmD1VGA_CONTROL); - unsigned size; + unsigned int size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { u32 viewport = RREG32(mmVIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 4); @@ -1282,7 +1275,7 @@ static bool gmc_v8_0_is_idle(void *handle) static int gmc_v8_0_wait_for_idle(void *handle) { - unsigned i; + unsigned int i; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1318,13 +1311,15 @@ static bool gmc_v8_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } + if (srbm_soft_reset) { adev->gmc.srbm_soft_reset = srbm_soft_reset; return true; - } else { - adev->gmc.srbm_soft_reset = 0; - return false; } + + adev->gmc.srbm_soft_reset = 0; + + return false; } static int gmc_v8_0_pre_soft_reset(void *handle) @@ -1335,9 +1330,8 @@ static int gmc_v8_0_pre_soft_reset(void *handle) return 0; gmc_v8_0_mc_stop(adev); - if (gmc_v8_0_wait_for_idle(adev)) { + if (gmc_v8_0_wait_for_idle(adev)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } return 0; } @@ -1386,7 +1380,7 @@ static int gmc_v8_0_post_soft_reset(void *handle) static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { u32 tmp; @@ -1747,8 +1741,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) 
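/*
 * Editor's note: gmc_v7_0_init_microcode() and gmc_v8_0_init_microcode()
 * in the hunks above replace "default: BUG();" with "default: return
 * -EINVAL;", so probing an unrecognized ASIC now fails the init path with
 * an error instead of panicking the kernel (gmc_v6_0_init_microcode()
 * keeps its BUG() and is only reformatted). A minimal sketch of the
 * pattern, with hypothetical names:
 */
static int init_microcode_sketch(int chip_id)
{
	const char *chip_name;

	switch (chip_id) {
	case 0:
		chip_name = "bonaire";
		break;
	default:
		/* unknown ASIC: report an error rather than BUG() */
		return -EINVAL;
	}

	return load_mc_firmware_sketch(chip_name);	/* hypothetical loader */
}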
adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs; } -const struct amdgpu_ip_block_version gmc_v8_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v8_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 0, @@ -1756,8 +1749,7 @@ const struct amdgpu_ip_block_version gmc_v8_0_ip_block = .funcs = &gmc_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version gmc_v8_1_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v8_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 1, @@ -1765,8 +1757,7 @@ const struct amdgpu_ip_block_version gmc_v8_1_ip_block = .funcs = &gmc_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version gmc_v8_5_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v8_5_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 5, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 67e669e0141c..880460cd3239 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -81,7 +81,7 @@ #define MAX_MEM_RANGES 8 -static const char *gfxhub_client_ids[] = { +static const char * const gfxhub_client_ids[] = { "CB", "DB", "IA", @@ -332,14 +332,12 @@ static const char *mmhub_client_ids_aldebaran[][2] = { [384+0][1] = "OSS", }; -static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = -{ +static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = { SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa), SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565) }; -static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = -{ +static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = { SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800), SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008) }; @@ -416,13 +414,14 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { u32 bits, i, tmp, reg; /* Devices newer then VEGA10/12 shall have these programming - sequences performed by PSP BL */ + * sequences performed by PSP BL + */ if (adev->asic_type >= CHIP_VEGA20) return 0; @@ -466,7 +465,7 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { struct amdgpu_vmhub *hub; @@ -631,8 +630,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); dev_err(adev->dev, - "[%s] %s page fault (src_id:%u ring:%u vmid:%u " - "pasid:%u, for process %s pid %d thread %s pid %d)\n", + "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n", hub_name, retry_fault ? 
"retry" : "no-retry", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, @@ -816,7 +814,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); - const unsigned eng = 17; + const unsigned int eng = 17; u32 j, inv_req, inv_req2, tmp; struct amdgpu_vmhub *hub; @@ -1033,13 +1031,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, } static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); struct amdgpu_device *adev = ring->adev; struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); - unsigned eng = ring->vm_inv_eng; + unsigned int eng = ring->vm_inv_eng; /* * It may lose gpuvm invalidate acknowldege state across power-gating @@ -1081,8 +1079,8 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { struct amdgpu_device *adev = ring->adev; uint32_t reg; @@ -1373,10 +1371,10 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, } } -static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); - unsigned size; + unsigned int size; /* TODO move to DC so GMC doesn't need to hard-code DCN registers */ @@ -1622,6 +1620,7 @@ static int gmc_v9_0_early_init(void *handle) adev->gmc.private_aperture_start = 0x1000000000000000ULL; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -2150,7 +2149,7 @@ static int gmc_v9_0_sw_init(void *handle) dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 
48:44; r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); if (r) { - printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); + dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); return r; } adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits); @@ -2304,7 +2303,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) return r; DRM_INFO("PCIE GART of %uM enabled.\n", - (unsigned)(adev->gmc.gart_size >> 20)); + (unsigned int)(adev->gmc.gart_size >> 20)); if (adev->gmc.pdb0_bo) DRM_INFO("PDB0 located at 0x%016llX\n", (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); @@ -2490,8 +2489,7 @@ const struct amd_ip_funcs gmc_v9_0_ip_funcs = { .get_clockgating_state = gmc_v9_0_get_clockgating_state, }; -const struct amdgpu_ip_block_version gmc_v9_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v9_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 9, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index b02e1cef78a7..980b24120080 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -535,7 +535,7 @@ static int ih_v6_0_sw_init(void *handle) * use bus address for ih ring by psp bl */ use_bus_addr = (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); if (r) return r; @@ -548,7 +548,7 @@ static int ih_v6_0_sw_init(void *handle) /* initialize ih control register offset */ ih_v6_0_init_register_offset(adev); - r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 36a123e6c8ee..eb06d749876f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -909,10 +909,12 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev, /* prepare MQD backup */ adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->mes.mqd_backup[pipe]) + if (!adev->mes.mqd_backup[pipe]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 1bdaa00c0b46..11fda318064f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -210,9 +210,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; mes_add_queue_pkt.gds_size = input->queue_size; - /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ - mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; - mes_add_queue_pkt.gds_size = input->queue_size; + mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled; return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_add_queue_pkt, sizeof(mes_add_queue_pkt), @@ -1019,10 +1017,12 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev, /* prepare MQD backup */ adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->mes.mqd_backup[pipe]) + if (!adev->mes.mqd_backup[pipe]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c 
b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index eec13cb5bf75..b6a8478dabf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -565,7 +565,7 @@ static int navi10_ih_sw_init(void *handle) use_bus_addr = false; else use_bus_addr = true; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); if (r) return r; @@ -578,7 +578,7 @@ static int navi10_ih_sw_init(void *handle) /* initialize ih control registers offset */ navi10_ih_init_register_offset(adev); - r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index f9cb0d2c89d1..d23827d3d8cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -49,6 +49,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 @@ -93,6 +96,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) case IP_VERSION(13, 0, 5): case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): err = psp_init_toc_microcode(psp, ucode_prefix); if (err) return err; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 5c4d4df9cf94..1cc34efb455b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -237,17 +237,15 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) // emulation only, won't work on real chip // navi10 real chip need to use PSP to load firmware static int sdma_v5_0_init_microcode(struct amdgpu_device *adev) -{ int ret, i; - - if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5))) - return 0; +{ + int ret, i; for (i = 0; i < adev->sdma.num_instances; i++) { ret = amdgpu_sdma_init_microcode(adev, i, false); if (ret) return ret; } - + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 96948a59f8dd..da683afa0222 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -37,65 +37,65 @@ #define SOC15_REG_OFFSET1(ip, inst, reg, offset) \ (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)+(offset)) -#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \ +#define __WREG32_SOC15_RLC__(reg, value, flag, hwip, inst) \ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \ - amdgpu_sriov_wreg(adev, reg, value, flag, hwip) : \ + amdgpu_sriov_wreg(adev, reg, value, flag, hwip, inst) : \ WREG32(reg, value)) -#define __RREG32_SOC15_RLC__(reg, flag, hwip) \ +#define __RREG32_SOC15_RLC__(reg, flag, hwip, inst) \ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? 
\ - amdgpu_sriov_rreg(adev, reg, flag, hwip) : \ + amdgpu_sriov_rreg(adev, reg, flag, hwip, inst) : \ RREG32(reg)) #define WREG32_FIELD15(ip, idx, reg, field, val) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ (__RREG32_SOC15_RLC__( \ adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ - 0, ip##_HWIP) & \ + 0, ip##_HWIP, idx) & \ ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ - 0, ip##_HWIP) + 0, ip##_HWIP, idx) #define WREG32_FIELD15_PREREG(ip, idx, reg_name, field, val) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \ (__RREG32_SOC15_RLC__( \ adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \ - 0, ip##_HWIP) & \ + 0, ip##_HWIP, idx) & \ ~REG_FIELD_MASK(reg_name, field)) | (val) << REG_FIELD_SHIFT(reg_name, field), \ - 0, ip##_HWIP) + 0, ip##_HWIP, idx) #define RREG32_SOC15(ip, inst, reg) \ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ - 0, ip##_HWIP) + 0, ip##_HWIP, inst) -#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP) +#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0) -#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP) +#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ - AMDGPU_REGS_NO_KIQ, ip##_HWIP) + AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)) + \ - (offset), 0, ip##_HWIP) + (offset), 0, ip##_HWIP, inst) #define WREG32_SOC15(ip, inst, reg, value) \ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \ - value, 0, ip##_HWIP) + value, 0, ip##_HWIP, inst) #define WREG32_SOC15_IP(ip, reg, value) \ - __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP) + __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0) #define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ - value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) + value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \ - value, 0, ip##_HWIP) + value, 0, ip##_HWIP, inst) #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \ amdgpu_device_wait_on_rreg(adev, inst, \ @@ -108,16 +108,16 @@ #reg, expected_value, mask) #define WREG32_RLC(reg, value) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP) + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP, 0) -#define WREG32_RLC_EX(prefix, reg, value) \ +#define WREG32_RLC_EX(prefix, reg, value, inst) \ do { \ if (amdgpu_sriov_fullaccess(adev)) { \ uint32_t i = 0; \ uint32_t retries = 50000; \ - uint32_t r0 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \ - uint32_t r1 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \ - uint32_t spare_int = adev->reg_offset[GC_HWIP][0][prefix##RLC_SPARE_INT_BASE_IDX] + 
prefix##RLC_SPARE_INT; \ + uint32_t r0 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \ + uint32_t r1 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \ + uint32_t spare_int = adev->reg_offset[GC_HWIP][inst][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT; \ WREG32(r0, value); \ WREG32(r1, (reg | 0x80000000)); \ WREG32(spare_int, 0x1); \ @@ -136,17 +136,17 @@ /* shadow the registers in the callback function */ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ - __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP, inst) /* for GC only */ #define RREG32_RLC(reg) \ __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP) #define WREG32_RLC_NO_KIQ(reg, value, hwip) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0) #define RREG32_RLC_NO_KIQ(reg, hwip) \ - __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0) #define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \ do { \ @@ -167,32 +167,32 @@ } while (0) #define RREG32_SOC15_RLC(ip, inst, reg) \ - __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP) + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP, inst) #define WREG32_SOC15_RLC(ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ - __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \ + __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP, inst); \ } while (0) #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[GC_HWIP][inst][reg##_BASE_IDX] + reg;\ - WREG32_RLC_EX(prefix, target_reg, value); \ + WREG32_RLC_EX(prefix, target_reg, value, inst); \ } while (0) #define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \ (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ - AMDGPU_REGS_RLC, ip##_HWIP) & \ + AMDGPU_REGS_RLC, ip##_HWIP, idx) & \ ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ - AMDGPU_REGS_RLC, ip##_HWIP) + AMDGPU_REGS_RLC, ip##_HWIP, idx) #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \ - __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP, inst) #define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \ - __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP) + __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP, inst) /* inst equals to ext for some IPs */ #define RREG32_SOC15_EXT(ip, inst, reg, ext) \ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index c975aed2f6c7..18794394c5a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -881,9 +881,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, 0, 0); /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index bb1875f926f1..6fbea38f4d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -912,9 +912,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); ring = &adev->vcn.inst[inst_idx].ring_dec; /* force RBC into idle state */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index c8f63b3c6f69..b76ba21b5a89 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1037,9 +1037,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); ring = &adev->vcn.inst[inst_idx].ring_dec; /* force RBC into idle state */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 259795098173..6089c7deba8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -169,6 +169,12 @@ static int vcn_v4_0_sw_init(void *handle) fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? 
AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; + if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) { + fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; + fw_shared->drm_key_wa.method = + AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; + } + if (amdgpu_sriov_vf(adev)) fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); @@ -993,9 +999,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo if (indirect) - psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); ring = &adev->vcn.inst[inst_idx].ring_enc[0]; @@ -1800,7 +1804,7 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, return 0; } -static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { +static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, @@ -1845,7 +1849,11 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_unified_ring_vm_funcs; + if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) + vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true; + + adev->vcn.inst[i].ring_enc[0].funcs = + (const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs; adev->vcn.inst[i].ring_enc[0].me = i; DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 5d67b8b8a3d6..550ac040b4be 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -778,9 +778,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); ring = &adev->vcn.inst[inst_idx].ring_enc[0]; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 1e83db0c5438..d364c6dd152c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -485,7 +485,7 @@ static int vega10_ih_sw_init(void *handle) if (r) return r; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, true); if (r) return r; @@ -510,7 +510,7 @@ static int vega10_ih_sw_init(void *handle) /* initialize ih control registers offset */ vega10_ih_init_register_offset(adev); - r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index 4d719df376a7..544ee55a22da 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -539,7 +539,7 @@ static int vega20_ih_sw_init(void *handle) (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) use_bus_addr = false; - r = 
amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); if (r) return r; @@ -565,7 +565,7 @@ static int vega20_ih_sw_init(void *handle) /* initialize ih control registers offset */ vega20_ih_init_register_offset(adev); - r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, use_bus_addr); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, use_bus_addr); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 6a27b000a246..aef8e12df61f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1487,7 +1487,8 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep, goto out_unlock; } - if (!kfd_dbg_has_gws_support(dev) && p->debug_trap_enabled) { + if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) || + kfd_dbg_has_cwsr_workaround(dev))) { retval = -EBUSY; goto out_unlock; } @@ -1845,22 +1846,21 @@ static uint32_t get_process_num_bos(struct kfd_process *p) idr_for_each_entry(&pdd->alloc_idr, mem, id) { struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; - if ((uint64_t)kgd_mem->va > pdd->gpuvm_base) + if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base) num_of_bos++; } } return num_of_bos; } -static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags, +static int criu_get_prime_handle(struct kgd_mem *mem, int flags, u32 *shared_fd) { struct dma_buf *dmabuf; int ret; - dmabuf = amdgpu_gem_prime_export(gobj, flags); - if (IS_ERR(dmabuf)) { - ret = PTR_ERR(dmabuf); + ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf); + if (ret) { pr_err("dmabuf export failed for the BO\n"); return ret; } @@ -1918,7 +1918,11 @@ static int criu_checkpoint_bos(struct kfd_process *p, kgd_mem = (struct kgd_mem *)mem; dumper_bo = kgd_mem->bo; - if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base) + /* Skip checkpointing BOs that are used for Trap handler + * code and state. Currently, these BOs have a VA that + * is less GPUVM Base + */ + if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base) continue; bo_bucket = &bo_buckets[bo_index]; @@ -1940,7 +1944,7 @@ static int criu_checkpoint_bos(struct kfd_process *p, } if (bo_bucket->alloc_flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { - ret = criu_get_prime_handle(&dumper_bo->tbo.base, + ret = criu_get_prime_handle(kgd_mem, bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0, &bo_bucket->dmabuf_fd); @@ -2402,7 +2406,7 @@ static int criu_restore_bo(struct kfd_process *p, /* create the dmabuf object and export the bo */ if (bo_bucket->alloc_flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { - ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR, + ret = criu_get_prime_handle(kgd_mem, DRM_RDWR, &bo_bucket->dmabuf_fd); if (ret) return ret; @@ -2755,6 +2759,16 @@ static int runtime_enable(struct kfd_process *p, uint64_t r_debug, if (pdd->qpd.queue_count) return -EEXIST; + + /* + * Setup TTMPs by default. + * Note that this call must remain here for MES ADD QUEUE to + * skip_process_ctx_clear unconditionally as the first call to + * SET_SHADER_DEBUGGER clears any stale process context data + * saved in MES. 
+ */ + if (pdd->dev->kfd->shared_resources.enable_mes) + kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); } p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED; @@ -2848,7 +2862,8 @@ static int runtime_disable(struct kfd_process *p) if (!pdd->dev->kfd->shared_resources.enable_mes) debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd); + kfd_dbg_set_mes_debug_mode(pdd, + !kfd_dbg_has_cwsr_workaround(pdd->dev)); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index fff3ccc04fa9..9ec750666382 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -302,8 +302,7 @@ static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable) if (!q) return 0; - if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || - KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0)) + if (!kfd_dbg_has_cwsr_workaround(q->device)) return 0; if (enable && q->properties.is_user_cu_masked) @@ -345,11 +344,10 @@ unwind: return r; } -int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) +int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en) { uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; uint32_t flags = pdd->process->dbg_flags; - bool sq_trap_en = !!spi_dbg_cntl; if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) return 0; @@ -433,7 +431,7 @@ int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd, if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_map_and_unlock(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); kfd_dbg_clear_dev_watch_id(pdd, watch_id); @@ -446,7 +444,8 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, uint32_t *watch_id, uint32_t watch_mode) { - int r = kfd_dbg_get_dev_watch_id(pdd, watch_id); + int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id); + uint32_t xcc_mask = pdd->dev->xcc_mask; if (r) return r; @@ -460,19 +459,21 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, } amdgpu_gfx_off_ctrl(pdd->dev->adev, false); - pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch( + for_each_inst(xcc_id, xcc_mask) + pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch( pdd->dev->adev, watch_address, watch_address_mask, *watch_id, watch_mode, - pdd->dev->vm_info.last_vmid_kfd); + pdd->dev->vm_info.last_vmid_kfd, + xcc_id); amdgpu_gfx_off_ctrl(pdd->dev->adev, true); if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_map_and_unlock(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); /* HWS is broken so no point in HW rollback but release the watchpoint anyways */ if (r) @@ -514,7 +515,7 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags) if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) { target->dbg_flags = prev_flags; @@ -537,7 +538,7 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags) if (!pdd->dev->kfd->shared_resources.enable_mes) debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd); + kfd_dbg_set_mes_debug_mode(pdd, true); } } @@ -599,7 +600,7 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind if (!pdd->dev->kfd->shared_resources.enable_mes) 
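/*
 * Editor's note: kfd_dbg_set_mes_debug_mode() now takes sq_trap_en as an
 * explicit parameter instead of deriving it from spi_dbg_cntl. Most call
 * sites in this patch pass true; the deactivate and runtime enable/disable
 * paths pass !kfd_dbg_has_cwsr_workaround(dev), keeping the SQ trap
 * disabled on the GFX 11.0.0 - 11.0.3 parts covered by the new CWSR
 * workaround helper.
 */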
debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd); + kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); } kfd_dbg_set_workaround(target, false); @@ -715,7 +716,7 @@ int kfd_dbg_trap_activate(struct kfd_process *target) if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) { target->runtime_info.runtime_state = @@ -751,7 +752,8 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, if (!KFD_IS_SOC15(pdd->dev)) return -ENODEV; - if (!kfd_dbg_has_gws_support(pdd->dev) && pdd->qpd.num_gws) + if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) || + kfd_dbg_has_cwsr_workaround(pdd->dev))) return -EBUSY; } @@ -848,7 +850,7 @@ int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) break; @@ -880,7 +882,7 @@ int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target, if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index a289e59ceb79..fd0ff64d4184 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -76,8 +76,9 @@ int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) { - return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || - KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0); + return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || + KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) || + KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0)); } void debug_event_write_work_handler(struct work_struct *work); @@ -100,6 +101,12 @@ static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev) KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1)); } +static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev) +{ + return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3); +} + static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev) { if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1) @@ -119,5 +126,14 @@ static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev) return true; } -int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd); +int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en); + +static inline bool kfd_dbg_has_ttmps_always_setup(struct kfd_node *dev) +{ + return (KFD_GC_VERSION(dev) < IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 2)) || + (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0) && + (dev->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 70); +} #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0b3dc754e06b..ebc9674d3ce1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -508,6 +508,7 @@ static int kfd_gws_init(struct kfd_node *node) { int ret = 0; struct kfd_dev *kfd = node->kfd; + uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; if 
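
The new kfd_dbg_has_cwsr_workaround() helper above centralizes the GFX 11.0.0 through 11.0.3 range test that several call sites previously open-coded. It works because IP_VERSION() packs major/minor/revision into a single integer, so a version range reduces to ordinary integer comparisons; a standalone sketch:

	#include <stdbool.h>
	#include <stdint.h>

	/* Same packing as the amdgpu IP_VERSION() macro. */
	#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

	static bool has_cwsr_workaround(uint32_t gc_ver)
	{
		/* GFX 11.0.0 through 11.0.3, inclusive */
		return gc_ver >= IP_VERSION(11, 0, 0) &&
		       gc_ver <= IP_VERSION(11, 0, 3);
	}
	/* has_cwsr_workaround(IP_VERSION(11, 0, 2)) -> true
	 * has_cwsr_workaround(IP_VERSION(11, 0, 4)) -> false */
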
(node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; @@ -524,7 +525,10 @@ static int kfd_gws_init(struct kfd_node *node) (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) || (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) - && kfd->mec2_fw_version >= 0x6b)))) + && kfd->mec2_fw_version >= 0x6b) || + (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0) + && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0) + && mes_rev >= 68)))) ret = amdgpu_amdkfd_alloc_gws(node->adev, node->adev->gds.gws_size, &node->gws); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f515cb8f30ca..ccaf85fc12c2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -226,9 +226,10 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, queue_input.paging = false; queue_input.tba_addr = qpd->tba_addr; queue_input.tma_addr = qpd->tma_addr; - queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || - KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3); + queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device); queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled; + queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled || + kfd_dbg_has_ttmps_always_setup(q->device); queue_type = convert_to_mes_queue_type(q->properties.type); if (queue_type < 0) { @@ -238,10 +239,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, } queue_input.queue_type = (uint32_t)queue_type; - if (q->gws) { - queue_input.gws_base = 0; - queue_input.gws_size = qpd->num_gws; - } + queue_input.exclusively_scheduled = q->properties.is_gws; amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input); @@ -251,7 +249,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, q->properties.doorbell_off); pr_err("MES might be in unrecoverable state, issue a GPU reset\n"); kfd_hws_hang(dqm); -} + } return r; } @@ -1621,7 +1619,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm) if (dqm->dev->kfd2kgd->get_iq_wait_times) dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev, - &dqm->wait_times); + &dqm->wait_times, + ffs(dqm->dev->xcc_mask) - 1); return 0; } @@ -1663,6 +1662,26 @@ static int start_cpsch(struct device_queue_manager *dqm) if (!dqm->dev->kfd->shared_resources.enable_mes) execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); + + /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */ + if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu && + (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) { + uint32_t reg_offset = 0; + uint32_t grace_period = 1; + + retval = pm_update_grace_period(&dqm->packet_mgr, + grace_period); + if (retval) + pr_err("Setting grace timeout failed\n"); + else if (dqm->dev->kfd2kgd->build_grace_period_packet_info) + /* Update dqm->wait_times maintained in software */ + dqm->dev->kfd2kgd->build_grace_period_packet_info( + dqm->dev->adev, dqm->wait_times, + grace_period, ®_offset, + &dqm->wait_times, + ffs(dqm->dev->xcc_mask) - 1); + } + dqm_unlock(dqm); return 0; @@ -1806,8 +1825,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, */ q->properties.is_evicted = !!qpd->evicted; q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled && - KFD_GC_VERSION(q->device) >= 
IP_VERSION(11, 0, 0) && - KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3); + kfd_dbg_has_cwsr_workaround(q->device); if (qd) mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 94c0fc2e57b7..83699392c808 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -318,6 +318,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } +static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int err; + struct v10_compute_mqd *m; + u32 doorbell_off; + + m = get_mqd(mqd); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0); + if (err) + pr_debug("Destroy HIQ MQD failed: %d\n", err); + + return err; +} + static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -460,7 +480,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->destroy_mqd = destroy_hiq_mqd; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v10_compute_mqd); mqd->mqd_stride = kfd_mqd_stride; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 31fec5e70d13..2319467d2d95 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -335,6 +335,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } +static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int err; + struct v11_compute_mqd *m; + u32 doorbell_off; + + m = get_mqd(mqd); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0); + if (err) + pr_debug("Destroy HIQ MQD failed: %d\n", err); + + return err; +} + static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -449,7 +469,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->destroy_mqd = destroy_hiq_mqd; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v11_compute_mqd); #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 601bb9f68048..e23d32f35607 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -405,6 +405,25 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } +static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t 
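
The v10/v11 destroy_hiq_mqd() helpers above stop tearing the HIQ down through hqd_destroy() and instead recover the queue's doorbell offset from the MQD's saved CP_HQD_PQ_DOORBELL_CONTROL image, then ask the KIQ to unmap it via amdgpu_amdkfd_unmap_hiq(). A sketch of the field extraction; the real shift constant comes from the generated register headers, the value below is only illustrative:

	#include <stdint.h>

	/* Stand-in for CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT. */
	#define DOORBELL_OFFSET_SHIFT 2

	static uint32_t hiq_doorbell_offset(uint32_t pq_doorbell_control)
	{
		/* The upper bits of the register image hold the doorbell
		 * offset; shifting right yields the offset the KIQ-based
		 * unmap expects. */
		return pq_doorbell_control >> DOORBELL_OFFSET_SHIFT;
	}
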
queue_id) +{ + int err; + struct v9_mqd *m; + u32 doorbell_off; + + m = get_mqd(mqd); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0); + if (err) + pr_debug("Destroy HIQ MQD failed: %d\n", err); + + return err; +} + static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -548,16 +567,19 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, { uint32_t xcc_mask = mm->dev->xcc_mask; int xcc_id, err, inst = 0; - void *xcc_mqd; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + struct v9_mqd *m; + u32 doorbell_off; for_each_inst(xcc_id, xcc_mask) { - xcc_mqd = mqd + hiq_mqd_size * inst; - err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, - type, timeout, pipe_id, - queue_id, xcc_id); + m = get_mqd(mqd + hiq_mqd_size * inst); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", inst); + pr_debug("Destroy HIQ MQD failed for xcc: %d\n", inst); break; } ++inst; @@ -846,7 +868,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, } else { mqd->init_mqd = init_mqd_hiq; mqd->load_mqd = kfd_hiq_load_mqd_kiq; - mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->destroy_mqd = destroy_hiq_mqd; } break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 29a2d0499b67..8fda16e6fee6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -298,7 +298,8 @@ static int pm_set_grace_period_v9(struct packet_manager *pm, pm->dqm->wait_times, grace_period, ®_offset, - ®_data); + ®_data, + 0); if (grace_period == USE_DEFAULT_GRACE_PERIOD) reg_data = pm->dqm->wait_times; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index ba9d69054119..60e6b37b43ba 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -123,7 +123,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, if (!gws && pdd->qpd.num_gws == 0) return -EINVAL; - if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && !dev->kfd->shared_resources.enable_mes) { if (gws) ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info, gws, &mem); @@ -136,7 +136,9 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, } else { /* * Intentionally set GWS to a non-NULL value - * for GFX 9.4.3. + * for devices that do not use GWS for global wave + * synchronization but require the formality + * of setting GWS for cooperative groups. */ pqn->q->gws = gws ? 
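
destroy_hiq_mqd_v9_4_3() above applies the same doorbell-based unmap per XCC: a single allocation holds one MQD image per instance at a fixed stride, and each instance is unmapped using the offset saved in its own register image. A self-contained sketch of the walk (the stub struct and shift are stand-ins for the real v9 MQD layout):

	#include <stdint.h>

	struct mqd_stub {
		uint32_t cp_hqd_pq_doorbell_control;
		/* ... rest of the MQD image ... */
	};

	static void unmap_each_instance(void *mqd, uint64_t stride, int n_inst)
	{
		int inst;

		for (inst = 0; inst < n_inst; inst++) {
			struct mqd_stub *m =
				(void *)((char *)mqd + stride * inst);
			uint32_t doorbell_off =
				m->cp_hqd_pq_doorbell_control >> 2;

			/* hand doorbell_off to the per-XCC KIQ unmap here */
			(void)doorbell_off;
		}
	}
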
ERR_PTR(-ENOMEM) : NULL; } @@ -173,7 +175,8 @@ void pqm_uninit(struct process_queue_manager *pqm) list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { if (pqn->q && pqn->q->gws && - KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3)) + KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) && + !pqn->q->device->kfd->shared_resources.enable_mes) amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info, pqn->q->gws); kfd_procfs_del_queue(pqn->q); @@ -455,7 +458,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) } if (pqn->q->gws) { - if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) && + !dev->kfd->shared_resources.enable_mes) amdgpu_amdkfd_remove_gws_from_process( pqm->process->kgd_process_info, pqn->q->gws); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index aa32b06eea77..01c7de2d6e19 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -48,6 +48,8 @@ * page table is updated. */ #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC) +#define dynamic_svm_range_dump(svms) \ + _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms) /* Giant svm range split into smaller ranges based on this, it is decided using * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to @@ -1521,6 +1523,8 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) struct kfd_process_device *pdd; pdd = kfd_process_device_from_gpuidx(p, gpuidx); + if (!pdd) + return NULL; return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev); } @@ -1595,12 +1599,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { - if (!prange->mapped_to_gpu) { + bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); + if (!prange->mapped_to_gpu || + bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { r = 0; goto free_ctx; } - - bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); } if (prange->actual_loc && !prange->ttm_res) { @@ -3560,7 +3564,7 @@ out_unlock_range: break; } - svm_range_debug_dump(svms); + dynamic_svm_range_dump(svms); mutex_unlock(&svms->lock); mmap_read_unlock(mm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 61fc62f3e003..3b0749390388 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -38,6 +38,7 @@ #include "kfd_device_queue_manager.h" #include "kfd_iommu.h" #include "kfd_svm.h" +#include "kfd_debug.h" #include "amdgpu_amdkfd.h" #include "amdgpu_ras.h" #include "amdgpu.h" @@ -1931,23 +1932,27 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED | HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { - dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 | - HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + if (kfd_dbg_has_ttmps_always_setup(dev->gpu)) + dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 4, 2)) + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { + if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3)) dev->node_props.debug_prop |= - HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; + HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 | + HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3; 
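
On the kfd_svm.c hunk above: dynamic_svm_range_dump() wraps the expensive svm_range_debug_dump() call in _dynamic_func_call_no_desc(), so the dump costs essentially one branch unless the "svm_range_dump" site is enabled at runtime. A minimal illustration of the gating idea (not the kernel macro, which patches the branch in place):

	#include <stdbool.h>

	static bool svm_dump_enabled;	/* toggled at runtime */

	/* Call func(...) only when the toggle is on; the common path
	 * pays a single predictable branch. */
	#define dump_if_enabled(func, ...)		\
		do {					\
			if (svm_dump_enabled)		\
				func(__VA_ARGS__);	\
		} while (0)

With dynamic debug compiled in, the real call site is switched on through /sys/kernel/debug/dynamic_debug/control, matching on the "svm_range_dump" identifier; the exact query syntax is left to the dynamic-debug documentation.
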
else + dev->node_props.debug_prop |= + HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 | + HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(11, 0, 0)) - dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; - else + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index cba2cd5ed9d1..dea32a9e5506 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -32,9 +32,12 @@ #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32 #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 6 +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 7 #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 7 #define HSA_DBG_WATCH_ADDR_MASK_HI_BIT \ (29 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT) +#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3 \ + (30 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT) struct kfd_node_properties { uint64_t hive_id; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index bf0a655d009e..901d1961b739 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -5,7 +5,7 @@ menu "Display Engine Configuration" config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y - depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64 + depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64 select SND_HDA_COMPONENT if SND_HDA_CORE # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG)) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ff0a217b9d56..d96b6eda3320 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -245,51 +245,52 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, */ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) { + struct amdgpu_crtc *acrtc = NULL; + if (crtc >= adev->mode_info.num_crtc) return 0; - else { - struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; - if (acrtc->dm_irq_params.stream == NULL) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", - crtc); - return 0; - } + acrtc = adev->mode_info.crtcs[crtc]; - return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); + if (!acrtc->dm_irq_params.stream) { + DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; } + + return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); } static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position) { u32 v_blank_start, v_blank_end, h_position, v_position; + struct amdgpu_crtc *acrtc = NULL; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; - else { - struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; - if (acrtc->dm_irq_params.stream == NULL) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", - crtc); - return 0; - } - - /* - * TODO rework base driver to use values directly. 
- * for now parse it back into reg-format - */ - dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, - &v_blank_start, - &v_blank_end, - &h_position, - &v_position); + acrtc = adev->mode_info.crtcs[crtc]; - *position = v_position | (h_position << 16); - *vbl = v_blank_start | (v_blank_end << 16); + if (!acrtc->dm_irq_params.stream) { + DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; } + /* + * TODO rework base driver to use values directly. + * for now parse it back into reg-format + */ + dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, + &v_blank_start, + &v_blank_end, + &h_position, + &v_position); + + *position = v_position | (h_position << 16); + *vbl = v_blank_start | (v_blank_end << 16); + return 0; } @@ -424,12 +425,12 @@ static void dm_pflip_high_irq(void *interrupt_params) spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ - DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", - amdgpu_crtc->pflip_status, - AMDGPU_FLIP_SUBMITTED, - amdgpu_crtc->crtc_id, - amdgpu_crtc); + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED, + amdgpu_crtc->crtc_id, + amdgpu_crtc); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return; } @@ -883,7 +884,7 @@ static int dm_set_powergating_state(void *handle, } /* Prototypes of private functions */ -static int dm_early_init(void* handle); +static int dm_early_init(void *handle); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) @@ -1282,7 +1283,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; - pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ; + pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24; pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; @@ -1347,6 +1348,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) if (amdgpu_in_reset(adev)) goto skip; + if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || + offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT); + spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); + offload_work->offload_wq->is_handling_mst_msg_rdy_event = false; + spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + goto skip; + } + mutex_lock(&adev->dm.dc_lock); if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) { dc_link_dp_handle_automated_test(dc_link); @@ -1365,8 +1375,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) DP_TEST_RESPONSE, &test_response.raw, sizeof(test_response)); - } - else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && + } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && dc_link_check_link_loss_status(dc_link, &offload_work->data) && dc_link_dp_allow_hpd_rx_irq(dc_link)) { /* offload_work->data is from handle_hpd_rx_irq-> @@ -1554,7 +1563,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) mutex_init(&adev->dm.dc_lock); 
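
The restructured dm_crtc_get_scanoutpos() above keeps the same register-style encoding on its way out: two 16-bit fields share one u32 (position = v | h << 16, vbl = start | end << 16). A tiny round-trip sketch of that packing:

	#include <stdint.h>

	static uint32_t pack16(uint16_t lo, uint16_t hi)
	{
		return (uint32_t)lo | ((uint32_t)hi << 16);
	}

	static void unpack16(uint32_t v, uint16_t *lo, uint16_t *hi)
	{
		*lo = v & 0xffff;
		*hi = v >> 16;
	}
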
mutex_init(&adev->dm.audio_lock); - if(amdgpu_dm_irq_init(adev)) { + if (amdgpu_dm_irq_init(adev)) { DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); goto error; } @@ -1696,9 +1705,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) adev->dm.dc->debug.disable_dsc = true; - } if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) adev->dm.dc->debug.disable_clock_gate = true; @@ -1942,8 +1950,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) mutex_destroy(&adev->dm.audio_lock); mutex_destroy(&adev->dm.dc_lock); mutex_destroy(&adev->dm.dpia_aux_lock); - - return; } static int load_dmcu_fw(struct amdgpu_device *adev) @@ -1952,7 +1958,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) int r; const struct dmcu_firmware_header_v1_0 *hdr; - switch(adev->asic_type) { + switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: @@ -2709,7 +2715,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, struct dc_scaling_info scaling_infos[MAX_SURFACES]; struct dc_flip_addrs flip_addrs[MAX_SURFACES]; struct dc_stream_update stream_update; - } * bundle; + } *bundle; int k, m; bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); @@ -2739,8 +2745,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, cleanup: kfree(bundle); - - return; } static int dm_resume(void *handle) @@ -2954,8 +2958,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .set_powergating_state = dm_set_powergating_state, }; -const struct amdgpu_ip_block_version dm_ip_block = -{ +const struct amdgpu_ip_block_version dm_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 1, .minor = 0, @@ -3000,9 +3003,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - if (caps->ext_caps->bits.oled == 1 /*|| - caps->ext_caps->bits.sdr_aux_backlight_control == 1 || - caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) + if (caps->ext_caps->bits.oled == 1 + /* + * || + * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || + * caps->ext_caps->bits.hdr_aux_backlight_control == 1 + */) caps->aux_support = true; if (amdgpu_backlight == 0) @@ -3236,86 +3242,6 @@ static void handle_hpd_irq(void *param) } -static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) -{ - u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; - u8 dret; - bool new_irq_handled = false; - int dpcd_addr; - int dpcd_bytes_to_read; - - const int max_process_count = 30; - int process_count = 0; - - const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); - - if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { - dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; - /* DPCD 0x200 - 0x201 for downstream IRQ */ - dpcd_addr = DP_SINK_COUNT; - } else { - dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; - /* DPCD 0x2002 - 0x2005 for downstream IRQ */ - dpcd_addr = DP_SINK_COUNT_ESI; - } - - dret = drm_dp_dpcd_read( - &aconnector->dm_dp_aux.aux, - dpcd_addr, - esi, - dpcd_bytes_to_read); - - while (dret == dpcd_bytes_to_read && - process_count < max_process_count) { - u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; - u8 retry; - dret = 0; - - process_count++; - - DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); 
- /* handle HPD short pulse irq */ - if (aconnector->mst_mgr.mst_state) - drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, - esi, - ack, - &new_irq_handled); - - if (new_irq_handled) { - /* ACK at DPCD to notify down stream */ - for (retry = 0; retry < 3; retry++) { - ssize_t wret; - - wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, - dpcd_addr + 1, - ack[1]); - if (wret == 1) - break; - } - - if (retry == 3) { - DRM_ERROR("Failed to ack MST event.\n"); - return; - } - - drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); - /* check if there is new irq to be handled */ - dret = drm_dp_dpcd_read( - &aconnector->dm_dp_aux.aux, - dpcd_addr, - esi, - dpcd_bytes_to_read); - - new_irq_handled = false; - } else { - break; - } - } - - if (process_count == max_process_count) - DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); -} - static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, union hpd_irq_data hpd_irq_data) { @@ -3377,7 +3303,23 @@ static void handle_hpd_rx_irq(void *param) if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { - dm_handle_mst_sideband_msg(aconnector); + bool skip = false; + + /* + * DOWN_REP_MSG_RDY is also handled by polling method + * mgr->cbs->poll_hpd_irq() + */ + spin_lock(&offload_wq->offload_lock); + skip = offload_wq->is_handling_mst_msg_rdy_event; + + if (!skip) + offload_wq->is_handling_mst_msg_rdy_event = true; + + spin_unlock(&offload_wq->offload_lock); + + if (!skip) + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + goto out; } @@ -3468,7 +3410,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev) aconnector = to_amdgpu_dm_connector(connector); dc_link = aconnector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_link->irq_source_hpd; @@ -3477,7 +3419,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev) (void *) aconnector); } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { /* Also register for DP short pulse (hpd_rx). */ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; @@ -3486,11 +3428,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev) amdgpu_dm_irq_register_interrupt(adev, &int_params, handle_hpd_rx_irq, (void *) aconnector); - - if (adev->dm.hpd_rx_offload_wq) - adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector = - aconnector; } + + if (adev->dm.hpd_rx_offload_wq) + adev->dm.hpd_rx_offload_wq[connector->index].aconnector = + aconnector; } } @@ -3503,7 +3445,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) struct dc_interrupt_params int_params = {0}; int r; int i; - unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; + unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -3517,11 +3459,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC - * for acknowledging and handling. */ + * for acknowledging and handling. 
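
The handle_hpd_rx_irq() change above replaces the removed in-IRQ MST sideband loop with deferral to the HPD-RX offload queue, using is_handling_mst_msg_rdy_event under offload_lock so only one worker at a time services a message-ready event. A generic sketch of that claim-then-schedule pattern:

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct offload_wq {
		spinlock_t lock;
		bool busy;		/* a worker already owns the event */
		struct work_struct work;
	};

	/* Returns true when this caller won the right to schedule. */
	static bool offload_claim_and_schedule(struct offload_wq *wq)
	{
		bool claimed = false;

		spin_lock(&wq->lock);
		if (!wq->busy) {
			wq->busy = true;
			claimed = true;
		}
		spin_unlock(&wq->lock);

		if (claimed)
			schedule_work(&wq->work);
		return claimed;
	}
	/* The worker clears ->busy under the same lock when it finishes. */
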
+ */ /* Use VBLANK interrupt */ for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq); + r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); if (r) { DRM_ERROR("Failed to add crtc irq id!\n"); return r; @@ -3529,7 +3472,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = - dc_interrupt_to_irq_source(dc, i+1 , 0); + dc_interrupt_to_irq_source(dc, i + 1, 0); c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; @@ -3585,7 +3528,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) struct dc_interrupt_params int_params = {0}; int r; int i; - unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; + unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; if (adev->family >= AMDGPU_FAMILY_AI) client_id = SOC15_IH_CLIENTID_DCE; @@ -3602,7 +3545,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC - * for acknowledging and handling. */ + * for acknowledging and handling. + */ /* Use VBLANK interrupt */ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { @@ -4049,7 +3993,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, } static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, - unsigned *min, unsigned *max) + unsigned int *min, unsigned int *max) { if (!caps) return 0; @@ -4069,7 +4013,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, uint32_t brightness) { - unsigned min, max; + unsigned int min, max; if (!get_brightness_range(caps, &min, &max)) return brightness; @@ -4082,7 +4026,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, uint32_t brightness) { - unsigned min, max; + unsigned int min, max; if (!get_brightness_range(caps, &min, &max)) return brightness; @@ -4148,6 +4092,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, int bl_idx) { + int ret; struct amdgpu_dm_backlight_caps caps; struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -4162,13 +4107,14 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, if (!rc) return dm->brightness[bl_idx]; return convert_brightness_to_user(&caps, avg); - } else { - int ret = dc_link_get_backlight_level(link); - - if (ret == DC_ERROR_UNEXPECTED) - return dm->brightness[bl_idx]; - return convert_brightness_to_user(&caps, ret); } + + ret = dc_link_get_backlight_level(link); + + if (ret == DC_ERROR_UNEXPECTED) + return dm->brightness[bl_idx]; + + return convert_brightness_to_user(&caps, ret); } static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) @@ -4562,7 +4508,6 @@ fail: static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { drm_atomic_private_obj_fini(&dm->atomic_obj); - return; } /****************************************************************************** @@ -5394,6 +5339,7 @@ static bool adjust_colour_depth_from_display_info( { enum 
dc_color_depth depth = timing_out->display_color_depth; int normalized_clk; + do { normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ @@ -5609,6 +5555,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = NULL; + sink_init_data.link = aconnector->dc_link; sink_init_data.sink_signal = aconnector->dc_link->connector_signal; @@ -5732,7 +5679,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, return &aconnector->freesync_vid_base; /* Find the preferred mode */ - list_for_each_entry (m, list_head, head) { + list_for_each_entry(m, list_head, head) { if (m->type & DRM_MODE_TYPE_PREFERRED) { m_pref = m; break; @@ -5756,7 +5703,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, * For some monitors, preferred mode is not the mode with highest * supported refresh rate. */ - list_for_each_entry (m, list_head, head) { + list_for_each_entry(m, list_head, head) { current_refresh = drm_mode_vrefresh(m); if (m->hdisplay == m_pref->hdisplay && @@ -5849,6 +5796,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, edp_min_bpp_x16, edp_max_bpp_x16, dsc_caps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &bw_range)) { if (bw_range.max_kbps < link_bw_in_kbps) { @@ -5857,6 +5805,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, &dsc_options, 0, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; @@ -5871,6 +5820,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, &dsc_options, link_bw_in_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; @@ -5914,12 +5864,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, &dsc_options, link_bandwidth_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { - timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link)); max_supported_bw_in_kbps = link_bandwidth_kbps; dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; @@ -5931,6 +5883,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, &dsc_options, dsc_max_supported_bw_in_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", @@ -6028,7 +5981,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, * This may not be an error, the use case is when we have no * usermode calls to reset and set mode upon hotplug. In this * case, we call set mode ourselves to restore the previous mode - * and the modelist may not be filled in in time. + * and the modelist may not be filled in time. 
*/ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { @@ -6051,9 +6004,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, drm_mode_set_crtcinfo(&mode, 0); /* - * If scaling is enabled and refresh rate didn't change - * we copy the vic and polarities of the old timings - */ + * If scaling is enabled and refresh rate didn't change + * we copy the vic and polarities of the old timings + */ if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode( stream, &mode, &aconnector->base, con_state, NULL, @@ -6817,6 +6770,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; + is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && aconnector->force_yuv420_output; color_depth = convert_color_depth_from_display_info(connector, @@ -7135,7 +7089,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, { struct drm_display_mode *m; - list_for_each_entry (m, &aconnector->base.probed_modes, head) { + list_for_each_entry(m, &aconnector->base.probed_modes, head) { if (drm_mode_equal(m, mode)) return true; } @@ -7295,6 +7249,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info)); mutex_init(&aconnector->hpd_lock); + mutex_init(&aconnector->handle_mst_msg_ready); /* * configure support HPD hot plug connector_>polled default value is 0 @@ -7454,7 +7409,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, link->priv = aconnector; - DRM_DEBUG_DRIVER("%s()\n", __func__); i2c = create_i2c(link->ddc, link->link_index, &res); if (!i2c) { @@ -7982,7 +7936,6 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb) } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - struct dc_state *dc_state, struct drm_device *dev, struct amdgpu_display_manager *dm, struct drm_crtc *pcrtc, @@ -8125,7 +8078,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * Only allow immediate flips for fast updates that don't * change memory domain, FB pitch, DCC state, rotation or * mirroring. + * + * dm_crtc_helper_atomic_check() only accepts async flips with + * fast updates. */ + if (crtc->state->async_flip && + acrtc_state->update_type != UPDATE_TYPE_FAST) + drm_warn_once(state->dev, + "[PLANE:%d:%s] async flip with non-fast update\n", + plane->base.id, plane->name); bundle->flip_addrs[planes_count].flip_immediate = crtc->state->async_flip && acrtc_state->update_type == UPDATE_TYPE_FAST && @@ -8168,8 +8129,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * DRI3/Present extension with defined target_msc. */ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); - } - else { + } else { /* For variable refresh rate mode only: * Get vblank of last completed flip to avoid > 1 vrr * flips per video frame by use of throttling, but allow @@ -8455,55 +8415,20 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } -/** - * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. - * @state: The atomic state to commit - * - * This will tell DC to commit the constructed DC state from atomic_check, - * programming the hardware. 
Any failures here implies a hardware failure, since - * atomic check should have filtered anything non-kosher. - */ -static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, + struct dc_state *dc_state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; - struct dm_atomic_state *dm_state; - struct dc_state *dc_state = NULL, *dc_state_temp = NULL; - u32 i, j; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; - unsigned long flags; - bool wait_for_vblank = true; - struct drm_connector *connector; - struct drm_connector_state *old_con_state, *new_con_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; - int crtc_disable_count = 0; bool mode_set_reset_required = false; - int r; - - trace_amdgpu_dm_atomic_commit_tail_begin(state); - - r = drm_atomic_helper_wait_for_fences(dev, state, false); - if (unlikely(r)) - DRM_ERROR("Waiting for fences timed out!"); - - drm_atomic_helper_update_legacy_modeset_state(dev, state); - drm_dp_mst_atomic_wait_for_dependencies(state); - - dm_state = dm_atomic_get_new_state(state); - if (dm_state && dm_state->context) { - dc_state = dm_state->context; - } else { - /* No state changes, retain current state. */ - dc_state_temp = dc_create_state(dm->dc); - ASSERT(dc_state_temp); - dc_state = dc_state_temp; - dc_resource_state_copy_construct_current(dm->dc, dc_state); - } + u32 i; - for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -8526,9 +8451,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); drm_dbg_state(state->dev, - "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " - "planes_changed:%d, mode_changed:%d,active_changed:%d," - "connectors_changed:%d\n", + "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", acrtc->crtc_id, new_crtc_state->enable, new_crtc_state->active, @@ -8601,24 +8524,22 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } } /* for_each_crtc_in_state() */ - if (dc_state) { - /* if there mode set or reset, disable eDP PSR */ - if (mode_set_reset_required) { - if (dm->vblank_control_workqueue) - flush_workqueue(dm->vblank_control_workqueue); + /* if there mode set or reset, disable eDP PSR */ + if (mode_set_reset_required) { + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); - amdgpu_dm_psr_disable_all(dm); - } + amdgpu_dm_psr_disable_all(dm); + } - dm_enable_per_frame_crtc_master_sync(dc_state); - mutex_lock(&dm->dc_lock); - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + dm_enable_per_frame_crtc_master_sync(dc_state); + mutex_lock(&dm->dc_lock); + WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); - /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) - dc_allow_idle_optimizations(dm->dc, true); - mutex_unlock(&dm->dc_lock); - } + /* Allow idle optimization when vblank count is 0 for display off */ + if (dm->active_vblank_irq_count == 0) + 
dc_allow_idle_optimizations(dm->dc, true); + mutex_unlock(&dm->dc_lock); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); @@ -8638,6 +8559,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->otg_inst = status->primary_otg_inst; } } +} + +/** + * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. + * @state: The atomic state to commit + * + * This will tell DC to commit the constructed DC state from atomic_check, + * programming the hardware. Any failures here implies a hardware failure, since + * atomic check should have filtered anything non-kosher. + */ +static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct dm_atomic_state *dm_state; + struct dc_state *dc_state = NULL; + u32 i, j; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + unsigned long flags; + bool wait_for_vblank = true; + struct drm_connector *connector; + struct drm_connector_state *old_con_state, *new_con_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int crtc_disable_count = 0; + + trace_amdgpu_dm_atomic_commit_tail_begin(state); + + drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_dp_mst_atomic_wait_for_dependencies(state); + + dm_state = dm_atomic_get_new_state(state); + if (dm_state && dm_state->context) { + dc_state = dm_state->context; + amdgpu_dm_commit_streams(state, dc_state); + } + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); @@ -8760,13 +8719,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct dc_surface_update dummy_updates[MAX_SURFACES]; + struct dc_surface_update *dummy_updates; struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; bool abm_changed, hdr_changed, scaling_changed; - memset(&dummy_updates, 0, sizeof(dummy_updates)); memset(&stream_update, 0, sizeof(stream_update)); if (acrtc) { @@ -8825,6 +8783,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * Here we create an empty update on each plane. * To fix this, DC should permit updating only stream properties. 
*/ + dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; @@ -8836,6 +8795,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state->stream, &stream_update); mutex_unlock(&dm->dc_lock); + kfree(dummy_updates); } /** @@ -8914,8 +8874,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) - amdgpu_dm_commit_planes(state, dc_state, dev, - dm, crtc, wait_for_vblank); + amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); } /* Update audio instances for each connector. */ @@ -8970,9 +8929,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); - - if (dc_state_temp) - dc_release_state(dc_state_temp); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -9104,8 +9060,8 @@ static int do_aquire_global_lock(struct drm_device *dev, &commit->flip_done, 10*HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " - "timed out\n", crtc->base.id, crtc->name); + DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", + crtc->base.id, crtc->name); drm_crtc_commit_put(commit); } @@ -9190,7 +9146,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, return false; } -static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { +static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) +{ u64 num, den, res; struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; @@ -9312,9 +9269,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, goto skip_modeset; drm_dbg_state(state->dev, - "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " - "planes_changed:%d, mode_changed:%d,active_changed:%d," - "connectors_changed:%d\n", + "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", acrtc->crtc_id, new_crtc_state->enable, new_crtc_state->active, @@ -9343,8 +9298,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, old_crtc_state)) { new_crtc_state->mode_changed = false; DRM_DEBUG_DRIVER( - "Mode change not required for front porch change, " - "setting mode_changed to %d", + "Mode change not required for front porch change, setting mode_changed to %d", new_crtc_state->mode_changed); set_freesync_fixed_config(dm_new_crtc_state); @@ -9356,9 +9310,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct drm_display_mode *high_mode; high_mode = get_highest_refresh_rate_mode(aconnector, false); - if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { + if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) set_freesync_fixed_config(dm_new_crtc_state); - } } ret = dm_atomic_get_state(state, &dm_state); @@ -9526,6 +9479,7 @@ static bool should_reset_plane(struct drm_atomic_state *state, */ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { struct amdgpu_framebuffer *old_afb, *new_afb; + if (other->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -9624,11 +9578,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, } /* Core DRM takes care of checking FB modifiers, so we only need to - * check tiling flags when the FB 
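
On the dummy_updates change just above: an array of MAX_SURFACES dc_surface_update elements is too large to keep on the kernel stack, so the commit path now allocates it at runtime, using GFP_ATOMIC as the diff does (presumably because the surrounding context must not sleep), and frees it after the stream update. Equivalent shape, written with kcalloc for the zeroed-array idiom:

	struct dc_surface_update *updates;

	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_ATOMIC);
	if (!updates)
		return;		/* allocation failure must be handled */

	/* ... point each updates[j].surface at a plane state and call
	 * dc_update_planes_and_stream(), then: */
	kfree(updates);
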
doesn't have a modifier. */ + * check tiling flags when the FB doesn't have a modifier. + */ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { if (adev->family < AMDGPU_FAMILY_AI) { linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && - AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && + AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; } else { linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; @@ -9762,8 +9717,8 @@ static int dm_update_plane_state(struct dc *dc, if (plane->type == DRM_PLANE_TYPE_OVERLAY) { if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) return -EINVAL; - else - *is_top_most_overlay = false; + + *is_top_most_overlay = false; } DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", @@ -9850,12 +9805,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, /* On DCE and DCN there is no dedicated hardware cursor plane. We get a * cursor per pipe but it's going to inherit the scaling and * positioning from the underlying pipe. Check the cursor plane's - * blending properties match the underlying planes'. */ + * blending properties match the underlying planes'. + */ new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); - if (!new_cursor_state || !new_cursor_state->fb) { + if (!new_cursor_state || !new_cursor_state->fb) return 0; - } dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; @@ -9900,6 +9855,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { if (!conn_state->crtc) conn_state = old_conn_state; @@ -10334,7 +10290,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } /* Store the overall update type for use later in atomic check. 
*/ - for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -10356,7 +10312,7 @@ fail: else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); else - DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); + DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret); trace_amdgpu_dm_atomic_check_finish(state, ret); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 4561f55afa99..9fb5bb3a75a7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -195,6 +195,11 @@ struct hpd_rx_irq_offload_work_queue { */ bool is_handling_link_loss; /** + * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message + * ready event when we're already handling mst message ready event + */ + bool is_handling_mst_msg_rdy_event; + /** * @aconnector: The aconnector that this work queue is attached to */ struct amdgpu_dm_connector *aconnector; @@ -638,6 +643,8 @@ struct amdgpu_dm_connector { struct drm_dp_mst_port *mst_output_port; struct amdgpu_dm_connector *mst_root; struct drm_dp_aux *dsc_aux; + struct mutex handle_mst_msg_ready; + /* TODO see if we can merge with ddc_bus or make a dm_connector */ struct amdgpu_i2c_adapter *i2c; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 0802f8e8fac5..52ecfa746b54 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -123,9 +123,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work); crtc = secure_display_ctx->crtc; - if (!crtc) { + if (!crtc) return; - } psp = &drm_to_adev(crtc->dev)->psp; @@ -151,9 +150,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); if (!ret) { - if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { + if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); - } } mutex_unlock(&psp->securedisplay_context.mutex); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 440fc0869a34..30d4c6fd95f5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + /* + * Only allow async flips for fast updates that don't change the FB + * pitch, the DCC state, rotation, etc. 
+ */ + if (crtc_state->async_flip && + dm_crtc_state->update_type != UPDATE_TYPE_FAST) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index d63ee636483b..7c21e21bcc51 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1075,24 +1075,24 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) switch (dm_crtc_state->stream->output_color_space) { case COLOR_SPACE_SRGB: - seq_printf(m, "sRGB"); + seq_puts(m, "sRGB"); break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: - seq_printf(m, "BT601_YCC"); + seq_puts(m, "BT601_YCC"); break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: - seq_printf(m, "BT709_YCC"); + seq_puts(m, "BT709_YCC"); break; case COLOR_SPACE_ADOBERGB: - seq_printf(m, "opRGB"); + seq_puts(m, "opRGB"); break; case COLOR_SPACE_2020_RGB_FULLRANGE: - seq_printf(m, "BT2020_RGB"); + seq_puts(m, "BT2020_RGB"); break; case COLOR_SPACE_2020_YCBCR: - seq_printf(m, "BT2020_YCC"); + seq_puts(m, "BT2020_YCC"); break; default: goto unlock; @@ -3022,7 +3022,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused) seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); } } else { - seq_printf(m, "ILR is not supported by this eDP panel.\n"); + seq_puts(m, "ILR is not supported by this eDP panel.\n"); } return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5536d17306d0..8db47f66eac0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -39,10 +39,10 @@ static bool lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) { - struct dc_link *link = handle; struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; - struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3 { struct dc_link *link = handle; - struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; - struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, + {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; *srm_size = 
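
dm_crtc_helper_atomic_check() above now rejects DRM_MODE_PAGE_FLIP_ASYNC commits whose update type is not UPDATE_TYPE_FAST instead of silently downgrading them. From userspace the rule surfaces as -EINVAL on the atomic commit; a minimal libdrm sketch, with the property IDs assumed to be resolved elsewhere:

	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* plane_id/fb_prop_id: the plane and its FB_ID property, looked up
	 * beforehand via drmModeObjectGetProperties(). */
	static int try_async_flip(int fd, uint32_t plane_id,
				  uint32_t fb_prop_id, uint32_t fb_id)
	{
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int ret;

		if (!req)
			return -1;

		drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);
		ret = drmModeAtomicCommit(fd, req,
					  DRM_MODE_ATOMIC_NONBLOCK |
					  DRM_MODE_PAGE_FLIP_ASYNC, NULL);
		drmModeAtomicFree(req);
		return ret;	/* -EINVAL if the driver deems it non-fast */
	}
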
hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; - return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; } -static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version) +static int psp_set_srm(struct psp_context *psp, + u8 *srm, uint32_t srm_size, uint32_t *srm_version) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX) return -EINVAL; @@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work) static void link_lock(struct hdcp_workqueue *work, bool lock) { - int i = 0; for (i = 0; i < work->max_link; i++) { @@ -160,10 +160,11 @@ static void link_lock(struct hdcp_workqueue *work, bool lock) mutex_unlock(&work[i].mutex); } } + void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - uint8_t content_type, + u8 content_type, bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -178,18 +179,19 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, query.display = NULL; mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); - if (query.display != NULL) { + if (query.display) { memcpy(display, query.display, sizeof(struct mod_hdcp_display)); mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; if (enable_encryption) { - /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp - * (s3 resume case) + /* Explicitly set the saved SRM as sysfs call will be after + * we already enabled hdcp (s3 resume case) */ if (hdcp_work->srm_size > 0) - psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size, + psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, + hdcp_work->srm_size, &hdcp_work->srm_version); display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; @@ -219,7 +221,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, } static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, - unsigned int link_index, + unsigned int link_index, struct amdgpu_dm_connector *aconnector) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -238,7 +240,8 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n", - aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms); + aconnector->base.index, conn_state->hdcp_content_type, + aconnector->base.dpms); } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); @@ -246,6 +249,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } + void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -274,15 +278,12 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index 
schedule_work(&hdcp_w->cpirq_work); } - - - static void event_callback(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, - callback_dwork); + callback_dwork); mutex_lock(&hdcp_work->mutex); @@ -294,13 +295,12 @@ static void event_callback(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - - } static void event_property_update(struct work_struct *work) { - struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, + property_update_work); struct amdgpu_dm_connector *aconnector = NULL; struct drm_device *dev; long ret; @@ -334,11 +334,10 @@ static void event_property_update(struct work_struct *work) mutex_lock(&hdcp_work->mutex); if (conn_state->commit) { - ret = wait_for_completion_interruptible_timeout( - &conn_state->commit->hw_done, 10 * HZ); + ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, + 10 * HZ); if (ret == 0) { - DRM_ERROR( - "HDCP state unknown! Setting it to DESIRED"); + DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n"); hdcp_work->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; } @@ -349,24 +348,20 @@ static void event_property_update(struct work_struct *work) DRM_MODE_HDCP_CONTENT_TYPE0 && hdcp_work->encryption_status[conn_index] <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { - DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); - drm_hdcp_update_content_protection( - connector, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && hdcp_work->encryption_status[conn_index] == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { - drm_hdcp_update_content_protection( - connector, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); } } else { DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); - drm_hdcp_update_content_protection( - connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); - + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED); } mutex_unlock(&hdcp_work->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); @@ -402,7 +397,7 @@ static void event_property_validate(struct work_struct *work) &query); DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", - aconnector->base.index, + aconnector->base.index, aconnector->base.state->content_protection, query.encryption_status, hdcp_work->encryption_status[conn_index]); @@ -410,7 +405,8 @@ static void event_property_validate(struct work_struct *work) if (query.encryption_status != hdcp_work->encryption_status[conn_index]) { DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", - hdcp_work->encryption_status[conn_index], query.encryption_status); + hdcp_work->encryption_status[conn_index], + query.encryption_status); hdcp_work->encryption_status[conn_index] = query.encryption_status; @@ -429,7 +425,7 @@ static void event_watchdog_timer(struct work_struct *work) struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), - struct hdcp_workqueue, + struct hdcp_workqueue, watchdog_timer_dwork); mutex_lock(&hdcp_work->mutex); @@ -443,7 +439,6 @@ static void event_watchdog_timer(struct 
work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - } static void event_cpirq(struct work_struct *work) @@ -459,10 +454,8 @@ static void event_cpirq(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - } - void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) { int i = 0; @@ -478,10 +471,8 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) kfree(hdcp_work); } - static bool enable_assr(void *handle, struct dc_link *link) { - struct hdcp_workqueue *hdcp_work = handle; struct mod_hdcp hdcp = hdcp_work->hdcp; struct psp_context *psp = hdcp.config.psp.handle; @@ -499,7 +490,8 @@ static bool enable_assr(void *handle, struct dc_link *link) memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; - dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst; + dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = + link->link_enc_hw_inst; dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); @@ -541,7 +533,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) else if (aconnector->dc_em_sink) sink = aconnector->dc_em_sink; - if (sink != NULL) + if (sink) link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; @@ -567,16 +559,20 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) conn_state = aconnector->base.state; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, - (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, - (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1); + (!!aconnector->base.state) ? + aconnector->base.state->content_protection : -1, + (!!aconnector->base.state) ? + aconnector->base.state->hdcp_content_type : -1); if (conn_state) hdcp_update_display(hdcp_work, link_index, aconnector, - conn_state->hdcp_content_type, false); + conn_state->hdcp_content_type, false); } - -/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel +/** + * DOC: Add sysfs interface for set/get srm + * + * NOTE: From the usermode's perspective you only need to call write *ONCE*, the kernel * will automatically call once or twice depending on the size * * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is * * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on * the last call we will send the full SRM. PSP will fail on every call before the last. * - * This means we don't know if the SRM is good until the last call. And because of this limitation we - * cannot throw errors early as it will stop the kernel from writing to sysfs + * This means we don't know if the SRM is good until the last call. 
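The once-or-twice behavior described in this comment falls out of sysfs delivering bin_attribute writes in page-sized pieces. A hedged userspace sketch of Example 1, where one logical write of a 5096-byte SRM reaches srm_data_write() as 4096 bytes and then 1000 (path as documented above, error handling kept minimal):

#include <fcntl.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

static int load_srm(const void *srm, size_t size)
{
	const char *p = srm;
	int fd = open("/sys/class/drm/card0/device/hdcp_srm", O_WRONLY);

	if (fd < 0)
		return -1;
	while (size) {
		/* sysfs accepts at most one page per call, so a 5096 byte
		 * SRM arrives in the kernel as 4096 bytes, then 1000 */
		ssize_t n = write(fd, p, size);

		if (n <= 0) {
			close(fd);
			return -1;
		}
		p += n;
		size -= (size_t)n;
	}
	return close(fd);
}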
And because of this + * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs * * Example 1: - * Good SRM size = 5096 - * first call to write 4096 -> PSP fails - * Second call to write 1000 -> PSP Pass -> SRM is set + * Good SRM size = 5096 + * first call to write 4096 -> PSP fails + * Second call to write 1000 -> PSP Pass -> SRM is set * * Example 2: - * Bad SRM size = 4096 - * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this - * is the last call) + * Bad SRM size = 4096 + * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this + * is the last call) * * Solution?: - * 1: Parse the SRM? -> It is signed so we don't know the EOF - * 2: We can have another sysfs that passes the size before calling set. -> simpler solution - * below + * 1: Parse the SRM? -> It is signed so we don't know the EOF + * 2: We can have another sysfs that passes the size before calling set. -> simpler solution + * below * * Easy Solution: * Always call get after Set to verify if set was successful. @@ -612,20 +608,21 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) * +----------------------+ * PSP will only update its srm if its older than the one we are trying to load. * Always do set first than get. - * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer - * version and save it + * -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer + * version and save it * - * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the - * same(newer) version back and save it + * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the + * same (newer) version back and save it * - * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is - * incorrect/corrupted and we should correct our SRM by getting it from PSP + * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is + * incorrect/corrupted and we should correct our SRM by getting it from PSP */ -static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint32_t srm_version = 0; + u32 srm_version = 0; work = container_of(bin_attr, struct hdcp_workqueue, attr); link_lock(work, true); @@ -639,19 +636,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi work->srm_version = srm_version; } - link_lock(work, false); return count; } -static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint8_t *srm = NULL; - uint32_t srm_version; - uint32_t srm_size; + u8 *srm = NULL; + u32 srm_version; + u32 srm_size; size_t ret = count; work = container_of(bin_attr, struct hdcp_workqueue, attr); @@ -684,12 +681,12 @@ ret: /* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory. 
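A sketch of the "Easy Solution" ordering spelled out above, set first, then get; reading back what PSP actually kept is the verification step. The helper shape and buffer handling are assumptions for illustration, not part of the patch:

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

/* Write our SRM, then read back whatever PSP holds afterwards; if PSP
 * rejected ours (older or corrupt), the read returns the authoritative
 * copy, which is what userspace should persist to storage. */
static ssize_t set_then_get_srm(int fd, const void *srm_in, size_t in_size,
				void *srm_out, size_t out_cap)
{
	if (pwrite(fd, srm_in, in_size, 0) < 0)
		return -1;
	return pread(fd, srm_out, out_cap, 0);
}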
* * For example, - * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" - * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent - * across boot/reboots/suspend/resume/shutdown + * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" + * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent + * across boot/reboots/suspend/resume/shutdown * - * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need - * to make the SRM persistent. + * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP + * we need to make the SRM persistent. * * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. * -The kernel cannot write to the file systems. @@ -699,8 +696,8 @@ ret: * * Usermode can read/write to/from PSP using the sysfs interface * For example: - * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile - * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm + * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile + * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm */ static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, @@ -709,10 +706,9 @@ static const struct bin_attribute data_attr = { .read = srm_data_read, }; - -struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc) +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, + struct cp_psp *cp_psp, struct dc *dc) { - int max_caps = dc->caps.max_links; struct hdcp_workqueue *hdcp_work; int i = 0; @@ -721,14 +717,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct if (ZERO_OR_NULL_PTR(hdcp_work)) return NULL; - hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); + hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm), GFP_KERNEL); - if (hdcp_work->srm == NULL) + if (!hdcp_work->srm) goto fail_alloc_context; - hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL); + hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm_temp), GFP_KERNEL); - if (hdcp_work->srm_temp == NULL) + if (!hdcp_work->srm_temp) goto fail_alloc_context; hdcp_work->max_link = max_caps; @@ -781,10 +779,5 @@ fail_alloc_context: kfree(hdcp_work); return NULL; - - - } - - diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index d9a482908380..e94eeeb97688 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -68,15 +68,15 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) } } -/* dm_helpers_parse_edid_caps - * - * Parse edid caps +/** + * dm_helpers_parse_edid_caps() - Parse edid caps * + * @link: current detected link * @edid: [in] pointer to edid - * edid_caps: [in] pointer to edid caps - * @return - * void - * */ + * @edid_caps: [in] pointer to edid caps + * + * Return: void + */ enum dc_edid_status dm_helpers_parse_edid_caps( struct dc_link *link, const struct dc_edid *edid, @@ -255,7 +255,8 @@ 
bool dm_helpers_dp_mst_write_payload_allocation_table( /* Accessing the connector state is required for vcpi_slots allocation * and directly relies on behaviour in commit check * that blocks before commit guaranteeing that the state - * is not gonna be swapped while still in use in commit tail */ + * is not gonna be swapped while still in use in commit tail + */ if (!aconnector || !aconnector->mst_root) return false; @@ -282,7 +283,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each * stream. AMD ASIC stream slot allocation should follow the same - * sequence. copy DRM MST allocation to dc */ + * sequence. copy DRM MST allocation to dc + */ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table); return true; @@ -426,7 +428,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx, total = log_ctx->pos + n + 1; if (total > log_ctx->size) { - char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); + char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL); if (buf) { memcpy(buf, log_ctx->buf, log_ctx->pos); @@ -633,7 +635,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); if (ret < 0) { - DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret); + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); return false; } @@ -655,7 +657,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); } - DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success); + DC_LOG_DC("%s: success = %d\n", __func__, success); return success; } @@ -664,7 +666,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) { unsigned char data[16] = {0}; - DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); + DC_LOG_DC("Start %s\n", __func__); // Step 2 data[0] = 'P'; @@ -722,7 +724,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) return; - DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); + DC_LOG_DC("Done %s\n", __func__); } /* MST Dock */ @@ -995,9 +997,8 @@ void dm_helpers_override_panel_settings( struct dc_panel_config *panel_config) { // Feature DSC - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) panel_config->dsc.disable_dsc_edp = true; - } } void *dm_helpers_allocate_gpu_mem( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 19f543ba7205..51467f132c26 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -120,7 +120,8 @@ static void dm_irq_work_func(struct work_struct *work) /* Call a DAL subcomponent which registered for interrupt notification * at INTERRUPT_LOW_IRQ_CONTEXT. - * (The most common use is HPD interrupt) */ + * (The most common use is HPD interrupt) + */ } /* @@ -172,7 +173,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, if (handler_removed == false) { /* Not necessarily an error - caller may not - * know the context. */ + * know the context. 
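The note above encodes a small invariant: a handler lives in exactly one per-context table, so unregistration probes each table in turn and treats only a miss in all of them as a real error. A self-contained sketch of that pattern with simplified types (these are not the driver's structures):

#include <stddef.h>

enum irq_ctx { HIGH_IRQ_CTX, LOW_IRQ_CTX, NUM_IRQ_CTX };

struct handler_node {
	void *ih;                 /* opaque handler cookie */
	struct handler_node *next;
};

/* Try one context's list; NULL just means "not registered here". */
static struct handler_node *remove_from(struct handler_node **head, void *ih)
{
	for (; *head; head = &(*head)->next) {
		if ((*head)->ih == ih) {
			struct handler_node *found = *head;

			*head = found->next;
			return found;
		}
	}
	return NULL;
}

/* The caller may not know which context the handler was registered in,
 * so probe every table; only a miss in all of them is an error. */
static struct handler_node *unregister_handler(struct handler_node *tab[NUM_IRQ_CTX],
					       void *ih)
{
	int c;

	for (c = 0; c < NUM_IRQ_CTX; c++) {
		struct handler_node *h = remove_from(&tab[c], ih);

		if (h)
			return h;
	}
	return NULL;
}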
+ */ return NULL; } @@ -261,7 +263,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params, static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, irq_handler_idx handler_idx) { - if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) { + if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) { DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); return false; } @@ -343,7 +345,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, /* This pointer will be stored by code which requested interrupt * registration. * The same pointer will be needed in order to unregister the - * interrupt. */ + * interrupt. + */ DRM_DEBUG_KMS( "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", @@ -390,7 +393,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, if (handler_list == NULL) { /* If we got here, it means we searched all irq contexts - * for this irq source, but the handler was not found. */ + * for this irq source, but the handler was not found. + */ DRM_ERROR( "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", ih, irq_source); @@ -450,7 +454,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' - * (because no code can schedule a new one). */ + * (because no code can schedule a new one). + */ lh = &adev->dm.irq_handler_list_low_tab[src]; DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); @@ -494,7 +499,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); if (!list_empty(hnd_list_l)) { - list_for_each_safe (entry, tmp, hnd_list_l) { + list_for_each_safe(entry, tmp, hnd_list_l) { handler = list_entry( entry, struct amdgpu_dm_irq_handler_data, @@ -571,7 +576,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, if (list_empty(handler_list)) return; - list_for_each_entry (handler_data, handler_list, list) { + list_for_each_entry(handler_data, handler_list, list) { if (queue_work(system_highpri_wq, &handler_data->work)) { work_queued = true; break; @@ -627,7 +632,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, &adev->dm.irq_handler_list_high_tab[irq_source], list) { /* Call a subcomponent which registered for immediate - * interrupt notification */ + * interrupt notification + */ handler_data->handler(handler_data->handler_arg); } @@ -664,7 +670,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, return 0; } -static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) +static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) { switch (type) { case AMDGPU_HPD_1: @@ -686,7 +692,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); @@ -698,7 +704,7 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, static inline int dm_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state, const enum irq_type dal_irq_type, const char *func) @@ -729,7 +735,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev, static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, struct 
amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -743,7 +749,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -893,13 +899,13 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, true); } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, true); @@ -928,13 +934,13 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false); } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, false); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 46d0a8f57e55..943959012d04 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -296,6 +296,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector->edid) { struct edid *edid; + edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); if (!edid) { @@ -619,8 +620,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, return connector; } +void dm_handle_mst_sideband_msg_ready_event( + struct drm_dp_mst_topology_mgr *mgr, + enum mst_msg_ready_type msg_rdy_type) +{ + uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; + uint8_t dret; + bool new_irq_handled = false; + int dpcd_addr; + uint8_t dpcd_bytes_to_read; + const uint8_t max_process_count = 30; + uint8_t process_count = 0; + u8 retry; + struct amdgpu_dm_connector *aconnector = + container_of(mgr, struct amdgpu_dm_connector, mst_mgr); + + + const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); + + if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { + dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; + /* DPCD 0x200 - 0x201 for downstream IRQ */ + dpcd_addr = DP_SINK_COUNT; + } else { + dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; + /* DPCD 0x2002 - 0x2005 for downstream IRQ */ + dpcd_addr = DP_SINK_COUNT_ESI; + } + + mutex_lock(&aconnector->handle_mst_msg_ready); + + while (process_count < max_process_count) { + u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; + + process_count++; + + dret = drm_dp_dpcd_read( + &aconnector->dm_dp_aux.aux, + dpcd_addr, + esi, + dpcd_bytes_to_read); + + if (dret != dpcd_bytes_to_read) { + DRM_DEBUG_KMS("DPCD read and acked number is not as expected!"); + break; + } + + DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); + + switch (msg_rdy_type) { + case DOWN_REP_MSG_RDY_EVENT: + /* Only 
handle DOWN_REP_MSG_RDY case*/ + esi[1] &= DP_DOWN_REP_MSG_RDY; + break; + case UP_REQ_MSG_RDY_EVENT: + /* Only handle UP_REQ_MSG_RDY case*/ + esi[1] &= DP_UP_REQ_MSG_RDY; + break; + default: + /* Handle both cases*/ + esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); + break; + } + + if (!esi[1]) + break; + + /* handle MST irq */ + if (aconnector->mst_mgr.mst_state) + drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, + esi, + ack, + &new_irq_handled); + + if (new_irq_handled) { + /* ACK at DPCD to notify down stream */ + for (retry = 0; retry < 3; retry++) { + ssize_t wret; + + wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, + dpcd_addr + 1, + ack[1]); + if (wret == 1) + break; + } + + if (retry == 3) { + DRM_ERROR("Failed to ack MST event.\n"); + break; + } + + drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); + + new_irq_handled = false; + } else { + break; + } + } + + mutex_unlock(&aconnector->handle_mst_msg_ready); + + if (process_count == max_process_count) + DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); +} + +static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr) +{ + dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT); +} + static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, + .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready, }; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, @@ -717,6 +828,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p &dsc_options, 0, params[i].timing, + dc_link_get_highest_encoding_format(params[i].aconnector->dc_link), ¶ms[i].timing->dsc_cfg)) { params[i].timing->flags.DSC = 1; @@ -767,7 +879,9 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) param.sink->ctx->dc->res_pool->dscs[0], ¶m.sink->dsc_caps.dsc_dec_caps, &dsc_options, - (int) kbps, param.timing, &dsc_config); + (int) kbps, param.timing, + dc_link_get_highest_encoding_format(param.aconnector->dc_link), + &dsc_config); return dsc_config.bits_per_pixel; } @@ -1005,8 +1119,11 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, ¶ms[count].bw_range)) - params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + &stream->timing, + dc_link_get_highest_encoding_format(dc_link), + ¶ms[count].bw_range)) + params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(dc_link)); count++; } @@ -1466,7 +1583,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, bw_range); + &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 1e4ede1e57ab..37c820ab0fdb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -49,6 +49,13 @@ #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 +enum mst_msg_ready_type { + NONE_MSG_RDY_EVENT = 0, + DOWN_REP_MSG_RDY_EVENT = 1, + 
UP_REQ_MSG_RDY_EVENT = 2, + DOWN_OR_UP_MSG_RDY_EVENT = 3 +}; + struct amdgpu_display_manager; struct amdgpu_dm_connector; @@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); +void dm_handle_mst_sideband_msg_ready_event( + struct drm_dp_mst_topology_mgr *mgr, + enum mst_msg_ready_type msg_rdy_type); + struct dsc_mst_fairness_vars { int pbn; bool dsc_enabled; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 322668973747..2198df96ed6f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -113,6 +113,11 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, + DRM_FORMAT_ARGB16161616F, }; uint32_t format = plane_state->fb->format->format; unsigned int i; @@ -164,7 +169,7 @@ static bool modifier_has_dcc(uint64_t modifier) return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } -static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier) +static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; @@ -581,7 +586,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, int pkrs = 0; u32 gb_addr_config; u8 i = 0; - unsigned swizzle_r_x; + unsigned int swizzle_r_x; uint64_t modifier_r_x; uint64_t modifier_dcc_best; uint64_t modifier_dcc_4k; @@ -698,8 +703,8 @@ static int get_plane_formats(const struct drm_plane *plane, * caps list. 
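With the restructured selection below, non-cursor planes backed by DCN universal plane hardware advertise the same RGB list as the primary plane, and userspace sees the difference through the ordinary plane query. A small libdrm sketch that lists every plane's FourCC formats; purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void dump_plane_formats(int fd)
{
	drmModePlaneRes *res;
	uint32_t i, j;

	/* without this cap the kernel hides non-primary planes */
	drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
	res = drmModeGetPlaneResources(fd);
	if (!res)
		return;
	for (i = 0; i < res->count_planes; i++) {
		drmModePlane *p = drmModeGetPlane(fd, res->planes[i]);

		if (!p)
			continue;
		printf("plane %u:", p->plane_id);
		for (j = 0; j < p->count_formats; j++)
			/* little-endian fourcc, printed as 4 chars */
			printf(" %.4s", (const char *)&p->formats[j]);
		printf("\n");
		drmModeFreePlane(p);
	}
	drmModeFreePlaneResources(res);
}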
*/ - switch (plane->type) { - case DRM_PLANE_TYPE_PRIMARY: + if (plane->type == DRM_PLANE_TYPE_PRIMARY || + (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { if (num_formats >= max_formats) break; @@ -717,25 +722,29 @@ static int get_plane_formats(const struct drm_plane *plane, formats[num_formats++] = DRM_FORMAT_XBGR16161616F; formats[num_formats++] = DRM_FORMAT_ABGR16161616F; } - break; + } else { + switch (plane->type) { + case DRM_PLANE_TYPE_OVERLAY: + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; - case DRM_PLANE_TYPE_OVERLAY: - for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { - if (num_formats >= max_formats) - break; + formats[num_formats++] = overlay_formats[i]; + } + break; - formats[num_formats++] = overlay_formats[i]; - } - break; + case DRM_PLANE_TYPE_CURSOR: + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; - case DRM_PLANE_TYPE_CURSOR: - for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { - if (num_formats >= max_formats) - break; + formats[num_formats++] = cursor_formats[i]; + } + break; - formats[num_formats++] = cursor_formats[i]; + default: + break; } - break; } return num_formats; @@ -1459,6 +1468,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_create_blend_mode_property(plane, blend_caps); } + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + drm_plane_create_zpos_immutable_property(plane, 0); + } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + unsigned int zpos = 1 + drm_plane_index(plane); + drm_plane_create_zpos_property(plane, zpos, 1, 254); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + drm_plane_create_zpos_immutable_property(plane, 255); + } + if (plane->type == DRM_PLANE_TYPE_PRIMARY && plane_cap && (plane_cap->pixel_format_support.nv12 || diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 75284e2cec74..848c5b4bb301 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -334,7 +334,8 @@ bool dm_pp_get_clock_levels_by_type( if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { /* This clock is higher the validation clock. * Than means the previous one is the highest - * non-boosted one. */ + * non-boosted one. + */ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", dc_clks->num_levels, i); dc_clks->num_levels = i > 0 ? 
i : 1; @@ -406,10 +407,10 @@ bool dm_pp_notify_wm_clock_changes( * TODO: expand this to other ASICs */ if ((adev->asic_type >= CHIP_POLARIS10) && - (adev->asic_type <= CHIP_VEGAM) && - !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, - (void *)wm_with_clock_ranges)) - return true; + (adev->asic_type <= CHIP_VEGAM) && + !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + (void *)wm_with_clock_ranges)) + return true; return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 4f61d4f257cd..08ce3bb8f640 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -166,6 +166,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ if (vsync_rate_hz != 0) { unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; + num_frames_static = (30000 / frame_time_microsec) + 1; } diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index 352e9afb85c6..e295a839ab47 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -24,7 +24,7 @@ */ #include "dm_services.h" -#include "conversion.h" +#include "basics/conversion.h" #define DIVIDER 10000 diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c index 84aeccf36b4b..6d2924114a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/vector.c +++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c @@ -50,12 +50,11 @@ bool dal_vector_construct( return true; } -static bool dal_vector_presized_costruct( - struct vector *vector, - struct dc_context *ctx, - uint32_t count, - void *initial_value, - uint32_t struct_size) +static bool dal_vector_presized_costruct(struct vector *vector, + struct dc_context *ctx, + uint32_t count, + void *initial_value, + uint32_t struct_size) { uint32_t i; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 27af9d3c2b73..4f005ae1516c 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info( return NULL; } -static enum bp_result update_slot_layout_info( - struct dc_bios *dcb, - unsigned int i, - struct slot_layout_info *slot_layout_info, - unsigned int record_offset) +static enum bp_result update_slot_layout_info(struct dc_bios *dcb, + unsigned int i, + struct slot_layout_info *slot_layout_info, + unsigned int record_offset) { unsigned int j; struct bios_parser *bp; @@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info( } -static enum bp_result get_bracket_layout_record( - struct dc_bios *dcb, - unsigned int bracket_layout_id, - struct slot_layout_info *slot_layout_info) +static enum bp_result get_bracket_layout_record(struct dc_bios *dcb, + unsigned int bracket_layout_id, + struct slot_layout_info *slot_layout_info) { unsigned int i; unsigned int record_offset; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index cce47d3f1a13..540d19efad8f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object( } /* from graphics_object_id, find display path which 
includes the object_id */ -static struct atom_display_object_path_v3 *get_bios_object_from_path_v3( - struct bios_parser *bp, - struct graphics_object_id id) +static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp, + struct graphics_object_id id) { unsigned int i; struct graphics_object_id obj_id = {0}; @@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info( return BP_RESULT_OK; } -static struct atom_hpd_int_record *get_hpd_record_for_path_v3( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record( return NULL; } -static struct atom_connector_caps_record *get_connector_caps_record( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( return BP_RESULT_OK; } -static struct atom_connector_speed_record *get_connector_speed_cap_record( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 7ccd96959256..3db4ef564b99 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa( stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) tmds_present = true; + + /* Checking stream / link detection ensuring that PHY is active*/ + if (dc_is_dp_signal(stream->signal) && !stream->dpms_off) + display_count++; + } for (i = 0; i < dc->link_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 2f7c8996b19d..3ba2e13d691d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -87,6 +87,14 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L +#define regCLK1_CLK2_BYPASS_CNTL 0x029c +#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0 + +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0 +#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV__SHIFT 0x10 +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L +#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV_MASK 0x000F0000L + #define REG(reg_name) \ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) @@ -436,6 +444,11 @@ static DpmClocks314_t dummy_clocks; static struct dcn314_watermarks dummy_wms = { 0 }; +static struct dcn314_ss_info_table ss_info_table = { + .ss_divider = 1000, + .ss_percentage = {0, 0, 375, 375, 375} 
+}; + static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table) { int i, num_valid_sets; @@ -715,6 +728,20 @@ static struct clk_mgr_funcs dcn314_funcs = { }; extern struct clk_mgr_funcs dcn3_fpga_funcs; +static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) +{ + uint32_t clock_source; + + REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); + + clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; + + if (clk_mgr->dprefclk_ss_percentage != 0) { + clk_mgr->ss_on_dprefclk = true; + clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; + } +} + void dcn314_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn314 *clk_mgr, @@ -781,9 +808,11 @@ void dcn314_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; - dce_clock_read_ss_info(&clk_mgr->base); + + dcn314_read_ss_info_from_lut(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ - //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); + clk_mgr->base.base.dprefclk_khz = + dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn314_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h index 171f84340eb2..e0670dafe260 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h @@ -28,6 +28,8 @@ #define __DCN314_CLK_MGR_H__ #include "clk_mgr_internal.h" +#define NUM_CLOCK_SOURCES 5 + struct dcn314_watermarks; struct dcn314_smu_watermark_set { @@ -40,6 +42,11 @@ struct clk_mgr_dcn314 { struct dcn314_smu_watermark_set smu_wm_set; }; +struct dcn314_ss_info_table { + uint32_t ss_divider; + uint32_t ss_percentage[NUM_CLOCK_SOURCES]; +}; + bool dcn314_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index d7de756301cf..0349631991b8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -55,14 +55,6 @@ struct IP_BASE struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; -static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } }, - { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } }, - { { 0x00017000, 0x02402000, 0, 0, 0, 0 } }, - { { 0x00017200, 0x02402400, 0, 0, 0, 0 } }, - { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } }, - { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } }, - { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } }; - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -73,9 +65,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L -#define REG(reg_name) \ - (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) - #define TO_CLK_MGR_DCN316(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn316, base) @@ -577,36 +566,6 @@ static struct clk_mgr_funcs dcn316_funcs = { }; extern struct clk_mgr_funcs dcn3_fpga_funcs; -static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) 
-{ - /* get FbMult value */ - struct fixed31_32 pll_req; - unsigned int fbmult_frac_val = 0; - unsigned int fbmult_int_val = 0; - - /* - * Register value of fbmult is in 8.16 format, we are converting to 31.32 - * to leverage the fix point operations available in driver - */ - - REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/ - REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ - - pll_req = dc_fixpt_from_int(fbmult_int_val); - - /* - * since fractional part is only 16 bit in register definition but is 32 bit - * in our fix point definiton, need to shift left by 16 to obtain correct value - */ - pll_req.value |= fbmult_frac_val << 16; - - /* multiply by REFCLK period */ - pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); - - /* integer part is now VCO frequency in kHz */ - return dc_fixpt_floor(pll_req); -} - void dcn316_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn316 *clk_mgr, @@ -660,7 +619,8 @@ void dcn316_clk_mgr_construct( clk_mgr->base.smu_present = true; // Skip this for now as it did not work on DCN315, renable during bring up - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* in case we don't get a value from the register, use default */ if (clk_mgr->base.base.dentist_vco_freq_khz == 0) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index cb992aca760d..0701d03b88a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -297,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz, prev_dppclk_khz; + int dpp_inst = 0, dppclk_khz, prev_dppclk_khz; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d133e4186a52..7cac14f493f6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1047,8 +1047,10 @@ static void disable_all_writeback_pipes_for_stream( stream->writeback_info[i].wb_enabled = false; } -static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, - struct dc_stream_state *stream, bool lock) +static void apply_ctx_interdependent_lock(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream, + bool lock) { int i; @@ -3582,9 +3584,9 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->block_sequence_steps); /* Clear update flags so next flip doesn't have redundant programming * (if there's no stream update, the update flags are not cleared). + * Surface updates are cleared unconditionally at the beginning of each flip, + * so no need to clear here. 
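The invariant this comment leans on is worth making concrete: each surface carries a union of per-attribute update bits, and the flip path clears the whole field unconditionally before marking what the new update touches, so nothing can leak into the next flip. A reduced sketch of the pattern with simplified field names (not the actual DC types):

#include <stdbool.h>
#include <stdint.h>

union update_flags {
	uint32_t raw;
	struct {
		uint32_t addr_update : 1;
		uint32_t gamma_change : 1;
		/* ...one bit per programmable surface attribute... */
	} bits;
};

struct surface_state {
	union update_flags update_flags;
};

/* Every flip starts by clearing the bitfield, then sets only what this
 * update touches; stale bits never carry over, which is why the fast
 * path above does not need its own clear. */
static void begin_flip(struct surface_state *s, bool new_address)
{
	s->update_flags.raw = 0;
	if (new_address)
		s->update_flags.bits.addr_update = 1;
}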
*/ - if (top_pipe_to_program->plane_state) - top_pipe_to_program->plane_state->update_flags.raw = 0; if (top_pipe_to_program->stream) top_pipe_to_program->stream->update_flags.raw = 0; } @@ -4088,9 +4090,9 @@ static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context) { struct dc_state *transition_context = dc_create_state(dc); - enum pipe_split_policy tmp_mpc_policy; - bool temp_dynamic_odm_policy; - bool temp_subvp_policy; + enum pipe_split_policy tmp_mpc_policy = 0; + bool temp_dynamic_odm_policy = 0; + bool temp_subvp_policy = 0; enum dc_status ret = DC_ERROR_UNEXPECTED; unsigned int i, j; unsigned int pipe_in_use = 0; @@ -4284,7 +4286,8 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c return false; } -static bool full_update_required(struct dc_surface_update *srf_updates, +static bool full_update_required(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) @@ -4292,6 +4295,7 @@ static bool full_update_required(struct dc_surface_update *srf_updates, int i; struct dc_stream_status *stream_status; + const struct dc_state *context = dc->current_state; for (i = 0; i < surface_count; i++) { if (srf_updates && @@ -4302,7 +4306,11 @@ static bool full_update_required(struct dc_surface_update *srf_updates, srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || - srf_updates[i].blend_tf)) + srf_updates[i].blend_tf || + srf_updates[i].surface->force_full_update || + (srf_updates[i].flip_addr && + srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || + !is_surface_in_context(context, srf_updates[i].surface))) return true; } @@ -4340,18 +4348,21 @@ static bool full_update_required(struct dc_surface_update *srf_updates, if (stream_status == NULL || stream_status->plane_count != surface_count) return true; } + if (dc->idle_optimizations_allowed) + return true; return false; } -static bool fast_update_only(struct dc_fast_update *fast_update, +static bool fast_update_only(struct dc *dc, + struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) { return fast_updates_exist(fast_update, surface_count) - && !full_update_required(srf_updates, surface_count, stream_update, stream); + && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } bool dc_update_planes_and_stream(struct dc *dc, @@ -4369,8 +4380,8 @@ bool dc_update_planes_and_stream(struct dc *dc, * cause underflow. Apply stream configuration with minimal pipe * split first to avoid unsupported transitions for active pipes. 
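The fast path is chosen by a predicate over the requested updates, and the hunks below widen the set of conditions that force the full path: TMZ changes, surfaces not yet in the current context, and idle optimizations being enabled. A condensed sketch of that decision shape; the names and fields are simplifications, not the driver's signature:

#include <stdbool.h>

struct surface_update {
	bool is_structural;   /* new surface in context, TMZ change, ... */
};

/* Fast flip is allowed only when some update exists, none of them is
 * structural, and the hardware is not in a state (such as idle
 * optimizations) that a flip-only path cannot unwind. */
static bool fast_update_only(const struct surface_update *updates, int count,
			     bool idle_optimizations_allowed)
{
	int i;

	if (count == 0 || idle_optimizations_allowed)
		return false;
	for (i = 0; i < count; i++)
		if (updates[i].is_structural)
			return false;
	return true;
}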
*/ - bool force_minimal_pipe_splitting; - bool is_plane_addition; + bool force_minimal_pipe_splitting = 0; + bool is_plane_addition = 0; populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( @@ -4423,7 +4434,7 @@ bool dc_update_planes_and_stream(struct dc *dc, } update_seamless_boot_flags(dc, context, surface_count, stream); - if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, @@ -4569,7 +4580,7 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); update_seamless_boot_flags(dc, context, surface_count, stream); - if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, @@ -5273,3 +5284,56 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); } + +/***************************************************************************** + * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause + * ABM + * @dc: dc structure + * @stream: stream where vsync int state changed + * @pData: abm hw states + * + ****************************************************************************/ +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData) +{ + int i; + int edp_num; + struct pipe_ctx *pipe = NULL; + struct dc_link *link = stream->sink->link; + struct dc_link *edp_links[MAX_NUM_EDP]; + + + /*find primary pipe associated with stream*/ + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream && pipe->stream_res.tg) + break; + } + + if (i == MAX_PIPES) { + ASSERT(0); + return false; + } + + dc_get_edp_links(dc, edp_links, &edp_num); + + /* Determine panel inst */ + for (i = 0; i < edp_num; i++) + if (edp_links[i] == link) + break; + + if (i == edp_num) + return false; + + if (pipe->stream_res.abm && + pipe->stream_res.abm->funcs->save_restore) + return pipe->stream_res.abm->funcs->save_restore( + pipe->stream_res.abm, + i, + pData); + return false; +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index cb2bf9a466f5..f99ec1b0efaf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -187,6 +187,7 @@ static bool is_ycbcr709_limited_type( ret = true; return ret; } + static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space) { enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 18e098568cb4..0d19d4cd1916 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const 
struct dc_link *link) return link->dc->link_srv->dp_get_verified_link_cap(link); } +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link) +{ + if (dc_is_dp_signal(link->connector_signal)) { + if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE && + link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE) + return DC_LINK_ENCODING_HDMI_TMDS; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_8b_10b_ENCODING) + return DC_LINK_ENCODING_DP_8b_10b; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_128b_132b_ENCODING) + return DC_LINK_ENCODING_DP_128b_132b; + } else if (dc_is_hdmi_signal(link->connector_signal)) { + } + + return DC_LINK_ENCODING_UNSPECIFIED; +} + bool dc_link_is_dp_sink_present(struct dc_link *link) { return link->dc->link_srv->dp_is_sink_present(link); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2f3d9a698486..d0f4b86cadf1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -69,9 +69,16 @@ #include "../dcn32/dcn32_resource.h" #include "../dcn321/dcn321_resource.h" +#define VISUAL_CONFIRM_BASE_DEFAULT 3 +#define VISUAL_CONFIRM_BASE_MIN 1 +#define VISUAL_CONFIRM_BASE_MAX 10 +#define VISUAL_CONFIRM_DPP_OFFSET 3 #define DC_LOGGER_INIT(logger) +#define HEAD_NOT_IN_ODM -2 +#define UNABLE_TO_SPLIT -1 + enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; @@ -740,7 +747,12 @@ int get_num_mpc_splits(struct pipe_ctx *pipe) int get_num_odm_splits(struct pipe_ctx *pipe) { int odm_split_count = 0; - struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + struct pipe_ctx *next_pipe = NULL; + + while (pipe->top_pipe) + pipe = pipe->top_pipe; + + next_pipe = pipe->next_odm_pipe; while (next_pipe) { odm_split_count++; next_pipe = next_pipe->next_odm_pipe; @@ -753,32 +765,35 @@ int get_num_odm_splits(struct pipe_ctx *pipe) return odm_split_count; } -static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx) +static int get_odm_split_index(struct pipe_ctx *pipe_ctx) { - *split_count = get_num_odm_splits(pipe_ctx); - *split_idx = 0; - if (*split_count == 0) { - /*Check for mpc split*/ - struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; - - *split_count = get_num_mpc_splits(pipe_ctx); - while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { - (*split_idx)++; - split_pipe = split_pipe->top_pipe; - } + struct pipe_ctx *split_pipe = NULL; + int index = 0; - /* MPO window on right side of ODM split */ - if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) - (*split_idx)++; - } else { - /*Get odm split index*/ - struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe; + while (pipe_ctx->top_pipe) + pipe_ctx = pipe_ctx->top_pipe; - while (split_pipe) { - (*split_idx)++; - split_pipe = split_pipe->prev_odm_pipe; - } + split_pipe = pipe_ctx->prev_odm_pipe; + + while (split_pipe) { + index++; + split_pipe = split_pipe->prev_odm_pipe; } + + return index; +} + +static int get_mpc_split_index(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; + int index = 0; + + while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { + index++; + split_pipe = split_pipe->top_pipe; + } + + return index; } /* @@ -800,82 +815,357 @@ static void 
calculate_viewport_size(struct pipe_ctx *pipe_ctx) } } -static void calculate_recout(struct pipe_ctx *pipe_ctx) +static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) { - const struct dc_plane_state *plane_state = pipe_ctx->plane_state; - const struct dc_stream_state *stream = pipe_ctx->stream; - struct scaler_data *data = &pipe_ctx->plane_res.scl_data; - struct rect surf_clip = plane_state->clip_rect; - bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; - int split_count, split_idx; + struct rect rec; + int r0_x_end = r0->x + r0->width; + int r1_x_end = r1->x + r1->width; + int r0_y_end = r0->y + r0->height; + int r1_y_end = r1->y + r1->height; + + rec.x = r0->x > r1->x ? r0->x : r1->x; + rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x; + rec.y = r0->y > r1->y ? r0->y : r1->y; + rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y; + + /* in case that there is no intersection */ + if (rec.width < 0 || rec.height < 0) + memset(&rec, 0, sizeof(rec)); + + return rec; +} - calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); - if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE) - split_idx = 0; +static struct rect shift_rec(const struct rect *rec_in, int x, int y) +{ + struct rect rec_out = *rec_in; + + rec_out.x += x; + rec_out.y += y; + + return rec_out; +} + +static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + int odm_slice_count = get_num_odm_splits(pipe_ctx) + 1; + int odm_slice_idx = get_odm_split_index(pipe_ctx); + bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count; + int h_active = stream->timing.h_addressable + + stream->timing.h_border_left + + stream->timing.h_border_right; + int odm_slice_width = h_active / odm_slice_count; + struct rect odm_rec; + + odm_rec.x = odm_slice_width * odm_slice_idx; + odm_rec.width = is_last_odm_slice ? + /* last slice width is the reminder of h_active */ + h_active - odm_slice_width * (odm_slice_count - 1) : + /* odm slice width is the floor of h_active / count */ + odm_slice_width; + odm_rec.y = 0; + odm_rec.height = stream->timing.v_addressable + + stream->timing.v_border_bottom + + stream->timing.v_border_top; + + return odm_rec; +} +static struct rect calculate_plane_rec_in_timing_active( + struct pipe_ctx *pipe_ctx, + const struct rect *rec_in) +{ /* - * Only the leftmost ODM pipe should be offset by a nonzero distance + * The following diagram shows an example where we map a 1920x1200 + * desktop to a 2560x1440 timing with a plane rect in the middle + * of the screen. To map a plane rect from Stream Source to Timing + * Active space, we first multiply stream scaling ratios (i.e 2304/1920 + * horizontal and 1440/1200 vertical) to the plane's x and y, then + * we add stream destination offsets (i.e 128 horizontal, 0 vertical). + * This will give us a plane rect's position in Timing Active. However + * we have to remove the fractional. The rule is that we find left/right + * and top/bottom positions and round the value to the adjacent integer. 
+	 *
+	 * Stream Source Space
+	 * ------------
+	 * __________________________________________________
+	 * |Stream Source (1920 x 1200) ^ |
+	 * | y |
+	 * | <------- w --------|> |
+	 * | __________________V |
+	 * |<-- x -->|Plane//////////////| ^ |
+	 * | |(pre scale)////////| | |
+	 * | |///////////////////| | |
+	 * | |///////////////////| h |
+	 * | |///////////////////| | |
+	 * | |///////////////////| | |
+	 * | |///////////////////| V |
+	 * | |
+	 * | |
+	 * |__________________________________________________|
+	 *
+	 *
+	 * Timing Active Space
+	 * ---------------------------------
+	 *
+	 * Timing Active (2560 x 1440)
+	 * __________________________________________________
+	 * |*****| Stream Destination (2304 x 1440) |*****|
+	 * |*****| |*****|
+	 * |<128>| |*****|
+	 * |*****| __________________ |*****|
+	 * |*****| |Plane/////////////| |*****|
+	 * |*****| |(post scale)//////| |*****|
+	 * |*****| |//////////////////| |*****|
+	 * |*****| |//////////////////| |*****|
+	 * |*****| |//////////////////| |*****|
+	 * |*****| |//////////////////| |*****|
+	 * |*****| |*****|
+	 * |*****| |*****|
+	 * |*****| |*****|
+	 * |*****|______________________________________|*****|
+	 *
+	 * So the resulting formulas are shown below:
+	 *
+	 * recout_x = 128 + round(plane_x * 2304 / 1920)
+	 * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
+	 * recout_y = 0 + round(plane_y * 1440 / 1200)
+	 * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
+	 *
+	 * NOTE: fixed point division is not error free. To reduce errors
+	 * introduced by fixed point division, we divide only after
+	 * multiplication is complete.
	 */
-	if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
-		/* MPO window on right side of ODM split */
-		data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) *
-				stream->dst.width / stream->src.width;
-	} else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
-		data->recout.x = stream->dst.x;
-		if (stream->src.x < surf_clip.x)
-			data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
-						/ stream->src.width;
-	} else
-		data->recout.x = 0;
-
-	if (stream->src.x > surf_clip.x)
-		surf_clip.width -= stream->src.x - surf_clip.x;
-	data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
-	if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
-		data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
-
-	data->recout.y = stream->dst.y;
-	if (stream->src.y < surf_clip.y)
-		data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
-						/ stream->src.height;
-	else if (stream->src.y > surf_clip.y)
-		surf_clip.height -= stream->src.y - surf_clip.y;
-
-	data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
-	if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
-		data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
-
-	/* Handle h & v split */
-	if (split_tb) {
-		ASSERT(data->recout.height % 2 == 0);
-		data->recout.height /= 2;
-	} else if (split_count) {
-		if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
-			/* extra pixels in the division remainder need to go to pipes after
-			 * the extra pixel index minus one(epimo) defined here as:
-			 */
-			int epimo = split_count - data->recout.width % (split_count + 1);
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct rect rec_out = {0};
+	struct fixed31_32 temp;

-			data->recout.x += (data->recout.width /
(split_count + 1)) * split_idx; - if (split_idx > epimo) - data->recout.x += split_idx - epimo - 1; - ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0); - data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0); - } else { - /* odm */ - if (split_idx == split_count) { - /* rightmost pipe is the remainder recout */ - data->recout.width -= data->h_active * split_count - data->recout.x; - - /* ODM combine cases with MPO we can get negative widths */ - if (data->recout.width < 0) - data->recout.width = 0; - - data->recout.x = 0; - } else - data->recout.width = data->h_active - data->recout.x; - } + temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width, + stream->src.width); + rec_out.x = stream->dst.x + dc_fixpt_round(temp); + + temp = dc_fixpt_from_fraction( + (rec_in->x + rec_in->width) * stream->dst.width, + stream->src.width); + rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x; + + temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height, + stream->src.height); + rec_out.y = stream->dst.y + dc_fixpt_round(temp); + + temp = dc_fixpt_from_fraction( + (rec_in->y + rec_in->height) * stream->dst.height, + stream->src.height); + rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y; + + return rec_out; +} + +static struct rect calculate_mpc_slice_in_timing_active( + struct pipe_ctx *pipe_ctx, + struct rect *plane_clip_rec) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + int mpc_slice_count = get_num_mpc_splits(pipe_ctx) + 1; + int mpc_slice_idx = get_mpc_split_index(pipe_ctx); + int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1; + struct rect mpc_rec; + + mpc_rec.width = plane_clip_rec->width / mpc_slice_count; + mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx; + mpc_rec.height = plane_clip_rec->height; + mpc_rec.y = plane_clip_rec->y; + ASSERT(mpc_slice_count == 1 || + stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || + mpc_rec.width % 2 == 0); + + /* extra pixels in the division remainder need to go to pipes after + * the extra pixel index minus one(epimo) defined here as: + */ + if (mpc_slice_idx > epimo) { + mpc_rec.x += mpc_slice_idx - epimo - 1; + mpc_rec.width += 1; } + + if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { + ASSERT(mpc_rec.height % 2 == 0); + mpc_rec.height /= 2; + } + return mpc_rec; +} + +static void adjust_recout_for_visual_confirm(struct rect *recout, + struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + int dpp_offset, base_offset; + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE) + return; + + dpp_offset = pipe_ctx->plane_res.dpp->inst * VISUAL_CONFIRM_DPP_OFFSET; + + if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) && + dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX) + base_offset = dc->debug.visual_confirm_rect_height; + else + base_offset = VISUAL_CONFIRM_BASE_DEFAULT; + + recout->height -= base_offset; + recout->height -= dpp_offset; +} + +/* + * The function maps a plane clip from Stream Source Space to ODM Slice Space + * and calculates the rec of the overlapping area of MPC slice of the plane + * clip, ODM slice associated with the pipe context and stream destination rec. + */ +static void calculate_recout(struct pipe_ctx *pipe_ctx) +{ + /* + * A plane clip represents the desired plane size and position in Stream + * Source Space. Stream Source is the destination where all planes are + * blended (i.e. 
positioned, scaled and overlaid). It is a canvas where
+	 * all planes associated with the current stream are drawn together.
+	 * After Stream Source is completed, we will further scale and
+	 * reposition the entire canvas of the stream source to Stream
+	 * Destination in Timing Active Space. This could be due to display
+	 * overscan adjustment where we will need to rescale and reposition all
+	 * the planes so they can fit into a TV with overscan, or to apply
+	 * downscale/upscale features such as GPU scaling or VSR.
+	 *
+	 * This two-step blending is a virtual procedure in software. In
+	 * hardware there is no such thing as Stream Source; all planes are
+	 * blended once in Timing Active Space. Software virtualizes a Stream
+	 * Source space to decouple the math complexity so scaling param
+	 * calculation focuses on one step at a time.
+	 *
+	 * In the following two diagrams, the user applied a 10% overscan adjustment
+	 * so the Stream Source needs to be scaled down a little before mapping
+	 * to Timing Active Space. As a result the Plane Clip is also scaled
+	 * down by the same ratio, and the Plane Clip position (i.e. x and y) with
+	 * respect to Stream Source is scaled down accordingly. To map it into Timing
+	 * Active Space, additional x and y offsets from Stream Destination are
+	 * added to the Plane Clip as well.
+	 *
+	 * Stream Source Space
+	 * ------------
+	 * __________________________________________________
+	 * |Stream Source (3840 x 2160) ^ |
+	 * | y |
+	 * | | |
+	 * | __________________V |
+	 * |<-- x -->|Plane Clip/////////| |
+	 * | |(pre scale)////////| |
+	 * | |///////////////////| |
+	 * | |///////////////////| |
+	 * | |///////////////////| |
+	 * | |///////////////////| |
+	 * | |///////////////////| |
+	 * | |
+	 * | |
+	 * |__________________________________________________|
+	 *
+	 *
+	 * Timing Active Space (3840 x 2160)
+	 * ---------------------------------
+	 *
+	 * Timing Active
+	 * __________________________________________________
+	 * | y_____________________________________________ |
+	 * |x |Stream Destination (3456 x 1944) | |
+	 * | | | |
+	 * | | __________________ | |
+	 * | | |Plane Clip////////| | |
+	 * | | |(post scale)//////| | |
+	 * | | |//////////////////| | |
+	 * | | |//////////////////| | |
+	 * | | |//////////////////| | |
+	 * | | |//////////////////| | |
+	 * | | | |
+	 * | | | |
+	 * | |____________________________________________| |
+	 * |__________________________________________________|
+	 *
+	 *
+	 * In Timing Active Space a plane clip could be further sliced into
+	 * pieces called MPC slices. Each Pipe Context is responsible for
+	 * processing only one MPC slice so the plane processing workload can be
+	 * distributed to multiple DPP Pipes. MPC slices could be blended
+	 * together into a single ODM slice. Each ODM slice is responsible for
+	 * processing a portion of Timing Active divided horizontally so the
+	 * output pixel processing workload can be distributed to multiple OPP
+	 * pipes. All ODM slices are mapped together in the ODM block, so MPC
+	 * slices that belong to different ODM slices can be pieced together to
+	 * form a single image in Timing Active. An MPC slice must belong to a
+	 * single ODM slice. If an MPC slice crosses an ODM slice boundary, it
+	 * needs to be divided into two MPC slices, one for each ODM slice.
+	 *
+	 * In the following diagram the output pixel processing workload is
+	 * divided horizontally into two ODM slices, one for each OPP blend tree.
+	 * The OPP0 blend tree is responsible for processing the left half of Timing
+	 * Active, while the OPP2 blend tree processes the right half.
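As illustrated below the diagram, the MPC remainder handling can also be sketched standalone. This is a hedged illustration, not the driver's API: the helper name and plain int types are invented, but the arithmetic mirrors the epimo rule used by calculate_mpc_slice_in_timing_active(), where every slice gets the floor width and each slice indexed above epimo absorbs one leftover pixel.

#include <assert.h>
#include <stdio.h>

/* width of MPC slice slice_idx out of slice_count for a plane clip of
 * clip_width pixels; slices after index epimo are one pixel wider so
 * the widths always sum back to clip_width (illustrative helper) */
static int mpc_slice_width(int clip_width, int slice_count, int slice_idx)
{
	int epimo = slice_count - clip_width % slice_count - 1;
	int width = clip_width / slice_count;

	if (slice_idx > epimo)
		width += 1;
	return width;
}

int main(void)
{
	/* 1003 pixels over 4 slices: epimo = 4 - 3 - 1 = 0, so slices
	 * 1..3 each take one extra pixel: 250 + 251 + 251 + 251 = 1003 */
	int total = 0;

	for (int i = 0; i < 4; i++) {
		int w = mpc_slice_width(1003, 4, i);

		printf("slice %d: %d px\n", i, w);
		total += w;
	}
	assert(total == 1003);
	return 0;
}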
+ * + * The plane has two MPC slices. However since the right MPC slice goes + * across ODM boundary, two DPP pipes are needed one for each OPP blend + * tree. (i.e. DPP1 for OPP0 blend tree and DPP2 for OPP2 blend tree). + * + * Assuming that we have a Pipe Context associated with OPP0 and DPP1 + * working on processing the plane in the diagram. We want to know the + * width and height of the shaded rectangle and its relative position + * with respect to the ODM slice0. This is called the recout of the pipe + * context. + * + * Planes can be at arbitrary size and position and there could be an + * arbitrary number of MPC and ODM slices. The algorithm needs to take + * all scenarios into account. + * + * Timing Active Space (3840 x 2160) + * --------------------------------- + * + * Timing Active + * __________________________________________________ + * |OPP0(ODM slice0)^ |OPP2(ODM slice1) | + * | y | | + * | | <- w -> | + * | _____V________|____ | + * | |DPP0 ^ |DPP1 |DPP2| | + * |<------ x |-----|->|/////| | | + * | | | |/////| | | + * | | h |/////| | | + * | | | |/////| | | + * | |_____V__|/////|____| | + * | | | + * | | | + * | | | + * |_________________________|________________________| + * + * + */ + struct rect plane_clip; + struct rect mpc_slice_of_plane_clip; + struct rect odm_slice; + struct rect overlapping_area; + + plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx, + &pipe_ctx->plane_state->clip_rect); + /* guard plane clip from drawing beyond stream dst here */ + plane_clip = intersect_rec(&plane_clip, + &pipe_ctx->stream->dst); + mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active( + pipe_ctx, &plane_clip); + odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); + overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice); + /* shift the overlapping area so it is with respect to current ODM + * slice's position + */ + pipe_ctx->plane_res.scl_data.recout = shift_rec( + &overlapping_area, + -odm_slice.x, -odm_slice.y); + + adjust_recout_for_visual_confirm(&pipe_ctx->plane_res.scl_data.recout, + pipe_ctx); } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) @@ -997,34 +1287,25 @@ static void calculate_init_and_vp( static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; - const struct dc_stream_state *stream = pipe_ctx->stream; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = plane_state->src_rect; + struct rect recout_dst_in_active_timing; + struct rect recout_clip_in_active_timing; + struct rect recout_clip_in_recout_dst; + struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; - int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; - calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); - /* - * recout full is what the recout would have been if we didnt clip - * the source plane at all. We only care about left(ro_lb) and top(ro_tb) - * offsets of recout within recout full because those are the directions - * we scan from and therefore the only ones that affect inits. 
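To see the resulting pipeline in isolation (map the plane clip into Timing Active, intersect it with the MPC slice and the ODM slice, then re-express the overlap relative to the ODM slice origin), here is a minimal standalone sketch. The types, helper names and numbers are invented for illustration and only mirror intersect_rec()/shift_rec() in spirit:

#include <stdio.h>
#include <string.h>

struct demo_rect { int x, y, width, height; };

/* overlap of two rects; empty rect when they do not intersect */
static struct demo_rect intersect(struct demo_rect a, struct demo_rect b)
{
	struct demo_rect r;
	int ax2 = a.x + a.width, bx2 = b.x + b.width;
	int ay2 = a.y + a.height, by2 = b.y + b.height;

	r.x = a.x > b.x ? a.x : b.x;
	r.y = a.y > b.y ? a.y : b.y;
	r.width = (ax2 < bx2 ? ax2 : bx2) - r.x;
	r.height = (ay2 < by2 ? ay2 : by2) - r.y;
	if (r.width < 0 || r.height < 0)
		memset(&r, 0, sizeof(r));
	return r;
}

static struct demo_rect shift(struct demo_rect r, int dx, int dy)
{
	r.x += dx;
	r.y += dy;
	return r;
}

int main(void)
{
	/* a plane clip already mapped into Timing Active, overlapping the
	 * right ODM slice of a 3840-wide timing split into two slices */
	struct demo_rect plane_clip = { 1800, 200, 400, 300 };
	struct demo_rect odm_slice = { 1920, 0, 1920, 2160 };
	struct demo_rect recout = intersect(plane_clip, odm_slice);

	/* recout is expressed relative to its ODM slice's origin */
	recout = shift(recout, -odm_slice.x, -odm_slice.y);
	printf("recout: x=%d y=%d w=%d h=%d\n",
	       recout.x, recout.y, recout.width, recout.height);
	return 0;
}

This prints x=0 y=200 w=280 h=300: only the 280 pixels of the clip that fall inside the right slice survive, positioned at that slice's left edge.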
- */ - recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x) - * stream->dst.width / stream->src.width; - recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y) - * stream->dst.height / stream->src.height; - if (pipe_ctx->prev_odm_pipe && split_idx) - ro_lb = data->h_active * split_idx - recout_full_x; - else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe) - ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x; - else - ro_lb = data->recout.x - recout_full_x; - ro_tb = data->recout.y - recout_full_y; - ASSERT(ro_lb >= 0 && ro_tb >= 0); - + recout_clip_in_active_timing = shift_rec( + &data->recout, odm_slice.x, odm_slice.y); + recout_dst_in_active_timing = calculate_plane_rec_in_timing_active( + pipe_ctx, &plane_state->dst_rect); + recout_clip_in_recout_dst = shift_rec(&recout_clip_in_active_timing, + -recout_dst_in_active_timing.x, + -recout_dst_in_active_timing.y); + ASSERT(recout_clip_in_recout_dst.x >= 0 && + recout_clip_in_recout_dst.y >= 0); /* * Work in recout rotation since that requires less transformations */ @@ -1042,7 +1323,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width, data->taps.h_taps, @@ -1052,7 +1333,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.width); calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width / vpc_div, data->taps.h_taps_c, @@ -1062,7 +1343,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport_c.width); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height, data->taps.v_taps, @@ -1072,7 +1353,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.height); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height / vpc_div, data->taps.v_taps_c, @@ -1097,6 +1378,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -1121,30 +1403,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dst.y += timing->v_border_top; /* Calculate H and V active size */ - pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + - timing->h_border_left + timing->h_border_right; - pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + - timing->v_border_top + timing->v_border_bottom; - if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) { - pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1; - - DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n", - __func__, - pipe_ctx->pipe_idx, - pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1, - pipe_ctx->prev_odm_pipe ? 
pipe_ctx->prev_odm_pipe->pipe_idx : -1); - } /* ODM + windows MPO, where window is on either right or left ODM half */ - else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) { - - pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1; - - DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n", - __func__, - pipe_ctx->pipe_idx, - pipe_ctx->top_pipe->pipe_idx, - pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1, - pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1); - } + pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; + pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; + /* depends on h_active */ calculate_recout(pipe_ctx); /* depends on pixel format */ @@ -1449,7 +1710,24 @@ static int acquire_first_split_pipe( return i; } else if (split_pipe->prev_odm_pipe && split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) { + + // Fix case where ODM slice has child planes + // Re-attach child planes + struct pipe_ctx *temp_head_pipe = resource_get_head_pipe_for_stream(res_ctx, split_pipe->stream); + + if (split_pipe->bottom_pipe && temp_head_pipe) { + + struct pipe_ctx *temp_tail_pipe = resource_get_tail_pipe(res_ctx, temp_head_pipe); + + if (temp_tail_pipe) { + + split_pipe->bottom_pipe->top_pipe = temp_tail_pipe; + temp_tail_pipe->bottom_pipe = split_pipe->bottom_pipe; + } + } + split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe; + if (split_pipe->next_odm_pipe) split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe; @@ -1457,6 +1735,11 @@ static int acquire_first_split_pipe( resource_build_scaling_params(split_pipe->prev_odm_pipe); memset(split_pipe, 0, sizeof(*split_pipe)); + + // We cannot split if head pipe is not odm + if (temp_head_pipe && !temp_head_pipe->next_odm_pipe && !temp_head_pipe->prev_odm_pipe) + return HEAD_NOT_IN_ODM; + split_pipe->stream_res.tg = pool->timing_generators[i]; split_pipe->plane_res.hubp = pool->hubps[i]; split_pipe->plane_res.ipp = pool->ipps[i]; @@ -1469,7 +1752,7 @@ static int acquire_first_split_pipe( return i; } } - return -1; + return UNABLE_TO_SPLIT; } bool dc_add_plane_to_context( @@ -1521,6 +1804,10 @@ bool dc_add_plane_to_context( int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); if (pipe_idx >= 0) free_pipe = &context->res_ctx.pipe_ctx[pipe_idx]; + else if (pipe_idx == HEAD_NOT_IN_ODM) + break; + else + ASSERT(false); } if (!free_pipe) { @@ -1677,12 +1964,14 @@ bool dc_add_plane_to_context( (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <= free_pipe->stream->src.x + free_pipe->stream->src.width/2))) { if (!free_pipe->next_odm_pipe && - tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { + tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe && + tail_pipe->next_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) { free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe; tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe; } if (!free_pipe->prev_odm_pipe && - tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) { + tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe && + tail_pipe->prev_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) { free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; 
tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
		}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 6e11d2b701f8..ea3d4b328e8e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -306,6 +306,32 @@ bool dc_optimize_timing_for_fsft(
}
#endif

+static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
+{
+	uint32_t refresh_rate;
+	struct dc *dc = stream->ctx->dc;
+
+	refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
+		stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
+	refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
+	refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
+
+	/* If there's any stream that fits the SubVP high refresh criteria,
+	 * we must return true. This is because cursor updates are asynchronous
+	 * with full updates, so we could transition into a SubVP config and
+	 * remain in HW cursor mode if there's no cursor update which will
+	 * then cause corruption.
+	 */
+	if ((refresh_rate >= 120 && refresh_rate <= 165 &&
+			stream->timing.v_addressable >= 1440 &&
+			stream->timing.v_addressable <= 2160) &&
+			(dc->current_state->stream_count > 1 ||
+			(dc->current_state->stream_count == 1 && !stream->allow_freesync)))
+		return true;
+
+	return false;
+}
+
/*
 * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
 */
@@ -334,12 +360,13 @@ bool dc_stream_set_cursor_attributes(

	/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
	 * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
-	 * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz
-	 * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz
-	 *
-	 * [< 120hz is a requirement for SubVP configs]
+	 * 1. If the config is a candidate for SubVP high refresh (both single and dual display configs)
+	 * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
+	 * 3.
If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz */ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) { + if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream)) + return false; if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 63948170fd6d..eadb53853131 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -40,12 +40,14 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" +struct abm_save_restore; + /* forward declaration */ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.241" +#define DC_VER "3.2.244" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -506,7 +508,7 @@ enum dcn_zstate_support_state { DCN_ZSTATE_SUPPORT_DISALLOW, }; -/** +/* * struct dc_clocks - DC pipe clocks * * For any clocks that may differ per pipe only the max is stored in this @@ -728,7 +730,7 @@ struct resource_pool; struct dce_hwseq; struct link_service; -/** +/* * struct dc_debug_options - DC debug struct * * This struct provides a simple mechanism for developers to change some @@ -756,7 +758,7 @@ struct dc_debug_options { bool use_max_lb; enum dcc_option disable_dcc; - /** + /* * @pipe_split_policy: Define which pipe split policy is used by the * display core. */ @@ -861,6 +863,7 @@ struct dc_debug_options { bool psr_skip_crtc_disable; union dpia_debug_options dpia_debug; bool disable_fixed_vs_aux_timeout_wa; + uint32_t fixed_vs_aux_delay_config_wa; bool force_disable_subvp; bool force_subvp_mclk_switch; bool allow_sw_cursor_fallback; @@ -1334,7 +1337,7 @@ struct dc_validation_set { struct dc_stream_state *stream; /** - * @plane_state: Surface state + * @plane_states: Surface state */ struct dc_plane_state *plane_states[MAX_SURFACES]; @@ -1409,10 +1412,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); +void dc_set_disable_128b_132b_stream_overhead(bool disable); + /* The function returns minimum bandwidth required to drive a given timing * return - minimum required timing bandwidth in kbps. */ -uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing); +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding); /* Link Interfaces */ /* @@ -1514,6 +1521,7 @@ struct dc_link { enum edp_revision edp_revision; union dpcd_sink_ext_caps dpcd_sink_ext_caps; + struct backlight_settings backlight_settings; struct psr_settings psr_settings; /* Drive settings read from integrated info table */ @@ -1849,6 +1857,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format( */ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link); +/* Get the highest encoding format that the link supports; highest meaning the + * encoding format which supports the maximum bandwidth. + * + * @link - a link with DP RX connection + * return - highest encoding format link supports. + */ +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link); + /* Check if a RX (ex. 
DP sink, MST hub, passive or active dongle) is connected * to a link with dp connector signal type. * @link - a link with dp connector signal type @@ -2230,6 +2246,11 @@ void dc_z10_save_init(struct dc *dc); bool dc_is_dmub_outbox_supported(struct dc *dc); bool dc_enable_dmub_notifications(struct dc *dc); +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData); + void dc_enable_dmub_outbox(struct dc *dc); bool dc_process_dmub_aux_transfer_async(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index c753c6f30dd7..24433409d7de 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -381,6 +381,9 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) { union dmub_rb_cmd cmd = { 0 }; + if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) + return; + memset(&cmd, 0, sizeof(cmd)); /* Prepare fw command */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 9491b76d61f5..fe3078b8789e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -73,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( @@ -81,6 +82,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 0ce7728a5a4b..14d7804b70b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -189,7 +189,6 @@ struct dc_panel_patch { unsigned int disable_fams; unsigned int skip_avmute; unsigned int mst_start_top_delay; - unsigned int delay_disable_aux_intercept_ms; }; struct dc_edid_caps { @@ -879,7 +878,7 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_0_mps; /* In MPs */ uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; - bool is_dp; + bool is_dp; /* Decoded format */ }; struct dc_golden_table { @@ -902,6 +901,14 @@ enum dc_gpu_mem_alloc_type { DC_MEM_ALLOC_TYPE_AGP }; +enum dc_link_encoding_format { + DC_LINK_ENCODING_UNSPECIFIED = 0, + DC_LINK_ENCODING_DP_8b_10b, + DC_LINK_ENCODING_DP_128b_132b, + DC_LINK_ENCODING_HDMI_TMDS, + DC_LINK_ENCODING_HDMI_FRL +}; + enum dc_psr_version { DC_PSR_VERSION_1 = 0, DC_PSR_VERSION_SU_1 = 1, @@ -995,6 +1002,10 @@ struct link_mst_stream_allocation_table { struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM]; }; +struct backlight_settings { + uint32_t backlight_millinits; +}; + /* PSR feature flags */ struct psr_settings { bool psr_feature_enabled; // PSR is supported by sink diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 63009db8b5a7..b87bfecb7755 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu) } static bool dce_dmcu_load_iram(struct dmcu *dmcu, - unsigned 
int start_offset, - const char *src, - unsigned int bytes) + unsigned int start_offset, + const char *src, + unsigned int bytes) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int count = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 6d1b01c267b7..4f552c3e7663 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( return dce_i2c_hw; } -static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t timeout, - enum i2c_channel_operation_result expected_result) +static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw, + uint32_t timeout, + enum i2c_channel_operation_result expected_result) { enum i2c_channel_operation_result result; uint32_t i = 0; @@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw( return period_timeout * num_of_clock_stretches; } -static bool dce_i2c_hw_engine_submit_payload( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_payload *payload, - bool middle_of_transaction, - uint32_t speed) +static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw, + struct i2c_payload *payload, + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index f1aeb6d1967c..e188447c8156 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine( return true; } + bool dce_i2c_engine_acquire_sw( struct dce_i2c_sw *dce_i2c_sw, struct ddc *ddc_handle) @@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw( return result; } - - - -static void dce_i2c_sw_engine_submit_channel_request( - struct dce_i2c_sw *engine, - struct i2c_request_transaction_data *req) +static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine, + struct i2c_request_transaction_data *req) { struct ddc *ddc = engine->ddc; uint16_t clock_delay_div_4 = engine->clock_delay >> 2; @@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request( I2C_CHANNEL_OPERATION_FAILED; } -static bool dce_i2c_sw_engine_submit_payload( - struct dce_i2c_sw *engine, - struct i2c_payload *payload, - bool middle_of_transaction) +static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine, + struct i2c_payload *payload, + bool middle_of_transaction) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 2fb9572ce25d..d3e6544022b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -27,6 +27,7 @@ #include "dmub_abm_lcd.h" #include "dc.h" #include "core_types.h" +#include "dmub_cmd.h" #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) @@ -118,6 +119,32 @@ static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int pane return ret; } +/***************************************************************************** + * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore for preserving DMUB's + * Varibright states for LCD only. 
OLED is TBD
+ * @abm: used to get the dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and abm parameters
+ *
+ *
+ ***************************************************************************/
+static bool dmub_abm_save_restore_ex(
+		struct abm *abm,
+		unsigned int panel_inst,
+		struct abm_save_restore *pData)
+{
+	bool ret = false;
+	unsigned int feature_support;
+	struct dc_context *dc = abm->ctx;
+
+	feature_support = abm_feature_support(abm, panel_inst);
+
+	if (feature_support == ABM_LCD_SUPPORT)
+		ret = dmub_abm_save_restore(dc, panel_inst, pData);
+
+	return ret;
+}
+
static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
{
	bool ret = false;
@@ -155,6 +182,7 @@ static const struct abm_funcs abm_funcs = {
	.get_target_backlight = dmub_abm_get_target_backlight_ex,
	.init_abm_config = dmub_abm_init_config_ex,
	.set_abm_pause = dmub_abm_set_pause_ex,
+	.save_restore = dmub_abm_save_restore_ex,
	.set_pipe_ex = dmub_abm_set_pipe_ex,
	.set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 39da73eba86e..592a8f7a1c6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -208,6 +208,52 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
	return true;
}
+
+/*****************************************************************************
+ * dmub_abm_save_restore() - dmub interface for abm save+pause and restore+
+ * un-pause
+ * @dc: dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and exchange abm parameters
+ *
+ * When called, pause will get abm data and store it in pData, and un-pause will
+ * set/apply the abm data stored in pData.
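The copy-in, execute, copy-out sequence this comment describes can be mocked standalone. Everything below is a hypothetical stand-in, not the DMUB ABI: the struct layout, the scratch buffer and the stub replacing dm_execute_dmub_cmd() are all invented, purely to show the shape of the round trip.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* invented layout; the real abm_save_restore lives in the DMUB headers */
struct mock_abm_save_restore {
	uint32_t version;
	uint32_t pause;		/* 1 = save+pause, 0 = restore+un-pause */
	uint32_t backlight_pwm;
};

/* stands in for the driver's dmub scratch_mem_fb region */
static struct mock_abm_save_restore scratch_mem;

/* pretend firmware: on pause it writes its current state back */
static void mock_dmub_execute(void)
{
	if (scratch_mem.pause)
		scratch_mem.backlight_pwm = 0xCAFE;
}

/* mirrors the flow of dmub_abm_save_restore(): stage caller data in
 * shared scratch memory, kick the command and wait, then read the
 * (possibly firmware-updated) data back out */
static void mock_abm_save_restore(struct mock_abm_save_restore *data)
{
	memcpy(&scratch_mem, data, sizeof(*data));
	mock_dmub_execute();	/* dm_execute_dmub_cmd(..., WAIT) in the driver */
	memcpy(data, &scratch_mem, sizeof(*data));
}

int main(void)
{
	struct mock_abm_save_restore s = { .version = 1, .pause = 1 };

	mock_abm_save_restore(&s);
	printf("saved ABM state: 0x%X\n", (unsigned int)s.backlight_pwm);
	return 0;
}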
+ * + *****************************************************************************/ +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData) +{ + union dmub_rb_cmd cmd; + uint8_t panel_mask = 0x01 << panel_inst; + unsigned int bytes = sizeof(struct abm_save_restore); + + // TODO: Optimize by only reading back final 4 bytes + dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + + // Copy iramtable into cw7 + memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes); + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_save_restore.header.type = DMUB_CMD__ABM; + cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE; + + cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.abm_save_restore.abm_init_config_data.bytes = bytes; + cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask; + + cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + // Copy iramtable data into local structure + memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) { union dmub_rb_cmd cmd; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h index 00b4e268768e..853564d7f471 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -28,6 +28,8 @@ #include "abm.h" +struct abm_save_restore; + void dmub_abm_init(struct abm *abm, uint32_t backlight); bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); unsigned int dmub_abm_get_current_backlight(struct abm *abm); @@ -38,6 +40,10 @@ void dmub_abm_init_config(struct abm *abm, unsigned int inst); bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst); +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData); bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst); bool dmub_abm_set_backlight_level(struct abm *abm, unsigned int backlight_pwm_u16_16, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 6c9ca43d1040..20d4d08a6a2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1792,10 +1792,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + power_down_all_hw_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); } bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 3935fd455f0f..061221394ce0 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -58,13 +58,13 @@ #include "dce/dce_i2c.h" /* TODO remove this include */ -#include "dce80_resource.h" - #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" #include "gmc/gmc_7_1_sh_mask.h" #endif +#include "dce80/dce80_resource.h" + #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index b33955928bd0..7e140c35a0ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,9 +39,6 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 -#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 -#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 -#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 #define REG(reg)\ dpp->tf_regs->reg @@ -591,18 +588,6 @@ static void dpp1_dscl_set_manual_ratio_init( static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { - int visual_confirm_on = 0; - unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; - - if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) - visual_confirm_on = 1; - - /* Check bounds to ensure the VC bar height was set to a sane value */ - if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && - (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { - visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; - } - REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -613,8 +598,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, /* Number of RECOUT horizontal pixels */ RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); + RECOUT_HEIGHT, recout->height); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a50309039d08..9834b75f1837 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect( if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); - if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; hubp->funcs->set_blank(hubp, true); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index ee08b545aaea..377f1ba1a81b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; - if (!dcn10_is_dig_enabled(enc)) { + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) { /* 
OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ /*in DP_Alt_No_Connect case, we turn off the dig already, after excuation the PHY w/a sequence, not allow touch PHY any more*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4492bc2392b6..e32d3246e82a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1054,9 +1054,9 @@ void dcn20_blank_pixel_data( enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; struct pipe_ctx *odm_pipe; int odm_cnt = 1; - - int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; - int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; if (stream->link->test_pattern_enabled) return; @@ -1066,8 +1066,8 @@ void dcn20_blank_pixel_data( for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) odm_cnt++; - - width = width / odm_cnt; + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); if (blank) { dc->hwss.set_abm_immediate_disable(pipe_ctx); @@ -1080,29 +1080,32 @@ void dcn20_blank_pixel_data( test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; } - dc->hwss.set_disp_pattern_generator(dc, - pipe_ctx, - test_pattern, - test_pattern_color_space, - stream->timing.display_color_depth, - &black_color, - width, - height, - 0); + odm_pipe = pipe_ctx; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + while (odm_pipe->next_odm_pipe) { dc->hwss.set_disp_pattern_generator(dc, - odm_pipe, - dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? 
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, + pipe_ctx, + test_pattern, test_pattern_color_space, stream->timing.display_color_depth, &black_color, - width, - height, - 0); + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + dc->hwss.set_disp_pattern_generator(dc, + odm_pipe, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + &black_color, + last_odm_slice_width, + v_active, + offset); + if (!blank && dc->debug.enable_single_display_2to1_odm_policy) { /* when exiting dynamic ODM need to reinit DPG state for unused pipes */ struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe; @@ -2123,6 +2126,15 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + dc_dmub_srv_p_state_delegate(dc, + true, context); + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + dc->clk_mgr->clks.fw_based_mclk_switching = true; + } else { + dc->clk_mgr->clks.fw_based_mclk_switching = false; + } + dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c index 33fc9aa8621b..d07c04458d31 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c @@ -43,7 +43,7 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h index e44a37491c1e..b7efa777ec73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h @@ -32,6 +32,5 @@ struct dccg *dccg21_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); #endif /* __DCN21_DCCG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index d693ea42d033..82dfcf773b1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -854,8 +854,8 @@ bool dcn21_fast_validate_bw(struct dc *dc, /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index bf8864bc8a99..4cd4ae07d73d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -949,13 +949,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { + bool 
p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + /* Any transition into an FPO config should disable MCLK switching first to avoid + * driver and FW P-State synchronization issues. + */ + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + dc->optimized_required = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + } + if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); + /* + * enabled -> enabled: do not disable + * enabled -> disabled: disable + * disabled -> enabled: don't care + * disabled -> disabled: don't care + */ + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* After disabling P-State, restore the original value to ensure we get the correct P-State + * on the next optimize. */ + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index dfb8f62765f2..5bf4d0aa6230 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc, optc1->opp_count = 1; } -static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode); } -static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h index fb06dc9a4893..d3a056c12b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h @@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable); void optc3_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing); +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); void optc3_tg_init(struct timing_generator *optc); void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index abe4c12a10b5..f5bfcd2a0dbc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -1705,8 +1705,8 @@ 
noinline bool dcn30_internal_validate_bw( /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 7aa628c21973..9002cb10a6ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -11,7 +11,8 @@ # Makefile for dcn30. DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ - dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o + dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \ + dcn301_optc.o AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c new file mode 100644 index 000000000000..b3cfcb887905 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c @@ -0,0 +1,185 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn301_optc.h" +#include "dc.h" +#include "dcn_calc_math.h" +#include "dc_dmub_srv.h" + +#include "dml/dcn30/dcn30_fpu.h" +#include "dc_trace.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + + +/** + * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. + * + * @optc: timing_generator instance. + * @params: parameters used for Dynamic Refresh Rate. 
+ */ +void optc301_set_drr( + struct timing_generator *optc, + const struct drr_params *params) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + if (params != NULL && + params->vertical_total_max > 0 && + params->vertical_total_min > 0) { + + if (params->vertical_total_mid != 0) { + + REG_SET(OTG_V_TOTAL_MID, 0, + OTG_V_TOTAL_MID, params->vertical_total_mid - 1); + + REG_UPDATE_2(OTG_V_TOTAL_CONTROL, + OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, + OTG_VTOTAL_MID_FRAME_NUM, + (uint8_t)params->vertical_total_mid_frame_num); + + } + + optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); + + REG_UPDATE_5(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK_EN, 0, + OTG_SET_V_TOTAL_MIN_MASK, 0); + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); + + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } +} + + +void optc301_setup_manual_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET_8(OTG_TRIGA_CNTL, 0, + OTG_TRIGA_SOURCE_SELECT, 21, + OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, + OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, + OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, + OTG_TRIGA_POLARITY_SELECT, 0, + OTG_TRIGA_FREQUENCY_SELECT, 0, + OTG_TRIGA_DELAY, 0, + OTG_TRIGA_CLEAR, 1); +} + +static struct timing_generator_funcs dcn30_tg_funcs = { + .validate_timing = optc1_validate_timing, + .program_timing = optc1_program_timing, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, + .program_global_sync = optc1_program_global_sync, + .enable_crtc = optc2_enable_crtc, + .disable_crtc = optc1_disable_crtc, + /* used by enable_timing_synchronization. Not needed for FPGA */ + .is_counter_moving = optc1_is_counter_moving, + .get_position = optc1_get_position, + .get_frame_count = optc1_get_vblank_counter, + .get_scanoutpos = optc1_get_crtc_scanoutpos, + .get_otg_active_size = optc1_get_otg_active_size, + .set_early_control = optc1_set_early_control, + /* used by enable_timing_synchronization.
Not needed for FPGA */ + .wait_for_state = optc1_wait_for_state, + .set_blank_color = optc3_program_blank_color, + .did_triggered_reset_occur = optc1_did_triggered_reset_occur, + .triplebuffer_lock = optc3_triplebuffer_lock, + .triplebuffer_unlock = optc2_triplebuffer_unlock, + .enable_reset_trigger = optc1_enable_reset_trigger, + .enable_crtc_reset = optc1_enable_crtc_reset, + .disable_reset_trigger = optc1_disable_reset_trigger, + .lock = optc3_lock, + .unlock = optc1_unlock, + .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, + .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, + .enable_optc_clock = optc1_enable_optc_clock, + .set_drr = optc301_set_drr, + .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, + .set_vtotal_min_max = optc3_set_vtotal_min_max, + .set_static_screen_control = optc1_set_static_screen_control, + .program_stereo = optc1_program_stereo, + .is_stereo_left_eye = optc1_is_stereo_left_eye, + .tg_init = optc3_tg_init, + .is_tg_enabled = optc1_is_tg_enabled, + .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, + .clear_optc_underflow = optc1_clear_optc_underflow, + .setup_global_swap_lock = NULL, + .get_crc = optc1_get_crc, + .configure_crc = optc2_configure_crc, + .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, + .set_dwb_source = NULL, + .set_odm_bypass = optc3_set_odm_bypass, + .set_odm_combine = optc3_set_odm_combine, + .get_optc_source = optc2_get_optc_source, + .set_out_mux = optc3_set_out_mux, + .set_drr_trigger_window = optc3_set_drr_trigger_window, + .set_vtotal_change_limit = optc3_set_vtotal_change_limit, + .set_gsl = optc2_set_gsl, + .set_gsl_source_select = optc2_set_gsl_source_select, + .set_vtg_params = optc1_set_vtg_params, + .program_manual_trigger = optc2_program_manual_trigger, + .setup_manual_trigger = optc301_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, + .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, +}; + +void dcn301_timing_generator_init(struct optc *optc1) +{ + optc1->base.funcs = &dcn30_tg_funcs; + + optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; + optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + + optc1->min_h_blank = 32; + optc1->min_v_blank = 3; + optc1->min_v_blank_interlace = 5; + optc1->min_h_sync_width = 4; + optc1->min_v_sync_width = 1; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h new file mode 100644 index 000000000000..b49585682a15 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN301_H__ +#define __DC_OPTC_DCN301_H__ + +#include "dcn20/dcn20_optc.h" +#include "dcn30/dcn30_optc.h" + +void dcn301_timing_generator_init(struct optc *optc1); +void optc301_setup_manual_trigger(struct timing_generator *optc); +void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params); + +#endif /* __DC_OPTC_DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 3485fbb1093e..f856a4773c27 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -42,7 +42,7 @@ #include "dcn30/dcn30_hubp.h" #include "irq/dcn30/irq_service_dcn30.h" #include "dcn30/dcn30_dpp.h" -#include "dcn30/dcn30_optc.h" +#include "dcn301/dcn301_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_hwseq.h" #include "dce110/dce110_hw_sequencer.h" @@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create( tgn10->tg_shift = &optc_shift; tgn10->tg_mask = &optc_mask; - dcn30_timing_generator_init(tgn10); + dcn301_timing_generator_init(tgn10); return &tgn10->base; } @@ -1425,9 +1425,9 @@ static bool dcn301_resource_construct( dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 45956ef6f3f9..131b8b82afc0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 65c1d754e2d6..8664f0c4c9b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk( struct dcn_dccg *dccg_dcn, enum phyd32clk_clock_source src) { - if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && + dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { if (src == PHYD32CLKC) src = PHYD32CLKF; if (src == PHYD32CLKD) @@ -284,19 +285,11 @@ void dccg31_enable_symclk32_le( /* select one of the PHYD32CLKs as the source for symclk32_le */ switch (hpo_le_inst) { case 0: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, 
- SYMCLK32_LE0_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, SYMCLK32_LE0_EN, 1); break; case 1: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, SYMCLK32_LE1_EN, 1); @@ -319,19 +312,38 @@ void dccg31_disable_symclk32_le( REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE0_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + return; + + switch (hpo_le_inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0); break; default: BREAK_TO_DEBUGGER(); @@ -660,10 +672,8 @@ void dccg31_init(struct dccg *dccg) dccg31_disable_symclk32_se(dccg, 2); dccg31_disable_symclk32_se(dccg, 3); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) { - dccg31_disable_symclk32_le(dccg, 0); - dccg31_disable_symclk32_le(dccg, 1); - } + dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false); + dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { dccg31_disable_dpstreamclk(dccg, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 0902ce5eb8a1..e3caaacf7493 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst); +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index bd62502380d8..4596f3bac1b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -558,7 +558,7 @@ void dcn31_link_encoder_disable_output( struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; struct dc_link *link; - if (!dcn10_is_dig_enabled(enc)) + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) return; link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 0278bae50a9d..45143459eedd 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank( VID_STREAM_STATUS, 0, 10, 5000); - /* Disable SDP tranmission */ + /* Disable SDP transmission */ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index 0746ed31d1d1..ad3f019a784f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -362,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = { .disable_symclk32_se = dccg31_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, + .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, .set_physymclk = dccg31_set_physymclk, .set_dtbclk_dto = dccg314_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 6a9024aa3285..9b8e0f6f32b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -908,15 +908,15 @@ static const struct dc_debug_options debug_defaults_drv = { .root_clock_optimization = { .bits = { .dpp = true, - .dsc = false, - .hdmistream = false, - .hdmichar = false, - .dpstream = false, - .symclk32_se = false, - .symclk32_le = false, - .symclk_fe = false, - .physymclk = false, - .dpiasymclk = false, + .dsc = true, + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = true, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, } }, diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index df3a438abda8..2e3fa0fb8bd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1659,7 +1659,7 @@ static int dcn315_populate_dml_pipes_from_context( { int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index 11e28e056cf7..61ceff6bc0b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -49,7 +49,10 @@ static void dccg32_trigger_dio_fifo_resync( uint32_t dispclk_rdivider_value = 0; REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); - REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); + + /* Not valid for the WDIVIDER to be set to 0 */ + if (dispclk_rdivider_value != 0) + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } static void dccg32_get_pixel_rate_div( diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index d52d5feeb311..a87afb796f47 100644 
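/* Editorial aside (annotation, not part of the patch): the dcn31_dio_link_encoder.c
 * hunk above replaces a direct dcn10_is_dig_enabled() call with a guarded call
 * through the encoder's hook table; a minimal sketch of the idiom, using names
 * from that hunk:
 *
 *	if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
 *		return;	// hook implemented and DIG already off: nothing to do
 *
 * The same NULL-check-before-call pattern guards the new
 * set_symclk32_le_root_clock_gating dccg hook elsewhere in this series. */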
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -47,11 +47,9 @@ #include "clk_mgr.h" #include "dsc.h" #include "dcn20/dcn20_optc.h" -#include "dmub_subvp_state.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32_resource.h" #include "link.h" -#include "dmub/inc/dmub_subvp_state.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 1cc09799f92d..0d1f18f8348e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1892,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; bool vsr_odm_support = false; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 5be242a1b82c..db9c55a09d9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -641,6 +641,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) uint8_t non_subvp_pipes = 0; bool drr_pipe_found = false; bool drr_psr_capable = false; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -649,8 +650,14 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) continue; if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + } if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { non_subvp_pipes++; drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe)); @@ -662,7 +669,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) } } - if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable) + if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && + ((uint32_t)refresh_rate < 120)) result = true; return result; @@ -693,6 +701,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int bool drr_pipe_found = false; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vblank_psr_capable = false; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -701,8 +710,14 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int continue; if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, 
pipe->stream->timing.h_total); + } if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { non_subvp_pipes++; vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); @@ -715,7 +730,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int } if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable && - vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) + ((uint32_t)refresh_rate < 120) && + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) result = true; return result; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index f294f2f8c75b..57cf0358cc43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevels; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double HostVMInefficiencyFactor; double VRatioClamped; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 43016c462251..eba51144fee7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3505,7 +3505,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index d9e049e7ff0a..07adb614366e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -31,6 +31,7 @@ #include "dml/dcn20/dcn20_fpu.h" #include "dml/dcn31/dcn31_fpu.h" #include "dml/display_mode_vba.h" +#include "dml/dml_inline_defs.h" struct _vcs_dpi_ip_params_st dcn3_14_ip = { .VBlankNomDefaultUS = 668, @@ -273,6 +274,25 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } +/* + * micro_sec_to_vert_lines () - converts time to number of vertical lines for a given timing + * + * @param: num_us: number of microseconds + * @return: number of vertical lines. 
If the exact number of vertical lines is not found, then + * it will round up to the next number of lines to guarantee num_us + */ +static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing) +{ + unsigned int num_lines = 0; + unsigned int lines_time_in_ns = 1000.0 * + (((float)timing->h_total * 1000.0) / + ((float)timing->pix_clk_100hz / 10.0)); + + num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0); + + return num_lines; +} + int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) @@ -289,15 +309,22 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; + unsigned int num_lines = 0; if (!res_ctx->pipe_ctx[i].stream) continue; pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; - pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; + num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing); + + if (pipe->stream->adjust.v_total_min != 0) + pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; + else + pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; + pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; - pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS); + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines); pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width); pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 9010c47476e9..32251af76935 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a95034801712..0f882b879b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1040,7 +1040,7 @@ static bool subvp_subvp_admissable(struct dc *dc, uint32_t i; uint8_t subvp_count = 0; uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0; - uint32_t refresh_rate = 0; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1050,19 +1050,21 @@ static bool subvp_subvp_admissable(struct dc *dc, if (pipe->plane_state && !pipe->top_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) - / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); - if (refresh_rate < min_refresh) - min_refresh = refresh_rate; - if (refresh_rate
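/* Editorial aside on micro_sec_to_vert_lines() above (worked example with
 * illustrative numbers, not from the patch): for a CEA 1080p timing with
 * h_total = 2200 and pix_clk_100hz = 1485000 (148.5 MHz), one line takes
 * 1000 * (2200 * 1000 / 148500) ~= 14815 ns, so VBlankNomDefaultUS = 668 us
 * becomes dml_ceil(668000 / 14815) = 46 lines; rounding up is what guarantees
 * at least num_us of vblank time. */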
> max_refresh) - max_refresh = refresh_rate; + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + + if ((uint32_t)refresh_rate < min_refresh) + min_refresh = (uint32_t)refresh_rate; + if ((uint32_t)refresh_rate > max_refresh) + max_refresh = (uint32_t)refresh_rate; subvp_count++; } } if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) || - (min_refresh >= 120 && max_refresh >= 120))) + (min_refresh >= 120 && max_refresh <= 165))) result = true; return result; @@ -1715,8 +1717,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && !dc->config.enable_windowed_mpo_odm && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index a50e7f4dce42..ecea008f19d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -3459,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule( double TimeForFetchingMetaPTE = 0; double TimeForFetchingRowInVBlank = 0; double LinesToRequestPrefetchPixelData = 0; + double LinesForPrefetchBandwidth = 0; unsigned int HostVMDynamicLevelsTrips; double trip_to_mem; double Tvm_trips; @@ -3888,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule( TimeForFetchingMetaPTE = Tvm_oto; TimeForFetchingRowInVBlank = Tr0_oto; *PrefetchBandwidth = prefetch_bw_oto; + /* Clamp to oto for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_oto; } else { *DestinationLinesForPrefetch = dst_y_prefetch_equ; TimeForFetchingMetaPTE = Tvm_equ; TimeForFetchingRowInVBlank = Tr0_equ; *PrefetchBandwidth = prefetch_bw_equ; + /* Clamp to equ for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_equ; } *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0; @@ -3900,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule( *DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0; - LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - + LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank; #ifdef __DML_VBA_DEBUG__ @@ -4124,7 +4129,7 @@ void dml32_CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; if (GPUVMEnable == true && HostVMEnable == true) HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 58dd62cce4bb..3966845c7694 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed; static 
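/* Editorial aside on the sub-viewport refresh-rate hunks above (illustrative
 * arithmetic, not from the patch): the pair of div_u64() calls implements a
 * 64-bit ceiling division, ceil(pix_clk_100hz * 100 / (v_total * h_total)).
 * For pix_clk_100hz = 5332500 (533.25 MHz) with h_total = 4000 and
 * v_total = 2222, the quotient is 533250000 / 8888000 ~= 59.997, and the
 * "+ v_total * h_total - 1" bias rounds it up to 60 Hz before the < 120 Hz
 * admissibility checks are applied. */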
bool dsc_policy_disable_dsc_stream_overhead; +static bool disable_128b_132b_stream_overhead; + #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) #endif @@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead; #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #endif +/* Need to account for padding due to pixel-to-symbol packing + * for uncompressed 128b/132b streams. + */ +static uint32_t apply_128b_132b_stream_overhead( + const struct dc_crtc_timing *timing, const uint32_t kbps) +{ + uint32_t total_kbps = kbps; + + if (disable_128b_132b_stream_overhead) + return kbps; + + if (!timing->flags.DSC) { + struct fixed31_32 bpp; + struct fixed31_32 overhead_factor; + + bpp = dc_fixpt_from_int(kbps); + bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10); + + /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size) + * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive + */ + overhead_factor = dc_fixpt_from_int(timing->h_addressable); + overhead_factor = dc_fixpt_mul(overhead_factor, bpp); + overhead_factor = dc_fixpt_div_int(overhead_factor, 128); + overhead_factor = dc_fixpt_div( + dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)), + overhead_factor); + + total_kbps = dc_fixpt_ceil( + dc_fixpt_mul_int(overhead_factor, total_kbps)); + } + + return total_kbps; +} + uint32_t dc_bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing) + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding) { uint32_t bits_per_channel = 0; uint32_t kbps; @@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing( kbps = kbps * 2 / 3; } + if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) + kbps = apply_128b_132b_stream_overhead(timing, kbps); + return kbps; } @@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); static uint32_t compute_bpp_x16_from_target_bandwidth( @@ -133,6 +175,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) @@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; @@ -417,11 +461,11 @@ bool dc_dsc_compute_bandwidth_range( if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, &config); + &options, link_encoding, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, - config.num_slices_h, &dsc_common_caps, timing, range); + config.num_slices_h, &dsc_common_caps, timing, link_encoding, range); return is_dsc_possible; } @@ -557,6 +601,7 @@ static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; @@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range( /* populate output structure */ if 
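/* Editorial aside on apply_128b_132b_stream_overhead() above (illustrative
 * numbers, not from the patch): bpp here is kbps divided by the pixel clock
 * in kHz. With h_addressable = 1366 and bpp = 24, Symbols_per_HActive =
 * 1366 * 24 / 128 = 256.125 (4 lanes x 32-bit symbols), which pads up to 257,
 * so the uncompressed stream rate is scaled by 257 / 256.125, roughly a
 * 0.34% overhead. */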
(range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding); /* max dsc target bpp */ range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, @@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16( const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const int num_slices_h, + const enum dc_link_encoding_format link_encoding, int *target_bpp_x16) { struct dc_dsc_bw_range range; @@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16( *target_bpp_x16 = 0; if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, - num_slices_h, dsc_common_caps, timing, &range)) { + num_slices_h, dsc_common_caps, timing, link_encoding, &range)) { if (target_bandwidth_kbps >= range.stream_kbps) { if (policy->enable_dsc_when_not_needed) /* enable max bpp even dsc is not needed */ @@ -796,6 +842,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -995,6 +1042,7 @@ static bool setup_dsc_config( target_bandwidth_kbps, timing, num_slices_h, + link_encoding, &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } @@ -1023,6 +1071,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { bool is_dsc_possible = false; @@ -1032,7 +1081,7 @@ bool dc_dsc_compute_config( is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, dsc_cfg); + timing, options, link_encoding, dsc_cfg); return is_dsc_possible; } @@ -1165,6 +1214,11 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable) dsc_policy_disable_dsc_stream_overhead = disable; } +void dc_set_disable_128b_132b_stream_overhead(bool disable) +{ + disable_128b_132b_stream_overhead = disable; +} + void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options) { options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index d2190a3320f6..33db15d69f23 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -27,6 +27,8 @@ #include "dm_services_types.h" +struct abm_save_restore; + struct abm { struct dc_context *ctx; const struct abm_funcs *funcs; @@ -55,6 +57,10 @@ struct abm_funcs { unsigned int bytes, unsigned int inst); bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst); + bool (*save_restore)( + struct abm *abm, + unsigned int panel_inst, + struct abm_save_restore *pData); bool (*set_pipe_ex)(struct abm *abm, unsigned int otg_inst, unsigned int option, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index 7254182b7c72..af6b9509d09d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -172,8 +172,6 @@ struct aux_engine_funcs { struct aux_engine *engine, uint8_t *returned_bytes); 
bool (*is_engine_available)(struct aux_engine *engine); - enum i2caux_engine_type (*get_engine_type)( - const struct aux_engine *engine); bool (*acquire)( struct aux_engine *engine, struct ddc *ddc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 8dc804bbe98b..93592281de32 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -123,6 +123,11 @@ struct dccg_funcs { struct dccg *dccg, int hpo_le_inst); + void (*set_symclk32_le_root_clock_gating)( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void (*set_physymclk)( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c index c923b2af8510..37bc98faa7a0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c @@ -38,10 +38,9 @@ #define DCN_BASE__INST0_SEG2 0x000034C0 -static enum dc_irq_source to_dal_irq_source_dcn314( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index db9f1baa27e5..bce0428ad612 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link, stream->timing.display_color_depth; struct bit_depth_reduction_params params; struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; - int width = pipe_ctx->stream->timing.h_addressable + + struct pipe_ctx *odm_pipe; + int odm_cnt = 1; + int h_active = pipe_ctx->stream->timing.h_addressable + pipe_ctx->stream->timing.h_border_left + pipe_ctx->stream->timing.h_border_right; - int height = pipe_ctx->stream->timing.v_addressable + + int v_active = pipe_ctx->stream->timing.v_addressable + pipe_ctx->stream->timing.v_border_bottom + pipe_ctx->stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; memset(¶ms, 0, sizeof(params)); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_cnt++; + + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); + switch (test_pattern) { case DP_TEST_PATTERN_COLOR_SQUARES: controller_test_pattern = @@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link, { /* disable bit depth reduction */ pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, controller_test_pattern, color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; + } else if (link->dc->hwss.set_disp_pattern_generator) { enum controller_dp_color_space controller_color_space; - int opp_cnt = 1; - int offset = 0; - int dpg_width = width; + struct output_pixel_processor *odm_opp; switch 
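/* Editorial aside on the ODM slice math introduced above (illustrative
 * numbers, not from the patch): each ODM slice gets odm_slice_width =
 * h_active / odm_cnt pixels, and the integer-division remainder lands in
 * last_odm_slice_width. For h_active = 4096 and odm_cnt = 3 that is slices of
 * 1365, 1365 and 1366 pixels at offsets 0, 1365 and 2730, so the per-slice
 * pattern-generator calls below cover the full active width. */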
(test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -502,36 +508,33 @@ static void set_crtc_test_pattern(struct dc_link *link, break; } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - dpg_width = width / opp_cnt; - offset = dpg_width; - - link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - dpg_width, - height, - 0); - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, - odm_pipe, + pipe_ctx, controller_test_pattern, controller_color_space, color_depth, NULL, - dpg_width, - height, + odm_slice_width, + v_active, offset); - offset += offset; + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + last_odm_slice_width, + v_active, + offset); } } break; @@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link, /* restore bitdepth reduction */ resource_build_bit_depth_reduction_params(pipe_ctx->stream, ¶ms); pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - int dpg_width; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - dpg_width = width / opp_cnt; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + color_depth); + } else if (link->dc->hwss.set_disp_pattern_generator) { + struct output_pixel_processor *odm_opp; + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, odm_pipe, @@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, + odm_pipe, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + last_odm_slice_width, + v_active, + offset); } } break; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index 
586fe25c1702..dc1cb5478e08 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -108,6 +108,11 @@ static void enable_hpo_dp_link_output(struct dc_link *link, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + true); link_res->hpo_dp_link_enc->funcs->enable_link_phy( link_res->hpo_dp_link_enc, link_settings, @@ -122,6 +127,11 @@ static void disable_hpo_dp_link_output(struct dc_link *link, link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + false); } static void set_hpo_dp_link_test_pattern(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 8041b8369e45..c9b6676eaf53 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -876,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); - set_default_brightness_aux(link); - //TODO: use cached + set_cached_brightness_aux(link); } return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 1a7b93e41e35..7997936613fc 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -1079,8 +1079,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) { uint64_t kbps; + enum dc_link_encoding_format link_encoding; - kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); + if (dp_is_128b_132b_signal(pipe_ctx)) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + else + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + + kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding); return get_pbn_from_bw_in_kbps(kbps); } @@ -1538,7 +1544,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); struct fixed31_32 timing_bw = dc_fixpt_from_int( - dc_bandwidth_in_kbps_from_timing(&stream->timing)); + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(link))); struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_div(timing_bw, timeslot_bw_effective); @@ -1971,6 +1978,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) bool is_vga_mode = (stream->timing.h_addressable == 640) && (stream->timing.v_addressable == 480); struct dc *dc = pipe_ctx->stream->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); if (stream->phy_pix_clk == 0) stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; @@ -2010,6 +2018,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) display_color_depth = 
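/* Editorial aside on the link_hwss_hpo_dp.c hunks above (annotation, not part
 * of the patch): the ordering is deliberate; the symclk32_le root clock is
 * ungated (enable = true) before the HPO PHY is enabled and re-gated
 * (enable = false) only after the PHY is disabled, with the dccg hook
 * NULL-checked so ASICs that do not implement it are unaffected. */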
COLOR_DEPTH_888; + /* We need to enable stream encoder for TMDS first to apply 1/4 TMDS + * character clock in case it is beyond 340 MHz. + */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link_hwss->setup_stream_encoder(pipe_ctx); + dc->hwss.enable_tmds_link_output( link, &pipe_ctx->link_res, @@ -2129,7 +2143,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_default_brightness_aux(link); // TODO: use cached if known + set_cached_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) msleep(bl_oled_enable_delay); edp_backlight_enable_aux(link, true); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index e8b2fc4002a5..b45fda96eaf6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing( /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ outputTiming.flags.DSC = 0; #endif - if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > + dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) return false; } else { // DP to HDMI TMDS converter if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) @@ -285,7 +286,7 @@ static bool dp_validate_mode_timing( link_setting = &link->verified_link_cap; */ - req_bw = dc_bandwidth_in_kbps_from_timing(timing); + req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); max_bw = dp_link_bandwidth_kbps(link, link_setting); if (req_bw <= max_bw) { @@ -357,7 +358,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un for (uint8_t i = 0; i < num_streams; ++i) { link[i] = stream[i].link; - bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); + bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, + dc_link_get_highest_encoding_format(link[i])); } ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 0fa1228bc178..0f19c07011b5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -427,7 +427,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc, if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa && - ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) { + ddc->ctx->dce_version == DCN_VERSION_3_1) { /* Fixed VS workaround for AUX timeout */ const uint32_t fixed_vs_address = 0xF004F; const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc}; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 3a5e80b57711..b38ac3ea06b0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -906,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
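/* Editorial aside (sketch following get_pbn_from_timing() in link_dpms.c
 * above, not part of the patch): with the new link-encoding parameter, callers
 * of dc_bandwidth_in_kbps_from_timing() pick the encoding roughly like this:
 *
 *	enum dc_link_encoding_format enc = dp_is_128b_132b_signal(pipe_ctx) ?
 *		DC_LINK_ENCODING_DP_128b_132b : DC_LINK_ENCODING_DP_8b_10b;
 *	kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, enc);
 *
 * or use dc_link_get_highest_encoding_format(link) when no signal is active
 * yet, as the link_validation.c hunks above do. */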
struct dc_link_settings *link_setting) { struct dc_link *link = stream->link; - uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link)); memset(link_setting, 0, sizeof(*link_setting)); @@ -939,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream, tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; tmp_timing.flags.DSC = 0; - orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing, + dc_link_get_highest_encoding_format(link)); edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw); max_link_rate = tmp_link_setting.link_rate; } @@ -2165,7 +2166,9 @@ static bool dp_verify_link_cap( link, &irq_data)) (*fail_count)++; - + } else if (status == LINK_TRAINING_LINK_LOSS) { + success = true; + (*fail_count)++; } else { (*fail_count)++; } @@ -2188,6 +2191,7 @@ bool dp_verify_link_cap_with_retries( int i = 0; bool success = false; int fail_count = 0; + struct dc_link_settings last_verified_link_cap = fail_safe_link_settings; dp_trace_detect_lt_init(link); @@ -2204,10 +2208,14 @@ bool dp_verify_link_cap_with_retries( if (!link_detect_connection_type(link, &type) || type == dc_connection_none) { link->verified_link_cap = fail_safe_link_settings; break; - } else if (dp_verify_link_cap(link, known_limit_link_setting, - &fail_count) && fail_count == 0) { - success = true; - break; + } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) { + last_verified_link_cap = link->verified_link_cap; + if (fail_count == 0) { + success = true; + break; + } + } else { + link->verified_link_cap = last_verified_link_cap; } fsleep(10 * 1000); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index e011df4bdaf2..90339c2dfd84 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1699,13 +1699,20 @@ bool perform_link_training_with_retries( } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ uint32_t req_bw; uint32_t link_bw; + enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED; decide_fallback_link_setting(link, &max_link_settings, &cur_link_settings, status); + + if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to * minimum link bandwidth. 
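 * (Editorial note, inferred from the code below: the encoding derived from the
 * fallback settings feeds dc_bandwidth_in_kbps_from_timing(), and the result is
 * compared against dp_link_bandwidth_kbps(); is_link_bw_low captures the case
 * req_bw > link_bw, while is_link_bw_min captures fallback bottoming out at
 * LINK_RATE_LOW.)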
*/ - req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding); link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings); is_link_bw_low = (req_bw > link_bw); is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 15faaf645b14..ca0543e62917 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -236,6 +236,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; uint32_t vendor_lttpr_write_address = 0xF004F; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; @@ -244,10 +249,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( uint8_t toggle_rate; uint8_t rate; - if (link->local_sink) - pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; - /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); @@ -260,10 +261,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( if (offset != 0xFF) { vendor_lttpr_write_address += ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ @@ -339,6 +343,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_1[0], + sizeof(vendor_lttpr_write_data_4lane_1)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_2[0], + sizeof(vendor_lttpr_write_data_4lane_2)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_3[0], + sizeof(vendor_lttpr_write_data_4lane_3)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_4[0], + sizeof(vendor_lttpr_write_data_4lane_4)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_5[0], + sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. 
Perform link training */ /* Perform Clock Recovery Sequence */ @@ -596,9 +628,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E}; const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01}; const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; + uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t pre_disable_intercept_delay_ms = 0; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; uint32_t vendor_lttpr_write_address = 0xF004F; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; @@ -607,10 +644,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( uint8_t toggle_rate; uint8_t rate; - if (link->local_sink) - pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; - /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); @@ -623,10 +656,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( if (offset != 0xFF) { vendor_lttpr_write_address += ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ @@ -702,6 +738,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_1[0], + sizeof(vendor_lttpr_write_data_4lane_1)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_2[0], + sizeof(vendor_lttpr_write_data_4lane_2)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_3[0], + sizeof(vendor_lttpr_write_data_4lane_3)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_4[0], + sizeof(vendor_lttpr_write_data_4lane_4)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_4lane_5[0], + sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. 
Perform link training */ /* Perform Clock Recovery Sequence */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 2039a345f23a..8b360c09e0e8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -46,43 +46,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) { union dpcd_edp_config edp_config_set; bool panel_mode_edp = false; + enum dc_status result; memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); - if (panel_mode != DP_PANEL_MODE_DEFAULT) { + switch (panel_mode) { + case DP_PANEL_MODE_EDP: + case DP_PANEL_MODE_SPECIAL: + panel_mode_edp = true; + break; - switch (panel_mode) { - case DP_PANEL_MODE_EDP: - case DP_PANEL_MODE_SPECIAL: - panel_mode_edp = true; - break; + default: + break; + } - default: - break; - } + /*set edp panel mode in receiver*/ + result = core_link_read_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); - /*set edp panel mode in receiver*/ - core_link_read_dpcd( + if (result == DC_OK && + edp_config_set.bits.PANEL_MODE_EDP + != panel_mode_edp) { + + edp_config_set.bits.PANEL_MODE_EDP = + panel_mode_edp; + result = core_link_write_dpcd( link, DP_EDP_CONFIGURATION_SET, &edp_config_set.raw, sizeof(edp_config_set.raw)); - if (edp_config_set.bits.PANEL_MODE_EDP - != panel_mode_edp) { - enum dc_status result; - - edp_config_set.bits.PANEL_MODE_EDP = - panel_mode_edp; - result = core_link_write_dpcd( - link, - DP_EDP_CONFIGURATION_SET, - &edp_config_set.raw, - sizeof(edp_config_set.raw)); - - ASSERT(result == DC_OK); - } + ASSERT(result == DC_OK); } + link->panel_mode = panel_mode; DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " "eDP panel mode enabled: %d \n", @@ -164,6 +163,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + link->backlight_settings.backlight_millinits = backlight_millinits; if (!link->dpcd_caps.panel_luminance_control) { if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, @@ -251,10 +251,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *) backlight_millinits, - sizeof(uint32_t))) - return false; + if (!link->dpcd_caps.panel_luminance_control) { + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)backlight_millinits, + sizeof(uint32_t))) + return false; + } else { + //setting to 0 as a precaution, since target_luminance_value is 3 bytes + memset(backlight_millinits, 0, sizeof(uint32_t)); + + if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + (uint8_t *)backlight_millinits, + sizeof(struct target_luminance_value))) + return false; + } return true; } @@ -276,6 +286,16 @@ bool set_default_brightness_aux(struct dc_link *link) return false; } +bool set_cached_brightness_aux(struct dc_link *link) +{ + if (link->backlight_settings.backlight_millinits) + return edp_set_backlight_level_nits(link, true, + link->backlight_settings.backlight_millinits, 0); + else + return set_default_brightness_aux(link); + return false; +} + bool 
edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) { @@ -309,7 +329,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link, core_link_read_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, sizeof(lane_count_set)); - req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); + req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link)); if (!crtc_timing->flags.DSC) edp_decide_link_settings(link, &link_setting, req_bw); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 28f552080558..fa89bdb3a336 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,6 +30,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool set_cached_brightness_aux(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 4585e0419da6..2d995c87fbb9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -378,6 +378,7 @@ struct dmub_srv_hw_funcs { union dmub_fw_boot_status (*get_fw_status)(struct dmub_srv *dmub); + union dmub_fw_boot_options (*get_fw_boot_option)(struct dmub_srv *dmub); void (*set_gpint)(struct dmub_srv *dmub, union dmub_gpint_data_register reg); @@ -778,9 +779,15 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb); enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, union dmub_fw_boot_status *status); +enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, + union dmub_fw_boot_options *option); + enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); +enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, + bool skip); + bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry); bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index af1f50742371..adde1d84d773 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -170,6 +170,95 @@ extern "C" { #endif #pragma pack(push, 1) +#define ABM_NUM_OF_ACE_SEGMENTS 5 + +union abm_flags { + struct { + /** + * @abm_enabled: Indicates if ABM is enabled. + */ + unsigned int abm_enabled : 1; + + /** + * @disable_abm_requested: Indicates if driver has requested ABM to be disabled. + */ + unsigned int disable_abm_requested : 1; + + /** + * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled + * immediately. + */ + unsigned int disable_abm_immediately : 1; + + /** + * @disable_abm_immediate_keep_gain: Indicates if driver has requested ABM + * to be disabled immediately and keep gain. + */ + unsigned int disable_abm_immediate_keep_gain : 1; + + /** + * @fractional_pwm: Indicates if fractional duty cycle for backlight PWM is enabled. 
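The abm_flags union above uses the usual DMUB convention of pairing named bitfields with a u32All overlay, so the driver can test or set individual bits symbolically while the whole word is copied to and from firmware in one move. A standalone illustration of that dual-view idiom — bitfield ordering is implementation-defined in C, which is tolerable here only because driver and firmware are assumed to be built with matched ABIs:

#include <stdint.h>
#include <stdio.h>

union flags {
	struct {
		unsigned int enabled : 1;
		unsigned int disable_requested : 1;
		unsigned int reserved : 30;
	} bitfields;
	uint32_t u32All;
};

int main(void)
{
	union flags f = { .u32All = 0 };

	f.bitfields.enabled = 1;		/* symbolic access to one bit */
	f.bitfields.disable_requested = 1;

	/* The whole word ships to firmware (or a snapshot struct) at once. */
	printf("raw flags word: 0x%08x\n", f.u32All);
	return 0;
}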
+ */ + unsigned int fractional_pwm : 1; + + /** + * @abm_gradual_bl_change: Indicates if algorithm has completed gradual adjustment + * of user backlight level. + */ + unsigned int abm_gradual_bl_change : 1; + } bitfields; + + unsigned int u32All; +}; + +struct abm_save_restore { + /** + * @flags: Misc. ABM flags. + */ + union abm_flags flags; + + /** + * @pause: true: pause ABM and get state + * false: unpause ABM after setting state + */ + uint32_t pause; + + /** + * @next_ace_slope: Next ACE slopes to be programmed in HW (u3.13) + */ + uint32_t next_ace_slope[ABM_NUM_OF_ACE_SEGMENTS]; + + /** + * @next_ace_thresh: Next ACE thresholds to be programmed in HW (u10.6) + */ + uint32_t next_ace_thresh[ABM_NUM_OF_ACE_SEGMENTS]; + + /** + * @next_ace_offset: Next ACE offsets to be programmed in HW (u10.6) + */ + uint32_t next_ace_offset[ABM_NUM_OF_ACE_SEGMENTS]; + + + /** + * @knee_threshold: Current x-position of ACE knee (u0.16). + */ + uint32_t knee_threshold; + /** + * @current_gain: Current backlight reduction (u16.16). + */ + uint32_t current_gain; + /** + * @curr_bl_level: Current actual backlight level converging to target backlight level. + */ + uint16_t curr_bl_level; + + /** + * @curr_user_bl_level: Current nominal backlight level converging to level requested by user. + */ + uint16_t curr_user_bl_level; + +}; + /** * union dmub_addr - DMUB physical/virtual 64-bit address. */ @@ -2672,6 +2761,12 @@ enum dmub_cmd_abm_type { * unregister vertical interrupt after steady state is reached */ DMUB_CMD__ABM_PAUSE = 6, + + /** + * Save and Restore ABM state. On save we save parameters, and + * on restore we update state with passed in data. + */ + DMUB_CMD__ABM_SAVE_RESTORE = 7, }; /** @@ -3056,6 +3151,7 @@ struct dmub_cmd_abm_pause_data { uint8_t pad[1]; }; + /** * Definition of a DMUB_CMD__ABM_PAUSE command. */ @@ -3072,6 +3168,36 @@ struct dmub_rb_cmd_abm_pause { }; /** + * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command. + */ +struct dmub_rb_cmd_abm_save_restore { + /** + * Command header. + */ + struct dmub_cmd_header header; + + /** + * OTG hw instance + */ + uint8_t otg_inst; + + /** + * Enable or disable ABM pause + */ + uint8_t freeze; + + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t debug; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_INIT_CONFIG command. + */ + struct dmub_cmd_abm_init_config_data abm_init_config_data; +}; + +/** * Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command. */ struct dmub_cmd_query_feature_caps_data { @@ -3509,6 +3635,11 @@ union dmub_rb_cmd { struct dmub_rb_cmd_abm_pause abm_pause; /** + * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command. + */ + struct dmub_rb_cmd_abm_save_restore abm_save_restore; + + /** * Definition of a DMUB_CMD__DP_AUX_ACCESS command. */ struct dmub_rb_cmd_dp_aux_access dp_aux_access; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h deleted file mode 100644 index 21b02bad696f..000000000000 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef DMUB_SUBVP_STATE_H -#define DMUB_SUBVP_STATE_H - -#include "dmub_cmd.h" - -#define DMUB_SUBVP_INST0 0 -#define DMUB_SUBVP_INST1 1 -#define SUBVP_MAX_WATERMARK 0xFFFF - -struct dmub_subvp_hubp_state { - uint32_t CURSOR0_0_CURSOR_POSITION; - uint32_t CURSOR0_0_CURSOR_HOT_SPOT; - uint32_t CURSOR0_0_CURSOR_DST_OFFSET; - uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; - uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; - uint32_t CURSOR0_0_CURSOR_SIZE; - uint32_t CURSOR0_0_CURSOR_CONTROL; - uint32_t HUBPREQ0_CURSOR_SETTINGS; - uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH; - uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; -}; - -enum subvp_error_code { - DMUB_SUBVP_INVALID_STATE, - DMUB_SUBVP_INVALID_TRANSITION, -}; - -enum subvp_state { - DMUB_SUBVP_DISABLED, - DMUB_SUBVP_IDLE, - DMUB_SUBVP_TRY_ACQUIRE_LOCKS, - DMUB_SUBVP_WAIT_FOR_LOCKS, - DMUB_SUBVP_PRECONFIGURE, - DMUB_SUBVP_PREPARE, - DMUB_SUBVP_ENABLE, - DMUB_SUBVP_SWITCHING, - DMUB_SUBVP_END, - DMUB_SUBVP_RESTORE, -}; - -/* Defines information for SUBVP to handle vertical interrupts. */ -struct dmub_subvp_vertical_interrupt_event { - /** - * @inst: Hardware instance of vertical interrupt. - */ - uint8_t otg_inst; - - /** - * @pad: Align structure to 4 byte boundary. - */ - uint8_t pad[3]; - - enum subvp_state curr_state; -}; - -struct dmub_subvp_vertical_interrupt_state { - /** - * @events: Event list. 
- */ - struct dmub_subvp_vertical_interrupt_event events[DMUB_MAX_STREAMS]; -}; - -struct dmub_subvp_vline_interrupt_event { - - uint8_t hubp_inst; - uint8_t pad[3]; -}; - -struct dmub_subvp_vline_interrupt_state { - struct dmub_subvp_vline_interrupt_event events[DMUB_MAX_PLANES]; -}; - -struct dmub_subvp_interrupt_ctx { - struct dmub_subvp_vertical_interrupt_state vertical_int; - struct dmub_subvp_vline_interrupt_state vline_int; -}; - -struct dmub_subvp_pipe_state { - uint32_t pix_clk_100hz; - uint16_t main_vblank_start; - uint16_t main_vblank_end; - uint16_t mall_region_lines; - uint16_t prefetch_lines; - uint16_t prefetch_to_mall_start_lines; - uint16_t processing_delay_lines; - uint8_t main_pipe_index; - uint8_t phantom_pipe_index; - uint16_t htotal; // htotal for main / phantom pipe - uint16_t vtotal; - uint16_t optc_underflow_count; - uint16_t hubp_underflow_count; - uint8_t pad[2]; -}; - -/** - * struct dmub_subvp_vblank_drr_info - Store DRR state when handling - * SubVP + VBLANK with DRR multi-display case. - * - * The info stored in this struct is only valid if drr_in_use = 1. - */ -struct dmub_subvp_vblank_drr_info { - uint8_t drr_in_use; - uint8_t drr_window_size_ms; // DRR window size -- indicates largest VMIN/VMAX adjustment per frame - uint16_t min_vtotal_supported; // Min VTOTAL that supports switching in VBLANK - uint16_t max_vtotal_supported; // Max VTOTAL that can still support SubVP static scheduling requirements - uint16_t prev_vmin; // Store VMIN value before MCLK switch (used to restore after MCLK end) - uint16_t prev_vmax; // Store VMAX value before MCLK switch (used to restore after MCLK end) - uint8_t use_ramping; // Use ramping or not - uint8_t pad[1]; -}; - -struct dmub_subvp_vblank_pipe_info { - uint32_t pix_clk_100hz; - uint16_t vblank_start; - uint16_t vblank_end; - uint16_t vstartup_start; - uint16_t vtotal; - uint16_t htotal; - uint8_t pipe_index; - uint8_t pad[1]; - struct dmub_subvp_vblank_drr_info drr_info; // DRR considered as part of SubVP + VBLANK case -}; - -enum subvp_switch_type { - DMUB_SUBVP_ONLY, // Used for SubVP only, and SubVP + VACTIVE - DMUB_SUBVP_AND_SUBVP, // 2 SubVP displays - DMUB_SUBVP_AND_VBLANK, - DMUB_SUBVP_AND_FPO, -}; - -/* SubVP state. */ -struct dmub_subvp_state { - struct dmub_subvp_pipe_state pipe_state[DMUB_MAX_SUBVP_STREAMS]; - struct dmub_subvp_interrupt_ctx int_ctx; - struct dmub_subvp_vblank_pipe_info vblank_info; - enum subvp_state state; // current state - enum subvp_switch_type switch_type; // enum take up 4 bytes (?) 
- uint8_t mclk_pending; - uint8_t num_subvp_streams; - uint8_t vertical_int_margin_us; - uint8_t pstate_allow_width_us; - uint32_t subvp_mclk_switch_count; - uint32_t subvp_wait_lock_count; - uint32_t driver_wait_lock_count; - uint32_t subvp_vblank_frame_count; - uint16_t watermark_a_cache; - uint8_t pad[2]; -}; - -#endif /* _DMUB_SUBVP_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 5e952541e72d..094e9f864557 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -352,6 +352,14 @@ union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub) return status; } +union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub) +{ + union dmub_fw_boot_options option; + + option.all = REG_READ(DMCUB_SCRATCH14); + return option; +} + void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { union dmub_fw_boot_options boot_options = {0}; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 89c5a948b67d..4d520a893c7b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -239,6 +239,8 @@ void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip) union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub); +union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub); + void dmub_dcn31_setup_outbox0(struct dmub_srv *dmub, const struct dmub_region *outbox0); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index bdaf43892f47..93624ffe4eb8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -255,6 +255,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_gpint_response = dmub_dcn31_get_gpint_response; funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout; funcs->get_fw_status = dmub_dcn31_get_fw_boot_status; + funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option; funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options; funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence; //outbox0 call stacks @@ -639,11 +640,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, if (dmub->hw_funcs.enable_dmub_boot_options) dmub->hw_funcs.enable_dmub_boot_options(dmub, params); - if (dmub->hw_funcs.skip_dmub_panel_power_sequence) + if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, params->skip_panel_power_sequence); - if (dmub->hw_funcs.reset_release) + if (dmub->hw_funcs.reset_release && !dmub->is_virtual) dmub->hw_funcs.reset_release(dmub); dmub->hw_init = true; @@ -846,6 +847,32 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, + union dmub_fw_boot_options *option) +{ + option->all = 0; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.get_fw_boot_option) + *option = dmub->hw_funcs.get_fw_boot_option(dmub); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, + bool skip) +{ + if (!dmub->sw_init) + return 
DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) + dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip); + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) { diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index cd870af5fd25..1b8ab20f1715 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -53,7 +53,7 @@ enum { BITS_PER_DP_BYTE = 10, DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */ DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */ - DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */ + DATA_EFFICIENCY_128b_132b_x10000 = 9641, /* 96.71% data efficiency x 99.7% downspread factor */ }; enum lttpr_mode { diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f175e65b853a..abe829bbd54a 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -250,6 +250,7 @@ enum DC_DEBUG_MASK { DC_DISABLE_PSR = 0x10, DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, DC_DISABLE_MPO = 0x40, + DC_ENABLE_DPIA_TRACE = 0x80, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index d0df3381539f..8433f99f6667 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -315,16 +315,19 @@ struct kfd2kgd_calls { uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid); + uint32_t debug_vmid, + uint32_t inst); uint32_t (*clear_address_watch)(struct amdgpu_device *adev, uint32_t watch_id); void (*get_iq_wait_times)(struct amdgpu_device *adev, - uint32_t *wait_times); + uint32_t *wait_times, + uint32_t inst); void (*build_grace_period_packet_info)(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data); + uint32_t *reg_data, + uint32_t inst); void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, int *wave_cnt, int *max_waves_per_cu, uint32_t inst); void (*program_trap_handler_settings)(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f542f6e19ed..90989405eddc 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -892,4 +892,73 @@ struct gpu_metrics_v2_3 { uint16_t average_temperature_core[8]; // average CPU core temperature on APUs uint16_t average_temperature_l3[2]; }; + +struct gpu_metrics_v2_4 { + struct metrics_table_header common_header; + + /* Temperature (unit: centi-Celsius) */ + uint16_t temperature_gfx; + uint16_t temperature_soc; + uint16_t temperature_core[8]; + uint16_t temperature_l3[2]; + + /* Utilization (unit: centi) */ + uint16_t average_gfx_activity; + uint16_t average_mm_activity; + + /* Driver attached timestamp (in ns) */ + uint64_t system_clock_counter; + + /* Power/Energy (unit: mW) */ + uint16_t average_socket_power; + uint16_t average_cpu_power; + uint16_t average_soc_power; + uint16_t average_gfx_power; + uint16_t average_core_power[8]; + + /* Average clocks (unit: MHz) */ + uint16_t average_gfxclk_frequency; + uint16_t 
average_socclk_frequency; + uint16_t average_uclk_frequency; + uint16_t average_fclk_frequency; + uint16_t average_vclk_frequency; + uint16_t average_dclk_frequency; + + /* Current clocks (unit: MHz) */ + uint16_t current_gfxclk; + uint16_t current_socclk; + uint16_t current_uclk; + uint16_t current_fclk; + uint16_t current_vclk; + uint16_t current_dclk; + uint16_t current_coreclk[8]; + uint16_t current_l3clk[2]; + + /* Throttle status (ASIC dependent) */ + uint32_t throttle_status; + + /* Fans */ + uint16_t fan_pwm; + + uint16_t padding[3]; + + /* Throttle status (ASIC independent) */ + uint64_t indep_throttle_status; + + /* Average Temperature (unit: centi-Celsius) */ + uint16_t average_temperature_gfx; + uint16_t average_temperature_soc; + uint16_t average_temperature_core[8]; + uint16_t average_temperature_l3[2]; + + /* Power/Voltage (unit: mV) */ + uint16_t average_cpu_voltage; + uint16_t average_soc_voltage; + uint16_t average_gfx_voltage; + + /* Power/Current (unit: mA) */ + uint16_t average_cpu_current; + uint16_t average_soc_current; + uint16_t average_gfx_current; +}; #endif diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index 0997e999416a..b1db2b190187 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -275,7 +275,9 @@ union MESAPI__ADD_QUEUE { uint32_t trap_en : 1; uint32_t is_aql_queue : 1; uint32_t skip_process_ctx_clear : 1; - uint32_t reserved : 19; + uint32_t map_legacy_kq : 1; + uint32_t exclusively_scheduled : 1; + uint32_t reserved : 17; }; struct MES_API_STATUS api_status; uint64_t tma_addr; diff --git a/drivers/gpu/drm/amd/include/yellow_carp_offset.h b/drivers/gpu/drm/amd/include/yellow_carp_offset.h index 0fea6a746611..a2c8dca2425e 100644 --- a/drivers/gpu/drm/amd/include/yellow_carp_offset.h +++ b/drivers/gpu/drm/amd/include/yellow_carp_offset.h @@ -7,13 +7,11 @@ #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; } __maybe_unused; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 9ef88a0b1b57..3922dd274f30 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2049,8 +2049,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { if (gc_ver < IP_VERSION(9, 0, 0) || - gc_ver == IP_VERSION(9, 4, 1) || - gc_ver == IP_VERSION(9, 4, 2)) + !amdgpu_device_has_display_hardware(adev)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { if (mp1_ver < IP_VERSION(10, 0, 0)) diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h index 52045ad59bed..eec816f0cbf9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h @@ -24,8 +24,7 @@ #ifndef __AMDGPU_PM_H__ #define __AMDGPU_PM_H__ -struct cg_flag_name -{ +struct cg_flag_name { u64 flag; const char *name; }; diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h index 1dc7a065a6d4..251ed011b3b0 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h @@ -41,8 +41,7 @@ #define SMU_13_0_0_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2 
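The gpu_metrics_v2_4 layout added earlier in this patch extends v2_3 with average voltage and current telemetry. Consumers receive these tables as a raw blob (amdgpu exposes one through the gpu_metrics sysfs file) and must dispatch on the common header before casting to a concrete version. A hedged userspace sketch of that dispatch; the sysfs path and the short-read handling are assumptions, and only the v2.4 branch is shown:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metrics_table_header {
	uint16_t structure_size;
	uint8_t format_revision;	/* 2 for the APU metrics family */
	uint8_t content_revision;	/* 4 for gpu_metrics_v2_4 */
};

int main(void)
{
	/* Path is an assumption; the card index varies per system. */
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
	unsigned char buf[4096];
	struct metrics_table_header hdr;
	size_t n;

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	if (n < sizeof(hdr))
		return 1;
	memcpy(&hdr, buf, sizeof(hdr));

	if (hdr.format_revision == 2 && hdr.content_revision == 4) {
		/* Only now is it valid to reinterpret the buffer as a
		 * struct gpu_metrics_v2_4, and only if structure_size
		 * agrees with the number of bytes actually read. */
		printf("gpu_metrics v2.4, %u bytes\n",
		       (unsigned int)hdr.structure_size);
	} else {
		printf("unhandled metrics revision %u.%u\n",
		       (unsigned int)hdr.format_revision,
		       (unsigned int)hdr.content_revision);
	}
	return 0;
}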
#define SMU_13_0_0_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 -enum SMU_13_0_0_ODFEATURE_CAP -{ +enum SMU_13_0_0_ODFEATURE_CAP { SMU_13_0_0_ODCAP_GFXCLK_LIMITS = 0, SMU_13_0_0_ODCAP_UCLK_LIMITS, SMU_13_0_0_ODCAP_POWER_LIMIT, @@ -62,8 +61,7 @@ enum SMU_13_0_0_ODFEATURE_CAP SMU_13_0_0_ODCAP_COUNT, }; -enum SMU_13_0_0_ODFEATURE_ID -{ +enum SMU_13_0_0_ODFEATURE_ID { SMU_13_0_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature SMU_13_0_0_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_UCLK_LIMITS, //UCLK Limit feature SMU_13_0_0_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_0_ODCAP_POWER_LIMIT, //Power Limit feature @@ -85,8 +83,7 @@ enum SMU_13_0_0_ODFEATURE_ID #define SMU_13_0_0_MAX_ODFEATURE 32 //Maximum Number of OD Features -enum SMU_13_0_0_ODSETTING_ID -{ +enum SMU_13_0_0_ODSETTING_ID { SMU_13_0_0_ODSETTING_GFXCLKFMAX = 0, SMU_13_0_0_ODSETTING_GFXCLKFMIN, SMU_13_0_0_ODSETTING_UCLKFMIN, @@ -123,8 +120,7 @@ enum SMU_13_0_0_ODSETTING_ID }; #define SMU_13_0_0_MAX_ODSETTING 64 //Maximum Number of ODSettings -enum SMU_13_0_0_PWRMODE_SETTING -{ +enum SMU_13_0_0_PWRMODE_SETTING { SMU_13_0_0_PMSETTING_POWER_LIMIT_QUIET = 0, SMU_13_0_0_PMSETTING_POWER_LIMIT_BALANCE, SMU_13_0_0_PMSETTING_POWER_LIMIT_TURBO, @@ -144,8 +140,7 @@ enum SMU_13_0_0_PWRMODE_SETTING }; #define SMU_13_0_0_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings -struct smu_13_0_0_overdrive_table -{ +struct smu_13_0_0_overdrive_table { uint8_t revision; //Revision = SMU_13_0_0_PP_OVERDRIVE_VERSION uint8_t reserve[3]; //Zero filled field reserved for future use uint32_t feature_count; //Total number of supported features @@ -156,8 +151,7 @@ struct smu_13_0_0_overdrive_table int16_t pm_setting[SMU_13_0_0_MAX_PMSETTING]; //Optimized power mode feature settings }; -enum SMU_13_0_0_PPCLOCK_ID -{ +enum SMU_13_0_0_PPCLOCK_ID { SMU_13_0_0_PPCLOCK_GFXCLK = 0, SMU_13_0_0_PPCLOCK_SOCCLK, SMU_13_0_0_PPCLOCK_UCLK, @@ -175,8 +169,7 @@ enum SMU_13_0_0_PPCLOCK_ID }; #define SMU_13_0_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks -struct smu_13_0_0_powerplay_table -{ +struct smu_13_0_0_powerplay_table { struct atom_common_table_header header; //For SMU13, header.format_revision = 15, header.content_revision = 0 uint8_t table_revision; //For SMU13, table_revision = 2 uint8_t padding; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 6841a4bce186..1cb402264497 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -1798,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) return result; } -static bool intel_core_rkl_chk(void) -{ -#if IS_ENABLED(CONFIG_X86_64) - struct cpuinfo_x86 *c = &cpu_data(0); - - return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE); -#else - return false; -#endif -} - static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -1835,7 +1824,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? 
false : true; data->pcie_dpm_key_disabled = - intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); + !amdgpu_device_pcie_dynamic_switching_supported() || + !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); /* need to set voltage control types before EVV patching */ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h index beab6d7b28b7..630132c4a76b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h @@ -52,8 +52,7 @@ static unsigned int DbiPrbs7[] = //4096 bytes, 256 byte aligned -static unsigned int NoDbiPrbs7[] = -{ +static unsigned int NoDbiPrbs7[] = { 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, @@ -121,8 +120,7 @@ static unsigned int NoDbiPrbs7[] = }; // 4096 bytes, 256 byte aligned -static unsigned int DbiPrbs7[] = -{ +static unsigned int DbiPrbs7[] = { 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h index eadbe0149cae..eb694f9f556d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h @@ -41,8 +41,7 @@ #define SMU_13_0_7_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2 #define SMU_13_0_7_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 -enum SMU_13_0_7_ODFEATURE_CAP -{ +enum SMU_13_0_7_ODFEATURE_CAP { SMU_13_0_7_ODCAP_GFXCLK_LIMITS = 0, SMU_13_0_7_ODCAP_UCLK_LIMITS, SMU_13_0_7_ODCAP_POWER_LIMIT, @@ -62,8 +61,7 @@ enum SMU_13_0_7_ODFEATURE_CAP SMU_13_0_7_ODCAP_COUNT, }; -enum SMU_13_0_7_ODFEATURE_ID -{ +enum SMU_13_0_7_ODFEATURE_ID { SMU_13_0_7_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature SMU_13_0_7_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_UCLK_LIMITS, //UCLK Limit feature SMU_13_0_7_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_7_ODCAP_POWER_LIMIT, //Power Limit feature @@ -85,8 +83,7 @@ enum SMU_13_0_7_ODFEATURE_ID #define SMU_13_0_7_MAX_ODFEATURE 32 //Maximum Number of OD Features -enum SMU_13_0_7_ODSETTING_ID -{ +enum SMU_13_0_7_ODSETTING_ID { SMU_13_0_7_ODSETTING_GFXCLKFMAX = 0, SMU_13_0_7_ODSETTING_GFXCLKFMIN, SMU_13_0_7_ODSETTING_UCLKFMIN, @@ -123,8 +120,7 @@ enum SMU_13_0_7_ODSETTING_ID }; #define SMU_13_0_7_MAX_ODSETTING 
64 //Maximum Number of ODSettings -enum SMU_13_0_7_PWRMODE_SETTING -{ +enum SMU_13_0_7_PWRMODE_SETTING { SMU_13_0_7_PMSETTING_POWER_LIMIT_QUIET = 0, SMU_13_0_7_PMSETTING_POWER_LIMIT_BALANCE, SMU_13_0_7_PMSETTING_POWER_LIMIT_TURBO, @@ -144,8 +140,7 @@ enum SMU_13_0_7_PWRMODE_SETTING }; #define SMU_13_0_7_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings -struct smu_13_0_7_overdrive_table -{ +struct smu_13_0_7_overdrive_table { uint8_t revision; //Revision = SMU_13_0_7_PP_OVERDRIVE_VERSION uint8_t reserve[3]; //Zero filled field reserved for future use uint32_t feature_count; //Total number of supported features @@ -156,8 +151,7 @@ struct smu_13_0_7_overdrive_table int16_t pm_setting[SMU_13_0_7_MAX_PMSETTING]; //Optimized power mode feature settings }; -enum SMU_13_0_7_PPCLOCK_ID -{ +enum SMU_13_0_7_PPCLOCK_ID { SMU_13_0_7_PPCLOCK_GFXCLK = 0, SMU_13_0_7_PPCLOCK_SOCCLK, SMU_13_0_7_PPCLOCK_UCLK, @@ -175,8 +169,7 @@ enum SMU_13_0_7_PPCLOCK_ID }; #define SMU_13_0_7_MAX_PPCLOCK 16 //Maximum Number of PP Clocks -struct smu_13_0_7_powerplay_table -{ +struct smu_13_0_7_powerplay_table { struct atom_common_table_header header; //For PLUM_BONITO, header.format_revision = 15, header.content_revision = 0 uint8_t table_revision; //For PLUM_BONITO, table_revision = 2 uint8_t padding; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 3bb18396d2f9..c49f770c97b3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -598,7 +598,7 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; @@ -1482,7 +1482,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return ret; if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && - (smu_version >=0x360d00)) { + (smu_version >= 0x360d00)) { ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 95f6d821bacb..e655071516b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -136,7 +136,7 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), - MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange, 0), + MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange, 0), MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange, 0), MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0), MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0), @@ -556,7 +556,7 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; int ret = 0; @@ -642,7 +642,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct 
smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; @@ -731,7 +731,7 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_NV12_legacy_t *metrics = (SmuMetrics_NV12_legacy_t *)smu_table->metrics_table; int ret = 0; @@ -817,7 +817,7 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_NV12_t *metrics = (SmuMetrics_NV12_t *)smu_table->metrics_table; int ret = 0; @@ -1686,7 +1686,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, return 0; break; case SMU_DCEFCLK: - dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n"); + dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n"); break; default: @@ -2182,7 +2182,7 @@ static int navi10_read_sensor(struct smu_context *smu, struct smu_table_context *table_context = &smu->smu_table; PPTable_t *pptable = table_context->driver_pptable; - if(!data || !size) + if (!data || !size) return -EINVAL; switch (sensor) { @@ -2317,15 +2317,15 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu, uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal; uint32_t max_memory_clock = max_sustainable_clocks->uclock; - if(smu->disable_uclk_switch == disable_memory_clock_switch) + if (smu->disable_uclk_switch == disable_memory_clock_switch) return 0; - if(disable_memory_clock_switch) + if (disable_memory_clock_switch) ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0); else ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0); - if(!ret) + if (!ret) smu->disable_uclk_switch = disable_memory_clock_switch; return ret; @@ -2559,7 +2559,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu) return 0; } -static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { +static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) +{ int i; int ret = 0; struct smu_table_context *table_context = &smu->smu_table; @@ -3368,7 +3369,7 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu, ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) ret = navi10_get_gpu_metrics(smu, table); else - ret =navi10_get_legacy_gpu_metrics(smu, table); + ret = navi10_get_legacy_gpu_metrics(smu, table); break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index f6599c00a6fd..0cda3b276f61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu, *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data); + ret = sienna_cichlid_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; break; case 
AMDGPU_PP_SENSOR_GFX_SCLK: - ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data); + ret = sienna_cichlid_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXCLK, + (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 067b4e0b026c..185d0b50ee8e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1854,6 +1854,86 @@ static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu, return sizeof(struct gpu_metrics_v2_3); } +static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu, + void **table) +{ + SmuMetrics_t metrics; + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_4 *gpu_metrics = + (struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4); + + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; + memcpy(&gpu_metrics->temperature_core[0], + &metrics.Current.CoreTemperature[0], + sizeof(uint16_t) * 4); + gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; + + gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature; + gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature; + memcpy(&gpu_metrics->average_temperature_core[0], + &metrics.Average.CoreTemperature[0], + sizeof(uint16_t) * 4); + gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0]; + + gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; + gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; + + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; + gpu_metrics->average_cpu_power = metrics.Current.Power[0]; + gpu_metrics->average_soc_power = metrics.Current.Power[1]; + gpu_metrics->average_gfx_power = metrics.Current.Power[2]; + + gpu_metrics->average_cpu_voltage = metrics.Current.Voltage[0]; + gpu_metrics->average_soc_voltage = metrics.Current.Voltage[1]; + gpu_metrics->average_gfx_voltage = metrics.Current.Voltage[2]; + + gpu_metrics->average_cpu_current = metrics.Current.Current[0]; + gpu_metrics->average_soc_current = metrics.Current.Current[1]; + gpu_metrics->average_gfx_current = metrics.Current.Current[2]; + + memcpy(&gpu_metrics->average_core_power[0], + &metrics.Average.CorePower[0], + sizeof(uint16_t) * 4); + + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; + + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; + + memcpy(&gpu_metrics->current_coreclk[0], + &metrics.Current.CoreFrequency[0], + sizeof(uint16_t) * 4); + 
gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; + + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; + gpu_metrics->indep_throttle_status = + smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, + vangogh_throttler_map); + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_4); +} + static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, void **table) { @@ -1923,23 +2003,34 @@ static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, { uint32_t if_version; uint32_t smu_version; + uint32_t smu_program; + uint32_t fw_version; int ret = 0; ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); - if (ret) { + if (ret) return ret; - } - if (smu_version >= 0x043F3E00) { - if (if_version < 0x3) - ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); + smu_program = (smu_version >> 24) & 0xff; + fw_version = smu_version & 0xffffff; + if (smu_program == 6) { + if (fw_version >= 0x3F0800) + ret = vangogh_get_gpu_metrics_v2_4(smu, table); else ret = vangogh_get_gpu_metrics_v2_3(smu, table); + } else { - if (if_version < 0x3) - ret = vangogh_get_legacy_gpu_metrics(smu, table); - else - ret = vangogh_get_gpu_metrics(smu, table); + if (smu_version >= 0x043F3E00) { + if (if_version < 0x3) + ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); + else + ret = vangogh_get_gpu_metrics_v2_3(smu, table); + } else { + if (if_version < 0x3) + ret = vangogh_get_legacy_gpu_metrics(smu, table); + else + ret = vangogh_get_gpu_metrics(smu, table); + } } return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 8a8ba25c9ad7..a7569354229d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -262,15 +262,15 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu, /* mclk levels are in reverse order */ *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { - if(sclk_mask) + if (sclk_mask) /* The sclk as gfxclk and has three level about max/min/current */ *sclk_mask = 3 - 1; - if(mclk_mask) + if (mclk_mask) /* mclk levels are in reverse order */ *mclk_mask = 0; - if(soc_mask) + if (soc_mask) *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index c788aa7a99a9..5e408a195860 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -205,7 +205,8 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu) return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } -int smu_v12_0_mode2_reset(struct smu_context *smu){ +int smu_v12_0_mode2_reset(struct smu_context *smu) +{ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index b9bde5fa8f8f..3d188616ba24 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1734,7 +1734,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; - 
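The vangogh_common_get_gpu_metrics() dispatch above splits smu_version into a program id in the top byte and a firmware build in the low 24 bits, so program-6 firmware at or above 0x3F0800 is routed to the v2.4 table independently of the legacy full-word threshold. The packing is plain shift-and-mask; a small worked example using the same cutoff (the raw version word here is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical raw word: program 6, firmware build 0x3F0800. */
	uint32_t smu_version = (6u << 24) | 0x3F0800;

	uint32_t smu_program = (smu_version >> 24) & 0xff;	/* -> 6 */
	uint32_t fw_version = smu_version & 0xffffff;		/* -> 0x3F0800 */

	printf("program %u, fw 0x%06x -> %s\n",
	       (unsigned int)smu_program, (unsigned int)fw_version,
	       (smu_program == 6 && fw_version >= 0x3F0800) ?
	       "gpu_metrics v2.4" : "older table");
	return 0;
}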
gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency; gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 3ba02131e682..b1f0937ccade 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -949,7 +949,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu, break; case AMDGPU_PP_SENSOR_GFX_MCLK: ret = smu_v13_0_7_get_smu_metrics_data(smu, - METRICS_AVERAGE_UCLK, + METRICS_CURR_UCLK, (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 3ecb900e6ecd..442d267088bc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -691,7 +691,7 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu, #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(fea) #fea -static const char* __smu_feature_names[] = { +static const char *__smu_feature_names[] = { SMU_FEATURE_MASKS }; @@ -927,7 +927,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu, void *metrics_table, bool bypass_cache) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; int ret = 0; @@ -969,7 +969,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) struct metrics_table_header *header = (struct metrics_table_header *)table; uint16_t structure_size; -#define METRICS_VERSION(a, b) ((a << 16) | b ) +#define METRICS_VERSION(a, b) ((a << 16) | b) switch (METRICS_VERSION(frev, crev)) { case METRICS_VERSION(1, 0): @@ -996,6 +996,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) case METRICS_VERSION(2, 3): structure_size = sizeof(struct gpu_metrics_v2_3); break; + case METRICS_VERSION(2, 4): + structure_size = sizeof(struct gpu_metrics_v2_4); + break; default: return; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 4153f302de7c..d19e796c2061 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, if (exynos_crtc->ops->atomic_disable) exynos_crtc->ops->atomic_disable(exynos_crtc); + spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_send_vblank_event(crtc, crtc->state->event); - spin_unlock_irq(&crtc->dev->event_lock); - crtc->state->event = NULL; } + spin_unlock_irq(&crtc->dev->event_lock); } static int exynos_crtc_atomic_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 964dceb28c1e..34cdabc30b4f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1426,6 +1426,6 @@ struct platform_driver gsc_driver = { .name = "exynos-drm-gsc", .owner = THIS_MODULE, .pm = &gsc_pm_ops, - .of_match_table = of_match_ptr(exynos_drm_gsc_of_match), + .of_match_table = exynos_drm_gsc_of_match, }, }; diff --git 
a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 789dce9e2608..79f65eff6bb2 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -23,6 +23,11 @@ subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable) subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror +# Fine grained warnings disable +CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) +CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init) +CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) + subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! @@ -127,6 +132,7 @@ gt-y += \ gt/intel_sseu.o \ gt/intel_sseu_debugfs.o \ gt/intel_timeline.o \ + gt/intel_tlb.o \ gt/intel_wopcm.o \ gt/intel_workarounds.o \ gt/shmem_utils.o \ @@ -192,7 +198,8 @@ i915-y += \ gt/uc/intel_gsc_fw.o \ gt/uc/intel_gsc_proxy.o \ gt/uc/intel_gsc_uc.o \ - gt/uc/intel_gsc_uc_heci_cmd_submit.o\ + gt/uc/intel_gsc_uc_debugfs.o \ + gt/uc/intel_gsc_uc_heci_cmd_submit.o \ gt/uc/intel_guc.o \ gt/uc/intel_guc_ads.o \ gt/uc/intel_guc_capture.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 112d91d81fdc..4c7187f7913e 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -1259,6 +1259,9 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, struct drm_encoder *encoder; struct intel_connector *intel_connector; + if (!assert_port_valid(dev_priv, port)) + return false; + devdata = intel_bios_encoder_data_lookup(dev_priv, port); /* FIXME bail? */ @@ -1270,6 +1273,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, if (!dig_port) return false; + dig_port->aux_ch = AUX_CH_NONE; + intel_connector = intel_connector_alloc(); if (!intel_connector) goto err_connector_alloc; @@ -1373,6 +1378,9 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, intel_infoframe_init(dig_port); dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); + if (dig_port->aux_ch == AUX_CH_NONE) + goto err_init_connector; + if (!intel_dp_init_connector(dig_port, intel_connector)) goto err_init_connector; diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 5c187e6e0472..634b14116d9d 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -659,6 +659,20 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector, return ret; } +static bool is_hdmi_port_valid(struct drm_i915_private *i915, enum port port) +{ + if (IS_G4X(i915) || IS_VALLEYVIEW(i915)) + return port == PORT_B || port == PORT_C; + else + return port == PORT_B || port == PORT_C || port == PORT_D; +} + +static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port) +{ + return !drm_WARN(&i915->drm, !is_hdmi_port_valid(i915, port), + "Platform does not support HDMI %c\n", port_name(port)); +} + void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { @@ -667,6 +681,12 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; + if (!assert_port_valid(dev_priv, port)) + return; + + if (!assert_hdmi_port_valid(dev_priv, port)) + return; + devdata = intel_bios_encoder_data_lookup(dev_priv, port); /* FIXME bail? 
*/ @@ -678,6 +698,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, if (!dig_port) return; + dig_port->aux_ch = AUX_CH_NONE; + intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(dig_port); @@ -753,6 +775,5 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, intel_infoframe_init(dig_port); - dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); intel_hdmi_init_connector(dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index c133928a0655..f7ebc146f96d 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -528,31 +528,16 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, enum port port; enum phy phy; - /* Program T-INIT master registers */ - for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), - DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); - /* Program DPHY clock lanes timings */ - for_each_dsi_port(port, intel_dsi->ports) { + for_each_dsi_port(port, intel_dsi->ports) intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); - /* shadow register inside display core */ - intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), - intel_dsi->dphy_reg); - } - /* Program DPHY data lanes timings */ - for_each_dsi_port(port, intel_dsi->ports) { + for_each_dsi_port(port, intel_dsi->ports) intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); - /* shadow register inside display core */ - intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), - intel_dsi->dphy_data_lane_reg); - } - /* * If DSI link operating at or below an 800 MHz, * TA_SURE should be override and programmed to @@ -561,16 +546,10 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, */ if (DISPLAY_VER(dev_priv) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { - for_each_dsi_port(port, intel_dsi->ports) { + for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); - - /* shadow register inside display core */ - intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), - TA_SURE_MASK, - TA_SURE_OVERRIDE | TA_SURE(0)); - } } } @@ -581,6 +560,41 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, } } +static void +gen11_dsi_setup_timings(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + enum port port; + + /* Program T-INIT master registers */ + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), + DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); + + /* shadow register inside display core */ + for_each_dsi_port(port, intel_dsi->ports) + intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), + intel_dsi->dphy_reg); + + /* shadow register inside display core */ + for_each_dsi_port(port, intel_dsi->ports) + intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), + intel_dsi->dphy_data_lane_reg); + + /* shadow register inside display core */ + if (DISPLAY_VER(dev_priv) == 11) { + if (afe_clk(encoder, crtc_state) <= 800000) { + for_each_dsi_port(port, intel_dsi->ports) { + intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), + TA_SURE_MASK, + TA_SURE_OVERRIDE | TA_SURE(0)); + } + } + } +} + static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = 
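[Editor's note] The assert_hdmi_port_valid() helper added above — like assert_port_valid() later in this series — follows a `return !drm_WARN(...)` idiom: warn loudly on an impossible configuration and hand the caller a clean "valid" boolean. A stand-alone sketch of the idiom, with warn_on() standing in for drm_WARN():

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for drm_WARN(): report the condition, then return it. */
    static bool warn_on(bool cond, const char *msg)
    {
        if (cond)
            fprintf(stderr, "WARNING: %s\n", msg);
        return cond;
    }

    /* Reads as an assertion: true means "valid, carry on". */
    static bool assert_hdmi_port_valid(int port, int max_port)
    {
        return !warn_on(port < 0 || port > max_port,
                        "Platform does not support this HDMI port");
    }

    int main(void)
    {
        if (!assert_hdmi_port_valid(7, 3))
            return 1;       /* caller bails out, as g4x_hdmi_init() does */
        return 0;
    }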
to_i915(encoder->base.dev); @@ -1090,11 +1104,15 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, /* step 4c: configure voltage swing and skew */ gen11_dsi_voltage_swing_program_seq(encoder); + /* setup D-PHY timings */ + gen11_dsi_setup_dphy_timings(encoder, crtc_state); + /* enable DDI buffer */ gen11_dsi_enable_ddi_buffer(encoder); - /* setup D-PHY timings */ - gen11_dsi_setup_dphy_timings(encoder, crtc_state); + gen11_dsi_gate_clocks(encoder); + + gen11_dsi_setup_timings(encoder, crtc_state); /* Since transcoder is configured to take events from GPIO */ gen11_dsi_config_util_pin(encoder, true); @@ -1104,9 +1122,6 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, /* Step (4h, 4i, 4j, 4k): Configure transcoder */ gen11_dsi_configure_transcoder(encoder, crtc_state); - - /* Step 4l: Gate DDI clocks */ - gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) @@ -1138,12 +1153,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) "error setting max return pkt size%d\n", tmp); } - /* panel power on related mipi dsi vbt sequences */ - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); - msleep(intel_dsi->panel_on_delay); - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); /* ensure all panel commands dispatched before enabling transcoder */ wait_for_cmds_dispatched_to_panel(encoder); @@ -1154,6 +1164,14 @@ static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + + intel_dsi_wait_panel_power_cycle(intel_dsi); + + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); + msleep(intel_dsi->panel_on_delay); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); + /* step2: enable IO power */ gen11_dsi_enable_io_power(encoder); @@ -1225,9 +1243,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc); - - drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); /* Wa_1409054076:icl,jsl,ehl */ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true); @@ -1238,6 +1254,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state, /* step6d: enable dsi transcoder */ gen11_dsi_enable_transcoder(encoder); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); + /* step7: enable backlight */ intel_backlight_enable(crtc_state, conn_state); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); @@ -1271,8 +1289,6 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); /* ensure cmds dispatched to panel */ wait_for_cmds_dispatched_to_panel(encoder); @@ -1373,11 +1389,21 @@ static void gen11_dsi_disable(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - struct intel_crtc *crtc = 
to_intel_crtc(old_conn_state->crtc); /* step1: turn off backlight */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); intel_backlight_disable(old_conn_state); +} + +static void gen11_dsi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); + + intel_crtc_vblank_off(old_crtc_state); /* step2d,e: disable transcoder and wait */ gen11_dsi_disable_transcoder(encoder); @@ -1391,6 +1417,9 @@ static void gen11_dsi_disable(struct intel_atomic_state *state, /* step2h,i,j: deconfig trancoder */ gen11_dsi_deconfigure_trancoder(encoder); + intel_dsc_disable(old_crtc_state); + skl_scaler_disable(old_crtc_state); + /* step3: disable port */ gen11_dsi_disable_port(encoder); @@ -1398,18 +1427,13 @@ static void gen11_dsi_disable(struct intel_atomic_state *state, /* step4: disable IO power */ gen11_dsi_disable_io_power(encoder); -} -static void gen11_dsi_post_disable(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - intel_crtc_vblank_off(old_crtc_state); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); - intel_dsc_disable(old_crtc_state); + msleep(intel_dsi->panel_off_delay); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); - skl_scaler_disable(old_crtc_state); + intel_dsi->panel_power_off_time = ktime_get_boottime(); } static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector, @@ -1909,7 +1933,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector) fixed_mode->vdisplay); } -void icl_dsi_init(struct drm_i915_private *dev_priv) +void icl_dsi_init(struct drm_i915_private *dev_priv, + const struct intel_bios_encoder_data *devdata) { struct intel_dsi *intel_dsi; struct intel_encoder *encoder; @@ -1917,7 +1942,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) struct drm_connector *connector; enum port port; - if (!intel_bios_is_dsi_present(dev_priv, &port)) + port = intel_bios_encoder_port(devdata); + if (port == PORT_NONE) return; intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); @@ -1934,6 +1960,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) intel_dsi->attached_connector = intel_connector; connector = &intel_connector->base; + encoder->devdata = devdata; + /* register DSI encoder with DRM subsystem */ drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); @@ -1957,6 +1985,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->get_power_domains = gen11_dsi_get_power_domains; encoder->disable_clock = gen11_dsi_gate_clocks; encoder->is_clock_enabled = gen11_dsi_is_clock_enabled; + encoder->shutdown = intel_dsi_shutdown; /* register DSI connector with DRM subsystem */ drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs, @@ -1968,7 +1997,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) /* attach connector to encoder */ intel_connector_attach_encoder(intel_connector, encoder); - encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port); + intel_dsi->panel_power_off_time = ktime_get_boottime(); + intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL); 
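[Editor's note] The icl_dsi.c rework above moves the panel power-on/off VBT sequences into the pre_pll_enable and post_disable hooks and starts recording panel_power_off_time so the driver can honor the panel's minimum power-cycle delay on the next enable. A simplified userspace sketch of that bookkeeping — the clock source and the 500 ms cycle requirement are stand-ins (the driver uses ktime_get_boottime() and the panel's configured delay):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int64_t now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    static int64_t panel_power_off_time_ms;

    static void panel_power_off(void)
    {
        panel_power_off_time_ms = now_ms();   /* remember when power dropped */
    }

    /* Sleep out whatever remains of the required power-cycle delay. */
    static void wait_panel_power_cycle(int64_t cycle_delay_ms)
    {
        int64_t elapsed = now_ms() - panel_power_off_time_ms;
        if (elapsed < cycle_delay_ms)
            usleep((useconds_t)(cycle_delay_ms - elapsed) * 1000);
    }

    int main(void)
    {
        panel_power_off();
        wait_panel_power_cycle(500);          /* returns ~500 ms later */
        printf("safe to power the panel on again\n");
        return 0;
    }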
mutex_lock(&dev_priv->drm.mode_config.mutex); diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h index b4861b56b5b2..43fa7d72eeb1 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.h +++ b/drivers/gpu/drm/i915/display/icl_dsi.h @@ -7,9 +7,11 @@ #define __ICL_DSI_H__ struct drm_i915_private; +struct intel_bios_encoder_data; struct intel_crtc_state; -void icl_dsi_init(struct drm_i915_private *i915); +void icl_dsi_init(struct drm_i915_private *dev_priv, + const struct intel_bios_encoder_data *devdata); void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); #endif /* __ICL_DSI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 7d9578ebae55..60a492e186ab 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -212,6 +212,7 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); const struct drm_framebuffer *fb = plane_state->hw.fb; int width, height; + unsigned int rel_data_rate; if (plane->id == PLANE_CURSOR) return 0; @@ -241,7 +242,11 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, height /= 2; } - return width * height * fb->format->cpp[color_plane]; + rel_data_rate = width * height * fb->format->cpp[color_plane]; + + return intel_adjusted_rate(&plane_state->uapi.src, + &plane_state->uapi.dst, + rel_data_rate); } int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 34a397adbd6b..858c959f7bab 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2230,122 +2230,6 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) return 0; } -static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) -{ - enum port port; - - if (!ddc_pin) - return PORT_NONE; - - for_each_port(port) { - const struct intel_bios_encoder_data *devdata = - i915->display.vbt.ports[port]; - - if (devdata && ddc_pin == devdata->child.ddc_pin) - return port; - } - - return PORT_NONE; -} - -static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata, - enum port port) -{ - struct drm_i915_private *i915 = devdata->i915; - struct child_device_config *child; - u8 mapped_ddc_pin; - enum port p; - - if (!devdata->child.ddc_pin) - return; - - mapped_ddc_pin = map_ddc_pin(i915, devdata->child.ddc_pin); - if (!intel_gmbus_is_valid_pin(i915, mapped_ddc_pin)) { - drm_dbg_kms(&i915->drm, - "Port %c has invalid DDC pin %d, " - "sticking to defaults\n", - port_name(port), mapped_ddc_pin); - devdata->child.ddc_pin = 0; - return; - } - - p = get_port_by_ddc_pin(i915, devdata->child.ddc_pin); - if (p == PORT_NONE) - return; - - drm_dbg_kms(&i915->drm, - "port %c trying to use the same DDC pin (0x%x) as port %c, " - "disabling port %c DVI/HDMI support\n", - port_name(port), mapped_ddc_pin, - port_name(p), port_name(p)); - - /* - * If we have multiple ports supposedly sharing the pin, then dvi/hdmi - * couldn't exist on the shared port. Otherwise they share the same ddc - * pin and system couldn't communicate with them separately. - * - * Give inverse child device order the priority, last one wins. Yes, - * there are real machines (eg. 
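[Editor's note] intel_plane_relative_data_rate() above now feeds the raw width * height * cpp figure through intel_adjusted_rate() so plane scaling is reflected in the bandwidth estimate. Assuming the adjustment is the source/destination size ratio — my reading of the call site, not something the hunk spells out — the arithmetic looks roughly like:

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a plane's data rate by the source/destination area ratio: a
     * plane downscaled 2x in each axis fetches 4x the source pixels per
     * screen pixel. Semantics assumed for illustration only. */
    static unsigned int adjusted_rate(unsigned int src_w, unsigned int src_h,
                                      unsigned int dst_w, unsigned int dst_h,
                                      unsigned int rate)
    {
        return (unsigned int)((uint64_t)rate * src_w * src_h /
                              ((uint64_t)dst_w * dst_h));
    }

    int main(void)
    {
        unsigned int rate = 1920 * 1080 * 4;   /* width * height * cpp */
        printf("adjusted: %u\n",
               adjusted_rate(3840, 2160, 1920, 1080, rate));   /* 4x rate */
        return 0;
    }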
Asrock B250M-HDV) where VBT has both - * port A and port E with the same AUX ch and we must pick port E :( - */ - child = &i915->display.vbt.ports[p]->child; - - child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING; - child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT; - - child->ddc_pin = 0; -} - -static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) -{ - enum port port; - - if (!aux_ch) - return PORT_NONE; - - for_each_port(port) { - const struct intel_bios_encoder_data *devdata = - i915->display.vbt.ports[port]; - - if (devdata && aux_ch == devdata->child.aux_channel) - return port; - } - - return PORT_NONE; -} - -static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata, - enum port port) -{ - struct drm_i915_private *i915 = devdata->i915; - struct child_device_config *child; - enum port p; - - p = get_port_by_aux_ch(i915, devdata->child.aux_channel); - if (p == PORT_NONE) - return; - - drm_dbg_kms(&i915->drm, - "port %c trying to use the same AUX CH (0x%x) as port %c, " - "disabling port %c DP support\n", - port_name(port), devdata->child.aux_channel, - port_name(p), port_name(p)); - - /* - * If we have multiple ports supposedly sharing the aux channel, then DP - * couldn't exist on the shared port. Otherwise they share the same aux - * channel and system couldn't communicate with them separately. - * - * Give inverse child device order the priority, last one wins. Yes, - * there are real machines (eg. Asrock B250M-HDV) where VBT has both - * port A and port E with the same AUX ch and we must pick port E :( - */ - child = &i915->display.vbt.ports[p]->child; - - child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT; - child->aux_channel = 0; -} - static u8 dvo_port_type(u8 dvo_port) { switch (dvo_port) { @@ -2490,6 +2374,19 @@ dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port) } } +enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata) +{ + struct drm_i915_private *i915 = devdata->i915; + const struct child_device_config *child = &devdata->child; + enum port port; + + port = dvo_port_to_port(i915, child->dvo_port); + if (port == PORT_NONE && DISPLAY_VER(i915) >= 11) + port = dsi_dvo_port_to_port(i915, child->dvo_port); + + return port; +} + static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate) { switch (vbt_max_link_rate) { @@ -2600,7 +2497,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR; } -static bool +bool intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata) { return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT; @@ -2615,7 +2512,8 @@ intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata) /* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->display.vbt.version < 158) + if (!devdata || devdata->i915->display.vbt.version < 158 || + DISPLAY_VER(devdata->i915) >= 14) return -1; return devdata->child.hdmi_level_shifter_value; @@ -2658,13 +2556,17 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port) return true; } -static void print_ddi_port(const struct intel_bios_encoder_data *devdata, - enum port port) +static void print_ddi_port(const struct intel_bios_encoder_data *devdata) { struct drm_i915_private *i915 = devdata->i915; const struct child_device_config *child = &devdata->child; bool 
is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt; int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock; + enum port port; + + port = intel_bios_encoder_port(devdata); + if (port == PORT_NONE) + return; is_dvi = intel_bios_encoder_supports_dvi(devdata); is_dp = intel_bios_encoder_supports_dp(devdata); @@ -2728,12 +2630,9 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata, static void parse_ddi_port(struct intel_bios_encoder_data *devdata) { struct drm_i915_private *i915 = devdata->i915; - const struct child_device_config *child = &devdata->child; enum port port; - port = dvo_port_to_port(i915, child->dvo_port); - if (port == PORT_NONE && DISPLAY_VER(i915) >= 11) - port = dsi_dvo_port_to_port(i915, child->dvo_port); + port = intel_bios_encoder_port(devdata); if (port == PORT_NONE) return; @@ -2744,22 +2643,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) return; } - if (i915->display.vbt.ports[port]) { - drm_dbg_kms(&i915->drm, - "More than one child device for port %c in VBT, using the first.\n", - port_name(port)); - return; - } - sanitize_device_type(devdata, port); - - if (intel_bios_encoder_supports_dvi(devdata)) - sanitize_ddc_pin(devdata, port); - - if (intel_bios_encoder_supports_dp(devdata)) - sanitize_aux_ch(devdata, port); - - i915->display.vbt.ports[port] = devdata; } static bool has_ddi_port_info(struct drm_i915_private *i915) @@ -2770,7 +2654,6 @@ static bool has_ddi_port_info(struct drm_i915_private *i915) static void parse_ddi_ports(struct drm_i915_private *i915) { struct intel_bios_encoder_data *devdata; - enum port port; if (!has_ddi_port_info(i915)) return; @@ -2778,10 +2661,8 @@ static void parse_ddi_ports(struct drm_i915_private *i915) list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) parse_ddi_port(devdata); - for_each_port(port) { - if (i915->display.vbt.ports[port]) - print_ddi_port(i915->display.vbt.ports[port], port); - } + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) + print_ddi_port(devdata); } static void @@ -3706,5 +3587,22 @@ bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) { - return i915->display.vbt.ports[port]; + struct intel_bios_encoder_data *devdata; + + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + if (intel_bios_encoder_port(devdata) == port) + return devdata; + } + + return NULL; +} + +void intel_bios_for_each_encoder(struct drm_i915_private *i915, + void (*func)(struct drm_i915_private *i915, + const struct intel_bios_encoder_data *devdata)) +{ + struct intel_bios_encoder_data *devdata; + + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) + func(i915, devdata); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 45fae97d9719..9680e3e92bb5 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -263,10 +263,12 @@ bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdat bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_supports_dsi(const 
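[Editor's note] With the vbt.ports[] array removed above, intel_bios_encoder_data_lookup() becomes a walk over the display_devices list keyed by intel_bios_encoder_port(), and intel_bios_for_each_encoder() exposes the same walk to callers. The lookup-by-computed-key shape, reduced to a plain singly linked list:

    #include <stddef.h>
    #include <stdio.h>

    struct encoder_data {
        int port;                      /* key derived from the VBT child */
        struct encoder_data *next;
    };

    /* First entry matching the port, or NULL -- the same contract as
     * intel_bios_encoder_data_lookup(). */
    static struct encoder_data *lookup(struct encoder_data *head, int port)
    {
        for (struct encoder_data *d = head; d; d = d->next)
            if (d->port == port)
                return d;
        return NULL;
    }

    int main(void)
    {
        struct encoder_data b = { .port = 2, .next = NULL };
        struct encoder_data a = { .port = 1, .next = &b };
        printf("port 2 %s\n", lookup(&a, 2) ? "found" : "missing");
        return 0;
    }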
struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata); +enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata); enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata); int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata); int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata); @@ -276,4 +278,8 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata); int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata); int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata); +void intel_bios_for_each_encoder(struct drm_i915_private *i915, + void (*func)(struct drm_i915_private *i915, + const struct intel_bios_encoder_data *devdata)); + #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 4207863b7b2a..dcc1f6941b60 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -37,6 +37,7 @@ #include "intel_pci_config.h" #include "intel_pcode.h" #include "intel_psr.h" +#include "intel_vdsc.h" #include "vlv_sideband.h" /** @@ -2607,9 +2608,16 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) * When we decide to use only one VDSC engine, since * each VDSC operates with 1 ppc throughput, pixel clock * cannot be higher than the VDSC clock (cdclk) + * If there 2 VDSC engines, then pixel clock can't be higher than + * VDSC clock(cdclk) * 2 and so on. */ - if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split) - min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); + if (crtc_state->dsc.compression_enable) { + int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state); + + min_cdclk = max_t(int, min_cdclk, + DIV_ROUND_UP(crtc_state->pixel_rate, + num_vdsc_instances)); + } /* * HACK. Currently for TGL/DG2 platforms we calculate diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 8966e6560516..454607b4a02a 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1453,6 +1453,16 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915) return 35; } +/* + * change_lut_val_precision: helper function to upscale or downscale lut values. + * Parameters 'to' and 'from' needs to be less than 32. This should be sufficient + * as currently there are no lut values exceeding 32 bit. + */ +static u32 change_lut_val_precision(u32 lut_val, int to, int from) +{ + return mul_u32_u32(lut_val, (1 << to)) / (1 << from); +} + static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { @@ -1487,8 +1497,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, * ToDo: Extend to max 7.0. Enable 32 bit input value * as compared to just 16 to achieve this. 
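[Editor's note] The intel_cdclk.c change above generalizes the old single-VDSC rule: each VDSC engine processes one pixel per clock, so cdclk must be at least pixel_rate / num_vdsc_instances, rounded up. A worked check of that bound:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Minimum cdclk (kHz) so that N one-pixel-per-clock VDSC engines can
     * keep up with the given pixel rate (kHz). */
    static int min_cdclk_for_dsc(int pixel_rate, int num_vdsc_instances)
    {
        return DIV_ROUND_UP(pixel_rate, num_vdsc_instances);
    }

    int main(void)
    {
        printf("%d\n", min_cdclk_for_dsc(1000000, 1));   /* 1000000 */
        printf("%d\n", min_cdclk_for_dsc(1000000, 2));   /*  500000 */
        return 0;
    }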
*/ + u32 lut_val; + + if (DISPLAY_VER(i915) >= 14) + lut_val = change_lut_val_precision(lut[i].green, 24, 16); + else + lut_val = lut[i].green; + ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - lut[i].green); + lut_val); } /* Clamp values > 1.0. */ @@ -3439,6 +3456,14 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc) for (i = 0; i < lut_size; i++) { u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe)); + /* + * For MTL and beyond, convert back the 24 bit lut values + * read from HW to 16 bit values to maintain parity with + * userspace values + */ + if (DISPLAY_VER(dev_priv) >= 14) + val = change_lut_val_precision(val, 16, 24); + lut[i].red = val; lut[i].green = val; lut[i].blue = val; diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index ab7cd5e60a0a..809074758687 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -1064,6 +1064,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv) } if (HAS_DDI(dev_priv)) { + assert_port_valid(dev_priv, PORT_E); + crt->base.port = PORT_E; crt->base.get_config = hsw_crt_get_config; crt->base.get_hw_state = intel_ddi_get_hw_state; diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 719447ce86e7..1b00ef2c6185 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -116,6 +116,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port, XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", phy_name(phy), *val); + intel_cx0_bus_reset(i915, port, lane); return -ETIMEDOUT; } @@ -158,10 +159,8 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port, XELPDP_PORT_M2P_ADDRESS(addr)); ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val); - if (ack < 0) { - intel_cx0_bus_reset(i915, port, lane); + if (ack < 0) return ack; - } intel_clear_response_ready_flag(i915, port, lane); @@ -202,6 +201,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, int lane, u16 addr, u8 data, bool committed) { enum phy phy = intel_port_to_phy(i915, port); + int ack; u32 val; if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), @@ -230,10 +230,9 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, } if (committed) { - if (intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val) < 0) { - intel_cx0_bus_reset(i915, port, lane); - return -EINVAL; - } + ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val); + if (ack < 0) + return ack; } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) & XELPDP_PORT_P2M_ERROR_SET)) { drm_dbg_kms(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h index f99809af257d..4c4db5cdcbd0 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -43,8 +43,5 @@ int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock); -void intel_cx0_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - 
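[Editor's note] change_lut_val_precision() above converts between the 16-bit LUT values userspace supplies and the 24-bit values MTL hardware stores, in both directions. A stand-alone round-trip check — shifts are used here, while the kernel helper does the same scaling with mul_u32_u32() and a divide, which is identical for power-of-two factors:

    #include <stdint.h>
    #include <stdio.h>

    /* Rescale a LUT entry from a `from`-bit range to a `to`-bit range. */
    static uint32_t change_lut_val_precision(uint32_t lut_val, int to, int from)
    {
        return (uint32_t)(((uint64_t)lut_val << to) >> from);
    }

    int main(void)
    {
        uint32_t user = 0xFFFF;                                  /* 16-bit in */
        uint32_t hw   = change_lut_val_precision(user, 24, 16);  /* 0xFFFF00 */
        uint32_t back = change_lut_val_precision(hw, 16, 24);    /* 0xFFFF */
        printf("user=%#x hw=%#x back=%#x\n", user, hw, back);
        return back == user ? 0 : 1;
    }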
const struct intel_crtc_state *crtc_state, - u32 level); int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder); #endif /* __INTEL_CX0_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 090f242e610c..3cd2191fa794 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -32,6 +32,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "icl_dsi.h" #include "intel_audio.h" #include "intel_audio_regs.h" #include "intel_backlight.h" @@ -4653,13 +4654,95 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder #define port_tc_name(port) ((port) - PORT_TC1 + '1') #define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1') -void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) +static bool port_strap_detected(struct drm_i915_private *i915, enum port port) +{ + /* straps not used on skl+ */ + if (DISPLAY_VER(i915) >= 9) + return true; + + switch (port) { + case PORT_A: + return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; + case PORT_B: + return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED; + case PORT_C: + return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED; + case PORT_D: + return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED; + case PORT_E: + return true; /* no strap for DDI-E */ + default: + MISSING_CASE(port); + return false; + } +} + +static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + return init_dp || intel_phy_is_tc(i915, phy); +} + +static bool assert_has_icl_dsi(struct drm_i915_private *i915) +{ + return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) && + !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11, + "Platform does not support DSI\n"); +} + +static bool port_in_use(struct drm_i915_private *i915, enum port port) +{ + struct intel_encoder *encoder; + + for_each_intel_encoder(&i915->drm, encoder) { + /* FIXME what about second port for dual link DSI? 
*/ + if (encoder->port == port) + return true; + } + + return false; +} + +void intel_ddi_init(struct drm_i915_private *dev_priv, + const struct intel_bios_encoder_data *devdata) { struct intel_digital_port *dig_port; struct intel_encoder *encoder; - const struct intel_bios_encoder_data *devdata; bool init_hdmi, init_dp; - enum phy phy = intel_port_to_phy(dev_priv, port); + enum port port; + enum phy phy; + + port = intel_bios_encoder_port(devdata); + if (port == PORT_NONE) + return; + + if (!port_strap_detected(dev_priv, port)) { + drm_dbg_kms(&dev_priv->drm, + "Port %c strap not detected\n", port_name(port)); + return; + } + + if (!assert_port_valid(dev_priv, port)) + return; + + if (port_in_use(dev_priv, port)) { + drm_dbg_kms(&dev_priv->drm, + "Port %c already claimed\n", port_name(port)); + return; + } + + if (intel_bios_encoder_supports_dsi(devdata)) { + /* BXT/GLK handled elsewhere, for now at least */ + if (!assert_has_icl_dsi(dev_priv)) + return; + + icl_dsi_init(dev_priv, devdata); + return; + } + + phy = intel_port_to_phy(dev_priv, port); /* * On platforms with HTI (aka HDPORT), if it's enabled at boot it may @@ -4673,14 +4756,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) return; } - devdata = intel_bios_encoder_data_lookup(dev_priv, port); - if (!devdata) { - drm_dbg_kms(&dev_priv->drm, - "VBT says port %c is not present\n", - port_name(port)); - return; - } - init_hdmi = intel_bios_encoder_supports_dvi(devdata) || intel_bios_encoder_supports_hdmi(devdata); init_dp = intel_bios_encoder_supports_dp(devdata); @@ -4715,6 +4790,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) if (!dig_port) return; + dig_port->aux_ch = AUX_CH_NONE; + encoder = &dig_port->base; encoder->devdata = devdata; @@ -4895,7 +4972,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = intel_ddi_max_lanes(dig_port); - dig_port->aux_ch = intel_dp_aux_ch(encoder); + + if (need_aux_ch(encoder, init_dp)) { + dig_port->aux_ch = intel_dp_aux_ch(encoder); + if (dig_port->aux_ch == AUX_CH_NONE) + goto err; + } if (intel_phy_is_tc(dev_priv, phy)) { bool is_legacy = diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 2bc034042a93..4999c0ee229b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -11,6 +11,7 @@ struct drm_connector_state; struct drm_i915_private; struct intel_atomic_state; +struct intel_bios_encoder_data; struct intel_connector; struct intel_crtc; struct intel_crtc_state; @@ -50,7 +51,8 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port); -void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); +void intel_ddi_init(struct drm_i915_private *dev_priv, + const struct intel_bios_encoder_data *devdata); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index b7d20485bde5..5b2665a9d86d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -1049,12 +1049,26 @@ static const union 
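[Editor's note] intel_ddi_init() above now runs every VBT encoder through a series of gates — strap detected, port valid, port not already claimed, DSI handed off to icl_dsi_init(). A compact sketch of the claim-tracking gate; a bitmask registry stands in for the encoder list that port_in_use() actually walks:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int claimed_ports;    /* bit N set => port N initialized */

    static bool port_in_use(int port)
    {
        return claimed_ports & (1u << port);
    }

    /* True if this caller won the claim, false if a prior device holds it. */
    static bool claim_port(int port)
    {
        if (port_in_use(port)) {
            fprintf(stderr, "Port %d already claimed\n", port);
            return false;
        }
        claimed_ports |= 1u << port;
        return true;
    }

    int main(void)
    {
        claim_port(1);            /* first VBT child device wins */
        if (!claim_port(1))       /* a duplicate child device is rejected */
            printf("duplicate skipped\n");
        return 0;
    }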
intel_ddi_buf_trans_entry _mtl_c10_trans_dp14[] = { { .snps = { 62, 0, 0 } }, /* preset 9 */ }; -static const struct intel_ddi_buf_trans mtl_cx0_trans = { +static const struct intel_ddi_buf_trans mtl_c10_trans_dp14 = { .entries = _mtl_c10_trans_dp14, .num_entries = ARRAY_SIZE(_mtl_c10_trans_dp14), .hdmi_default_entry = ARRAY_SIZE(_mtl_c10_trans_dp14) - 1, }; +/* DP1.4 */ +static const union intel_ddi_buf_trans_entry _mtl_c20_trans_dp14[] = { + { .snps = { 20, 0, 0 } }, /* preset 0 */ + { .snps = { 24, 0, 4 } }, /* preset 1 */ + { .snps = { 30, 0, 9 } }, /* preset 2 */ + { .snps = { 34, 0, 14 } }, /* preset 3 */ + { .snps = { 29, 0, 0 } }, /* preset 4 */ + { .snps = { 34, 0, 5 } }, /* preset 5 */ + { .snps = { 38, 0, 10 } }, /* preset 6 */ + { .snps = { 36, 0, 0 } }, /* preset 7 */ + { .snps = { 40, 0, 6 } }, /* preset 8 */ + { .snps = { 48, 0, 0 } }, /* preset 9 */ +}; + /* DP2.0 */ static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = { { .snps = { 48, 0, 0 } }, /* preset 0 */ @@ -1072,7 +1086,7 @@ static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = { { .snps = { 37, 4, 7 } }, /* preset 12 */ { .snps = { 33, 4, 11 } }, /* preset 13 */ { .snps = { 40, 8, 0 } }, /* preset 14 */ - { .snps = { 28, 2, 2 } }, /* preset 15 */ + { .snps = { 30, 2, 2 } }, /* preset 15 */ }; /* HDMI2.0 */ @@ -1090,6 +1104,12 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_hdmi = { .hdmi_default_entry = 0, }; +static const struct intel_ddi_buf_trans mtl_c20_trans_dp14 = { + .entries = _mtl_c20_trans_dp14, + .num_entries = ARRAY_SIZE(_mtl_c20_trans_dp14), + .hdmi_default_entry = ARRAY_SIZE(_mtl_c20_trans_dp14) - 1, +}; + static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = { .entries = _mtl_c20_trans_uhbr, .num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr), @@ -1678,8 +1698,10 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder, return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy))) return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries); + else if (!intel_is_c10phy(i915, phy)) + return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries); else - return intel_get_buf_trans(&mtl_cx0_trans, n_entries); + return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries); } void intel_ddi_buf_trans_init(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 16603d591f56..43cba98f7753 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -53,7 +53,6 @@ #include "i915_utils.h" #include "i9xx_plane.h" #include "i9xx_wm.h" -#include "icl_dsi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_audio.h" @@ -3153,6 +3152,10 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) if (DISPLAY_VER(dev_priv) >= 12) val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; + /* allow PSR with sprite enabled */ + if (IS_BROADWELL(dev_priv)) + val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; + intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); } @@ -7143,7 +7146,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) */ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); } - intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref); + /* + * Delay re-enabling DC states by 17 ms to avoid the off->on->off + * toggling overhead at and above 60 FPS. 
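[Editor's note] The 17 ms figure in the comment above (applied via intel_display_power_put_async_delay() just below) sits just above one frame period at 60 FPS, so back-to-back page flips keep the DC-off reference alive instead of toggling DC states every frame. The arithmetic, for the record:

    #include <stdio.h>

    int main(void)
    {
        double frame_ms = 1000.0 / 60.0;   /* ~16.67 ms per frame at 60 FPS */
        int delay_ms = 17;                 /* just above one frame period */
        printf("frame=%.2f ms, delay=%d ms, outlives one frame: %s\n",
               frame_ms, delay_ms, delay_ms > frame_ms ? "yes" : "no");
        return 0;
    }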
+ */ + intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); /* @@ -7387,6 +7394,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) return true; } +bool assert_port_valid(struct drm_i915_private *i915, enum port port) +{ + return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)), + "Platform does not support port %c\n", port_name(port)); +} + void intel_setup_outputs(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; @@ -7397,93 +7410,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_METEORLAKE(dev_priv)) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_TC1); - intel_ddi_init(dev_priv, PORT_TC2); - intel_ddi_init(dev_priv, PORT_TC3); - intel_ddi_init(dev_priv, PORT_TC4); - } else if (IS_DG2(dev_priv)) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_C); - intel_ddi_init(dev_priv, PORT_D_XELPD); - intel_ddi_init(dev_priv, PORT_TC1); - } else if (IS_ALDERLAKE_P(dev_priv)) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_TC1); - intel_ddi_init(dev_priv, PORT_TC2); - intel_ddi_init(dev_priv, PORT_TC3); - intel_ddi_init(dev_priv, PORT_TC4); - icl_dsi_init(dev_priv); - } else if (IS_ALDERLAKE_S(dev_priv)) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_TC1); - intel_ddi_init(dev_priv, PORT_TC2); - intel_ddi_init(dev_priv, PORT_TC3); - intel_ddi_init(dev_priv, PORT_TC4); - } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_TC1); - intel_ddi_init(dev_priv, PORT_TC2); - } else if (DISPLAY_VER(dev_priv) >= 12) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_TC1); - intel_ddi_init(dev_priv, PORT_TC2); - intel_ddi_init(dev_priv, PORT_TC3); - intel_ddi_init(dev_priv, PORT_TC4); - intel_ddi_init(dev_priv, PORT_TC5); - intel_ddi_init(dev_priv, PORT_TC6); - icl_dsi_init(dev_priv); - } else if (IS_JSL_EHL(dev_priv)) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_C); - intel_ddi_init(dev_priv, PORT_D); - icl_dsi_init(dev_priv); - } else if (DISPLAY_VER(dev_priv) == 11) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_C); - intel_ddi_init(dev_priv, PORT_D); - intel_ddi_init(dev_priv, PORT_E); - intel_ddi_init(dev_priv, PORT_F); - icl_dsi_init(dev_priv); - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_C); - vlv_dsi_init(dev_priv); - } else if (DISPLAY_VER(dev_priv) >= 9) { - intel_ddi_init(dev_priv, PORT_A); - intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_C); - intel_ddi_init(dev_priv, PORT_D); - intel_ddi_init(dev_priv, PORT_E); - } else if (HAS_DDI(dev_priv)) { - u32 found; - + if (HAS_DDI(dev_priv)) { if (intel_ddi_crt_present(dev_priv)) intel_crt_init(dev_priv); - /* Haswell uses DDI functions to detect digital outputs. 
*/ - found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; - if (found) - intel_ddi_init(dev_priv, PORT_A); - - found = intel_de_read(dev_priv, SFUSE_STRAP); - if (found & SFUSE_STRAP_DDIB_DETECTED) - intel_ddi_init(dev_priv, PORT_B); - if (found & SFUSE_STRAP_DDIC_DETECTED) - intel_ddi_init(dev_priv, PORT_C); - if (found & SFUSE_STRAP_DDID_DETECTED) - intel_ddi_init(dev_priv, PORT_D); - if (found & SFUSE_STRAP_DDIF_DETECTED) - intel_ddi_init(dev_priv, PORT_F); + intel_bios_for_each_encoder(dev_priv, intel_ddi_init); + + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + vlv_dsi_init(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { int found; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index c744c021af23..49ac8473b988 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -113,7 +113,7 @@ enum i9xx_plane_id { #define for_each_dbuf_slice(__dev_priv, __slice) \ for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \ - for_each_if(INTEL_INFO(__dev_priv)->display->dbuf.slice_mask & BIT(__slice)) + for_each_if(DISPLAY_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice)) #define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \ for_each_dbuf_slice((__dev_priv), (__slice)) \ @@ -539,6 +539,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv, #define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true) #define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false) +bool assert_port_valid(struct drm_i915_private *i915, enum port port); + /* * Use I915_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw state sanity * checks to check for unexpected conditions which may not necessarily be a user diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 8d2243c71dd8..53e5c33e08c3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -17,6 +17,7 @@ #include <drm/drm_modeset_lock.h> #include "intel_cdclk.h" +#include "intel_display_device.h" #include "intel_display_limits.h" #include "intel_display_power.h" #include "intel_dpll_mgr.h" @@ -33,7 +34,6 @@ struct i915_audio_component; struct i915_hdcp_arbiter; struct intel_atomic_state; struct intel_audio_funcs; -struct intel_bios_encoder_data; struct intel_cdclk_funcs; struct intel_cdclk_vals; struct intel_color_funcs; @@ -218,7 +218,6 @@ struct intel_vbt_data { struct list_head display_devices; struct list_head bdb_blocks; - struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. 
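[Editor's note] assert_port_valid() above simply tests the port's bit in DISPLAY_RUNTIME_INFO(i915)->port_mask, which the per-platform defaults below populate; together they replace the hard-coded intel_ddi_init() ladders deleted from intel_setup_outputs(). A miniature of the scheme — the enum values and example mask are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_TC1, PORT_MAX };

    struct display_runtime_info {
        uint16_t port_mask;    /* BIT(port) set for each supported port */
    };

    /* Hypothetical platform exposing DDI A plus one Type-C port. */
    static const struct display_runtime_info example_runtime = {
        .port_mask = (1u << PORT_A) | (1u << PORT_TC1),
    };

    static bool assert_port_valid(const struct display_runtime_info *rt,
                                  enum port port)
    {
        if (!(rt->port_mask & (1u << port))) {
            fprintf(stderr, "Platform does not support port %d\n", port);
            return false;
        }
        return true;
    }

    int main(void)
    {
        assert_port_valid(&example_runtime, PORT_A);   /* ok */
        assert_port_valid(&example_runtime, PORT_C);   /* warns, false */
        return 0;
    }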
*/ struct sdvo_device_mapping { u8 initialized; u8 dvo_port; @@ -430,6 +429,14 @@ struct intel_display { } hti; struct { + /* Access with DISPLAY_INFO() */ + const struct intel_display_device_info *__device_info; + + /* Access with DISPLAY_RUNTIME_INFO() */ + struct intel_display_runtime_info __runtime_info; + } info; + + struct { bool false_color; } ips; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 165e2c7e3126..63c1fb9e479f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -819,8 +819,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file, if (IS_ERR(input_buffer)) return PTR_ERR(input_buffer); - drm_dbg(&to_i915(dev)->drm, - "Copied %d bytes from user\n", (unsigned int)len); + drm_dbg(dev, "Copied %d bytes from user\n", (unsigned int)len); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { @@ -839,8 +838,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file, status = kstrtoint(input_buffer, 10, &val); if (status < 0) break; - drm_dbg(&to_i915(dev)->drm, - "Got %d for test active\n", val); + drm_dbg(dev, "Got %d for test active\n", val); /* To prevent erroneous activation of the compliance * testing code, only accept an actual value of 1 here */ diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index 3fd30e7f0062..8286e79522d1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -16,9 +16,6 @@ #include "intel_display_reg_defs.h" #include "intel_fbc.h" -__diag_push(); -__diag_ignore_all("-Woverride-init", "Allow overriding inherited members"); - static const struct intel_display_device_info no_display = {}; #define PIPE_A_OFFSET 0x70000 @@ -187,10 +184,6 @@ static const struct intel_display_device_info no_display = {}; .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) -static const struct intel_display_device_info i830_display = { - I830_DISPLAY, -}; - #define I845_DISPLAY \ .has_overlay = 1, \ .overlay_needs_physical = 1, \ @@ -203,19 +196,29 @@ static const struct intel_display_device_info i830_display = { .__runtime_defaults.pipe_mask = BIT(PIPE_A), \ .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) +static const struct intel_display_device_info i830_display = { + I830_DISPLAY, + + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C), /* DVO A/B/C */ +}; + static const struct intel_display_device_info i845_display = { I845_DISPLAY, + + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */ }; static const struct intel_display_device_info i85x_display = { I830_DISPLAY, + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info i865g_display = { I845_DISPLAY, + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; @@ -228,7 +231,8 @@ static const struct intel_display_device_info i865g_display = { .__runtime_defaults.ip.ver = 3, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .__runtime_defaults.cpu_transcoder_mask = \ - BIT(TRANSCODER_A) | BIT(TRANSCODER_B) + BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime_defaults.port_mask 
= BIT(PORT_B) | BIT(PORT_C) /* SDVO B/C */ static const struct intel_display_device_info i915g_display = { GEN3_DISPLAY, @@ -293,6 +297,8 @@ static const struct intel_display_device_info pnv_display = { static const struct intel_display_device_info i965g_display = { GEN4_DISPLAY, .has_overlay = 1, + + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */ }; static const struct intel_display_device_info i965gm_display = { @@ -300,17 +306,21 @@ static const struct intel_display_device_info i965gm_display = { .has_overlay = 1, .supports_tv = 1, + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info g45_display = { GEN4_DISPLAY, + + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */ }; static const struct intel_display_device_info gm45_display = { GEN4_DISPLAY, .supports_tv = 1, + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; @@ -323,7 +333,8 @@ static const struct intel_display_device_info gm45_display = { .__runtime_defaults.ip.ver = 5, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .__runtime_defaults.cpu_transcoder_mask = \ - BIT(TRANSCODER_A) | BIT(TRANSCODER_B) + BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ static const struct intel_display_device_info ilk_d_display = { ILK_DISPLAY, @@ -345,6 +356,7 @@ static const struct intel_display_device_info snb_display = { .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; @@ -358,6 +370,7 @@ static const struct intel_display_device_info ivb_display = { .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; @@ -373,6 +386,7 @@ static const struct intel_display_device_info vlv_display = { .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* HDMI/DP B/C */ }; static const struct intel_display_device_info hsw_display = { @@ -380,6 +394,8 @@ static const struct intel_display_device_info hsw_display = { .has_dp_mst = 1, .has_fpga_dbg = 1, .has_hotplug = 1, + .has_psr = 1, + .has_psr_hw_tracking = 1, HSW_PIPE_OFFSETS, IVB_CURSOR_OFFSETS, IVB_COLORS, @@ -389,6 +405,7 @@ static const struct intel_display_device_info hsw_display = { .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; @@ -397,6 +414,8 @@ static const struct intel_display_device_info bdw_display = { .has_dp_mst = 1, .has_fpga_dbg = 
1, .has_hotplug = 1, + .has_psr = 1, + .has_psr_hw_tracking = 1, HSW_PIPE_OFFSETS, IVB_CURSOR_OFFSETS, IVB_COLORS, @@ -406,6 +425,7 @@ static const struct intel_display_device_info bdw_display = { .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; @@ -421,6 +441,7 @@ static const struct intel_display_device_info chv_display = { .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* HDMI/DP B/C/D */ }; static const struct intel_display_device_info skl_display = { @@ -444,6 +465,7 @@ static const struct intel_display_device_info skl_display = { .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; @@ -467,7 +489,8 @@ static const struct intel_display_device_info skl_display = { .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ - BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C) + BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) static const struct intel_display_device_info bxt_display = { GEN9_LP_DISPLAY, @@ -484,46 +507,57 @@ static const struct intel_display_device_info glk_display = { .__runtime_defaults.ip.ver = 10, }; -static const struct intel_display_device_info gen11_display = { - .abox_mask = BIT(0), - .dbuf.size = 2048, - .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), - .has_ddi = 1, - .has_dp_mst = 1, - .has_fpga_dbg = 1, - .has_hotplug = 1, - .has_ipc = 1, - .has_psr = 1, - .has_psr_hw_tracking = 1, - .pipe_offsets = { - [TRANSCODER_A] = PIPE_A_OFFSET, - [TRANSCODER_B] = PIPE_B_OFFSET, - [TRANSCODER_C] = PIPE_C_OFFSET, - [TRANSCODER_EDP] = PIPE_EDP_OFFSET, - [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, - [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, - }, - .trans_offsets = { - [TRANSCODER_A] = TRANSCODER_A_OFFSET, - [TRANSCODER_B] = TRANSCODER_B_OFFSET, - [TRANSCODER_C] = TRANSCODER_C_OFFSET, - [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, - [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, - [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, - }, - IVB_CURSOR_OFFSETS, - ICL_COLORS, +#define ICL_DISPLAY \ + .abox_mask = BIT(0), \ + .dbuf.size = 2048, \ + .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ + .has_ddi = 1, \ + .has_dp_mst = 1, \ + .has_fpga_dbg = 1, \ + .has_hotplug = 1, \ + .has_ipc = 1, \ + .has_psr = 1, \ + .has_psr_hw_tracking = 1, \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ + }, \ + IVB_CURSOR_OFFSETS, 
\ + ICL_COLORS, \ + \ + .__runtime_defaults.ip.ver = 11, \ + .__runtime_defaults.has_dmc = 1, \ + .__runtime_defaults.has_dsc = 1, \ + .__runtime_defaults.has_hdcp = 1, \ + .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .__runtime_defaults.cpu_transcoder_mask = \ + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ + .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) - .__runtime_defaults.ip.ver = 11, - .__runtime_defaults.has_dmc = 1, - .__runtime_defaults.has_dsc = 1, - .__runtime_defaults.has_hdcp = 1, - .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .__runtime_defaults.cpu_transcoder_mask = - BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | - BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | - BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), - .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), +static const struct intel_display_device_info icl_display = { + ICL_DISPLAY, + + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), +}; + +static const struct intel_display_device_info jsl_ehl_display = { + ICL_DISPLAY, + + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), }; #define XE_D_DISPLAY \ @@ -571,6 +605,20 @@ static const struct intel_display_device_info gen11_display = { static const struct intel_display_device_info tgl_display = { XE_D_DISPLAY, + + /* + * FIXME DDI C/combo PHY C missing due to combo PHY + * code making a mess on SKUs where the PHY is missing. + */ + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | + BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4) | BIT(PORT_TC5) | BIT(PORT_TC6), +}; + +static const struct intel_display_device_info dg1_display = { + XE_D_DISPLAY, + + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | + BIT(PORT_TC1) | BIT(PORT_TC2), }; static const struct intel_display_device_info rkl_display = { @@ -582,12 +630,17 @@ static const struct intel_display_device_info rkl_display = { .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | + BIT(PORT_TC1) | BIT(PORT_TC2), }; static const struct intel_display_device_info adl_s_display = { XE_D_DISPLAY, .has_hti = 1, .has_psr_hw_tracking = 0, + + .__runtime_defaults.port_mask = BIT(PORT_A) | + BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4), }; #define XE_LPD_FEATURES \ @@ -642,6 +695,8 @@ static const struct intel_display_device_info xe_lpd_display = { BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | + BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4), }; static const struct intel_display_device_info xe_hpd_display = { @@ -651,6 +706,8 @@ static const struct intel_display_device_info xe_hpd_display = { .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D_XELPD) | + BIT(PORT_TC1), }; static const struct intel_display_device_info xe_lpdp_display = { @@ -663,10 +720,10 @@ static const struct intel_display_device_info xe_lpdp_display = { .__runtime_defaults.cpu_transcoder_mask = 
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D), + .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | + BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4), }; -__diag_pop(); - #undef INTEL_VGA_DEVICE #undef INTEL_QUANTA_VGA_DEVICE #define INTEL_VGA_DEVICE(id, info) { id, info } @@ -707,11 +764,11 @@ static const struct { INTEL_GLK_IDS(&glk_display), INTEL_KBL_IDS(&skl_display), INTEL_CFL_IDS(&skl_display), - INTEL_ICL_11_IDS(&gen11_display), - INTEL_EHL_IDS(&gen11_display), - INTEL_JSL_IDS(&gen11_display), + INTEL_ICL_11_IDS(&icl_display), + INTEL_EHL_IDS(&jsl_ehl_display), + INTEL_JSL_IDS(&jsl_ehl_display), INTEL_TGL_12_IDS(&tgl_display), - INTEL_DG1_IDS(&tgl_display), + INTEL_DG1_IDS(&dg1_display), INTEL_RKL_IDS(&rkl_display), INTEL_ADLS_IDS(&adl_s_display), INTEL_RPLS_IDS(&adl_s_display), @@ -796,6 +853,10 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915) struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915); enum pipe pipe; + BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->pipe_mask) < I915_MAX_PIPES); + BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->cpu_transcoder_mask) < I915_MAX_TRANSCODERS); + BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS); + /* Wa_14011765242: adl-s A0,A1 */ if (IS_ADLS_DISPLAY_STEP(i915, STEP_A0, STEP_A2)) for_each_pipe(i915, pipe) @@ -915,3 +976,24 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915) display_fused_off: memset(display_runtime, 0, sizeof(*display_runtime)); } + +void intel_display_device_info_print(const struct intel_display_device_info *info, + const struct intel_display_runtime_info *runtime, + struct drm_printer *p) +{ + if (runtime->ip.rel) + drm_printf(p, "display version: %u.%02u\n", + runtime->ip.ver, + runtime->ip.rel); + else + drm_printf(p, "display version: %u\n", + runtime->ip.ver); + +#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name)) + DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); +#undef PRINT_FLAG + + drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp)); + drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc)); + drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc)); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 706ff2aa1f55..3324bd453ca7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -11,6 +11,7 @@ #include "intel_display_limits.h" struct drm_i915_private; +struct drm_printer; #define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \ /* Keep in alphabetical order */ \ @@ -79,6 +80,7 @@ struct intel_display_runtime_info { u8 pipe_mask; u8 cpu_transcoder_mask; + u16 port_mask; u8 num_sprites[I915_MAX_PIPES]; u8 num_scalers[I915_MAX_PIPES]; @@ -126,4 +128,8 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid, u16 *ver, u16 *rel, u16 *step); void intel_display_device_info_runtime_init(struct drm_i915_private *i915); +void intel_display_device_info_print(const struct intel_display_device_info *info, + const struct intel_display_runtime_info *runtime, + struct drm_printer *p); + #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index ae2578741dfe..e6f172cc665a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -749,6 +749,20 @@ void 
ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) if (de_iir & DE_ERR_INT_IVB) ivb_err_int_handler(dev_priv); + if (de_iir & DE_EDP_PSR_INT_HSW) { + struct intel_encoder *encoder; + + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + u32 psr_iir; + + psr_iir = intel_uncore_rmw(&dev_priv->uncore, + EDP_PSR_IIR, 0, 0); + intel_psr_irq_handler(intel_dp, psr_iir); + break; + } + } + if (de_iir & DE_AUX_CHANNEL_A_IVB) intel_dp_aux_irq_handler(dev_priv); @@ -1135,7 +1149,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = intel_uncore_regs(&i915->uncore); u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -1156,7 +1170,7 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) void gen11_display_irq_handler(struct drm_i915_private *i915) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = intel_uncore_regs(&i915->uncore); const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); disable_rpm_wakeref_asserts(&i915->runtime_pm); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index db5437043904..38225e5d311e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -10,6 +10,7 @@ #include "i915_reg.h" #include "intel_backlight_regs.h" #include "intel_cdclk.h" +#include "intel_clock_gating.h" #include "intel_combo_phy.h" #include "intel_de.h" #include "intel_display_power.h" @@ -457,6 +458,17 @@ async_put_domains_clear_domain(struct i915_power_domains *power_domains, clear_bit(domain, power_domains->async_put_domains[1].bits); } +static void +cancel_async_put_work(struct i915_power_domains *power_domains, bool sync) +{ + if (sync) + cancel_delayed_work_sync(&power_domains->async_put_work); + else + cancel_delayed_work(&power_domains->async_put_work); + + power_domains->async_put_next_delay = 0; +} + static bool intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) @@ -477,7 +489,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)) goto out_verify; - cancel_delayed_work(&power_domains->async_put_work); + cancel_async_put_work(power_domains, false); intel_runtime_pm_put_raw(&dev_priv->runtime_pm, fetch_and_zero(&power_domains->async_put_wakeref)); out_verify: @@ -608,7 +620,8 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv, static void queue_async_put_domains_work(struct i915_power_domains *power_domains, - intel_wakeref_t wakeref) + intel_wakeref_t wakeref, + int delay_ms) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, @@ -617,7 +630,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains, power_domains->async_put_wakeref = wakeref; drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, &power_domains->async_put_work, - msecs_to_jiffies(100))); + msecs_to_jiffies(delay_ms))); } static void @@ -680,13 +693,15 @@ intel_display_power_put_async_work(struct work_struct *work) bitmap_zero(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM); queue_async_put_domains_work(power_domains, - 
fetch_and_zero(&new_work_wakeref)); + fetch_and_zero(&new_work_wakeref), + power_domains->async_put_next_delay); + power_domains->async_put_next_delay = 0; } else { /* * Cancel the work that got queued after this one got dequeued, * since here we released the corresponding async-put reference. */ - cancel_delayed_work(&power_domains->async_put_work); + cancel_async_put_work(power_domains, false); } out_verify: @@ -705,19 +720,25 @@ out_verify: * @i915: i915 device instance * @domain: power domain to reference * @wakeref: wakeref acquired for the reference that is being released + * @delay_ms: delay of powering down the power domain * * This function drops the power domain reference obtained by * intel_display_power_get*() and schedules a work to power down the * corresponding hardware block if this is the last reference. + * The power down is delayed by @delay_ms if this is >= 0, or by a default + * 100 ms otherwise. */ void __intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, - intel_wakeref_t wakeref) + intel_wakeref_t wakeref, + int delay_ms) { struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); + delay_ms = delay_ms >= 0 ? delay_ms : 100; + mutex_lock(&power_domains->lock); if (power_domains->domain_use_count[domain] > 1) { @@ -731,10 +752,13 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, /* Let a pending work requeue itself or queue a new one. */ if (power_domains->async_put_wakeref) { set_bit(domain, power_domains->async_put_domains[1].bits); + power_domains->async_put_next_delay = max(power_domains->async_put_next_delay, + delay_ms); } else { set_bit(domain, power_domains->async_put_domains[0].bits); queue_async_put_domains_work(power_domains, - fetch_and_zero(&work_wakeref)); + fetch_and_zero(&work_wakeref), + delay_ms); } out_verify: @@ -774,7 +798,7 @@ void intel_display_power_flush_work(struct drm_i915_private *i915) async_put_domains_mask(power_domains, &async_put_mask); release_async_put_domains(power_domains, &async_put_mask); - cancel_delayed_work(&power_domains->async_put_work); + cancel_async_put_work(power_domains, false); out_verify: verify_async_put_domains_state(power_domains); @@ -798,7 +822,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915) struct i915_power_domains *power_domains = &i915->display.power.domains; intel_display_power_flush_work(i915); - cancel_delayed_work_sync(&power_domains->async_put_work); + cancel_async_put_work(power_domains, true); verify_async_put_domains_state(power_domains); @@ -1385,9 +1409,8 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv) hsw_restore_lcpll(dev_priv); intel_init_pch_refclk(dev_priv); - if (HAS_PCH_LPT_LP(dev_priv)) - intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, - 0, PCH_LP_PARTITION_LEVEL_DISABLE); + /* Many display registers don't survive PC8+ */ + intel_clock_gating_init(dev_priv); } static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index df38632c6237..d3b5d04b7b07 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -12,9 +12,6 @@ #include "intel_wakeref.h" enum aux_ch; -enum dpio_channel; -enum dpio_phy; -enum i915_drm_suspend_mode; enum port; struct drm_i915_private; 
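The delay plumbing above hangs a single shared delayed work item off i915_power_domains, so a put that arrives while the work is already queued can only stretch the eventual power-down, never shorten it; async_put_next_delay therefore accumulates the maximum requested delay for the requeue. A minimal standalone model of that bookkeeping (hypothetical toy_* names, not the driver code):

#include <stdio.h>

/*
 * One delayed work item is shared by every pending async put, so a
 * later request may only lengthen the pending delay.
 */
struct toy_power_domains {
	int work_queued;	/* delayed work currently pending? */
	int queued_delay_ms;	/* delay the work was queued with */
	int next_delay_ms;	/* max delay requested while pending */
};

static void toy_put_async(struct toy_power_domains *pd, int delay_ms)
{
	/* negative means "use the default", mirroring the hunk above */
	delay_ms = delay_ms >= 0 ? delay_ms : 100;

	if (pd->work_queued) {
		/* work already pending: only remember the longest delay */
		if (delay_ms > pd->next_delay_ms)
			pd->next_delay_ms = delay_ms;
	} else {
		pd->work_queued = 1;
		pd->queued_delay_ms = delay_ms;
	}
}

int main(void)
{
	struct toy_power_domains pd = { 0 };

	toy_put_async(&pd, -1);		/* queues the work with 100 ms */
	toy_put_async(&pd, 500);	/* only bumps the requeue delay */
	printf("queued %d ms, requeue %d ms\n",
	       pd.queued_delay_ms, pd.next_delay_ms);	/* 100, 500 */
	return 0;
}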
struct i915_power_well; @@ -154,6 +151,7 @@ struct i915_power_domains { struct delayed_work async_put_work; intel_wakeref_t async_put_wakeref; struct intel_power_domain_mask async_put_domains[2]; + int async_put_next_delay; struct i915_power_well *power_wells; }; @@ -200,7 +198,8 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); void __intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, - intel_wakeref_t wakeref); + intel_wakeref_t wakeref, + int delay_ms); void intel_display_power_flush_work(struct drm_i915_private *i915); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void intel_display_power_put(struct drm_i915_private *dev_priv, @@ -211,7 +210,16 @@ intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - __intel_display_power_put_async(i915, domain, wakeref); + __intel_display_power_put_async(i915, domain, wakeref, -1); +} + +static inline void +intel_display_power_put_async_delay(struct drm_i915_private *i915, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref, + int delay_ms) +{ + __intel_display_power_put_async(i915, domain, wakeref, delay_ms); } #else void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, @@ -230,7 +238,16 @@ intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - __intel_display_power_put_async(i915, domain, -1); + __intel_display_power_put_async(i915, domain, -1, -1); +} + +static inline void +intel_display_power_put_async_delay(struct drm_i915_private *i915, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref, + int delay_ms) +{ + __intel_display_power_put_async(i915, domain, -1, delay_ms); } #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index 1015bba4af01..a8736588314d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -11,7 +11,6 @@ #include "intel_dpio_phy.h" struct drm_i915_private; -struct i915_power_well; struct i915_power_well_ops; struct intel_encoder; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 9f40da20e88d..03675620e3ea 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -713,9 +713,18 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p /* * According to BSpec, 27 is the max DSC output bpp, - * 8 is the min DSC output bpp. + * While we can still clamp higher bpp values to 27, saving bandwidth, + * a requirement to compress to bpp < 8 means we can't do that, and + * probably means we can't fit the required mode, even with + * DSC enabled. 
*/ - bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27); + if (bits_per_pixel < 8) { + drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n", + bits_per_pixel); + return 0; + } + bits_per_pixel = min_t(u32, bits_per_pixel, 27); } else { /* Find the nearest match in the array of known BPPs from VESA */ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 197c6e81db14..2d173bd495a3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -14,7 +14,7 @@ #include "intel_pps.h" #include "intel_tc.h" -static u32 intel_dp_aux_pack(const u8 *src, int src_bytes) +u32 intel_dp_aux_pack(const u8 *src, int src_bytes) { int i; u32 v = 0; @@ -792,25 +792,60 @@ static enum aux_ch default_aux_ch(struct intel_encoder *encoder) return (enum aux_ch)encoder->port; } +static struct intel_encoder * +get_encoder_by_aux_ch(struct intel_encoder *encoder, + enum aux_ch aux_ch) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_encoder *other; + + for_each_intel_encoder(&i915->drm, other) { + if (other == encoder) + continue; + + if (!intel_encoder_is_dig_port(other)) + continue; + + if (enc_to_dig_port(other)->aux_ch == aux_ch) + return other; + } + + return NULL; +} + enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_encoder *other; + const char *source; enum aux_ch aux_ch; aux_ch = intel_bios_dp_aux_ch(encoder->devdata); - if (aux_ch != AUX_CH_NONE) { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n", - encoder->base.base.id, encoder->base.name, - aux_ch_name(aux_ch)); - return aux_ch; + source = "VBT"; + + if (aux_ch == AUX_CH_NONE) { + aux_ch = default_aux_ch(encoder); + source = "platform default"; } - aux_ch = default_aux_ch(encoder); + if (aux_ch == AUX_CH_NONE) + return AUX_CH_NONE; + + /* FIXME validate aux_ch against platform caps */ + + other = get_encoder_by_aux_ch(encoder, aux_ch); + if (other) { + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] AUX CH %c already claimed by [ENCODER:%d:%s]\n", + encoder->base.base.id, encoder->base.name, aux_ch_name(aux_ch), + other->base.base.id, other->base.name); + return AUX_CH_NONE; + } drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] using AUX %c (platform default)\n", + "[ENCODER:%d:%s] Using AUX CH %c (%s)\n", encoder->base.base.id, encoder->base.name, - aux_ch_name(aux_ch)); + aux_ch_name(aux_ch), source); return aux_ch; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 5b608f9d3499..8447f3e601fe 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -6,6 +6,8 @@ #ifndef __INTEL_DP_AUX_H__ #define __INTEL_DP_AUX_H__ +#include <linux/types.h> + enum aux_ch; struct drm_i915_private; struct intel_dp; @@ -17,5 +19,6 @@ void intel_dp_aux_init(struct intel_dp *intel_dp); enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); void intel_dp_aux_irq_handler(struct drm_i915_private *i915); +u32 intel_dp_aux_pack(const u8 *src, int src_bytes); #endif /* __INTEL_DP_AUX_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 7c5fddb203ba..fbfd8f959f17 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -166,6 +166,8 @@ struct i915_vma *intel_dpt_pin(struct 
i915_address_space *vm) i915_vma_get(vma); } + dpt->obj->mm.dirty = true; + atomic_dec(&i915->gpu_error.pending_fb_pin); intel_runtime_pm_put(&i915->runtime_pm, wakeref); @@ -261,7 +263,7 @@ intel_dpt_create(struct intel_framebuffer *fb) dpt_obj = i915_gem_object_create_stolen(i915, size); if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) { drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n"); - dpt_obj = i915_gem_object_create_internal(i915, size); + dpt_obj = i915_gem_object_create_shmem(i915, size); } if (IS_ERR(dpt_obj)) return ERR_CAST(dpt_obj); diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index 5efdd471ac2b..d3cf6a652221 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -9,6 +9,26 @@ #include "intel_dsi.h" #include "intel_panel.h" +void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi) +{ + ktime_t panel_power_on_time; + s64 panel_power_off_duration; + + panel_power_on_time = ktime_get_boottime(); + panel_power_off_duration = ktime_ms_delta(panel_power_on_time, + intel_dsi->panel_power_off_time); + + if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay) + msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration); +} + +void intel_dsi_shutdown(struct intel_encoder *encoder) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + + intel_dsi_wait_panel_power_cycle(intel_dsi); +} + int intel_dsi_bitrate(const struct intel_dsi *intel_dsi) { int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index ce80bd8be519..083390e5e442 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -173,5 +173,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, const struct mipi_dsi_host_ops *funcs, enum port port); +void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi); +void intel_dsi_shutdown(struct intel_encoder *encoder); #endif /* _INTEL_DSI_H */ diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index c7935ea498c4..e56ec3f2d84a 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -235,7 +235,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); u32 delay = *((const u32 *) data); - drm_dbg_kms(&i915->drm, "\n"); + drm_dbg_kms(&i915->drm, "%d usecs\n", delay); usleep_range(delay, delay + 10); data += 4; diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 9884678743b6..b386894c3a6d 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -509,6 +509,8 @@ void intel_dvo_init(struct drm_i915_private *i915) return; } + assert_port_valid(i915, intel_dvo->dev.port); + encoder->type = INTEL_OUTPUT_DVO; encoder->power_domain = POWER_DOMAIN_PORT_OTHER; encoder->port = intel_dvo->dev.port; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index f1439827bc80..31d0d695d567 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -135,9 +135,6 @@ static int intel_fbdev_mmap(struct fb_info *info, struct 
vm_area_struct *vma) return i915_gem_fb_mmap(obj, vma); } -__diag_push(); -__diag_ignore_all("-Woverride-init", "Allow overriding the default ops"); - static const struct fb_ops intelfb_ops = { .owner = THIS_MODULE, __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev), @@ -149,8 +146,6 @@ static const struct fb_ops intelfb_ops = { .fb_mmap = intel_fbdev_mmap, }; -__diag_pop(); - static int intelfb_alloc(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { @@ -187,8 +182,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper, * If the FB is too big, just don't use it since fbdev is not very * important and we should probably use that space with FBC or other * features. + * + * Also skip stolen on MTL as Wa_22018444074 mitigation. */ - if (size * 2 < dev_priv->dsm.usable_size) + if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size) obj = i915_gem_object_create_stolen(dev_priv, size); if (IS_ERR(obj)) obj = i915_gem_object_create_shmem(dev_priv, size); diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 17a7aa8b28c2..22392f94b626 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -167,7 +167,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct drm_i915_private *i915 = to_i915(front->obj->base.dev); + struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); if (origin == ORIGIN_CS) { spin_lock(&i915->display.fb_tracking.lock); @@ -188,7 +188,7 @@ void __intel_fb_flush(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct drm_i915_private *i915 = to_i915(front->obj->base.dev); + struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); if (origin == ORIGIN_CS) { spin_lock(&i915->display.fb_tracking.lock); @@ -221,24 +221,18 @@ static void frontbuffer_retire(struct i915_active *ref) } static void frontbuffer_release(struct kref *ref) - __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock) + __releases(&intel_bo_to_i915(front->obj)->display.fb_tracking.lock) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), ref); struct drm_i915_gem_object *obj = front->obj; - struct i915_vma *vma; - drm_WARN_ON(obj->base.dev, atomic_read(&front->bits)); + drm_WARN_ON(&intel_bo_to_i915(obj)->drm, atomic_read(&front->bits)); - spin_lock(&obj->vma.lock); - for_each_ggtt_vma(vma, obj) { - i915_vma_clear_scanout(vma); - vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - } - spin_unlock(&obj->vma.lock); + i915_ggtt_clear_scanout(obj); - RCU_INIT_POINTER(obj->frontbuffer, NULL); - spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock); + i915_gem_object_set_frontbuffer(obj, NULL); + spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock); i915_active_fini(&front->write); @@ -249,10 +243,10 @@ static void frontbuffer_release(struct kref *ref) struct intel_frontbuffer * intel_frontbuffer_get(struct drm_i915_gem_object *obj) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct intel_frontbuffer *front; + struct drm_i915_private *i915 = intel_bo_to_i915(obj); + struct intel_frontbuffer *front, *cur; - front = __intel_frontbuffer_get(obj); + front = i915_gem_object_get_frontbuffer(obj); if (front) return front; @@ -269,24 +263,18 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) I915_ACTIVE_RETIRE_SLEEPS); spin_lock(&i915->display.fb_tracking.lock); - 
if (rcu_access_pointer(obj->frontbuffer)) { - kfree(front); - front = rcu_dereference_protected(obj->frontbuffer, true); - kref_get(&front->ref); - } else { - i915_gem_object_get(obj); - rcu_assign_pointer(obj->frontbuffer, front); - } + cur = i915_gem_object_set_frontbuffer(obj, front); spin_unlock(&i915->display.fb_tracking.lock); - - return front; + if (cur != front) + kfree(front); + return cur; } void intel_frontbuffer_put(struct intel_frontbuffer *front) { kref_put_lock(&front->ref, frontbuffer_release, - &to_i915(front->obj->base.dev)->display.fb_tracking.lock); + &intel_bo_to_i915(front->obj)->display.fb_tracking.lock); } /** @@ -315,13 +303,13 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); if (old) { - drm_WARN_ON(old->obj->base.dev, + drm_WARN_ON(&intel_bo_to_i915(old->obj)->drm, !(atomic_read(&old->bits) & frontbuffer_bits)); atomic_andnot(frontbuffer_bits, &old->bits); } if (new) { - drm_WARN_ON(new->obj->base.dev, + drm_WARN_ON(&intel_bo_to_i915(new->obj)->drm, atomic_read(&new->bits) & frontbuffer_bits); atomic_or(frontbuffer_bits, &new->bits); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index 3c474ed937fb..72d89be3284b 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -28,7 +28,6 @@ #include <linux/bits.h> #include <linux/kref.h> -#include "gem/i915_gem_object_types.h" #include "i915_active_types.h" struct drm_i915_private; @@ -75,33 +74,6 @@ void intel_frontbuffer_flip(struct drm_i915_private *i915, void intel_frontbuffer_put(struct intel_frontbuffer *front); -static inline struct intel_frontbuffer * -__intel_frontbuffer_get(const struct drm_i915_gem_object *obj) -{ - struct intel_frontbuffer *front; - - if (likely(!rcu_access_pointer(obj->frontbuffer))) - return NULL; - - rcu_read_lock(); - do { - front = rcu_dereference(obj->frontbuffer); - if (!front) - break; - - if (unlikely(!kref_get_unless_zero(&front->ref))) - continue; - - if (likely(front == rcu_access_pointer(obj->frontbuffer))) - break; - - intel_frontbuffer_put(front); - } while (1); - rcu_read_unlock(); - - return front; -} - struct intel_frontbuffer * intel_frontbuffer_get(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 34fabadefaf6..a42549fa9691 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -177,8 +177,11 @@ bool intel_hdcp2_capable(struct intel_connector *connector) struct intel_gt *gt = i915->media_gt; struct intel_gsc_uc *gsc = gt ? 
>->uc.gsc : NULL; - if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) + if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) { + drm_dbg_kms(&i915->drm, + "GSC components required for HDCP2.2 are not ready\n"); return false; + } } /* MEI/GSC interface is solid depending on which is used */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index 72573ce1d0e9..ad0405375881 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -621,24 +621,26 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915, struct intel_gt *gt = i915->media_gt; struct drm_i915_gem_object *obj = NULL; struct i915_vma *vma = NULL; - void *cmd; + void *cmd_in, *cmd_out; int err; - /* allocate object of one page for HDCP command memory and store it */ - obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + /* allocate object of two pages for HDCP command memory and store it */ + obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE); if (IS_ERR(obj)) { drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n"); return PTR_ERR(obj); } - cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true)); - if (IS_ERR(cmd)) { + cmd_in = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true)); + if (IS_ERR(cmd_in)) { drm_err(&i915->drm, "Failed to map gsc message page!\n"); - err = PTR_ERR(cmd); + err = PTR_ERR(cmd_in); goto out_unpin; } + cmd_out = cmd_in + PAGE_SIZE; + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -649,9 +651,10 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915, if (err) goto out_unmap; - memset(cmd, 0, obj->base.size); + memset(cmd_in, 0, obj->base.size); - hdcp_message->hdcp_cmd = cmd; + hdcp_message->hdcp_cmd_in = cmd_in; + hdcp_message->hdcp_cmd_out = cmd_out; hdcp_message->vma = vma; return 0; @@ -691,6 +694,8 @@ static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) struct intel_hdcp_gsc_message *hdcp_message = i915->display.hdcp.hdcp_message; + hdcp_message->hdcp_cmd_in = NULL; + hdcp_message->hdcp_cmd_out = NULL; i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP); kfree(hdcp_message); } @@ -721,38 +726,42 @@ void intel_hdcp_gsc_fini(struct drm_i915_private *i915) } static int intel_gsc_send_sync(struct drm_i915_private *i915, - struct intel_gsc_mtl_header *header, u64 addr, + struct intel_gsc_mtl_header *header_in, + struct intel_gsc_mtl_header *header_out, + u64 addr_in, u64 addr_out, size_t msg_out_len) { struct intel_gt *gt = i915->media_gt; int ret; - header->flags = 0; - ret = intel_gsc_uc_heci_cmd_submit_packet(>->uc.gsc, addr, - header->message_size, - addr, - msg_out_len + sizeof(*header)); + ret = intel_gsc_uc_heci_cmd_submit_packet(>->uc.gsc, addr_in, + header_in->message_size, + addr_out, + msg_out_len + sizeof(*header_out)); if (ret) { drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret); return ret; } /* - * Checking validity marker for memory sanity + * Checking validity marker and header status to see if some error has + * blocked us from sending message to gsc cs */ - if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) { + if (header_out->validity_marker != GSC_HECI_VALIDITY_MARKER) { drm_err(&i915->drm, "invalid validity marker\n"); return -EINVAL; } - if (header->status != 0) { + if (header_out->status != 0) { drm_err(&i915->drm, "header status indicates error %d\n", - 
header->status); + header_out->status); return -EINVAL; } - if (header->flags & GSC_OUTFLAG_MSG_PENDING) + if (header_out->flags & GSC_OUTFLAG_MSG_PENDING) { + header_in->gsc_message_handle = header_out->gsc_message_handle; return -EAGAIN; + } return 0; } @@ -769,11 +778,11 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, size_t msg_out_len) { struct intel_gt *gt = i915->media_gt; - struct intel_gsc_mtl_header *header; - const size_t max_msg_size = PAGE_SIZE - sizeof(*header); + struct intel_gsc_mtl_header *header_in, *header_out; + const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in); struct intel_hdcp_gsc_message *hdcp_message; - u64 addr, host_session_id; - u32 reply_size, msg_size; + u64 addr_in, addr_out, host_session_id; + u32 reply_size, msg_size_in, msg_size_out; int ret, tries = 0; if (!intel_uc_uses_gsc_uc(>->uc)) @@ -782,16 +791,20 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) return -ENOSPC; + msg_size_in = msg_in_len + sizeof(*header_in); + msg_size_out = msg_out_len + sizeof(*header_out); hdcp_message = i915->display.hdcp.hdcp_message; - header = hdcp_message->hdcp_cmd; - addr = i915_ggtt_offset(hdcp_message->vma); + header_in = hdcp_message->hdcp_cmd_in; + header_out = hdcp_message->hdcp_cmd_out; + addr_in = i915_ggtt_offset(hdcp_message->vma); + addr_out = addr_in + PAGE_SIZE; - msg_size = msg_in_len + sizeof(*header); - memset(header, 0, msg_size); + memset(header_in, 0, msg_size_in); + memset(header_out, 0, msg_size_out); get_random_bytes(&host_session_id, sizeof(u64)); - intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP, - msg_size, host_session_id); - memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len); + intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP, + msg_size_in, host_session_id); + memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len); /* * Keep sending request in case the pending bit is set no need to add @@ -800,7 +813,8 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, * 20 times each message 50 ms apart */ do { - ret = intel_gsc_send_sync(i915, header, addr, msg_out_len); + ret = intel_gsc_send_sync(i915, header_in, header_out, addr_in, + addr_out, msg_out_len); /* Only try again if gsc says so */ if (ret != -EAGAIN) @@ -814,7 +828,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, goto err; /* we use the same mem for the reply, so header is in the same loc */ - reply_size = header->message_size - sizeof(*header); + reply_size = header_out->message_size - sizeof(*header_out); if (reply_size > msg_out_len) { drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n", reply_size, (u32)msg_out_len); @@ -824,7 +838,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, reply_size, (u32)msg_out_len); } - memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len); + memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len); err: return ret; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h index 5cc9fd2e88f6..cbf96551e534 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h @@ -13,7 +13,8 @@ struct drm_i915_private; struct intel_hdcp_gsc_message { struct i915_vma *vma; - void *hdcp_cmd; + void *hdcp_cmd_in; + void 
*hdcp_cmd_out; }; bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 7ac5e6c5e00d..8d1c8abfcffa 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2880,21 +2880,12 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv, return ddc_pin; } -static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) +static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u8 ddc_pin; - ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata); - if (ddc_pin) { - drm_dbg_kms(&dev_priv->drm, - "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n", - encoder->base.base.id, encoder->base.name, - ddc_pin); - return ddc_pin; - } - if (IS_ALDERLAKE_S(dev_priv)) ddc_pin = adls_port_to_ddc_pin(dev_priv, port); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) @@ -2916,10 +2907,62 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) else ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); - drm_dbg_kms(&dev_priv->drm, - "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n", + return ddc_pin; +} + +static struct intel_encoder * +get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_encoder *other; + + for_each_intel_encoder(&i915->drm, other) { + if (other == encoder) + continue; + + if (!intel_encoder_is_dig_port(other)) + continue; + + if (enc_to_dig_port(other)->hdmi.ddc_bus == ddc_pin) + return other; + } + + return NULL; +} + +static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_encoder *other; + const char *source; + u8 ddc_pin; + + ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata); + source = "VBT"; + + if (!ddc_pin) { + ddc_pin = intel_hdmi_default_ddc_pin(encoder); + source = "platform default"; + } + + if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n", + encoder->base.base.id, encoder->base.name, ddc_pin); + return 0; + } + + other = get_encoder_by_ddc_pin(encoder, ddc_pin); + if (other) { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n", + encoder->base.base.id, encoder->base.name, ddc_pin, + other->base.base.id, other->base.name); + return 0; + } + + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n", encoder->base.base.id, encoder->base.name, - ddc_pin); + ddc_pin, source); return ddc_pin; } @@ -2990,6 +3033,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, return; intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder); + if (!intel_hdmi->ddc_bus) + return; + ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); drm_connector_init_with_ddc(dev, connector, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 1160fa20433b..0ff5ed46ae1e 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -376,6 +376,8 @@ static void i915_hotplug_work_func(struct work_struct *work) u32 changed = 0, retry = 0; u32 hpd_event_bits; u32 hpd_retry_bits; + struct drm_connector *first_changed_connector = NULL; + int changed_connectors = 0; mutex_lock(&dev_priv->drm.mode_config.mutex); 
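The intel_hdmi.c hunk above stops registering a connector outright when the VBT-or-default DDC pin is invalid or already claimed by another encoder, instead of silently reusing it. The claim check is just a linear scan over the other digital ports; a standalone sketch of the same idea (hypothetical toy_* names in place of the drm encoder list):

#include <stdio.h>

struct toy_encoder {
	const char *name;
	int ddc_pin;	/* 0 = no pin assigned */
};

/* return the encoder, other than 'self', that already owns ddc_pin */
static const struct toy_encoder *
toy_pin_owner(const struct toy_encoder *encoders, int n,
	      const struct toy_encoder *self, int ddc_pin)
{
	for (int i = 0; i < n; i++) {
		if (&encoders[i] == self)
			continue;
		if (encoders[i].ddc_pin == ddc_pin)
			return &encoders[i];
	}
	return NULL;
}

int main(void)
{
	struct toy_encoder encoders[] = {
		{ "HDMI-B", 1 }, { "HDMI-C", 2 }, { "HDMI-D", 0 },
	};
	const struct toy_encoder *other;

	/* a buggy VBT hands HDMI-D the pin HDMI-B already claimed */
	other = toy_pin_owner(encoders, 3, &encoders[2], 1);
	if (other)
		printf("DDC pin 1 already claimed by %s, give up\n",
		       other->name);
	return 0;
}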
drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); @@ -428,6 +430,11 @@ static void i915_hotplug_work_func(struct work_struct *work) break; case INTEL_HOTPLUG_CHANGED: changed |= hpd_bit; + changed_connectors++; + if (!first_changed_connector) { + drm_connector_get(&connector->base); + first_changed_connector = &connector->base; + } break; case INTEL_HOTPLUG_RETRY: retry |= hpd_bit; @@ -438,9 +445,14 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev_priv->drm.mode_config.mutex); - if (changed) + if (changed_connectors == 1) + drm_kms_helper_connector_hotplug_event(first_changed_connector); + else if (changed_connectors > 0) drm_kms_helper_hotplug_event(&dev_priv->drm); + if (first_changed_connector) + drm_connector_put(first_changed_connector); + /* Remove shared HPD pins that have changed */ retry &= ~changed; if (retry) { diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index f95fa793fabb..95a7ea94f417 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -842,6 +842,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + else + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -1049,7 +1051,7 @@ static void mtp_hpd_irq_setup(struct drm_i915_private *i915) enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); - intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); mtp_hpd_invert(i915); ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index d6fe2bbabe55..09c1aa1427ad 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -1348,11 +1348,12 @@ out_unlock: static int get_registers(struct intel_overlay *overlay, bool use_phys) { struct drm_i915_private *i915 = overlay->i915; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV); struct i915_vma *vma; int err; - obj = i915_gem_object_create_stolen(i915, PAGE_SIZE); + if (!IS_METEORLAKE(i915)) /* Wa_22018444074 */ + obj = i915_gem_object_create_stolen(i915, PAGE_SIZE); if (IS_ERR(obj)) obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 56c17283ba2d..04ab034a8d57 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -234,23 +234,91 @@ static u32 psr_irq_mask_get(struct intel_dp *intel_dp) EDP_PSR_MASK(intel_dp->psr.transcoder); } -static void psr_irq_control(struct intel_dp *intel_dp) +static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - i915_reg_t imr_reg; - u32 mask; + if (DISPLAY_VER(dev_priv) >= 8) + return EDP_PSR_CTL(cpu_transcoder); + else + return HSW_SRD_CTL; +} + +static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv, + 
enum transcoder cpu_transcoder) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return EDP_PSR_DEBUG(cpu_transcoder); + else + return HSW_SRD_DEBUG; +} + +static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return EDP_PSR_PERF_CNT(cpu_transcoder); + else + return HSW_SRD_PERF_CNT; +} + +static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return EDP_PSR_STATUS(cpu_transcoder); + else + return HSW_SRD_STATUS; +} + +static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + if (DISPLAY_VER(dev_priv) >= 12) + return TRANS_PSR_IMR(cpu_transcoder); + else + return EDP_PSR_IMR; +} +static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ if (DISPLAY_VER(dev_priv) >= 12) - imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); + return TRANS_PSR_IIR(cpu_transcoder); + else + return EDP_PSR_IIR; +} + +static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return EDP_PSR_AUX_CTL(cpu_transcoder); + else + return HSW_SRD_AUX_CTL; +} + +static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, int i) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return EDP_PSR_AUX_DATA(cpu_transcoder, i); else - imr_reg = EDP_PSR_IMR; + return HSW_SRD_AUX_DATA(i); +} + +static void psr_irq_control(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; + u32 mask; mask = psr_irq_psr_error_bit_get(intel_dp); if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ) mask |= psr_irq_post_exit_bit_get(intel_dp) | psr_irq_pre_entry_bit_get(intel_dp); - intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask); + intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder), + psr_irq_mask_get(intel_dp), ~mask); } static void psr_event_print(struct drm_i915_private *i915, @@ -296,12 +364,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; ktime_t time_ns = ktime_get(); - i915_reg_t imr_reg; - - if (DISPLAY_VER(dev_priv) >= 12) - imr_reg = TRANS_PSR_IMR(cpu_transcoder); - else - imr_reg = EDP_PSR_IMR; if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) { intel_dp->psr.last_entry_attempt = time_ns; @@ -339,7 +401,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) * again so we don't care about unmask the interruption * or unset irq_aux_error. 
*/ - intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp)); + intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder), + 0, psr_irq_psr_error_bit_get(intel_dp)); queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); } @@ -467,6 +530,43 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) } } +static void hsw_psr_setup_aux(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; + u32 aux_clock_divider, aux_ctl; + /* write DP_SET_POWER=D0 */ + static const u8 aux_msg[] = { + [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf), + [1] = (DP_SET_POWER >> 8) & 0xff, + [2] = DP_SET_POWER & 0xff, + [3] = 1 - 1, + [4] = DP_SET_POWER_D0, + }; + int i; + + BUILD_BUG_ON(sizeof(aux_msg) > 20); + for (i = 0; i < sizeof(aux_msg); i += 4) + intel_de_write(dev_priv, + psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2), + intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i)); + + aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); + + /* Start with bits set for DDI_AUX_CTL register */ + aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), + aux_clock_divider); + + /* Select only valid bits for SRD_AUX_CTL */ + aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK | + EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | + EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | + EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; + + intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder), + aux_ctl); +} + static void intel_psr_enable_sink(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -528,6 +628,15 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) else val |= EDP_PSR_TP2_TP3_TIME_2500us; + /* + * WA 0479: hsw,bdw + * "Do not skip both TP1 and TP2/TP3" + */ + if (DISPLAY_VER(dev_priv) < 9 && + connector->panel.vbt.psr.tp1_wakeup_time_us == 0 && + connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) + val |= EDP_PSR_TP2_TP3_TIME_100us; + check_tp3_sel: if (intel_dp_source_supports_tps3(dev_priv) && drm_dp_tps3_supported(intel_dp->dpcd)) @@ -577,7 +686,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (DISPLAY_VER(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; - intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder), + intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); } @@ -685,7 +794,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. 
*/ - intel_de_write(dev_priv, EDP_PSR_CTL(cpu_transcoder), 0); + intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0); intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val); } @@ -697,8 +806,10 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B; else if (DISPLAY_VER(dev_priv) >= 12) return cpu_transcoder == TRANSCODER_A; - else + else if (DISPLAY_VER(dev_priv) >= 9) return cpu_transcoder == TRANSCODER_EDP; + else + return false; } static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) @@ -1201,13 +1312,15 @@ static void intel_psr_activate(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; - if (transcoder_has_psr2(dev_priv, cpu_transcoder)) - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE); + drm_WARN_ON(&dev_priv->drm, + transcoder_has_psr2(dev_priv, cpu_transcoder) && + intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)) & EDP_PSR_ENABLE); + intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE); + drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active); + lockdep_assert_held(&intel_dp->psr.lock); /* psr1 and psr2 are mutually exclusive.*/ @@ -1272,6 +1385,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, u32 mask; /* + * Only HSW and BDW have PSR AUX registers that need to be set up. + * SKL+ uses hardcoded values for PSR AUX transactions + */ + if (DISPLAY_VER(dev_priv) < 9) + hsw_psr_setup_aux(intel_dp); + + /* * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also * mask LPSP to avoid dependency on other drivers that might block * runtime_pm besides preventing other hw tracking issues now we @@ -1282,11 +1402,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, EDP_PSR_DEBUG_MASK_LPSP | EDP_PSR_DEBUG_MASK_MAX_SLEEP; - if (DISPLAY_VER(dev_priv) < 11) + /* + * No separate pipe reg write mask on hsw/bdw, so have to unmask all + * registers in order to keep the CURSURFLIVE tricks working :( + */ + if (IS_DISPLAY_VER(dev_priv, 9, 10)) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; - intel_de_write(dev_priv, EDP_PSR_DEBUG(cpu_transcoder), - mask); + /* allow PSR with sprite enabled */ + if (IS_HASWELL(dev_priv)) + mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE; + + intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask); psr_irq_control(intel_dp); @@ -1352,10 +1479,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp) * first time that PSR HW tries to activate so lets keep PSR disabled * to avoid any rendering problems. 
*/ - if (DISPLAY_VER(dev_priv) >= 12) - val = intel_de_read(dev_priv, TRANS_PSR_IIR(cpu_transcoder)); - else - val = intel_de_read(dev_priv, EDP_PSR_IIR); + val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder)); val &= psr_irq_psr_error_bit_get(intel_dp); if (val) { intel_dp->psr.sink_not_reliable = true; @@ -1418,7 +1542,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp) drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); } - val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)); + val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); return; @@ -1432,7 +1556,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp) drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); } else { - val = intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder), + val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), EDP_PSR_ENABLE, 0); drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); @@ -1451,7 +1575,7 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) psr_status = EDP_PSR2_STATUS(cpu_transcoder); psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; } else { - psr_status = EDP_PSR_STATUS(cpu_transcoder); + psr_status = psr_status_reg(dev_priv, cpu_transcoder); psr_status_mask = EDP_PSR_STATUS_STATE_MASK; } @@ -2151,7 +2275,7 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) * defensive enough to cover everything. */ return intel_de_wait_for_clear(dev_priv, - EDP_PSR_STATUS(cpu_transcoder), + psr_status_reg(dev_priv, cpu_transcoder), EDP_PSR_STATUS_STATE_MASK, 50); } @@ -2205,7 +2329,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) reg = EDP_PSR2_STATUS(cpu_transcoder); mask = EDP_PSR2_STATUS_STATE_MASK; } else { - reg = EDP_PSR_STATUS(cpu_transcoder); + reg = psr_status_reg(dev_priv, cpu_transcoder); mask = EDP_PSR_STATUS_STATE_MASK; } @@ -2825,7 +2949,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) "SRDOFFACK", "SRDENT_ON", }; - val = intel_de_read(dev_priv, EDP_PSR_STATUS(cpu_transcoder)); + val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder)); status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; @@ -2872,7 +2996,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { - val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)); + val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); enabled = val & EDP_PSR_ENABLE; } seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", @@ -2884,7 +3008,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) /* * SKL+ Perf counter is reset to 0 everytime DC state is entered */ - val = intel_de_read(dev_priv, EDP_PSR_PERF_CNT(cpu_transcoder)); + val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder)); seq_printf(m, "Performance counter: %u\n", REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val)); diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index 8750cb0d8d9d..d39951383c92 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -7,6 +7,7 @@ #define __INTEL_PSR_REGS_H__ #include "intel_display_reg_defs.h" +#include "intel_dp_aux_regs.h" #define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), 
_TRANS_EXITLINE_A) #define EXITLINE_ENABLE REG_BIT(31) @@ -19,6 +20,7 @@ * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one * instance of it */ +#define HSW_SRD_CTL _MMIO(0x64800) #define _SRD_CTL_A 0x60800 #define _SRD_CTL_EDP 0x6f800 #define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A) @@ -79,10 +81,22 @@ #define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \ _EDP_PSR_TRANS_SHIFT(trans)) +#define HSW_SRD_AUX_CTL _MMIO(0x64810) +#define _SRD_AUX_CTL_A 0x60810 +#define _SRD_AUX_CTL_EDP 0x6f810 +#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(tran, _SRD_AUX_CTL_A) +#define EDP_PSR_AUX_CTL_TIME_OUT_MASK DP_AUX_CH_CTL_TIME_OUT_MASK +#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK DP_AUX_CH_CTL_MESSAGE_SIZE_MASK +#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK DP_AUX_CH_CTL_PRECHARGE_2US_MASK +#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT REG_BIT(11) +#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK + +#define HSW_SRD_AUX_DATA(i) _MMIO(0x64814 + (i) * 4) /* 5 registers */ #define _SRD_AUX_DATA_A 0x60814 #define _SRD_AUX_DATA_EDP 0x6f814 #define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */ +#define HSW_SRD_STATUS _MMIO(0x64840) #define _SRD_STATUS_A 0x60840 #define _SRD_STATUS_EDP 0x6f840 #define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A) @@ -107,12 +121,14 @@ #define EDP_PSR_STATUS_SENDING_TP1 REG_BIT(4) #define EDP_PSR_STATUS_IDLE_MASK REG_GENMASK(3, 0) +#define HSW_SRD_PERF_CNT _MMIO(0x64844) #define _SRD_PERF_CNT_A 0x60844 #define _SRD_PERF_CNT_EDP 0x6f844 #define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A) #define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0) /* PSR_MASK on SKL+ */ +#define HSW_SRD_DEBUG _MMIO(0x64860) #define _SRD_DEBUG_A 0x60860 #define _SRD_DEBUG_EDP 0x6f860 #define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A) diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c index 6e86c0971d24..543cdc46aa1d 100644 --- a/drivers/gpu/drm/i915/display/intel_qp_tables.c +++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c @@ -17,13 +17,17 @@ /* from BPP 6 to 36 in steps of 0.5 */ #define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61 -/* from BPP 6 to 24 in steps of 0.5 */ +/* For YCbCr420 the bits_per_pixel sent in PPS params + * is double the target bpp. The below values represent + * the target bpp. 
+ */ +/* from BPP 4 to 12 in steps of 0.5 */ #define RC_RANGE_QP420_8BPC_MAX_NUM_BPP 17 -/* from BPP 6 to 30 in steps of 0.5 */ +/* from BPP 4 to 15 in steps of 0.5 */ #define RC_RANGE_QP420_10BPC_MAX_NUM_BPP 23 -/* from BPP 6 to 36 in steps of 0.5 */ +/* from BPP 4 to 18 in steps of 0.5 */ #define RC_RANGE_QP420_12BPC_MAX_NUM_BPP 29 /* diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 21f92123c844..8298a86d1334 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -3313,13 +3313,19 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, return i2c_add_adapter(&sdvo->ddc) == 0; } -static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv, - enum port port) +static bool is_sdvo_port_valid(struct drm_i915_private *dev_priv, enum port port) { if (HAS_PCH_SPLIT(dev_priv)) - drm_WARN_ON(&dev_priv->drm, port != PORT_B); + return port == PORT_B; else - drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C); + return port == PORT_B || port == PORT_C; +} + +static bool assert_sdvo_port_valid(struct drm_i915_private *dev_priv, + enum port port) +{ + return !drm_WARN(&dev_priv->drm, !is_sdvo_port_valid(dev_priv, port), + "Platform does not support SDVO %c\n", port_name(port)); } bool intel_sdvo_init(struct drm_i915_private *dev_priv, @@ -3329,7 +3335,11 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, struct intel_sdvo *intel_sdvo; int i; - assert_sdvo_port_valid(dev_priv, port); + if (!assert_port_valid(dev_priv, port)) + return false; + + if (!assert_sdvo_port_valid(dev_priv, port)) + return false; intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL); if (!intel_sdvo) diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index bd9116d2cd76..9d76c2756784 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -53,22 +53,32 @@ static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder) } static void +intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf, + int bpp) +{ + int bpc = vdsc_cfg->bits_per_component; + + /* Read range_minqp and range_max_qp from qp tables */ + vdsc_cfg->rc_range_params[buf].range_min_qp = + intel_lookup_range_min_qp(bpc, buf, bpp, vdsc_cfg->native_420); + vdsc_cfg->rc_range_params[buf].range_max_qp = + intel_lookup_range_max_qp(bpc, buf, bpp, vdsc_cfg->native_420); +} + +/* + * We are using the method provided in the DSC 1.2a C-Model in codec_main.c. + * That method uses a common formula to derive values for any combination of DSC + * variables. The formula approach may yield slight differences in the derived PPS + * parameters from the original parameter sets. These differences are not consequential + * to the coding performance because all parameter sets have been shown to produce + * visually lossless quality (provides the same PPS values as + * DSCParameterValuesVESA V1-2 spreadsheet). 
+static void calculate_rc_params(struct drm_dsc_config *vdsc_cfg) { int bpc = vdsc_cfg->bits_per_component; int bpp = vdsc_cfg->bits_per_pixel >> 4; - static const s8 ofs_und6[] = { - 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 - }; - static const s8 ofs_und8[] = { - 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 - }; - static const s8 ofs_und12[] = { - 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 - }; - static const s8 ofs_und15[] = { - 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 - }; int qp_bpc_modifier = (bpc - 8) * 2; u32 res, buf_i, bpp_i; @@ -78,6 +88,28 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg) else vdsc_cfg->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1); + /* + * According to DSC 1.2 spec in Section 4.1 if native_420 is set: + * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice + * height < 8. + * -second_line_offset_adj is 512 as shown by empirical values to yield best chroma + * preservation in second line. + * -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded + * up to 16 fractional bits, we left shift second line offset by 11 to preserve 11 + * fractional bits. + */ + if (vdsc_cfg->native_420) { + if (vdsc_cfg->slice_height >= 8) + vdsc_cfg->second_line_bpg_offset = 12; + else + vdsc_cfg->second_line_bpg_offset = + 2 * (vdsc_cfg->slice_height - 1); + + vdsc_cfg->second_line_offset_adj = 512; + vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11, + vdsc_cfg->slice_height - 1); + } + /* Our hw supports only 444 modes as of today */ if (bpp >= 12) vdsc_cfg->initial_offset = 2048; @@ -97,33 +129,88 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg) vdsc_cfg->rc_quant_incr_limit0 = 11 + qp_bpc_modifier; vdsc_cfg->rc_quant_incr_limit1 = 11 + qp_bpc_modifier; - bpp_i = (2 * (bpp - 6)); - for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { - u8 range_bpg_offset; - - /* Read range_minqp and range_max_qp from qp tables */ - vdsc_cfg->rc_range_params[buf_i].range_min_qp = - intel_lookup_range_min_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420); - vdsc_cfg->rc_range_params[buf_i].range_max_qp = - intel_lookup_range_max_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420); - - /* Calculate range_bpg_offset */ - if (bpp <= 6) { - range_bpg_offset = ofs_und6[buf_i]; - } else if (bpp <= 8) { - res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2); - range_bpg_offset = ofs_und6[buf_i] + res; - } else if (bpp <= 12) { - range_bpg_offset = ofs_und8[buf_i]; - } else if (bpp <= 15) { - res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3); - range_bpg_offset = ofs_und12[buf_i] + res; - } else { - range_bpg_offset = ofs_und15[buf_i]; + if (vdsc_cfg->native_420) { + static const s8 ofs_und4[] = { + 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 + }; + static const s8 ofs_und5[] = { + 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 + }; + static const s8 ofs_und6[] = { + 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 + }; + static const s8 ofs_und8[] = { + 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 + }; + + bpp_i = bpp - 8; + for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { + u8 range_bpg_offset; + + intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i); + + /* Calculate range_bpg_offset */ + if (bpp <= 8) { + range_bpg_offset = ofs_und4[buf_i]; + } else if (bpp <= 10) { + res = DIV_ROUND_UP(((bpp - 8) * 
(ofs_und5[buf_i] - ofs_und4[buf_i])), 2); + range_bpg_offset = ofs_und4[buf_i] + res; + } else if (bpp <= 12) { + res = DIV_ROUND_UP(((bpp - 10) * + (ofs_und6[buf_i] - ofs_und5[buf_i])), 2); + range_bpg_offset = ofs_und5[buf_i] + res; + } else if (bpp <= 16) { + res = DIV_ROUND_UP(((bpp - 12) * + (ofs_und8[buf_i] - ofs_und6[buf_i])), 4); + range_bpg_offset = ofs_und6[buf_i] + res; + } else { + range_bpg_offset = ofs_und8[buf_i]; + } + + vdsc_cfg->rc_range_params[buf_i].range_bpg_offset = + range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; + } + } else { + static const s8 ofs_und6[] = { + 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 + }; + static const s8 ofs_und8[] = { + 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 + }; + static const s8 ofs_und12[] = { + 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 + }; + static const s8 ofs_und15[] = { + 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 + }; + + bpp_i = (2 * (bpp - 6)); + for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { + u8 range_bpg_offset; + + intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i); + + /* Calculate range_bpg_offset */ + if (bpp <= 6) { + range_bpg_offset = ofs_und6[buf_i]; + } else if (bpp <= 8) { + res = DIV_ROUND_UP(((bpp - 6) * + (ofs_und8[buf_i] - ofs_und6[buf_i])), 2); + range_bpg_offset = ofs_und6[buf_i] + res; + } else if (bpp <= 12) { + range_bpg_offset = ofs_und8[buf_i]; + } else if (bpp <= 15) { + res = DIV_ROUND_UP(((bpp - 12) * + (ofs_und15[buf_i] - ofs_und12[buf_i])), 3); + range_bpg_offset = ofs_und12[buf_i] + res; + } else { + range_bpg_offset = ofs_und15[buf_i]; + } + + vdsc_cfg->rc_range_params[buf_i].range_bpg_offset = + range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; } - - vdsc_cfg->rc_range_params[buf_i].range_bpg_offset = - range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; } } @@ -190,30 +277,12 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) vdsc_cfg->bits_per_pixel = compressed_bpp << 4; /* - * According to DSC 1.2 specs in Section 4.1 if native_420 is set: - * -We need to double the current bpp. - * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice - * height < 8. - * -second_line_offset_adj is 512 as shown by emperical values to yeild best chroma - * preservation in second line. - * -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded - * up to 16 fractional bits, we left shift second line offset by 11 to preserve 11 - * fractional bits. + * According to DSC 1.2 specs in Section 4.1 if native_420 is set + * we need to double the current bpp. */ - if (vdsc_cfg->native_420) { + if (vdsc_cfg->native_420) vdsc_cfg->bits_per_pixel <<= 1; - if (vdsc_cfg->slice_height >= 8) - vdsc_cfg->second_line_bpg_offset = 12; - else - vdsc_cfg->second_line_bpg_offset = - 2 * (vdsc_cfg->slice_height - 1); - - vdsc_cfg->second_line_offset_adj = 512; - vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11, - vdsc_cfg->slice_height - 1); - } - vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; drm_dsc_set_rc_buf_thresh(vdsc_cfg); @@ -237,18 +306,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) if (ret) return ret; - - /* - * FIXME: verify that the hardware actually needs these - * modifications rather than them being simple typos. 
- */ - if (compressed_bpp == 6 && - vdsc_cfg->bits_per_component == 8) - vdsc_cfg->rc_quant_incr_limit1 = 23; - - if (compressed_bpp == 8 && - vdsc_cfg->bits_per_component == 14) - vdsc_cfg->rc_range_params[0].range_bpg_offset = 0; } /* @@ -293,6 +350,16 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder) return POWER_DOMAIN_TRANSCODER_VDSC_PW2; } +int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state) +{ + int num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1; + + if (crtc_state->bigjoiner_pipes) + num_vdsc_instances *= 2; + + return num_vdsc_instances; +} + static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -303,11 +370,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) u32 pps_val = 0; u32 rc_buf_thresh_dword[4]; u32 rc_range_params_dword[8]; - u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1; int i = 0; - - if (crtc_state->bigjoiner_pipes) - num_vdsc_instances *= 2; + int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state); /* Populate PICTURE_PARAMETER_SET_0 registers */ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor << diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index 8763f00fa7e2..2cc41ff08909 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -22,6 +22,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder); struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc); +int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state); void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_dsc_dp_pps_write(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index ae2f3ab3e73d..a96e7d028c5c 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -671,20 +671,6 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) intel_de_posting_read(dev_priv, port_ctrl); } } - -static void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi) -{ - ktime_t panel_power_on_time; - s64 panel_power_off_duration; - - panel_power_on_time = ktime_get_boottime(); - panel_power_off_duration = ktime_ms_delta(panel_power_on_time, - intel_dsi->panel_power_off_time); - - if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay) - msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration); -} - static void intel_dsi_prepare(struct intel_encoder *intel_encoder, const struct intel_crtc_state *pipe_config); static void intel_dsi_unprepare(struct intel_encoder *encoder); @@ -831,8 +817,6 @@ static void bxt_dsi_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); - intel_crtc_vblank_on(crtc_state); } @@ -943,13 +927,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, intel_dsi->panel_power_off_time = ktime_get_boottime(); } -static void intel_dsi_shutdown(struct intel_encoder *encoder) -{ - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - 
- intel_dsi_wait_panel_power_cycle(intel_dsi); -} - static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index dfaaa8b66ac3..ffddec1d2a76 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -68,10 +68,8 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) switch (obj->write_domain) { case I915_GEM_DOMAIN_GTT: spin_lock(&obj->vma.lock); - for_each_ggtt_vma(vma, obj) { - if (i915_vma_unset_ggtt_write(vma)) - intel_gt_flush_ggtt_writes(vma->vm->gt); - } + for_each_ggtt_vma(vma, obj) + i915_vma_flush_writes(vma); spin_unlock(&obj->vma.lock); i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index cfd7929587d8..5a687a3686bd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2229,8 +2229,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) u32 *cs; int i; - if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) { - drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n"); + if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) { + drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -2691,6 +2691,7 @@ static int eb_select_engine(struct i915_execbuffer *eb) { struct intel_context *ce, *child; + struct intel_gt *gt; unsigned int idx; int err; @@ -2714,10 +2715,17 @@ eb_select_engine(struct i915_execbuffer *eb) } } eb->num_batches = ce->parallel.number_children + 1; + gt = ce->engine->gt; for_each_child(ce, child) intel_context_get(child); - intel_gt_pm_get(ce->engine->gt); + intel_gt_pm_get(gt); + /* + * Keep GT0 active on MTL so that i915_vma_parked() doesn't + * free VMAs while execbuf ioctl is validating VMAs. + */ + if (gt->info.id) + intel_gt_pm_get(to_gt(gt->i915)); if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { err = intel_context_alloc_state(ce); @@ -2756,7 +2764,10 @@ eb_select_engine(struct i915_execbuffer *eb) return err; err: - intel_gt_pm_put(ce->engine->gt); + if (gt->info.id) + intel_gt_pm_put(to_gt(gt->i915)); + + intel_gt_pm_put(gt); for_each_child(ce, child) intel_context_put(child); intel_context_put(ce); @@ -2769,6 +2780,12 @@ eb_put_engine(struct i915_execbuffer *eb) struct intel_context *child; i915_vm_put(eb->context->vm); + /* + * This works in conjunction with eb_select_engine() to prevent + * i915_vma_parked() from interfering while execbuf validates vmas. 
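+ */

The acquire side of this extra wakeref sits in eb_select_engine() above and the release side just below; condensed into one place for clarity (the wrapper names are illustrative, not from the patch):

static void eb_gt_pm_get(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);
	/* Non-root GT (e.g. the MTL media GT): also pin the primary GT */
	if (gt->info.id)
		intel_gt_pm_get(to_gt(gt->i915));
}

static void eb_gt_pm_put(struct intel_gt *gt)
{
	if (gt->info.id)
		intel_gt_pm_put(to_gt(gt->i915));
	intel_gt_pm_put(gt);
}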
+ if (eb->gt->info.id) + intel_gt_pm_put(to_gt(eb->gt->i915)); intel_gt_pm_put(eb->gt); for_each_child(eb->context, child) intel_context_put(child); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 97ac6fb37958..148468098082 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -469,7 +469,7 @@ void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, { struct intel_frontbuffer *front; - front = __intel_frontbuffer_get(obj); + front = i915_gem_object_get_frontbuffer(obj); if (front) { intel_frontbuffer_flush(front, origin); intel_frontbuffer_put(front); @@ -481,7 +481,7 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, { struct intel_frontbuffer *front; - front = __intel_frontbuffer_get(obj); + front = i915_gem_object_get_frontbuffer(obj); if (front) { intel_frontbuffer_invalidate(front, origin); intel_frontbuffer_put(front); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 884a17275b3a..112c130cfaaa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -891,4 +891,71 @@ static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *o #endif +/** + * i915_gem_object_get_frontbuffer - Get the object's frontbuffer + * @obj: The object whose frontbuffer to get. + * + * Get pointer to object's frontbuffer if such exists. Please note that RCU + * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. + * + * Return: pointer to object's frontbuffer if such exists or NULL + */ +static inline struct intel_frontbuffer * +i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj) +{ + struct intel_frontbuffer *front; + + if (likely(!rcu_access_pointer(obj->frontbuffer))) + return NULL; + + rcu_read_lock(); + do { + front = rcu_dereference(obj->frontbuffer); + if (!front) + break; + + if (unlikely(!kref_get_unless_zero(&front->ref))) + continue; + + if (likely(front == rcu_access_pointer(obj->frontbuffer))) + break; + + intel_frontbuffer_put(front); + } while (1); + rcu_read_unlock(); + + return front; +} + +/** + * i915_gem_object_set_frontbuffer - Set the object's frontbuffer + * @obj: The object whose frontbuffer to set. + * @front: The frontbuffer to set + * + * Set object's frontbuffer pointer. If frontbuffer is already set for the + * object keep it and return its pointer to the caller. Please note that RCU + * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This + * function is protected by i915->display.fb_tracking.lock + * + * Return: pointer to frontbuffer which was set. 
+ */ +static inline struct intel_frontbuffer * +i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj, + struct intel_frontbuffer *front) +{ + struct intel_frontbuffer *cur = front; + + if (!front) { + RCU_INIT_POINTER(obj->frontbuffer, NULL); + } else if (rcu_access_pointer(obj->frontbuffer)) { + cur = rcu_dereference_protected(obj->frontbuffer, true); + kref_get(&cur->ref); + } else { + drm_gem_object_get(intel_bo_to_drm_bo(obj)); + rcu_assign_pointer(obj->frontbuffer, front); + } + + return cur; +} + #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index e72c57716bee..2292404007c8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -17,6 +17,8 @@ #include "i915_selftest.h" #include "i915_vma_resource.h" +#include "gt/intel_gt_defines.h" + struct drm_i915_gem_object; struct intel_fronbuffer; struct intel_memory_region; @@ -675,7 +677,7 @@ struct drm_i915_gem_object { */ bool dirty:1; - u32 tlb; + u32 tlb[I915_MAX_GT]; } mm; struct { @@ -718,6 +720,9 @@ struct drm_i915_gem_object { }; }; +#define intel_bo_to_drm_bo(bo) (&(bo)->base) +#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev) + static inline struct drm_i915_gem_object * to_intel_bo(struct drm_gem_object *gem) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 89fc8ea6bcfc..eae40e8f52ed 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -7,7 +7,7 @@ #include <drm/drm_cache.h> #include "gt/intel_gt.h" -#include "gt/intel_gt_pm.h" +#include "gt/intel_tlb.h" #include "i915_drv.h" #include "i915_gem_object.h" @@ -193,13 +193,16 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr) static void flush_tlb_invalidate(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct intel_gt *gt = to_gt(i915); + struct intel_gt *gt; + int id; - if (!obj->mm.tlb) - return; + for_each_gt(gt, i915, id) { + if (!obj->mm.tlb[id]) + return; - intel_gt_invalidate_tlb(gt, obj->mm.tlb); - obj->mm.tlb = 0; + intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]); + obj->mm.tlb[id] = 0; + } } struct sg_table * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 3b094d36a0b0..5b0a5cf9a98a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -892,7 +892,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, } else { resource_size_t lmem_range; - lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF; + lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF; lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT; lmem_size *= SZ_1G; } diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index df6c9a84252c..6b9f6cf50bf6 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1246,8 +1246,10 @@ static int igt_write_huge(struct drm_i915_private *i915, * times in succession a possibility by enlarging the permutation array. 
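A short usage sketch for the two inline frontbuffer helpers added above, mirroring the converted callers in i915_gem_object.c (flush_if_tracked() is a hypothetical wrapper, not part of the patch):

static void flush_if_tracked(struct drm_i915_gem_object *obj,
			     enum fb_op_origin origin)
{
	/* Takes a reference under RCU; NULL if no frontbuffer is tracked */
	struct intel_frontbuffer *front = i915_gem_object_get_frontbuffer(obj);

	if (!front)
		return;

	intel_frontbuffer_flush(front, origin);
	intel_frontbuffer_put(front);
}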
*/ order = i915_random_order(count * count, &prng); - if (!order) - return -ENOMEM; + if (!order) { + err = -ENOMEM; + goto out; + } max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); max = div_u64(max - size, max_page_size); diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 1c82caf525c3..8fe0499308ff 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -76,7 +76,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode) cmd = MI_FLUSH; if (mode & EMIT_INVALIDATE) { cmd |= MI_EXE_FLUSH; - if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5) + if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5) cmd |= MI_INVALIDATE_ISP; } diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 23857cc08eca..8a3cbb01bcbe 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -39,11 +39,11 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. */ - if (GRAPHICS_VER(rq->engine->i915) == 9) + if (GRAPHICS_VER(rq->i915) == 9) vf_flush_wa = true; /* WaForGAMHang:kbl */ - if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) dc_flush_wa = true; } @@ -165,14 +165,60 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) +static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine) { - u32 gsi_offset = gt->uncore->gsi_offset; + switch (engine->id) { + case RCS0: + return GEN12_CCS_AUX_INV; + case BCS0: + return GEN12_BCS0_AUX_INV; + case VCS0: + return GEN12_VD0_AUX_INV; + case VCS2: + return GEN12_VD2_AUX_INV; + case VECS0: + return GEN12_VE0_AUX_INV; + case CCS0: + return GEN12_CCS0_AUX_INV; + default: + return INVALID_MMIO_REG; + } +} + +static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine) +{ + i915_reg_t reg = gen12_get_aux_inv_reg(engine); + + if (IS_PONTEVECCHIO(engine->i915)) + return false; + + /* + * So far platforms supported by i915 having flat ccs do not require + * AUX invalidation. Check also whether the engine requires it. 
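For reference, the register-poll semaphore that gen12_emit_aux_table_inv() gains in the hunk just below can be read as follows (an annotated restatement of the emitted dwords, not additional code):

/* LRI: set AUX_INV (bit 0) in the engine's GEN12_*_AUX_INV register */
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
/* then poll that same register until the HW clears it back to 0 */
*cs++ = MI_SEMAPHORE_WAIT_TOKEN | MI_SEMAPHORE_REGISTER_POLL |
	MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;				/* semaphore data: wait for == 0 */
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = 0;
*cs++ = 0;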
+ */ + return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915); +} + +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs) +{ + i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine); + u32 gsi_offset = engine->gt->uncore->gsi_offset; + + if (!gen12_needs_ccs_aux_inv(engine)) + return cs; *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; *cs++ = AUX_INV; - *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_REGISTER_POLL | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; + *cs++ = 0; + *cs++ = 0; return cs; } @@ -180,8 +226,8 @@ u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv static int mtl_dummy_pipe_control(struct i915_request *rq) { /* Wa_14016712196 */ - if (IS_MTL_GRAPHICS_STEP(rq->engine->i915, M, STEP_A0, STEP_B0) || - IS_MTL_GRAPHICS_STEP(rq->engine->i915, P, STEP_A0, STEP_B0)) { + if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) || + IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) { u32 *cs; /* dummy PIPE_CONTROL + depth flush */ @@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) { struct intel_engine_cs *engine = rq->engine; - if (mode & EMIT_FLUSH) { - u32 flags = 0; + /* + * On Aux CCS platforms the invalidation of the Aux + * table requires quiescing memory traffic beforehand + */ + if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) { + u32 bit_group_0 = 0; + u32 bit_group_1 = 0; int err; u32 *cs; @@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (err) return err; - flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; - flags |= PIPE_CONTROL_FLUSH_L3; - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH; + + /* + * When required, in MTL and beyond platforms we + * need to set the CCS_FLUSH bit in the pipe control + */ + if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70)) + bit_group_0 |= PIPE_CONTROL_CCS_FLUSH; + + bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH; + bit_group_1 |= PIPE_CONTROL_FLUSH_L3; + bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; /* Wa_1409600907:tgl,adl-p */ - flags |= PIPE_CONTROL_DEPTH_STALL; - flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; + bit_group_1 |= PIPE_CONTROL_DEPTH_STALL; + bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE; + bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE; - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - flags |= PIPE_CONTROL_QW_WRITE; + bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX; + bit_group_1 |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_CS_STALL; + bit_group_1 |= PIPE_CONTROL_CS_STALL; if (!HAS_3D_PIPELINE(engine->i915)) - flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS; + bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS; else if (engine->class == COMPUTE_CLASS) - flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; + bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen12_emit_pipe_control(cs, - PIPE_CONTROL0_HDC_PIPELINE_FLUSH, - flags, LRC_PPHWSP_SCRATCH_ADDR); + cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1, + LRC_PPHWSP_SCRATCH_ADDR); intel_ring_advance(rq, cs); } @@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) else if (engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; - if 
(!HAS_FLAT_CCS(rq->engine->i915)) - count = 8 + 4; - else - count = 8; + count = 8; + if (gen12_needs_ccs_aux_inv(rq->engine)) + count += 8; cs = intel_ring_begin(rq, count); if (IS_ERR(cs)) @@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - if (!HAS_FLAT_CCS(rq->engine->i915)) { - /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_GFX_CCS_AUX_NV); - } + cs = gen12_emit_aux_table_inv(engine, cs); *cs++ = preparser_disable(false); intel_ring_advance(rq, cs); @@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) { - intel_engine_mask_t aux_inv = 0; - u32 cmd, *cs; + u32 cmd = 4; + u32 *cs; - cmd = 4; if (mode & EMIT_INVALIDATE) { cmd += 2; - if (!HAS_FLAT_CCS(rq->engine->i915) && - (rq->engine->class == VIDEO_DECODE_CLASS || - rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { - aux_inv = rq->engine->mask & - ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0); - if (aux_inv) - cmd += 4; - } + if (gen12_needs_ccs_aux_inv(rq->engine)) + cmd += 8; } cs = intel_ring_begin(rq, cmd); @@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) cmd |= MI_INVALIDATE_TLB; if (rq->engine->class == VIDEO_DECODE_CLASS) cmd |= MI_INVALIDATE_BSD; + + if (gen12_needs_ccs_aux_inv(rq->engine) && + rq->engine->class == COPY_ENGINE_CLASS) + cmd |= MI_FLUSH_DW_CCS; } *cs++ = cmd; @@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) *cs++ = 0; /* upper addr */ *cs++ = 0; /* value */ - if (aux_inv) { /* hsdes: 1809175790 */ - if (rq->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VD0_AUX_NV); - else - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VE0_AUX_NV); - } + cs = gen12_emit_aux_table_inv(rq->engine, cs); if (mode & EMIT_INVALIDATE) *cs++ = preparser_disable(false); @@ -754,7 +798,7 @@ u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs) u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) { - struct drm_i915_private *i915 = rq->engine->i915; + struct drm_i915_private *i915 = rq->i915; u32 flags = (PIPE_CONTROL_CS_STALL | PIPE_CONTROL_TLB_INVALIDATE | PIPE_CONTROL_TILE_CACHE_FLUSH | @@ -775,7 +819,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) /* Wa_1409600907 */ flags |= PIPE_CONTROL_DEPTH_STALL; - if (!HAS_3D_PIPELINE(rq->engine->i915)) + if (!HAS_3D_PIPELINE(rq->i915)) flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS; else if (rq->engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index 655e5c00ddc2..867ba697aceb 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -13,6 +13,7 @@ #include "intel_gt_regs.h" #include "intel_gpu_commands.h" +struct intel_engine_cs; struct intel_gt; struct i915_request; @@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs); static inline u32 * -__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) 
+__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0, + u32 bit_group_1, u32 offset) { memset(batch, 0, 6 * sizeof(u32)); - batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0; - batch[1] = flags1; + batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0; + batch[1] = bit_group_1; batch[2] = offset; return batch + 6; } -static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) +static inline u32 *gen8_emit_pipe_control(u32 *batch, + u32 bit_group_1, u32 offset) { - return __gen8_emit_pipe_control(batch, 0, flags, offset); + return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset); } -static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) +static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0, + u32 bit_group_1, u32 offset) { - return __gen8_emit_pipe_control(batch, flags0, flags1, offset); + return __gen8_emit_pipe_control(batch, bit_group_0, + bit_group_1, offset); } static inline u32 * diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 0aff5bb13c53..ee15486fed0d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1333,6 +1333,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce) if (!frame) return -ENOMEM; + frame->rq.i915 = engine->i915; frame->rq.engine = engine; frame->rq.context = ce; rcu_assign_pointer(frame->rq.timeline, ce->timeline); diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 2ebd937f3b4c..8a641bcf777c 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -2718,7 +2718,7 @@ static int emit_pdps(struct i915_request *rq) int err, i; u32 *cs; - GEM_BUG_ON(intel_vgpu_active(rq->engine->i915)); + GEM_BUG_ON(intel_vgpu_active(rq->i915)); /* * Beware ye of the dragons, this sequence is magic! 
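With the PIPE_CONTROL parameters renamed to bit_group_0/bit_group_1 above, a gen12 emission reads like this (a hypothetical flag combination, mirroring the gen12_emit_flush_rcs() hunk earlier):

cs = gen12_emit_pipe_control(cs,
			     PIPE_CONTROL0_HDC_PIPELINE_FLUSH,	/* bit_group_0 */
			     PIPE_CONTROL_CS_STALL |		/* bit_group_1 */
			     PIPE_CONTROL_FLUSH_L3 |
			     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH,
			     LRC_PPHWSP_SCRATCH_ADDR);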
@@ -3556,16 +3556,16 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) lrc_init_wa_ctx(engine); if (HAS_LOGICAL_RING_ELSQ(i915)) { - execlists->submit_reg = uncore->regs + + execlists->submit_reg = intel_uncore_regs(uncore) + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); - execlists->ctrl_reg = uncore->regs + + execlists->ctrl_reg = intel_uncore_regs(uncore) + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); engine->fw_domain = intel_uncore_forcewake_for_reg(engine->uncore, RING_EXECLIST_CONTROL(engine->mmio_base), FW_REG_WRITE); } else { - execlists->submit_reg = uncore->regs + + execlists->submit_reg = intel_uncore_regs(uncore) + i915_mmio_reg_offset(RING_ELSP(base)); } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 37d0b0fe791d..40371b8a9bbb 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -818,7 +818,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, if (obj->bit_17 == NULL) { obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL); if (obj->bit_17 == NULL) { - drm_err(&to_i915(obj->base.dev)->drm, + drm_err(obj->base.dev, "Failed to allocate memory for bit 17 record\n"); return; } diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 5d143e2a8db0..2bd8d98d2110 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -121,6 +121,7 @@ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ #define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */ +#define MI_SEMAPHORE_REGISTER_POLL (1 << 16) #define MI_SEMAPHORE_POLL (1 << 15) #define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) #define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) @@ -299,6 +300,7 @@ #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_CCS_FLUSH (1<<13) /* MTL+ */ #define PIPE_CONTROL_WRITE_FLUSH (1<<12) #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 7a008e829d4d..18177a8e4aad 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -33,6 +33,7 @@ #include "intel_rps.h" #include "intel_sa_media.h" #include "intel_gt_sysfs.h" +#include "intel_tlb.h" #include "intel_uncore.h" #include "shmem_utils.h" @@ -50,8 +51,7 @@ void intel_gt_common_init_early(struct intel_gt *gt) intel_gt_init_reset(gt); intel_gt_init_requests(gt); intel_gt_init_timelines(gt); - mutex_init(&gt->tlb.invalidate_lock); - seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock); + intel_gt_init_tlb(gt); intel_gt_pm_init_early(gt); intel_wopcm_init_early(&gt->wopcm); @@ -466,7 +466,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE | I915_BO_ALLOC_GPU_ONLY); - if (IS_ERR(obj)) + if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */ obj = i915_gem_object_create_stolen(i915, size); if (IS_ERR(obj)) obj = i915_gem_object_create_internal(i915, size); @@ -846,7 +846,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915) intel_gt_fini_requests(gt); intel_gt_fini_reset(gt); intel_gt_fini_timelines(gt); - 
mutex_destroy(&gt->tlb.invalidate_lock); + intel_gt_fini_tlb(gt); intel_engines_free(gt); } } @@ -887,7 +887,7 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) int intel_gt_probe_all(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - struct intel_gt *gt = &i915->gt0; + struct intel_gt *gt = to_gt(i915); const struct intel_gt_definition *gtdef; phys_addr_t phys_addr; unsigned int mmio_bar; @@ -904,7 +904,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915) */ gt->i915 = i915; gt->name = "Primary GT"; - gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask; + gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask; gt_dbg(gt, "Setting up %s\n", gt->name); ret = intel_gt_tile_setup(gt, phys_addr); @@ -1003,137 +1003,3 @@ void intel_gt_info_print(const struct intel_gt_info *info, intel_sseu_dump(&info->sseu, p); } - -/* - * HW architecture suggest typical invalidation time at 40us, - * with pessimistic cases up to 100us and a recommendation to - * cap at 1ms. We go a bit higher just in case. - */ -#define TLB_INVAL_TIMEOUT_US 100 -#define TLB_INVAL_TIMEOUT_MS 4 - -/* - * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets - * but are now considered MCR registers. Since they exist within a GAM range, - * the primary instance of the register rolls up the status from each unit. - */ -static int wait_for_invalidate(struct intel_engine_cs *engine) -{ - if (engine->tlb_inv.mcr) - return intel_gt_mcr_wait_for_reg(engine->gt, - engine->tlb_inv.reg.mcr_reg, - engine->tlb_inv.done, - 0, - TLB_INVAL_TIMEOUT_US, - TLB_INVAL_TIMEOUT_MS); - else - return __intel_wait_for_register_fw(engine->gt->uncore, - engine->tlb_inv.reg.reg, - engine->tlb_inv.done, - 0, - TLB_INVAL_TIMEOUT_US, - TLB_INVAL_TIMEOUT_MS, - NULL); -} - -static void mmio_invalidate_full(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_uncore *uncore = gt->uncore; - struct intel_engine_cs *engine; - intel_engine_mask_t awake, tmp; - enum intel_engine_id id; - unsigned long flags; - - if (GRAPHICS_VER(i915) < 8) - return; - - intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - - intel_gt_mcr_lock(gt, &flags); - spin_lock(&uncore->lock); /* serialise invalidate with GT reset */ - - awake = 0; - for_each_engine(engine, gt, id) { - if (!intel_engine_pm_is_awake(engine)) - continue; - - if (engine->tlb_inv.mcr) - intel_gt_mcr_multicast_write_fw(gt, - engine->tlb_inv.reg.mcr_reg, - engine->tlb_inv.request); - else - intel_uncore_write_fw(uncore, - engine->tlb_inv.reg.reg, - engine->tlb_inv.request); - - awake |= engine->mask; - } - - GT_TRACE(gt, "invalidated engines %08x\n", awake); - - /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */ - if (awake && - (IS_TIGERLAKE(i915) || - IS_DG1(i915) || - IS_ROCKETLAKE(i915) || - IS_ALDERLAKE_S(i915) || - IS_ALDERLAKE_P(i915))) - intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1); - - spin_unlock(&uncore->lock); - intel_gt_mcr_unlock(gt, flags); - - for_each_engine_masked(engine, gt, awake, tmp) { - if (wait_for_invalidate(engine)) - gt_err_ratelimited(gt, - "%s TLB invalidation did not complete in %ums!\n", - engine->name, TLB_INVAL_TIMEOUT_MS); - } - - /* - * Use delayed put since a) we mostly expect a flurry of TLB - * invalidations so it is good to avoid paying the forcewake cost and - * b) it works around a bug in Icelake which cannot cope with too rapid - * transitions. 
- */ - intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL); -} - -static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno) -{ - u32 cur = intel_gt_tlb_seqno(gt); - - /* Only skip if a *full* TLB invalidate barrier has passed */ - return (s32)(cur - ALIGN(seqno, 2)) > 0; -} - -void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno) -{ - intel_wakeref_t wakeref; - - if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) - return; - - if (intel_gt_is_wedged(gt)) - return; - - if (tlb_seqno_passed(gt, seqno)) - return; - - with_intel_gt_pm_if_awake(gt, wakeref) { - mutex_lock(&gt->tlb.invalidate_lock); - if (tlb_seqno_passed(gt, seqno)) - goto unlock; - - mmio_invalidate_full(gt); - - write_seqcount_invalidate(&gt->tlb.seqno); -unlock: - mutex_unlock(&gt->tlb.invalidate_lock); - } -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftest_tlb.c" -#endif diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index d2f4fbde5f9f..6549e28fa219 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -107,16 +107,4 @@ void intel_gt_info_print(const struct intel_gt_info *info, void intel_gt_watchdog_work(struct work_struct *work); -static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt) -{ - return seqprop_sequence(&gt->tlb.seqno); -} - -static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt) -{ - return intel_gt_tlb_seqno(gt) | 1; -} - -void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno); - #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_defines.h b/drivers/gpu/drm/i915/gt/intel_gt_defines.h new file mode 100644 index 000000000000..5017788bac8f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_defines.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_GT_DEFINES__ +#define __INTEL_GT_DEFINES__ + +#define I915_MAX_GT 2 + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 62fd00c9e519..77fb57223465 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -31,7 +31,7 @@ static u32 gen11_gt_engine_identity(struct intel_gt *gt, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = gt->uncore->regs; + void __iomem * const regs = intel_uncore_regs(gt->uncore); u32 timeout_ts; u32 ident; @@ -148,7 +148,7 @@ gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity) static void gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) { - void __iomem * const regs = gt->uncore->regs; + void __iomem * const regs = intel_uncore_regs(gt->uncore); unsigned long intr_dw; unsigned int bit; @@ -183,7 +183,7 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) bool gen11_gt_reset_one_iir(struct intel_gt *gt, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = gt->uncore->regs; + void __iomem * const regs = intel_uncore_regs(gt->uncore); u32 dw; lockdep_assert_held(gt->irq_lock); @@ -404,7 +404,7 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl) { - void __iomem * const regs = gt->uncore->regs; + void __iomem * const regs = intel_uncore_regs(gt->uncore); u32 iir; if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 718cb2c80f79..2cdfb2f713d0 
100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -332,9 +332,11 @@ #define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) #define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) #define BSD_HWS_PGA_GEN7 _MMIO(0x4180) -#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208) -#define GEN12_VD0_AUX_NV _MMIO(0x4218) -#define GEN12_VD1_AUX_NV _MMIO(0x4228) + +#define GEN12_CCS_AUX_INV _MMIO(0x4208) +#define GEN12_VD0_AUX_INV _MMIO(0x4218) +#define GEN12_VE0_AUX_INV _MMIO(0x4238) +#define GEN12_BCS0_AUX_INV _MMIO(0x4248) #define GEN8_RTCR _MMIO(0x4260) #define GEN8_M1TCR _MMIO(0x4264) @@ -342,14 +344,12 @@ #define GEN8_BTCR _MMIO(0x426c) #define GEN8_VTCR _MMIO(0x4270) -#define GEN12_VD2_AUX_NV _MMIO(0x4298) -#define GEN12_VD3_AUX_NV _MMIO(0x42a8) -#define GEN12_VE0_AUX_NV _MMIO(0x4238) - #define BLT_HWS_PGA_GEN7 _MMIO(0x4280) -#define GEN12_VE1_AUX_NV _MMIO(0x42b8) +#define GEN12_VD2_AUX_INV _MMIO(0x4298) +#define GEN12_CCS0_AUX_INV _MMIO(0x42c8) #define AUX_INV REG_BIT(0) + #define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380) #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c index ee2b44f896a2..f0dea54880af 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c @@ -701,6 +701,80 @@ static const struct attribute *media_perf_power_attrs[] = { }; static ssize_t +rps_up_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); + struct intel_rps *rps = &gt->rps; + + return sysfs_emit(buf, "%u\n", intel_rps_get_up_threshold(rps)); +} + +static ssize_t +rps_up_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); + struct intel_rps *rps = &gt->rps; + int ret; + u8 val; + + ret = kstrtou8(buf, 10, &val); + if (ret) + return ret; + + ret = intel_rps_set_up_threshold(rps, val); + + return ret == 0 ? count : ret; +} + +static struct kobj_attribute rps_up_threshold_pct = + __ATTR(rps_up_threshold_pct, + 0664, + rps_up_threshold_pct_show, + rps_up_threshold_pct_store); + +static ssize_t +rps_down_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); + struct intel_rps *rps = &gt->rps; + + return sysfs_emit(buf, "%u\n", intel_rps_get_down_threshold(rps)); } + +static ssize_t +rps_down_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); + struct intel_rps *rps = &gt->rps; + int ret; + u8 val; + + ret = kstrtou8(buf, 10, &val); + if (ret) + return ret; + + ret = intel_rps_set_down_threshold(rps, val); + + return ret == 0 ? count : ret; +}
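For scale, rps_set_power() turns these percentages into evaluation-interval thresholds; with an assumed ei_up of 10000 us (the real value is power-mode dependent) and the default up threshold of 95%:

/*   GEN6_RP_UP_EI        <- intel_gt_ns_to_pm_interval(gt, 10000 * 1000)      10 ms
 *   GEN6_RP_UP_THRESHOLD <- intel_gt_ns_to_pm_interval(gt, 10000 * 95 * 10)  9.5 ms
 * i.e. the GT must be busy for >= 95% of the evaluation interval before
 * up-clocking; down_threshold gates down-clocking the same way.
 */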
 + +static struct kobj_attribute rps_down_threshold_pct = + __ATTR(rps_down_threshold_pct, + 0664, + rps_down_threshold_pct_show, + rps_down_threshold_pct_store); + +static const struct attribute * const gen6_gt_rps_attrs[] = { + &rps_up_threshold_pct.attr, + &rps_down_threshold_pct.attr, + NULL +}; + +static ssize_t default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct intel_gt *gt = kobj_to_gt(kobj->parent); @@ -722,9 +796,37 @@ default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, cha static struct kobj_attribute default_max_freq_mhz = __ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL); +static ssize_t +default_rps_up_threshold_pct_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct intel_gt *gt = kobj_to_gt(kobj->parent); + + return sysfs_emit(buf, "%u\n", gt->defaults.rps_up_threshold); +} + +static struct kobj_attribute default_rps_up_threshold_pct = +__ATTR(rps_up_threshold_pct, 0444, default_rps_up_threshold_pct_show, NULL); + +static ssize_t +default_rps_down_threshold_pct_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct intel_gt *gt = kobj_to_gt(kobj->parent); + + return sysfs_emit(buf, "%u\n", gt->defaults.rps_down_threshold); +} + +static struct kobj_attribute default_rps_down_threshold_pct = +__ATTR(rps_down_threshold_pct, 0444, default_rps_down_threshold_pct_show, NULL); + static const struct attribute * const rps_defaults_attrs[] = { &default_min_freq_mhz.attr, &default_max_freq_mhz.attr, + &default_rps_up_threshold_pct.attr, + &default_rps_down_threshold_pct.attr, NULL }; @@ -752,6 +854,12 @@ static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj) if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) ret = sysfs_create_file(kobj, vlv_attr); + if (is_object_gt(kobj) && !intel_uc_uses_guc_slpc(&gt->uc)) { + ret = sysfs_create_files(kobj, gen6_gt_rps_attrs); + if (ret) + return ret; + } + return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index f08c2556aa25..1b22d7a50665 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -83,6 +83,9 @@ enum intel_submission_method { struct gt_defaults { u32 min_freq; u32 max_freq; + + u8 rps_up_threshold; + u8 rps_down_threshold; }; enum intel_gt_type { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a4ec20aaafe2..119deb9f938c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1092,8 +1092,15 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine) obj = i915_gem_object_create_lmem(engine->i915, context_size, I915_BO_ALLOC_PM_VOLATILE); - if (IS_ERR(obj)) + if (IS_ERR(obj)) { obj = i915_gem_object_create_shmem(engine->i915, context_size); + /* + * Wa_22016122933: For MTL the shared memory needs to be mapped + * as WC on CPU side and UC (PAT index 2) on GPU side + */ + if (IS_METEORLAKE(engine->i915)) + i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); + } if (IS_ERR(obj)) return ERR_CAST(obj); @@ -1364,10 +1371,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) IS_DG2_G11(ce->engine->i915)) cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_GFX_CCS_AUX_NV); + cs = 
gen12_emit_aux_table_inv(ce->engine, cs); /* Wa_16014892111 */ if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) || @@ -1392,17 +1396,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) { - if (ce->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VD0_AUX_NV); - else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VE0_AUX_NV); - } - - return cs; + return gen12_emit_aux_table_inv(ce->engine, cs); } static void diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index 6023288b0e2d..576e5ef0289b 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -366,7 +366,7 @@ static int emit_pte(struct i915_request *rq, u64 offset, int length) { - bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915); + bool has_64K_pages = HAS_64K_PAGES(rq->i915); const u64 encode = rq->context->vm->pte_encode(0, pat_index, is_lmem ? PTE_LM : 0); struct intel_ring *ring = rq->ring; @@ -375,7 +375,7 @@ static int emit_pte(struct i915_request *rq, u32 page_size; u32 *hdr, *cs; - GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); + GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8); page_size = I915_GTT_PAGE_SIZE; dword_length = 0x400; @@ -531,7 +531,7 @@ static int emit_copy_ccs(struct i915_request *rq, u32 dst_offset, u8 dst_access, u32 src_offset, u8 src_access, int size) { - struct drm_i915_private *i915 = rq->engine->i915; + struct drm_i915_private *i915 = rq->i915; int mocs = rq->engine->gt->mocs.uc_index << 1; u32 num_ccs_blks; u32 *cs; @@ -581,7 +581,7 @@ static int emit_copy_ccs(struct i915_request *rq, static int emit_copy(struct i915_request *rq, u32 dst_offset, u32 src_offset, int size) { - const int ver = GRAPHICS_VER(rq->engine->i915); + const int ver = GRAPHICS_VER(rq->i915); u32 instance = rq->engine->instance; u32 *cs; @@ -917,7 +917,7 @@ out_ce: static int emit_clear(struct i915_request *rq, u32 offset, int size, u32 value, bool is_lmem) { - struct drm_i915_private *i915 = rq->engine->i915; + struct drm_i915_private *i915 = rq->i915; int mocs = rq->engine->gt->mocs.uc_index << 1; const int ver = GRAPHICS_VER(i915); int ring_sz; diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 436756bfbb1a..d07a4f97b943 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -8,6 +8,7 @@ #include "gem/i915_gem_lmem.h" #include "i915_trace.h" +#include "intel_gt.h" #include "intel_gtt.h" #include "gen6_ppgtt.h" #include "gen8_ppgtt.h" @@ -210,8 +211,7 @@ void ppgtt_unbind_vma(struct i915_address_space *vm, return; vm->clear_range(vm, vma_res->start, vma_res->vma_size); - if (vma_res->tlb) - vma_invalidate_tlb(vm, vma_res->tlb); + vma_invalidate_tlb(vm, vma_res->tlb); } static unsigned long pd_count(u64 size, int shift) diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 2a3217e2890f..f8512aee58a8 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -220,7 +220,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) resource_size_t lmem_range; u64 tile_stolen, flat_ccs_base; - lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF; + lmem_range = 
intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF; lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT; lmem_size *= SZ_1G; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index e2152f75ba2e..cc6bd21a3e51 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -35,9 +35,6 @@ #define RESET_MAX_RETRIES 3 -/* XXX How to handle concurrent GGTT updates using tiling registers? */ -#define RESET_UNDER_STOP_MACHINE 0 - static void client_mark_guilty(struct i915_gem_context *ctx, bool banned) { struct drm_i915_file_private *file_priv = ctx->file_priv; diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 3fd795c3263f..92085ffd23de 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -805,7 +805,7 @@ static int mi_set_context(struct i915_request *rq, static int remap_l3_slice(struct i915_request *rq, int slice) { #define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32)) - u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice]; + u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; int i; if (!remap_info) diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index e92e626d4994..092542f53aad 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -16,7 +16,9 @@ #include "intel_gt.h" #include "intel_gt_clock_utils.h" #include "intel_gt_irq.h" +#include "intel_gt_pm.h" #include "intel_gt_pm_irq.h" +#include "intel_gt_print.h" #include "intel_gt_regs.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" @@ -672,7 +674,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power) { struct intel_gt *gt = rps_to_gt(rps); struct intel_uncore *uncore = gt->uncore; - u32 threshold_up = 0, threshold_down = 0; /* in % */ u32 ei_up = 0, ei_down = 0; lockdep_assert_held(&rps->power.mutex); @@ -680,9 +681,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power) if (new_power == rps->power.mode) return; - threshold_up = 95; - threshold_down = 85; - /* Note the units here are not exactly 1us, but 1280ns. */ switch (new_power) { case LOW_POWER: @@ -709,17 +707,22 @@ static void rps_set_power(struct intel_rps *rps, int new_power) GT_TRACE(gt, "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n", - new_power, threshold_up, ei_up, threshold_down, ei_down); + new_power, + rps->power.up_threshold, ei_up, + rps->power.down_threshold, ei_down); set(uncore, GEN6_RP_UP_EI, intel_gt_ns_to_pm_interval(gt, ei_up * 1000)); set(uncore, GEN6_RP_UP_THRESHOLD, - intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10)); + intel_gt_ns_to_pm_interval(gt, + ei_up * rps->power.up_threshold * 10)); set(uncore, GEN6_RP_DOWN_EI, intel_gt_ns_to_pm_interval(gt, ei_down * 1000)); set(uncore, GEN6_RP_DOWN_THRESHOLD, - intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10)); + intel_gt_ns_to_pm_interval(gt, + ei_down * + rps->power.down_threshold * 10)); set(uncore, GEN6_RP_CONTROL, (GRAPHICS_VER(gt->i915) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | @@ -731,8 +734,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power) skip_hw_write: rps->power.mode = new_power; - rps->power.up_threshold = threshold_up; - rps->power.down_threshold = threshold_down; } static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) @@ -1559,10 +1560,12 @@ void intel_rps_enable(struct intel_rps *rps) return; GT_TRACE(rps_to_gt(rps), - "min:%x, max:%x, freq:[%d, %d]\n", + "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n", rps->min_freq, rps->max_freq, intel_gpu_freq(rps, rps->min_freq), - intel_gpu_freq(rps, rps->max_freq)); + intel_gpu_freq(rps, rps->max_freq), + rps->power.up_threshold, + rps->power.down_threshold); GEM_BUG_ON(rps->max_freq < rps->min_freq); GEM_BUG_ON(rps->idle_freq > rps->max_freq); @@ -2015,6 +2018,12 @@ void intel_rps_init(struct intel_rps *rps) } } + /* Set default thresholds in % */ + rps->power.up_threshold = 95; + rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold; + rps->power.down_threshold = 85; + rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold; + /* Finally allow us to boost to max by default */ rps->boost_freq = rps->max_freq; rps->idle_freq = rps->min_freq; @@ -2569,6 +2578,58 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val) return set_min_freq(rps, val); } +u8 intel_rps_get_up_threshold(struct intel_rps *rps) +{ + return rps->power.up_threshold; +} + +static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val) +{ + int ret; + + if (val > 100) + return -EINVAL; + + ret = mutex_lock_interruptible(&rps->lock); + if (ret) + return ret; + + if (*threshold == val) + goto out_unlock; + + *threshold = val; + + /* Force reset. */ + rps->last_freq = -1; + mutex_lock(&rps->power.mutex); + rps->power.mode = -1; + mutex_unlock(&rps->power.mutex); + + intel_rps_set(rps, clamp(rps->cur_freq, + rps->min_freq_softlimit, + rps->max_freq_softlimit)); + +out_unlock: + mutex_unlock(&rps->lock); + + return ret; +} + +int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold) +{ + return rps_set_threshold(rps, &rps->power.up_threshold, threshold); +} + +u8 intel_rps_get_down_threshold(struct intel_rps *rps) +{ + return rps->power.down_threshold; +} + +int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold) +{ + return rps_set_threshold(rps, &rps->power.down_threshold, threshold); +} + static void intel_rps_set_manual(struct intel_rps *rps, bool enable) { struct intel_uncore *uncore = rps_to_uncore(rps); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index a3fa987aa91f..92fb01f5a452 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -37,6 +37,10 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); int intel_gpu_freq(struct intel_rps *rps, int val); int intel_freq_opcode(struct intel_rps *rps, int val); +u8 intel_rps_get_up_threshold(struct intel_rps *rps); +int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold); +u8 intel_rps_get_down_threshold(struct intel_rps *rps); +int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold); u32 intel_rps_read_actual_frequency(struct intel_rps *rps); u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps); u32 intel_rps_get_requested_frequency(struct intel_rps *rps); diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c index e8f3d18c12b8..8c1dbcbcbc4f 100644 --- 
a/drivers/gpu/drm/i915/gt/intel_sa_media.c +++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c @@ -29,7 +29,7 @@ int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr, * Standalone media shares the general MMIO space with the primary * GT. We'll re-use the primary GT's mapping. */ - uncore->regs = i915->uncore.regs; + uncore->regs = intel_uncore_regs(&i915->uncore); if (drm_WARN_ON(&i915->drm, uncore->regs == NULL)) return -EIO; diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c new file mode 100644 index 000000000000..139608c30d97 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_perf_oa_regs.h" +#include "intel_engine_pm.h" +#include "intel_gt.h" +#include "intel_gt_mcr.h" +#include "intel_gt_pm.h" +#include "intel_gt_print.h" +#include "intel_gt_regs.h" +#include "intel_tlb.h" + +/* + * HW architecture suggests typical invalidation time at 40us, + * with pessimistic cases up to 100us and a recommendation to + * cap at 1ms. We go a bit higher just in case. + */ +#define TLB_INVAL_TIMEOUT_US 100 +#define TLB_INVAL_TIMEOUT_MS 4 + +/* + * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets + * but are now considered MCR registers. Since they exist within a GAM range, + * the primary instance of the register rolls up the status from each unit. + */ +static int wait_for_invalidate(struct intel_engine_cs *engine) +{ + if (engine->tlb_inv.mcr) + return intel_gt_mcr_wait_for_reg(engine->gt, + engine->tlb_inv.reg.mcr_reg, + engine->tlb_inv.done, + 0, + TLB_INVAL_TIMEOUT_US, + TLB_INVAL_TIMEOUT_MS); + else + return __intel_wait_for_register_fw(engine->gt->uncore, + engine->tlb_inv.reg.reg, + engine->tlb_inv.done, + 0, + TLB_INVAL_TIMEOUT_US, + TLB_INVAL_TIMEOUT_MS, + NULL); +} + +static void mmio_invalidate_full(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + intel_engine_mask_t awake, tmp; + enum intel_engine_id id; + unsigned long flags; + + if (GRAPHICS_VER(i915) < 8) + return; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + intel_gt_mcr_lock(gt, &flags); + spin_lock(&uncore->lock); /* serialise invalidate with GT reset */ + + awake = 0; + for_each_engine(engine, gt, id) { + if (!intel_engine_pm_is_awake(engine)) + continue; + + if (engine->tlb_inv.mcr) + intel_gt_mcr_multicast_write_fw(gt, + engine->tlb_inv.reg.mcr_reg, + engine->tlb_inv.request); + else + intel_uncore_write_fw(uncore, + engine->tlb_inv.reg.reg, + engine->tlb_inv.request); + + awake |= engine->mask; + } + + GT_TRACE(gt, "invalidated engines %08x\n", awake); + + /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */ + if (awake && + (IS_TIGERLAKE(i915) || + IS_DG1(i915) || + IS_ROCKETLAKE(i915) || + IS_ALDERLAKE_S(i915) || + IS_ALDERLAKE_P(i915))) + intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1); + + spin_unlock(&uncore->lock); + intel_gt_mcr_unlock(gt, flags); + + for_each_engine_masked(engine, gt, awake, tmp) { + if (wait_for_invalidate(engine)) + gt_err_ratelimited(gt, + "%s TLB invalidation did not complete in %ums!\n", + engine->name, TLB_INVAL_TIMEOUT_MS); + } + + /* + * Use delayed put since a) we mostly expect a flurry of TLB + * invalidations so it is good to avoid paying the forcewake cost and + * b) it works around a bug in Icelake which cannot cope with too rapid + * transitions.
+ */ + intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL); +} + +static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno) +{ + u32 cur = intel_gt_tlb_seqno(gt); + + /* Only skip if a *full* TLB invalidate barrier has passed */ + return (s32)(cur - ALIGN(seqno, 2)) > 0; +} + +void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno) +{ + intel_wakeref_t wakeref; + + if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) + return; + + if (intel_gt_is_wedged(gt)) + return; + + if (tlb_seqno_passed(gt, seqno)) + return; + + with_intel_gt_pm_if_awake(gt, wakeref) { + mutex_lock(>->tlb.invalidate_lock); + if (tlb_seqno_passed(gt, seqno)) + goto unlock; + + mmio_invalidate_full(gt); + + write_seqcount_invalidate(>->tlb.seqno); +unlock: + mutex_unlock(>->tlb.invalidate_lock); + } +} + +void intel_gt_init_tlb(struct intel_gt *gt) +{ + mutex_init(>->tlb.invalidate_lock); + seqcount_mutex_init(>->tlb.seqno, >->tlb.invalidate_lock); +} + +void intel_gt_fini_tlb(struct intel_gt *gt) +{ + mutex_destroy(>->tlb.invalidate_lock); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_tlb.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.h b/drivers/gpu/drm/i915/gt/intel_tlb.h new file mode 100644 index 000000000000..337327af92ac --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_tlb.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef INTEL_TLB_H +#define INTEL_TLB_H + +#include <linux/seqlock.h> +#include <linux/types.h> + +#include "intel_gt_types.h" + +void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno); + +void intel_gt_init_tlb(struct intel_gt *gt); +void intel_gt_fini_tlb(struct intel_gt *gt); + +static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt) +{ + return seqprop_sequence(>->tlb.seqno); +} + +static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt) +{ + return intel_gt_tlb_seqno(gt) | 1; +} + +#endif /* INTEL_TLB_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 4d2dece96011..589d009032fc 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -123,6 +123,22 @@ static void wa_init_finish(struct i915_wa_list *wal) wal->wa_count, wal->name, wal->engine_name); } +static enum forcewake_domains +wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal) +{ + enum forcewake_domains fw = 0; + struct i915_wa *wa; + unsigned int i; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + fw |= intel_uncore_forcewake_for_reg(uncore, + wa->reg, + FW_REG_READ | + FW_REG_WRITE); + + return fw; +} + static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) { unsigned int addr = i915_mmio_reg_offset(wa->reg); @@ -225,13 +241,13 @@ static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg, static void wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set) { - wa_add(wal, reg, clear, set, clear, false); + wa_add(wal, reg, clear, set, clear | set, false); } static void wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set) { - wa_mcr_add(wal, reg, clear, set, clear, false); + wa_mcr_add(wal, reg, clear, set, clear | set, false); } static void @@ -621,10 +637,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { /* Wa_1406697149 (WaDisableBankHangMode:icl) */ - wa_write(wal, - GEN8_L3CNTLREG, - 
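The seqno machinery above exists so that a burst of invalidation requests collapses into a single expensive MMIO cycle: bit 0 marks a seqno sampled between barriers, and ALIGN(seqno, 2) rounds it up to the barrier that must complete before a flush can be skipped. A simplified model of the double-checked pattern, where a plain mutex and counter stand in for gt->tlb.invalidate_lock and gt->tlb.seqno, and the += 2 mirrors what write_seqcount_invalidate() does to a seqcount:

	struct tlb_model {
		struct mutex lock;
		u32 seqno;	/* even outside an invalidation, like a seqcount */
	};

	static void model_invalidate_full(struct tlb_model *tlb, u32 seqno,
					  void (*mmio_invalidate)(void))
	{
		if ((s32)(tlb->seqno - ALIGN(seqno, 2)) > 0)
			return;	/* a full barrier already passed, nothing to do */

		mutex_lock(&tlb->lock);
		if ((s32)(tlb->seqno - ALIGN(seqno, 2)) <= 0) {
			mmio_invalidate();	/* the expensive MMIO + waits */
			tlb->seqno += 2;	/* publish the completed barrier */
		}
		mutex_unlock(&tlb->lock);
	}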
intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) | - GEN8_ERRDETBCTRL); + wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL); /* WaForceEnableNonCoherent:icl * This is not the same workaround as in early Gen9 platforms, where @@ -653,7 +666,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_1604278689:icl,ehl */ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID); wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER, - 0, /* write-only register; skip validation */ + 0, 0xFFFFFFFF); /* Wa_1406306137:icl,ehl */ @@ -670,38 +683,8 @@ static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine, wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP); wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)); - wa_mcr_add(wal, - XEHP_FF_MODE2, - FF_MODE2_TDS_TIMER_MASK, - FF_MODE2_TDS_TIMER_128, - 0, false); -} - -/* - * These settings aren't actually workarounds, but general tuning settings that - * need to be programmed on several platforms. - */ -static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine, - struct i915_wa_list *wal) -{ - /* - * Although some platforms refer to it as Wa_1604555607, we need to - * program it even on those that don't explicitly list that - * workaround. - * - * Note that the programming of this register is further modified - * according to the FF_MODE2 guidance given by Wa_1608008084:gen12. - * Wa_1608008084 tells us the FF_MODE2 register will return the wrong - * value when read. The default value for this register is zero for all - * fields and there are no bit masks. So instead of doing a RMW we - * should just write TDS timer value. For the same reason read - * verification is ignored. - */ - wa_add(wal, - GEN12_FF_MODE2, - FF_MODE2_TDS_TIMER_MASK, - FF_MODE2_TDS_TIMER_128, - 0, false); + wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK, + FF_MODE2_TDS_TIMER_128); } static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -709,8 +692,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, { struct drm_i915_private *i915 = engine->i915; - gen12_ctx_gt_tuning_init(engine, wal); - /* * Wa_1409142259:tgl,dg1,adl-p * Wa_1409347922:tgl,dg1,adl-p @@ -732,15 +713,27 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); /* - * Wa_16011163337 + * Wa_16011163337 - GS_TIMER + * + * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we + * need to program it even on those that don't explicitly list that + * workaround. + * + * Note that the programming of GEN12_FF_MODE2 is further modified + * according to the FF_MODE2 guidance given by Wa_1608008084. + * Wa_1608008084 tells us the FF_MODE2 register will return the wrong + * value when read from the CPU. * - * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due - * to Wa_1608008084. + * The default value for this register is zero for all fields. + * So instead of doing a RMW we should just write the desired values + * for TDS and GS timers. Note that since the readback can't be trusted, + * the clear mask is just set to ~0 to make sure other bits are not + * inadvertently set. For the same reason read verification is ignored. 
*/ wa_add(wal, GEN12_FF_MODE2, - FF_MODE2_GS_TIMER_MASK, - FF_MODE2_GS_TIMER_224, + ~0, + FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224, 0, false); if (!IS_DG1(i915)) { @@ -987,6 +980,9 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) int intel_engine_emit_ctx_wa(struct i915_request *rq) { struct i915_wa_list *wal = &rq->engine->ctx_wa_list; + struct intel_uncore *uncore = rq->engine->uncore; + enum forcewake_domains fw; + unsigned long flags; struct i915_wa *wa; unsigned int i; u32 *cs; @@ -1003,13 +999,36 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq) if (IS_ERR(cs)) return PTR_ERR(cs); + fw = wal_get_fw_for_rmw(uncore, wal); + + intel_gt_mcr_lock(wal->gt, &flags); + spin_lock(&uncore->lock); + intel_uncore_forcewake_get__locked(uncore, fw); + *cs++ = MI_LOAD_REGISTER_IMM(wal->count); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + u32 val; + + /* Skip reading the register if it's not really needed */ + if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) { + val = wa->set; + } else { + val = wa->is_mcr ? + intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) : + intel_uncore_read_fw(uncore, wa->reg); + val &= ~wa->clr; + val |= wa->set; + } + *cs++ = i915_mmio_reg_offset(wa->reg); - *cs++ = wa->set; + *cs++ = val; } *cs++ = MI_NOOP; + intel_uncore_forcewake_put__locked(uncore, fw); + spin_unlock(&uncore->lock); + intel_gt_mcr_unlock(wal->gt, flags); + intel_ring_advance(rq, cs); ret = rq->engine->emit_flush(rq, EMIT_BARRIER); @@ -1485,6 +1504,18 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */ wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE); + + /* + * Wa_14015795083 + * + * Firmware on some gen12 platforms locks the MISCCPCTL register, + * preventing i915 from modifying it for this workaround. Skip the + * readback verification for this workaround on debug builds; if the + * workaround doesn't stick due to firmware behavior, it's not an error + * that we want CI to flag. + */ + wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE, + 0, 0, false); } static void @@ -1710,7 +1741,6 @@ static void xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { /* Wa_14018778641 / Wa_18018781329 */ - wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB); wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); /* Wa_22016670082 */ @@ -1743,8 +1773,6 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) * GT, the media GT's versions are regular singleton registers. 
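The emission change above is easiest to see in isolation: masked registers, and writes whose clr|set mask covers the whole register, fully determine the value and need no MMIO read; everything else is a read-modify-write under forcewake. A condensed sketch of the per-entry computation (the locking, forcewake and MCR handling shown in the hunk are elided; current_hw_value stands in for the uncore or MCR read):

	static u32 ctx_wa_value(const struct i915_wa *wa, u32 current_hw_value)
	{
		u32 val;

		/* no read needed: the write fully determines the register */
		if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX)
			return wa->set;

		/* otherwise fold the workaround into the current value */
		val = current_hw_value;
		val &= ~wa->clr;
		val |= wa->set;
		return val;
	}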
*/ wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB); - wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB); - wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB); debug_dump_steering(gt); } @@ -1850,22 +1878,6 @@ void intel_gt_init_workarounds(struct intel_gt *gt) wa_init_finish(wal); } -static enum forcewake_domains -wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal) -{ - enum forcewake_domains fw = 0; - struct i915_wa *wa; - unsigned int i; - - for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - fw |= intel_uncore_forcewake_for_reg(uncore, - wa->reg, - FW_REG_READ | - FW_REG_WRITE); - - return fw; -} - static bool wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur, const char *name, const char *from) @@ -3237,7 +3249,7 @@ wa_list_srm(struct i915_request *rq, const struct i915_wa_list *wal, struct i915_vma *vma) { - struct drm_i915_private *i915 = rq->engine->i915; + struct drm_i915_private *i915 = rq->i915; unsigned int i, count = 0; const struct i915_wa *wa; u32 srm, *cs; @@ -3336,7 +3348,7 @@ retry: err = 0; for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { - if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg))) + if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg))) continue; if (!wa_verify(wal->gt, wa, results[i], wal->name, from)) diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c index 78cdfc6f315f..86cecf7a1105 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -62,7 +62,7 @@ static int write_timestamp(struct i915_request *rq, int slot) return PTR_ERR(cs); cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; - if (GRAPHICS_VER(rq->engine->i915) >= 8) + if (GRAPHICS_VER(rq->i915) >= 8) cmd++; *cs++ = cmd; *cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine)); diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c index a8446ab82501..d73e438fb85f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_mocs.c +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c @@ -137,7 +137,7 @@ static int read_mocs_table(struct i915_request *rq, if (!table) return 0; - if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915)) + if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915)) addr = global_mocs_offset() + gt->uncore->gsi_offset; else addr = mocs_offset(rq->engine); diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index 2ceeadecc639..a7189c2d660c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -140,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce) } cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; - if (GRAPHICS_VER(rq->engine->i915) >= 8) + if (GRAPHICS_VER(rq->i915) >= 8) cmd++; *cs++ = cmd; diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 39c3ec12df1a..fa36cf920bde 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -459,12 +459,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) if (IS_ERR(cs)) return PTR_ERR(cs); - if (GRAPHICS_VER(rq->engine->i915) >= 8) { + if (GRAPHICS_VER(rq->i915) >= 8) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = addr; *cs++ = 0; *cs++ = value; - } else if (GRAPHICS_VER(rq->engine->i915) >= 4) { + } else if (GRAPHICS_VER(rq->i915) >= 4) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = addr; diff 
--git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c index 3bd6b540257b..7e41f69fc818 100644 --- a/drivers/gpu/drm/i915/gt/selftest_tlb.c +++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c @@ -6,6 +6,7 @@ #include "i915_selftest.h" #include "gem/i915_gem_internal.h" +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "gen8_engine_cs.h" @@ -354,7 +355,7 @@ out_a: static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length) { - intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1); + intel_gt_invalidate_tlb_full(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1); } static int invalidate_full(void *arg) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h index 714f0c256118..6d009a905269 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h @@ -8,6 +8,74 @@ #include <linux/types.h> +struct intel_gsc_version { + u16 major; + u16 minor; + u16 hotfix; + u16 build; +} __packed; + +struct intel_gsc_partition { + u32 offset; + u32 size; +} __packed; + +struct intel_gsc_layout_pointers { + u8 rom_bypass_vector[16]; + + /* size of pointers layout not including ROM bypass vector */ + u16 size; + + /* + * bit0: Backup copy of layout pointers exist + * bits1-15: reserved + */ + u8 flags; + + u8 reserved; + + u32 crc32; + + struct intel_gsc_partition datap; + struct intel_gsc_partition boot1; + struct intel_gsc_partition boot2; + struct intel_gsc_partition boot3; + struct intel_gsc_partition boot4; + struct intel_gsc_partition boot5; + struct intel_gsc_partition temp_pages; +} __packed; + +/* Boot partition structures */ +struct intel_gsc_bpdt_header { + u32 signature; +#define INTEL_GSC_BPDT_HEADER_SIGNATURE 0x000055AA + + u16 descriptor_count; /* num of entries after the header */ + + u8 version; + u8 configuration; + + u32 crc32; + + u32 build_version; + struct intel_gsc_version tool_version; +} __packed; + +struct intel_gsc_bpdt_entry { + /* + * Bits 0-15: BPDT entry type + * Bits 16-17: reserved + * Bit 18: code sub-partition + * Bits 19-31: reserved + */ + u32 type; +#define INTEL_GSC_BPDT_ENTRY_TYPE_MASK GENMASK(15, 0) +#define INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE 0x1 + + u32 sub_partition_offset; /* from the base of the BPDT header */ + u32 sub_partition_size; +} __packed; + /* Code partition directory (CPD) structures */ struct intel_gsc_cpd_header_v2 { u32 header_marker; @@ -44,13 +112,6 @@ struct intel_gsc_cpd_entry { u8 reserved[4]; } __packed; -struct intel_gsc_version { - u16 major; - u16 minor; - u16 hotfix; - u16 build; -} __packed; - struct intel_gsc_manifest_header { u32 header_type; /* 0x4 for manifest type */ u32 header_length; /* in dwords */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c index 60e9c6c9e775..163021705210 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c @@ -3,48 +3,216 @@ * Copyright © 2022 Intel Corporation */ +#include "gem/i915_gem_lmem.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_print.h" #include "gt/intel_ring.h" +#include "intel_gsc_binary_headers.h" #include "intel_gsc_fw.h" - -#define GSC_FW_STATUS_REG _MMIO(0x116C40) -#define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0) -#define GSC_FW_CURRENT_STATE_RESET 0 -#define GSC_FW_PROXY_STATE_NORMAL 5 -#define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9) 
+#include "intel_gsc_uc_heci_cmd_submit.h" +#include "i915_reg.h" static bool gsc_is_in_reset(struct intel_uncore *uncore) { - u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG); + u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1)); - return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) == - GSC_FW_CURRENT_STATE_RESET; + return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) == + HECI1_FWSTS1_CURRENT_STATE_RESET; } -static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore) +static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref) { intel_wakeref_t wakeref; u32 fw_status = 0; - with_intel_runtime_pm(uncore->rpm, wakeref) - fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG); + if (needs_wakeref) + wakeref = intel_runtime_pm_get(uncore->rpm); + + fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1)); + if (needs_wakeref) + intel_runtime_pm_put(uncore->rpm, wakeref); return fw_status; } -bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc) +bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref) +{ + return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, + gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, + needs_wakeref)) == + HECI1_FWSTS1_PROXY_STATE_NORMAL; +} + +int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc) { - return REG_FIELD_GET(GSC_FW_CURRENT_STATE, - gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore)) == - GSC_FW_PROXY_STATE_NORMAL; + if (!(IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY))) + return -ENODEV; + if (!intel_uc_fw_is_loadable(&gsc->fw)) + return -ENODEV; + if (__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_LOAD_FAIL) + return -ENOLINK; + if (!intel_gsc_uc_fw_proxy_init_done(gsc, true)) + return -EAGAIN; + + return 0; } bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc) { - return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore) & GSC_FW_INIT_COMPLETE_BIT; + return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) & + HECI1_FWSTS1_INIT_COMPLETE; +} + +static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry) +{ + return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK; +} + +int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size) +{ + struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw); + struct intel_gt *gt = gsc_uc_to_gt(gsc); + const struct intel_gsc_layout_pointers *layout = data; + const struct intel_gsc_bpdt_header *bpdt_header = NULL; + const struct intel_gsc_bpdt_entry *bpdt_entry = NULL; + const struct intel_gsc_cpd_header_v2 *cpd_header = NULL; + const struct intel_gsc_cpd_entry *cpd_entry = NULL; + const struct intel_gsc_manifest_header *manifest; + size_t min_size = sizeof(*layout); + int i; + + if (size < min_size) { + gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size); + return -ENODATA; + } + + /* + * The GSC binary starts with the pointer layout, which contains the + * locations of the various partitions of the binary. The one we're + * interested in to get the version is the boot1 partition, where we can + * find a BPDT header followed by entries, one of which points to the + * RBE sub-section of the partition. From here, we can parse the CPD + * header and the following entries to find the manifest location + * (entry identified by the "RBEP.man" name), from which we can finally + * extract the version. + * + * -------------------------------------------------- + * [ intel_gsc_layout_pointers ] + * [ ... 
] + * [ boot1.offset >---------------------------]------o + * [ ... ] | + * -------------------------------------------------- | + * | + * -------------------------------------------------- | + * [ intel_gsc_bpdt_header ]<-----o + * -------------------------------------------------- + * [ intel_gsc_bpdt_entry[] ] + * [ entry1 ] + * [ ... ] + * [ entryX ] + * [ type == GSC_RBE ] + * [ offset >-----------------------------]------o + * [ ... ] | + * -------------------------------------------------- | + * | + * -------------------------------------------------- | + * [ intel_gsc_cpd_header_v2 ]<-----o + * -------------------------------------------------- + * [ intel_gsc_cpd_entry[] ] + * [ entry1 ] + * [ ... ] + * [ entryX ] + * [ "RBEP.man" ] + * [ ... ] + * [ offset >----------------------------]------o + * [ ... ] | + * -------------------------------------------------- | + * | + * -------------------------------------------------- | + * [ intel_gsc_manifest_header ]<-----o + * [ ... ] + * [ intel_gsc_version fw_version ] + * [ ... ] + * -------------------------------------------------- + */ + + min_size = layout->boot1.offset + layout->boot1.size; + if (size < min_size) { + gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n", + size, min_size); + return -ENODATA; + } + + min_size = sizeof(*bpdt_header); + if (layout->boot1.size < min_size) { + gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n", + layout->boot1.size, min_size); + return -ENODATA; + } + + bpdt_header = data + layout->boot1.offset; + if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) { + gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n", + bpdt_header->signature); + return -EINVAL; + } + + min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count; + if (layout->boot1.size < min_size) { + gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n", + layout->boot1.size, min_size); + return -ENODATA; + } + + bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header); + for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) { + if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) != + INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE) + continue; + + cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset; + min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header); + break; + } + + if (!cpd_header) { + gt_err(gt, "couldn't find CPD header in GSC binary!\n"); + return -ENODATA; + } + + if (layout->boot1.size < min_size) { + gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n", + layout->boot1.size, min_size); + return -ENODATA; + } + + if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) { + gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n", + cpd_header->header_marker); + return -EINVAL; + } + + min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries; + if (layout->boot1.size < min_size) { + gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n", + layout->boot1.size, min_size); + return -ENODATA; + } + + cpd_entry = (void *)cpd_header + cpd_header->header_length; + for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) { + if (strcmp(cpd_entry->name, "RBEP.man") == 0) { + manifest = (void *)cpd_header + cpd_entry_offset(cpd_entry); + intel_uc_fw_version_from_gsc_manifest(&gsc->release, + manifest); + gsc->security_version = manifest->security_version; + break; + } + } + + return 0; } static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc) @@ -115,38 
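Stripped of its bounds checks, the parse implemented above is three pointer hops through the structures from intel_gsc_binary_headers.h. In the sketch below, find_rbe_entry() and find_cpd_entry() are hypothetical helpers standing in for the two lookup loops in the hunk; the real code validates every hop against layout->boot1.size before dereferencing:

	/* hypothetical condensation of intel_gsc_fw_get_binary_info() */
	const struct intel_gsc_layout_pointers *layout = data;
	const struct intel_gsc_bpdt_header *bpdt;
	const struct intel_gsc_cpd_header_v2 *cpd;
	const struct intel_gsc_manifest_header *manifest;

	bpdt = data + layout->boot1.offset;		/* hop 1: boot1 partition */
	cpd = (void *)bpdt +
	      find_rbe_entry(bpdt)->sub_partition_offset; /* hop 2: GSC_RBE entry */
	manifest = (void *)cpd +
		   cpd_entry_offset(find_cpd_entry(cpd, "RBEP.man")); /* hop 3 */

	intel_uc_fw_version_from_gsc_manifest(&gsc->release, manifest);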
+283,21 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc) { struct intel_gt *gt = gsc_uc_to_gt(gsc); struct drm_i915_private *i915 = gt->i915; - struct drm_i915_gem_object *obj; - void *src, *dst; + void *src; if (!gsc->local) return -ENODEV; - obj = gsc->local->obj; - - if (obj->base.size < gsc->fw.size) + if (gsc->local->size < gsc->fw.size) return -ENOSPC; - /* - * Wa_22016122933: For MTL the shared memory needs to be mapped - * as WC on CPU side and UC (PAT index 2) on GPU side - */ - if (IS_METEORLAKE(i915)) - i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); - - dst = i915_gem_object_pin_map_unlocked(obj, - i915_coherent_map_type(i915, obj, true)); - if (IS_ERR(dst)) - return PTR_ERR(dst); - src = i915_gem_object_pin_map_unlocked(gsc->fw.obj, i915_coherent_map_type(i915, gsc->fw.obj, true)); - if (IS_ERR(src)) { - i915_gem_object_unpin_map(obj); + if (IS_ERR(src)) return PTR_ERR(src); - } - memset(dst, 0, obj->base.size); - memcpy(dst, src, gsc->fw.size); + memcpy_toio(gsc->local_vaddr, src, gsc->fw.size); + memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size); /* * Wa_22016122933: Making sure the data in dst is @@ -155,7 +306,6 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc) intel_guc_write_barrier(>->uc.guc); i915_gem_object_unpin_map(gsc->fw.obj); - i915_gem_object_unpin_map(obj); return 0; } @@ -163,12 +313,94 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc) static int gsc_fw_wait(struct intel_gt *gt) { return intel_wait_for_register(gt->uncore, - GSC_FW_STATUS_REG, - GSC_FW_INIT_COMPLETE_BIT, - GSC_FW_INIT_COMPLETE_BIT, + HECI_FWSTS(MTL_GSC_HECI1_BASE, 1), + HECI1_FWSTS1_INIT_COMPLETE, + HECI1_FWSTS1_INIT_COMPLETE, 500); } +struct intel_gsc_mkhi_header { + u8 group_id; +#define MKHI_GROUP_ID_GFX_SRV 0x30 + + u8 command; +#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42) + + u8 reserved; + u8 result; +} __packed; + +struct mtl_gsc_ver_msg_in { + struct intel_gsc_mtl_header header; + struct intel_gsc_mkhi_header mkhi; +} __packed; + +struct mtl_gsc_ver_msg_out { + struct intel_gsc_mtl_header header; + struct intel_gsc_mkhi_header mkhi; + u16 proj_major; + u16 compat_major; + u16 compat_minor; + u16 reserved[5]; +} __packed; + +#define GSC_VER_PKT_SZ SZ_4K + +static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc) +{ + struct intel_gt *gt = gsc_uc_to_gt(gsc); + struct mtl_gsc_ver_msg_in *msg_in; + struct mtl_gsc_ver_msg_out *msg_out; + struct i915_vma *vma; + u64 offset; + void *vaddr; + int err; + + err = intel_guc_allocate_and_map_vma(>->uc.guc, GSC_VER_PKT_SZ * 2, + &vma, &vaddr); + if (err) { + gt_err(gt, "failed to allocate vma for GSC version query\n"); + return err; + } + + offset = i915_ggtt_offset(vma); + msg_in = vaddr; + msg_out = vaddr + GSC_VER_PKT_SZ; + + intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header, + HECI_MEADDRESS_MKHI, + sizeof(*msg_in), 0); + msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV; + msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION; + + err = intel_gsc_uc_heci_cmd_submit_packet(>->uc.gsc, + offset, + sizeof(*msg_in), + offset + GSC_VER_PKT_SZ, + GSC_VER_PKT_SZ); + if (err) { + gt_err(gt, + "failed to submit GSC request for compatibility version: %d\n", + err); + goto out_vma; + } + + if (msg_out->header.message_size != sizeof(*msg_out)) { + gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n", + msg_out->header.message_size, sizeof(*msg_out), + msg_out->header.status, msg_out->header.flags, 
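In gsc_fw_load_prepare() above, the destination is now an __iomem cookie (the stolen-memory mapping set up at init), so plain memcpy()/memset() on it are no longer appropriate and sparse would flag the address-space mismatch; the I/O accessors are required. A minimal sketch of the copy step, assuming src is the pinned CPU map of gsc->fw.obj:

	void __iomem *dst = gsc->local_vaddr; /* stolen memory via i915_vma_pin_iomap() */

	memcpy_toio(dst, src, gsc->fw.size);		/* firmware payload */
	memset_io(dst + gsc->fw.size, 0,		/* zero the remainder */
		  gsc->local->size - gsc->fw.size);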
msg_out->mkhi.result); + err = -EPROTO; + goto out_vma; + } + + gsc->fw.file_selected.ver.major = msg_out->compat_major; + gsc->fw.file_selected.ver.minor = msg_out->compat_minor; + +out_vma: + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + return err; +} + int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc) { struct intel_gt *gt = gsc_uc_to_gt(gsc); @@ -226,10 +458,24 @@ int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc) if (err) goto fail; + err = gsc_fw_query_compatibility_version(gsc); + if (err) + goto fail; + + /* we only support compatibility version 1.0 at the moment */ + err = intel_uc_check_file_version(gsc_fw, NULL); + if (err) + goto fail; + /* FW is not fully operational until we enable SW proxy */ intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED); - gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path); + gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n", + gsc_fw->file_selected.path, + gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor, + gsc->release.major, gsc->release.minor, + gsc->release.patch, gsc->release.build, + gsc->security_version); return 0; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h index fff8928218df..bc9dd0de8aaf 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h @@ -9,10 +9,13 @@ #include <linux/types.h> struct intel_gsc_uc; +struct intel_uc_fw; struct intel_uncore; +int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size); int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc); bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc); -bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc); +bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref); +int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c index c659cc01f32f..0d3b22a74365 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c @@ -7,10 +7,11 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_print.h" -#include "intel_gsc_uc.h" #include "intel_gsc_fw.h" -#include "i915_drv.h" #include "intel_gsc_proxy.h" +#include "intel_gsc_uc.h" +#include "i915_drv.h" +#include "i915_reg.h" static void gsc_work(struct work_struct *work) { @@ -61,8 +62,18 @@ static void gsc_work(struct work_struct *work) } ret = intel_gsc_proxy_request_handler(gsc); - if (ret) + if (ret) { + if (actions & GSC_ACTION_FW_LOAD) { + /* + * A proxy failure right after firmware load means the proxy-init + * step has failed so mark GSC as not usable after this + */ + drm_err(>->i915->drm, + "GSC proxy handler failed to init\n"); + intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); + } goto out_put; + } /* mark the GSC FW init as done the first time we run this */ if (actions & GSC_ACTION_FW_LOAD) { @@ -71,12 +82,13 @@ static void gsc_work(struct work_struct *work) * complete the request handling cleanly, so we need to check the * status register to check if the proxy init was actually successful */ - if (intel_gsc_uc_fw_proxy_init_done(gsc)) { + if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) { drm_dbg(>->i915->drm, "GSC Proxy initialized\n"); intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING); } else { drm_err(>->i915->drm, "GSC status reports proxy init not complete\n"); + 
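gsc_work() above encodes the load and proxy-init outcome in the firmware status, and the new intel_gsc_uc_fw_proxy_get_status() exposes it as an errno ladder. A sketch of how a hypothetical consumer might interpret it (the polling caller is an assumption here, not part of the series):

	switch (intel_gsc_uc_fw_proxy_get_status(gsc)) {
	case 0:
		/* proxy init done: GSC is fully operational */
		break;
	case -EAGAIN:
		/* load or proxy init still in flight: retry later */
		break;
	case -ENOLINK:
		/* load/proxy init failed: give up */
		break;
	case -ENODEV:
		/* GSC or mei proxy not usable in this configuration */
		break;
	}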
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); } } } @@ -98,7 +110,7 @@ static bool gsc_engine_supported(struct intel_gt *gt) GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask); if (gt_is_root(gt)) - mask = RUNTIME_INFO(gt->i915)->platform_engine_mask; + mask = INTEL_INFO(gt->i915)->platform_engine_mask; else mask = gt->info.engine_mask; @@ -133,26 +145,85 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc) } } +static int gsc_allocate_and_map_vma(struct intel_gsc_uc *gsc, u32 size) +{ + struct intel_gt *gt = gsc_uc_to_gt(gsc); + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void __iomem *vaddr; + int ret = 0; + + /* + * The GSC FW doesn't immediately suspend after becoming idle, so there + * is a chance that it could still be awake after we successfully + * return from the pci suspend function, even if there are no pending + * operations. + * The FW might therefore try to access memory for its suspend operation + * after the kernel has completed the HW suspend flow; this can cause + * issues if the FW is mapped in normal RAM memory, as some of the + * involved HW units might've already lost power. + * The driver must therefore avoid this situation and the recommended + * way to do so is to use stolen memory for the GSC memory allocation, + * because stolen memory takes a different path in HW and it is + * guaranteed to always work as long as the GPU itself is awake (which + * it must be if the GSC is awake). + */ + obj = i915_gem_object_create_stolen(gt->i915, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } + + vaddr = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err; + } + + i915_vma_make_unshrinkable(vma); + + gsc->local = vma; + gsc->local_vaddr = vaddr; + + return 0; + +err: + i915_gem_object_put(obj); + return ret; +} + +static void gsc_unmap_and_free_vma(struct intel_gsc_uc *gsc) +{ + struct i915_vma *vma = fetch_and_zero(&gsc->local); + + if (!vma) + return; + + gsc->local_vaddr = NULL; + i915_vma_unpin_iomap(vma); + i915_gem_object_put(vma->obj); +} + int intel_gsc_uc_init(struct intel_gsc_uc *gsc) { static struct lock_class_key gsc_lock; struct intel_gt *gt = gsc_uc_to_gt(gsc); struct intel_engine_cs *engine = gt->engine[GSC0]; struct intel_context *ce; - struct i915_vma *vma; int err; err = intel_uc_fw_init(&gsc->fw); if (err) goto out; - vma = intel_guc_allocate_vma(>->uc.guc, SZ_8M); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); + err = gsc_allocate_and_map_vma(gsc, SZ_4M); + if (err) goto out_fw; - } - - gsc->local = vma; ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K, I915_GEM_HWS_GSC_ADDR, @@ -173,7 +244,7 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc) return 0; out_vma: - i915_vma_unpin_and_release(&gsc->local, 0); + gsc_unmap_and_free_vma(gsc); out_fw: intel_uc_fw_fini(&gsc->fw); out: @@ -197,7 +268,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc) if (gsc->ce) intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce)); - i915_vma_unpin_and_release(&gsc->local, 0); + gsc_unmap_and_free_vma(gsc); intel_uc_fw_fini(&gsc->fw); } @@ -245,3 +316,45 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc) queue_work(gsc->wq, &gsc->work); } + +void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p) +{ + struct intel_gt *gt = gsc_uc_to_gt(gsc); + struct intel_uncore *uncore = gt->uncore; + intel_wakeref_t wakeref; + + 
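The helper above intentionally keeps only the long-lived iomap and an unshrinkable vma, not a permanent pin. The setup and teardown pairing, condensed (error unwinding trimmed):

	/* setup: stolen object -> GGTT vma -> permanent iomap, pin dropped */
	obj = i915_gem_object_create_stolen(gt->i915, size);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	vaddr = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	i915_vma_make_unshrinkable(vma);

	/* teardown mirrors it: drop the map first, then the object */
	i915_vma_unpin_iomap(vma);
	i915_gem_object_put(vma->obj);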
if (!intel_gsc_uc_is_supported(gsc)) { + drm_printf(p, "GSC not supported\n"); + return; + } + + if (!intel_gsc_uc_is_wanted(gsc)) { + drm_printf(p, "GSC disabled\n"); + return; + } + + drm_printf(p, "GSC firmware: %s\n", gsc->fw.file_selected.path); + if (gsc->fw.file_selected.path != gsc->fw.file_wanted.path) + drm_printf(p, "GSC firmware wanted: %s\n", gsc->fw.file_wanted.path); + drm_printf(p, "\tstatus: %s\n", intel_uc_fw_status_repr(gsc->fw.status)); + + drm_printf(p, "Release: %u.%u.%u.%u\n", + gsc->release.major, gsc->release.minor, + gsc->release.patch, gsc->release.build); + + drm_printf(p, "Compatibility Version: %u.%u [min expected %u.%u]\n", + gsc->fw.file_selected.ver.major, gsc->fw.file_selected.ver.minor, + gsc->fw.file_wanted.ver.major, gsc->fw.file_wanted.ver.minor); + + drm_printf(p, "SVN: %u\n", gsc->security_version); + + with_intel_runtime_pm(uncore->rpm, wakeref) { + u32 i; + + for (i = 1; i <= 6; i++) { + u32 status = intel_uncore_read(uncore, + HECI_FWSTS(MTL_GSC_HECI1_BASE, i)); + drm_printf(p, "HECI1 FWSTS%u = 0x%08x\n", i, status); + } + } +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h index a2a0813b8a76..c8082cf200fc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h @@ -8,6 +8,7 @@ #include "intel_uc_fw.h" +struct drm_printer; struct i915_vma; struct intel_context; struct i915_gsc_proxy_component; @@ -17,7 +18,26 @@ struct intel_gsc_uc { struct intel_uc_fw fw; /* GSC-specific additions */ + + /* + * The GSC has 3 version numbers: + * - Release version (incremented with each build) + * - Security version (incremented on security fix) + * - Compatibility version (incremented on interface change) + * + * The one we care about for using the binary is the last one, so that's + * the one we save inside the intel_uc_fw structure. The other two + * versions are only used for debug/info purposes, so we save them here. + * + * Note that the release and security versions are available in the + * binary header, while the compatibility version must be queried after + * loading the binary.
+ */ + struct intel_uc_fw_ver release; + u32 security_version; + struct i915_vma *local; /* private memory for GSC usage */ + void __iomem *local_vaddr; /* pointer to access the private memory */ struct intel_context *ce; /* for submission to GSC FW via GSC engine */ /* for delayed load and proxy handling */ @@ -44,6 +64,7 @@ void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc); void intel_gsc_uc_resume(struct intel_gsc_uc *gsc); void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc); void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc); +void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p); static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c new file mode 100644 index 000000000000..5baacd822a1c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_print.h> + +#include "gt/intel_gt.h" +#include "gt/intel_gt_debugfs.h" +#include "gt/intel_gt_print.h" +#include "intel_gsc_uc.h" +#include "intel_gsc_uc_debugfs.h" +#include "i915_drv.h" + +static int gsc_info_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + struct intel_gsc_uc *gsc = m->private; + + if (!intel_gsc_uc_is_supported(gsc)) + return -ENODEV; + + intel_gsc_uc_load_status(gsc, &p); + + return 0; +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(gsc_info); + +void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc_uc, struct dentry *root) +{ + static const struct intel_gt_debugfs_file files[] = { + { "gsc_info", &gsc_info_fops, NULL }, + }; + + if (!intel_gsc_uc_is_supported(gsc_uc)) + return; + + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gsc_uc); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h new file mode 100644 index 000000000000..3415ad39aabb --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef DEBUGFS_GSC_UC_H +#define DEBUGFS_GSC_UC_H + +struct intel_gsc_uc; +struct dentry; + +void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc, struct dentry *root); + +#endif /* DEBUGFS_GSC_UC_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h index ef70e304904a..09d3fbdad05a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h @@ -17,6 +17,7 @@ struct intel_gsc_mtl_header { #define GSC_HECI_VALIDITY_MARKER 0xA578875A u8 heci_client_id; +#define HECI_MEADDRESS_MKHI 7 #define HECI_MEADDRESS_PROXY 10 #define HECI_MEADDRESS_PXP 17 #define HECI_MEADDRESS_HDCP 18 diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 364d0d546ec8..0f79cb658518 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -251,9 +251,11 @@ static int guc_wait_ucode(struct intel_guc *guc) if (ret == 0) ret = -ENXIO; } else if (delta_ms > 200) { - guc_warn(guc, "excessive init time: %lldms! 
[freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d]\n", - delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), - before_freq, status, count, ret); + guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n", + delta_ms, status, count, ret); + guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n", + intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq, + intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt))); } else { guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n", delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index ee9f83af7cf6..477df260ae3a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val) ret = slpc_set_param(slpc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY, val); - if (ret) + if (ret) { guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n", val, ERR_PTR(ret)); - else + } else { slpc->ignore_eff_freq = val; + /* Set min to RPn when we disable efficient freq */ + if (val) + ret = slpc_set_param(slpc, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, + slpc->min_freq); + } + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&slpc->lock); return ret; @@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc) return ret; if (!slpc->min_freq_softlimit) { - ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit); - if (unlikely(ret)) - return ret; + /* Min softlimit is initialized to RPn */ + slpc->min_freq_softlimit = slpc->min_freq; slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit; } else { return intel_guc_slpc_set_min_freq(slpc, @@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) return ret; } + /* Set cached value of ignore efficient freq */ + intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq); + /* Revert SLPC min/max to softlimits if necessary */ ret = slpc_set_softlimits(slpc); if (unlikely(ret)) { @@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) /* Set cached media freq ratio mode */ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode); - /* Set cached value of ignore efficient freq */ - intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq); - return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index ddd146265beb..ba9e07fc2b57 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -26,6 +26,7 @@ * The kernel driver is only responsible for loading the HuC firmware and * triggering its security authentication. This is done differently depending * on the platform: + * * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA * and the authentication via GuC * - DG2: load and authentication are both performed via GSC. @@ -33,6 +34,7 @@ * not-DG2 older platforms), while the authentication is done in 2-steps, * a first auth for clear-media workloads via GuC and a second one for all * workloads via GSC. + * * On platforms where the GuC does the authentication, to correctly do so the * HuC binary must be loaded before the GuC one. 
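Taken together, the two SLPC hunks above change both behaviour and ordering: disabling efficient frequency now also drops the SLPC minimum to RPn, so intel_guc_slpc_enable() applies the cached ignore_eff_freq value before restoring softlimits, letting the user or default minimum win afterwards. The resulting enable-time sequence, condensed:

	/* condensed ordering in intel_guc_slpc_enable() after the change */
	intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
	/* ^ may push SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ down to RPn */

	ret = slpc_set_softlimits(slpc);
	/* ^ then reapplies min/max softlimits (min now defaults to RPn) */

	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);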
* Loading the HuC is optional; however, not using the HuC might negatively @@ -265,7 +267,7 @@ static bool vcs_supported(struct intel_gt *gt) GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask); if (gt_is_root(gt)) - mask = RUNTIME_INFO(gt->i915)->platform_engine_mask; + mask = INTEL_INFO(gt->i915)->platform_engine_mask; else mask = gt->info.engine_mask; @@ -308,9 +310,9 @@ void intel_huc_init_early(struct intel_huc *huc) huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL; huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL; } else { - huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS5(MTL_GSC_HECI1_BASE); - huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI_FWSTS5_HUC_AUTH_DONE; - huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI_FWSTS5_HUC_AUTH_DONE; + huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5); + huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE; + huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE; } } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index e608152fecfc..48f506a26e6d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -107,15 +107,6 @@ out_unpin: return err; } -static void get_version_from_gsc_manifest(struct intel_uc_fw_ver *ver, const void *data) -{ - const struct intel_gsc_manifest_header *manifest = data; - - ver->major = manifest->fw_version.major; - ver->minor = manifest->fw_version.minor; - ver->patch = manifest->fw_version.hotfix; -} - static bool css_valid(const void *data, size_t size) { const struct uc_css_header *css = data; @@ -227,8 +218,8 @@ int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, s for (i = 0; i < header->num_of_entries; i++, entry++) { if (strcmp(entry->name, "HUCP.man") == 0) - get_version_from_gsc_manifest(&huc_fw->file_selected.ver, - data + entry_offset(entry)); + intel_uc_fw_version_from_gsc_manifest(&huc_fw->file_selected.ver, + data + entry_offset(entry)); if (strcmp(entry->name, "huc_fw") == 0) { u32 offset = entry_offset(entry); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c index 2f93cc4e408a..6d541c866edb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c @@ -10,6 +10,7 @@ #include "gt/intel_gt_debugfs.h" #include "intel_guc_debugfs.h" +#include "intel_gsc_uc_debugfs.h" #include "intel_huc_debugfs.h" #include "intel_uc.h" #include "intel_uc_debugfs.h" @@ -58,6 +59,7 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root) intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc); + intel_gsc_uc_debugfs_register(&uc->gsc, root); intel_guc_debugfs_register(&uc->guc, root); intel_huc_debugfs_register(&uc->huc, root); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 944725e62414..3621963df6b5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -12,6 +12,8 @@ #include "gem/i915_gem_lmem.h" #include "gt/intel_gt_print.h" +#include "intel_gsc_binary_headers.h" +#include "intel_gsc_fw.h" #include "intel_uc_fw.h" #include "intel_uc_fw_abi.h" #include "i915_drv.h" @@ -468,6 +470,17 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc } } +void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver, + const void *data) +{ + const 
struct intel_gsc_manifest_header *manifest = data; + + ver->major = manifest->fw_version.major; + ver->minor = manifest->fw_version.minor; + ver->patch = manifest->fw_version.hotfix; + ver->build = manifest->fw_version.build; +} + /** * intel_uc_fw_init_early - initialize the uC object and select the firmware * @uc_fw: uC firmware @@ -668,13 +681,18 @@ static int check_gsc_manifest(struct intel_gt *gt, const struct firmware *fw, struct intel_uc_fw *uc_fw) { - if (uc_fw->type != INTEL_UC_FW_TYPE_HUC) { - gt_err(gt, "trying to GSC-parse a non-HuC binary"); + switch (uc_fw->type) { + case INTEL_UC_FW_TYPE_HUC: + intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size); + break; + case INTEL_UC_FW_TYPE_GSC: + intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size); + break; + default: + MISSING_CASE(uc_fw->type); return -EINVAL; } - intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size); - if (uc_fw->dma_start_offset) { u32 delta = uc_fw->dma_start_offset; @@ -734,10 +752,6 @@ static int check_fw_header(struct intel_gt *gt, { int err = 0; - /* GSC FW version is queried after the FW is loaded */ - if (uc_fw->type == INTEL_UC_FW_TYPE_GSC) - return 0; - if (uc_fw->has_gsc_headers) err = check_gsc_manifest(gt, fw, uc_fw); else @@ -773,6 +787,80 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware ** return 0; } +static int check_mtl_huc_guc_compatibility(struct intel_gt *gt, + struct intel_uc_fw_file *huc_selected) +{ + struct intel_uc_fw_file *guc_selected = >->uc.guc.fw.file_selected; + struct intel_uc_fw_ver *huc_ver = &huc_selected->ver; + struct intel_uc_fw_ver *guc_ver = &guc_selected->ver; + bool new_huc, new_guc; + + /* we can only do this check after having fetched both GuC and HuC */ + GEM_BUG_ON(!huc_selected->path || !guc_selected->path); + + /* + * Due to changes in the authentication flow for MTL, HuC 8.5.1 or newer + * requires GuC 70.7.0 or newer. Older HuC binaries will instead require + * GuC < 70.7.0. + */ + new_huc = huc_ver->major > 8 || + (huc_ver->major == 8 && huc_ver->minor > 5) || + (huc_ver->major == 8 && huc_ver->minor == 5 && huc_ver->patch >= 1); + + new_guc = guc_ver->major > 70 || + (guc_ver->major == 70 && guc_ver->minor >= 7); + + if (new_huc != new_guc) { + UNEXPECTED(gt, "HuC %u.%u.%u is incompatible with GuC %u.%u.%u\n", + huc_ver->major, huc_ver->minor, huc_ver->patch, + guc_ver->major, guc_ver->minor, guc_ver->patch); + gt_info(gt, "MTL GuC 70.7.0+ and HuC 8.5.1+ don't work with older releases\n"); + return -ENOEXEC; + } + + return 0; +} + +int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver) +{ + struct intel_gt *gt = __uc_fw_to_gt(uc_fw); + struct intel_uc_fw_file *wanted = &uc_fw->file_wanted; + struct intel_uc_fw_file *selected = &uc_fw->file_selected; + int ret; + + /* + * MTL has some compatibility issues with early GuC/HuC binaries + * not working with newer ones. This is specific to MTL and we + * don't expect it to extend to other platforms. 
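A worked example of the pairing rule implemented above: HuC 8.5.1 with GuC 70.6.0 is rejected (new HuC, old GuC); HuC 8.5.0 with GuC 70.6.0 passes (both old); HuC 8.5.1 with GuC 70.7.0 passes (both new). The predicate reduces to:

	/* boolean core of check_mtl_huc_guc_compatibility() */
	bool new_huc = huc_ver->major > 8 ||
		       (huc_ver->major == 8 && huc_ver->minor > 5) ||
		       (huc_ver->major == 8 && huc_ver->minor == 5 &&
			huc_ver->patch >= 1);
	bool new_guc = guc_ver->major > 70 ||
		       (guc_ver->major == 70 && guc_ver->minor >= 7);

	bool compatible = (new_huc == new_guc);	/* both on the same side of the cutoff */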
+ */ + if (IS_METEORLAKE(gt->i915) && uc_fw->type == INTEL_UC_FW_TYPE_HUC) { + ret = check_mtl_huc_guc_compatibility(gt, selected); + if (ret) + return ret; + } + + if (!wanted->ver.major || !selected->ver.major) + return 0; + + /* Check the file's major version was as it claimed */ + if (selected->ver.major != wanted->ver.major) { + UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", + intel_uc_fw_type_repr(uc_fw->type), selected->path, + selected->ver.major, selected->ver.minor, + wanted->ver.major, wanted->ver.minor); + if (!intel_uc_fw_is_overridden(uc_fw)) + return -ENOEXEC; + } else if (old_ver) { + if (selected->ver.minor < wanted->ver.minor) + *old_ver = true; + else if ((selected->ver.minor == wanted->ver.minor) && + (selected->ver.patch < wanted->ver.patch)) + *old_ver = true; + } + + return 0; +} + /** * intel_uc_fw_fetch - fetch uC firmware * @uc_fw: uC firmware @@ -840,25 +928,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) goto fail; } - if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) { - /* Check the file's major version was as it claimed */ - if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) { - UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor, - uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor); - if (!intel_uc_fw_is_overridden(uc_fw)) { - err = -ENOEXEC; - goto fail; - } - } else { - if (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor) - old_ver = true; - else if ((uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) && - (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch)) - old_ver = true; - } - } + err = intel_uc_check_file_version(uc_fw, &old_ver); + if (err) + goto fail; if (old_ver && uc_fw->file_selected.ver.major) { /* Preserve the version that was really wanted */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 054f02811971..9a431726c8d5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -70,6 +70,7 @@ struct intel_uc_fw_ver { u32 major; u32 minor; u32 patch; + u32 build; }; /* @@ -289,6 +290,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) return __intel_uc_fw_get_upload_size(uc_fw); } +void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver, + const void *data); +int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver); void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type, bool needs_ggtt_mapping); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index f4055804aad1..a5c8005ec484 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -974,7 +974,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0) + if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0) context_page_num = 19; context_base = (void *) ctx->lrc_reg_state - diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 8ef93889061a..5ec293011d99 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -449,8 +449,11 @@ int 
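For the refactored check above, outcomes split on the major version first: a major mismatch fails with -ENOEXEC unless the file was overridden, while a matching major with an older minor or patch only flags old_ver and the load continues. With illustrative versions, wanted 70.5.1 against selected 70.5.0 or 70.4.x flags old_ver, 70.5.1 or newer does not. Since old_ver starts false in the caller, the minor/patch test is equivalent to:

	/* majors already known equal at this point */
	*old_ver = selected->ver.minor < wanted->ver.minor ||
		   (selected->ver.minor == wanted->ver.minor &&
		    selected->ver.patch < wanted->ver.patch);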
i915_active_add_request(struct i915_active *ref, struct i915_request *rq) } } while (unlikely(is_barrier(active))); - if (!__i915_active_fence_set(active, fence)) + fence = __i915_active_fence_set(active, fence); + if (!fence) __i915_active_acquire(ref); + else + dma_fence_put(fence); out: i915_active_release(ref); @@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref, return NULL; } - rcu_read_lock(); prev = __i915_active_fence_set(active, fence); - if (prev) - prev = dma_fence_get_rcu(prev); - else + if (!prev) __i915_active_acquire(ref); - rcu_read_unlock(); return prev; } @@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq) * * Records the new @fence as the last active fence along its timeline in * this active tracker, moving the tracking callbacks from the previous - * fence onto this one. Returns the previous fence (if not already completed), - * which the caller must ensure is executed before the new fence. To ensure - * that the order of fences within the timeline of the i915_active_fence is - * understood, it should be locked by the caller. + * fence onto this one. Gets and returns a reference to the previous fence + * (if not already completed), which the caller must put after making sure + * that it is executed before the new fence. To ensure that the order of + * fences within the timeline of the i915_active_fence is understood, it + * should be locked by the caller. */ struct dma_fence * __i915_active_fence_set(struct i915_active_fence *active, @@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active, struct dma_fence *prev; unsigned long flags; - if (fence == rcu_access_pointer(active->fence)) + /* + * In case of fences embedded in i915_requests, their memory is + * SLAB_FAILSAFE_BY_RCU, then it can be reused right after release + * by new requests. Then, there is a risk of passing back a pointer + * to a new, completely unrelated fence that reuses the same memory + * while tracked under a different active tracker. Combined with i915 + * perf open/close operations that build await dependencies between + * engine kernel context requests and user requests from different + * timelines, this can lead to dependency loops and infinite waits. + * + * As a countermeasure, we try to get a reference to the active->fence + * first, so if we succeed and pass it back to our user then it is not + * released and potentially reused by an unrelated request before the + * user has a chance to set up an await dependency on it. + */ + prev = i915_active_fence_get(active); + if (fence == prev) return fence; GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); @@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active, * Consider that we have two threads arriving (A and B), with * C already resident as the active->fence. * - * A does the xchg first, and so it sees C or NULL depending - * on the timing of the interrupt handler. If it is NULL, the - * previous fence must have been signaled and we know that - * we are first on the timeline. If it is still present, - * we acquire the lock on that fence and serialise with the interrupt - * handler, in the process removing it from any future interrupt - * callback. A will then wait on C before executing (if present). - * - * As B is second, it sees A as the previous fence and so waits for - * it to complete its transition and takes over the occupancy for - * itself -- remembering that it needs to wait on A before executing. 
+ * Both A and B have got a reference to C or NULL, depending on the + * timing of the interrupt handler. Let's assume that if A has got C + * then it has locked C first (before B). * * Note the strong ordering of the timeline also provides consistent * nesting rules for the fence->lock; the inner lock is always the * older lock. */ spin_lock_irqsave(fence->lock, flags); - prev = xchg(__active_fence_slot(active), fence); - if (prev) { - GEM_BUG_ON(prev == fence); + if (prev) spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); + + /* + * A does the cmpxchg first, and so it sees C or NULL, as before, or + * something else, depending on the timing of other threads and/or + * interrupt handler. If not the same as before then A unlocks C if + * applicable and retries, starting from an attempt to get a new + * active->fence. Meanwhile, B follows the same path as A. + * Once A succeeds with cmpxchg, B fails again, retries, gets A from + * active->fence, locks it as soon as A completes, and possibly + * succeeds with cmpxchg. + */ + while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) { + if (prev) { + spin_unlock(prev->lock); + dma_fence_put(prev); + } + spin_unlock_irqrestore(fence->lock, flags); + + prev = i915_active_fence_get(active); + GEM_BUG_ON(prev == fence); + + spin_lock_irqsave(fence->lock, flags); + if (prev) + spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); + } + + /* + * If prev is NULL then the previous fence must have been signaled + * and we know that we are first on the timeline. If it is still + * present then, having the lock on that fence already acquired, we + * serialise with the interrupt handler, in the process of removing it + * from any future interrupt callback. A will then wait on C before + * executing (if present). + * + * As B is second, it sees A as the previous fence and so waits for + * it to complete its transition and takes over the occupancy for + * itself -- remembering that it needs to wait on A before executing. + */ + if (prev) { __list_del_entry(&active->cb.node); spin_unlock(prev->lock); /* serialise with prev->cb_list */ } @@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active, int err = 0; /* Must maintain timeline ordering wrt previous active requests */ - rcu_read_lock(); fence = __i915_active_fence_set(active, &rq->fence); - if (fence) /* but the previous fence may not belong to that timeline!
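Stripped of the fence-specific locking, the loop above is a safe-publication pattern for SLAB_TYPESAFE_BY_RCU memory: take a reference to the current occupant first, and install the replacement only if the slot still holds exactly that occupant; on any race, drop everything and retry. A generic sketch, with get_ref_rcu()/put_ref() as hypothetical helpers:

/* Publish @new in @slot; returns a held reference to the displaced node. */
static struct node *publish(struct node **slot, struct node *new)
{
	struct node *prev;

	for (;;) {
		prev = get_ref_rcu(slot);	/* may be NULL */
		if (cmpxchg(slot, prev, new) == prev)
			return prev;		/* ref passes to the caller */
		if (prev)
			put_ref(prev);		/* lost the race: retry */
	}
}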
*/ - fence = dma_fence_get_rcu(fence); - rcu_read_unlock(); if (fence) { err = i915_request_await_dma_fence(rq, fence); dma_fence_put(fence); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 76ccd4e03e31..4de44cf1026d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -67,6 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data) seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p); + intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p); i915_print_iommu_status(i915, &p); intel_gt_info_print(&to_gt(i915)->info, &p); intel_driver_caps_print(&i915->caps, &p); diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 222d0a1f3b55..294b022de22b 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -711,6 +711,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print(INTEL_INFO(dev_priv), RUNTIME_INFO(dev_priv), &p); + intel_display_device_info_print(DISPLAY_INFO(dev_priv), + DISPLAY_RUNTIME_INFO(dev_priv), &p); i915_print_iommu_status(dev_priv, &p); for_each_gt(gt, dev_priv, i) intel_gt_info_print(>->info, &p); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b4cf6f0f636d..a1a2fe31f434 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -203,9 +203,8 @@ struct drm_i915_private { /* i915 device parameters */ struct i915_params params; - const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ + const struct intel_device_info *__info; /* Use INTEL_INFO() to access. */ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. 
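Turning __info from an embedded struct into a const pointer means the device description can live in rodata and is never patched at probe time; anything genuinely mutable moves to the separate runtime copy. The shape of that split, with hypothetical field names:

struct chip_info {			/* immutable, static const tables */
	unsigned int gen;
	unsigned int engine_mask;
};

struct chip_runtime {			/* probed/fused values, writable */
	unsigned int rawclk_khz;
};

struct device_priv {
	const struct chip_info *info;	/* set once at driver create */
	struct chip_runtime runtime;	/* seeded from info's defaults */
};

#define CHIP_INFO(p)	((p)->info)
#define CHIP_RUNTIME(p)	(&(p)->runtime)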
*/ - struct intel_display_runtime_info __display_runtime; /* Access with DISPLAY_RUNTIME_INFO() */ struct intel_driver_caps caps; struct i915_dsm dsm; @@ -324,7 +323,6 @@ struct drm_i915_private { /* * i915->gt[0] == &i915->gt0 */ -#define I915_MAX_GT 2 struct intel_gt *gt[I915_MAX_GT]; struct kobject *sysfs_gt; @@ -416,10 +414,10 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) (engine__) && (engine__)->uabi_class == (class__); \ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) -#define INTEL_INFO(i915) (&(i915)->__info) -#define DISPLAY_INFO(i915) (INTEL_INFO(i915)->display) +#define INTEL_INFO(i915) ((i915)->__info) #define RUNTIME_INFO(i915) (&(i915)->__runtime) -#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->__display_runtime) +#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info) +#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info) #define DRIVER_CAPS(i915) (&(i915)->caps) #define INTEL_DEVID(i915) (RUNTIME_INFO(i915)->device_id) @@ -839,7 +837,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, */ #define HAS_64K_PAGES(i915) (INTEL_INFO(i915)->has_64k_pages) -#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) +#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) #define HAS_EXTRA_GT_LIST(i915) (INTEL_INFO(i915)->extra_gt_list) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0c38bfb60c9a..4008bb09fdb5 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -649,6 +649,8 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, struct drm_printer p = i915_error_printer(m); intel_device_info_print(&error->device_info, &error->runtime_info, &p); + intel_display_device_info_print(&error->display_device_info, + &error->display_runtime_info, &p); intel_driver_caps_print(&error->driver_caps, &p); } @@ -1173,9 +1175,9 @@ i915_vma_coredump_create(const struct intel_gt *gt, drm_clflush_pages(&page, 1); - s = kmap(page); + s = kmap_local_page(page); ret = compress_page(compress, s, dst, false); - kunmap(page); + kunmap_local(s); drm_clflush_pages(&page, 1); @@ -1983,6 +1985,10 @@ static void capture_gen(struct i915_gpu_coredump *error) memcpy(&error->runtime_info, RUNTIME_INFO(i915), sizeof(error->runtime_info)); + memcpy(&error->display_device_info, DISPLAY_INFO(i915), + sizeof(error->display_device_info)); + memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915), + sizeof(error->display_runtime_info)); error->driver_caps = i915->caps; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index a78c061ce26f..9f5971f5e980 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -14,6 +14,7 @@ #include <drm/drm_mm.h> +#include "display/intel_display_device.h" #include "gt/intel_engine.h" #include "gt/intel_gt_types.h" #include "gt/uc/intel_uc_fw.h" @@ -209,6 +210,8 @@ struct i915_gpu_coredump { struct intel_device_info device_info; struct intel_runtime_info runtime_info; + struct intel_display_device_info display_device_info; + struct intel_display_runtime_info display_runtime_info; struct intel_driver_caps driver_caps; struct i915_params params; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 82fbabcdd7a5..512fc0ef94a4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ 
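The kmap_local_page() conversion in the coredump path above swaps the old global kmap() pool for short-lived, thread-local mappings; the rules are that the mapping is only used on the creating thread and that nested mappings are released in reverse order. The usage pattern:

#include <linux/highmem.h>

static void copy_page_contents(struct page *page, void *dst)
{
	void *src;

	src = kmap_local_page(page);	/* thread-local, cheap mapping */
	memcpy(dst, src, PAGE_SIZE);
	kunmap_local(src);		/* release in reverse map order */
}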
-423,7 +423,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) static irqreturn_t ilk_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = intel_uncore_regs(&i915->uncore); u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; @@ -511,7 +511,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs) static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; - void __iomem * const regs = dev_priv->uncore.regs; + void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore); u32 master_ctl; if (!intel_irqs_enabled(dev_priv)) @@ -561,7 +561,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = intel_uncore_regs(&i915->uncore); struct intel_gt *gt = to_gt(i915); u32 master_ctl; u32 gu_misc_iir; @@ -619,7 +619,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; struct intel_gt *gt = to_gt(i915); - void __iomem * const regs = gt->uncore->regs; + void __iomem * const regs = intel_uncore_regs(gt->uncore); u32 master_tile_ctl, master_ctl; u32 gu_misc_iir; @@ -711,7 +711,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - gen8_master_intr_disable(uncore->regs); + gen8_master_intr_disable(intel_uncore_regs(uncore)); gen8_gt_irq_reset(to_gt(dev_priv)); gen8_display_irq_reset(dev_priv); @@ -727,7 +727,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; - gen11_master_intr_disable(dev_priv->uncore.regs); + gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore)); gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); @@ -742,7 +742,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) struct intel_gt *gt; unsigned int i; - dg1_master_intr_disable(dev_priv->uncore.regs); + dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore)); for_each_gt(gt, dev_priv, i) gen11_gt_irq_reset(gt); @@ -836,7 +836,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) gen8_gt_irq_postinstall(to_gt(dev_priv)); gen8_de_irq_postinstall(dev_priv); - gen8_master_intr_enable(dev_priv->uncore.regs); + gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore)); } static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) @@ -853,7 +853,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); - gen11_master_intr_enable(uncore->regs); + gen11_master_intr_enable(intel_uncore_regs(uncore)); intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ); } @@ -880,7 +880,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) GEN11_DISPLAY_IRQ_ENABLE); } - dg1_master_intr_enable(uncore->regs); + dg1_master_intr_enable(intel_uncore_regs(uncore)); intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR); } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 3d7a5db9833b..fcacdc21643c 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -38,9 +38,6 @@ #include "i915_reg.h" #include "intel_pci_config.h" -__diag_push(); 
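All of these handlers share the same top half, which the intel_uncore_regs() accessor now spells out: latch the master interrupt register, mask it while decoding the per-source bits, then unmask. A condensed sketch of the gen8-style flow; the register offset and enable bit are hypothetical placeholders:

#include <linux/interrupt.h>
#include <linux/io.h>

#define MASTER_IRQ		0x190010	/* hypothetical offset */
#define MASTER_IRQ_ENABLE	(1u << 31)	/* hypothetical bit */

static irqreturn_t gpu_irq_handler(int irq, void *arg)
{
	void __iomem *regs = arg;	/* i.e. intel_uncore_regs(uncore) */
	u32 master;

	master = readl(regs + MASTER_IRQ);
	writel(0, regs + MASTER_IRQ);	/* mask while decoding */
	if (!master) {
		writel(MASTER_IRQ_ENABLE, regs + MASTER_IRQ);
		return IRQ_NONE;
	}
	/* ... dispatch GT/display/PCH sources named in @master ... */
	writel(MASTER_IRQ_ENABLE, regs + MASTER_IRQ);	/* unmask */
	return IRQ_HANDLED;
}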
-__diag_ignore_all("-Woverride-init", "Allow overriding inherited members"); - #define PLATFORM(x) .platform = (x) #define GEN(x) \ .__runtime.graphics.ip.ver = (x), \ @@ -84,7 +81,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members"); .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K #define GEN_DEFAULT_REGIONS \ - .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM + .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM #define I830_FEATURES \ GEN(2), \ @@ -93,7 +90,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members"); .has_3d_pipeline = 1, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .__runtime.platform_engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -108,7 +105,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members"); .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .__runtime.platform_engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -140,7 +137,7 @@ static const struct intel_device_info i865g_info = { #define GEN3_FEATURES \ GEN(3), \ .gpu_reset_clobbers_display = true, \ - .__runtime.platform_engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -203,7 +200,7 @@ static const struct intel_device_info pnv_m_info = { #define GEN4_FEATURES \ GEN(4), \ .gpu_reset_clobbers_display = true, \ - .__runtime.platform_engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -231,7 +228,7 @@ static const struct intel_device_info i965gm_info = { static const struct intel_device_info g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -239,13 +236,13 @@ static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; #define GEN5_FEATURES \ GEN(5), \ - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -271,7 +268,7 @@ static const struct intel_device_info ilk_m_info = { #define GEN6_FEATURES \ GEN(6), \ - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -319,7 +316,7 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -387,7 +384,7 @@ static const struct intel_device_info vlv_info = { .__runtime.ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), GEN_DEFAULT_PAGE_SIZES, GEN_DEFAULT_REGIONS, 
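The *_FEATURES/PLATFORM()/GEN() tables rely on a designated-initializer guarantee: when the same member is initialized twice, the later initializer wins. That is what lets each platform start from a shared macro and override a handful of fields, and it is exactly the intentional pattern the now-dropped -Woverride-init suppression used to silence. In miniature:

struct info { int gen; int has_llc; };

#define GEN6_FEATURES \
	.gen = 6, \
	.has_llc = 1

static const struct info snb_info = {
	GEN6_FEATURES,			/* inherit the common set */
};

static const struct info snb_no_llc_info = {
	GEN6_FEATURES,
	.has_llc = 0,	/* later initializer overrides the macro's 1 */
};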
LEGACY_CACHELEVEL, @@ -395,7 +392,7 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 @@ -453,7 +450,7 @@ static const struct intel_device_info bdw_rsvd_info = { static const struct intel_device_info bdw_gt3_info = { BDW_PLATFORM, .gt = 3, - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -461,7 +458,7 @@ static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), .is_lp = 1, - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, @@ -505,7 +502,7 @@ static const struct intel_device_info skl_gt2_info = { #define SKL_GT3_PLUS_PLATFORM \ SKL_PLATFORM, \ - .__runtime.platform_engine_mask = \ + .platform_engine_mask = \ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) @@ -522,7 +519,7 @@ static const struct intel_device_info skl_gt4_info = { #define GEN9_LP_FEATURES \ GEN(9), \ .is_lp = 1, \ - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .has_3d_pipeline = 1, \ .has_64bit_reloc = 1, \ .has_runtime_pm = 1, \ @@ -568,7 +565,7 @@ static const struct intel_device_info kbl_gt2_info = { static const struct intel_device_info kbl_gt3_info = { KBL_PLATFORM, .gt = 3, - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -589,7 +586,7 @@ static const struct intel_device_info cfl_gt2_info = { static const struct intel_device_info cfl_gt3_info = { CFL_PLATFORM, .gt = 3, - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -622,21 +619,21 @@ static const struct intel_device_info cml_gt2_info = { static const struct intel_device_info icl_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .__runtime.ppgtt_size = 36, }; static const struct intel_device_info jsl_info = { GEN11_FEATURES, PLATFORM(INTEL_JASPERLAKE), - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .__runtime.ppgtt_size = 36, }; @@ -651,19 +648,19 @@ static const struct intel_device_info jsl_info = { static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; static const struct intel_device_info rkl_info = { GEN12_FEATURES, PLATFORM(INTEL_ROCKETLAKE), - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), }; #define DGFX_FEATURES \ - .__runtime.memory_regions = 
REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ + .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ .has_llc = 0, \ .has_pxp = 0, \ .has_snoop = 1, \ @@ -676,7 +673,7 @@ static const struct intel_device_info dg1_info = { .__runtime.graphics.ip.rel = 10, PLATFORM(INTEL_DG1), .require_force_probe = 1, - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), /* Wa_16011227922 */ @@ -686,7 +683,7 @@ static const struct intel_device_info dg1_info = { static const struct intel_device_info adl_s_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_S), - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .dma_mask_size = 39, }; @@ -694,7 +691,7 @@ static const struct intel_device_info adl_s_info = { static const struct intel_device_info adl_p_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .__runtime.ppgtt_size = 48, .dma_mask_size = 39, @@ -746,7 +743,7 @@ static const struct intel_device_info xehpsdv_info = { PLATFORM(INTEL_XEHPSDV), .has_64k_pages = 1, .has_media_ratio_mode = 1, - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) | @@ -766,7 +763,7 @@ static const struct intel_device_info xehpsdv_info = { .has_guc_deprivilege = 1, \ .has_heci_pxp = 1, \ .has_media_ratio_mode = 1, \ - .__runtime.platform_engine_mask = \ + .platform_engine_mask = \ BIT(RCS0) | BIT(BCS0) | \ BIT(VECS0) | BIT(VECS1) | \ BIT(VCS0) | BIT(VCS2) | \ @@ -801,7 +798,7 @@ static const struct intel_device_info pvc_info = { PLATFORM(INTEL_PONTEVECCHIO), .has_flat_ccs = 0, .max_pat_index = 7, - .__runtime.platform_engine_mask = + .platform_engine_mask = BIT(BCS0) | BIT(VCS0) | BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3), @@ -838,16 +835,14 @@ static const struct intel_device_info mtl_info = { .has_snoop = 1, .max_pat_index = 4, .has_pxp = 1, - .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM, - .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0), + .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM, + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0), .require_force_probe = 1, MTL_CACHELEVEL, }; #undef PLATFORM -__diag_pop(); - /* * Make sure any device matches here are from most specific to most * general. 
For example, since the Quanta match is based on the subsystem diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 7413c11fb562..04bc1f4a1115 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1319,7 +1319,7 @@ __store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset) u32 *cs, cmd; cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (GRAPHICS_VER(rq->engine->i915) >= 8) + if (GRAPHICS_VER(rq->i915) >= 8) cmd++; cs = intel_ring_begin(rq, 4); @@ -4431,6 +4431,7 @@ static const struct i915_range mtl_oam_b_counters[] = { static const struct i915_range xehp_oa_b_counters[] = { { .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */ { .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */ + {} }; static const struct i915_range gen7_oa_mux_regs[] = { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7a4f462e8b70..aefad14ab27a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -941,8 +941,30 @@ #define HECI_H_GS1(base) _MMIO((base) + 0xc4c) #define HECI_H_GS1_ER_PREP REG_BIT(0) -#define HECI_FWSTS5(base) _MMIO((base) + 0xc68) -#define HECI_FWSTS5_HUC_AUTH_DONE (1 << 19) +/* + * The FWSTS register values are FW defined and can be different between + * HECI1 and HECI2 + */ +#define HECI_FWSTS1 0xc40 +#define HECI1_FWSTS1_CURRENT_STATE REG_GENMASK(3, 0) +#define HECI1_FWSTS1_CURRENT_STATE_RESET 0 +#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5 +#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9) +#define HECI_FWSTS2 0xc48 +#define HECI_FWSTS3 0xc60 +#define HECI_FWSTS4 0xc64 +#define HECI_FWSTS5 0xc68 +#define HECI1_FWSTS5_HUC_AUTH_DONE (1 << 19) +#define HECI_FWSTS6 0xc6c + +/* the FWSTS regs are 1-based, so we use -base for index 0 to get an invalid reg */ +#define HECI_FWSTS(base, x) _MMIO((base) + _PICK(x, -(base), \ + HECI_FWSTS1, \ + HECI_FWSTS2, \ + HECI_FWSTS3, \ + HECI_FWSTS4, \ + HECI_FWSTS5, \ + HECI_FWSTS6)) #define HSW_GTT_CACHE_EN _MMIO(0x4024) #define GTT_CACHE_EN_ALL 0xF0007FFF @@ -4917,6 +4939,7 @@ #define SHPD_FILTER_CNT _MMIO(0xc4038) #define SHPD_FILTER_CNT_500_ADJ 0x001D9 +#define SHPD_FILTER_CNT_250 0x000F8 #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 32323bb801a1..7c7da284990d 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1353,7 +1353,7 @@ __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) { mark_external(rq); return i915_sw_fence_await_dma_fence(&rq->submit, fence, - i915_fence_context_timeout(rq->engine->i915, + i915_fence_context_timeout(rq->i915, fence->context), I915_FENCE_GFP); } @@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq, request_to_parent(rq)->parallel.last_rq = i915_request_get(rq); + /* + * Users have to put a reference potentially got by + * __i915_active_fence_set() to the returned request + * when no longer needed + */ return to_request(__i915_active_fence_set(&timeline->last_request, &rq->fence)); } @@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq, 0); } + /* + * Users have to put the reference to prev potentially got + * by __i915_active_fence_set() when no longer needed + */ return prev; } @@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq) prev = __i915_request_ensure_ordering(rq, timeline); else prev = 
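The lone `{}` added to xehp_oa_b_counters above is a real fix rather than churn: these allow-lists are scanned until an all-zero entry, so a table missing its sentinel lets the scan run off the end of the array. A sketch of the consuming loop, assuming the i915_range layout shown:

#include <linux/types.h>

struct i915_range { u32 start, end; };

static bool reg_in_range_table(u32 addr, const struct i915_range *tbl)
{
	/* table must be terminated by an empty {} sentinel */
	while (tbl->start || tbl->end) {
		if (addr >= tbl->start && addr <= tbl->end)
			return true;
		tbl++;
	}
	return false;
}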
__i915_request_ensure_parallel_ordering(rq, timeline); + if (prev) + i915_request_put(prev); /* * Make sure that no request gazumped us - if it was allocated after diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f6f9228a1351..ce1cbee1b39d 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -277,7 +277,7 @@ TRACE_EVENT(i915_request_queue, ), TP_fast_assign( - __entry->dev = rq->engine->i915->drm.primary->index; + __entry->dev = rq->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -304,7 +304,7 @@ DECLARE_EVENT_CLASS(i915_request, ), TP_fast_assign( - __entry->dev = rq->engine->i915->drm.primary->index; + __entry->dev = rq->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -353,7 +353,7 @@ TRACE_EVENT(i915_request_in, ), TP_fast_assign( - __entry->dev = rq->engine->i915->drm.primary->index; + __entry->dev = rq->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -382,7 +382,7 @@ TRACE_EVENT(i915_request_out, ), TP_fast_assign( - __entry->dev = rq->engine->i915->drm.primary->index; + __entry->dev = rq->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -623,7 +623,7 @@ TRACE_EVENT(i915_request_wait_begin, * less desirable. */ TP_fast_assign( - __entry->dev = rq->engine->i915->drm.primary->index; + __entry->dev = rq->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index ffb425ba591c..e52089564d79 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -34,6 +34,7 @@ #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" +#include "gt/intel_tlb.h" #include "i915_drv.h" #include "i915_gem_evict.h" @@ -74,14 +75,14 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) char buf[512]; if (!vma->node.stack) { - drm_dbg(&to_i915(vma->obj->base.dev)->drm, + drm_dbg(vma->obj->base.dev, "vma.node [%08llx + %08llx] %s: unknown owner\n", vma->node.start, vma->node.size, reason); return; } stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); - drm_dbg(&to_i915(vma->obj->base.dev)->drm, + drm_dbg(vma->obj->base.dev, "vma.node [%08llx + %08llx] %s: inserted at %s\n", vma->node.start, vma->node.size, reason, buf); } @@ -805,7 +806,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, * attempt to find space. */ if (size > end - 2 * guard) { - drm_dbg(&to_i915(vma->obj->base.dev)->drm, + drm_dbg(vma->obj->base.dev, "Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n", size, flags & PIN_MAPPABLE ? 
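In the vma_invalidate_tlb() rework just below, the single obj->mm.tlb seqno becomes an array with one slot per GT, since an object's pages may be visible through several GTs, and the function now tolerates a NULL array so __i915_vma_evict() can pass obj->mm.tlb through unconditionally (note that, as written, every slot is stamped from vm->gt's counter rather than the iterated gt's). A reduced sketch of the bookkeeping, with hypothetical types:

#include <linux/compiler.h>
#include <linux/types.h>

#define MAX_GT 2		/* stand-in for I915_MAX_GT */

/* Record, per GT, the invalidation seqno to flush before page reuse. */
static void note_tlb_invalidate(u32 *pending /* [MAX_GT] or NULL */,
				u32 (*next_seqno)(int gt_id))
{
	int id;

	if (!pending)
		return;		/* nothing was ever bound */

	for (id = 0; id < MAX_GT; id++)
		WRITE_ONCE(pending[id], next_seqno(id));
}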
"mappable" : "total", end); return -ENOSPC; @@ -1339,6 +1340,12 @@ err_unpin: void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb) { + struct intel_gt *gt; + int id; + + if (!tlb) + return; + /* * Before we release the pages that were bound by this vma, we * must invalidate all the TLBs that may still have a reference @@ -1347,7 +1354,9 @@ void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb) * the most recent TLB invalidation seqno, and if we have not yet * flushed the TLBs upon release, perform a full invalidation. */ - WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt)); + for_each_gt(gt, vm->i915, id) + WRITE_ONCE(tlb[id], + intel_gt_next_invalidate_tlb_full(vm->gt)); } static void __vma_put_pages(struct i915_vma *vma, unsigned int count) @@ -1629,6 +1638,26 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, return err; } +/** + * i915_ggtt_clear_scanout - Clear scanout flag for all objects ggtt vmas + * @obj: i915 GEM object + * This function clears scanout flags for objects ggtt vmas. These flags are set + * when object is pinned for display use and this function to clear them all is + * targeted to be called by frontbuffer tracking code when the frontbuffer is + * about to be released. + */ +void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + + spin_lock(&obj->vma.lock); + for_each_ggtt_vma(vma, obj) { + i915_vma_clear_scanout(vma); + vma->display_alignment = I915_GTT_MIN_ALIGNMENT; + } + spin_unlock(&obj->vma.lock); +} + static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) { /* @@ -1908,7 +1937,7 @@ int _i915_vma_move_to_active(struct i915_vma *vma, if (flags & EXEC_OBJECT_WRITE) { struct intel_frontbuffer *front; - front = __intel_frontbuffer_get(obj); + front = i915_gem_object_get_frontbuffer(obj); if (unlikely(front)) { if (intel_frontbuffer_invalidate(front, ORIGIN_CS)) i915_active_add_request(&front->write, rq); @@ -1994,7 +2023,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async) if (async) unbind_fence = i915_vma_resource_unbind(vma_res, - &vma->obj->mm.tlb); + vma->obj->mm.tlb); else unbind_fence = i915_vma_resource_unbind(vma_res, NULL); @@ -2011,7 +2040,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async) dma_fence_put(unbind_fence); unbind_fence = NULL; } - vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb); + vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb); } /* diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 9a9729205d5b..eaa310864370 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -435,6 +435,8 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma) clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma)); } +void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj); + #define for_each_until(cond) if (cond) break; else /** diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c index a27600bc5976..d9600cd1ab06 100644 --- a/drivers/gpu/drm/i915/intel_clock_gating.c +++ b/drivers/gpu/drm/i915/intel_clock_gating.c @@ -559,9 +559,20 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915) static void hsw_init_clock_gating(struct drm_i915_private *i915) { + enum pipe pipe; + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS); + /* WaPsrDPAMaskVBlankInSRD:hsw */ + intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, 
HSW_MASK_VBL_TO_PIPE_IN_SRD); + + for_each_pipe(i915, pipe) { + /* WaPsrDPRSUnmaskVBlankInSRD:hsw */ + intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe), + 0, HSW_UNMASK_VBL_TO_REGS_IN_SRD); + } + /* This is required by WaCatErrorRejectionIssue:hsw */ intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 6e49caf241a5..ea0ec6174ce5 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -93,9 +93,6 @@ void intel_device_info_print(const struct intel_device_info *info, const struct intel_runtime_info *runtime, struct drm_printer *p) { - const struct intel_display_runtime_info *display_runtime = - &info->display->__runtime_defaults; - if (runtime->graphics.ip.rel) drm_printf(p, "graphics version: %u.%02u\n", runtime->graphics.ip.ver, @@ -112,21 +109,13 @@ void intel_device_info_print(const struct intel_device_info *info, drm_printf(p, "media version: %u\n", runtime->media.ip.ver); - if (display_runtime->ip.rel) - drm_printf(p, "display version: %u.%02u\n", - display_runtime->ip.ver, - display_runtime->ip.rel); - else - drm_printf(p, "display version: %u\n", - display_runtime->ip.ver); - drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step)); drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step)); drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step)); drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step)); drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions); + drm_printf(p, "memory-regions: 0x%x\n", info->memory_regions); drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size); @@ -138,15 +127,6 @@ void intel_device_info_print(const struct intel_device_info *info, #undef PRINT_FLAG drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu)); - -#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display->name)) - DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); -#undef PRINT_FLAG - - drm_printf(p, "has_hdcp: %s\n", str_yes_no(display_runtime->has_hdcp)); - drm_printf(p, "has_dmc: %s\n", str_yes_no(display_runtime->has_dmc)); - drm_printf(p, "has_dsc: %s\n", str_yes_no(display_runtime->has_dsc)); - drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq); } @@ -260,15 +240,19 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915) if (find_devid(devid, subplatform_ult_ids, ARRAY_SIZE(subplatform_ult_ids))) { mask = BIT(INTEL_SUBPLATFORM_ULT); + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D); } else if (find_devid(devid, subplatform_ulx_ids, ARRAY_SIZE(subplatform_ulx_ids))) { mask = BIT(INTEL_SUBPLATFORM_ULX); if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { /* ULX machines are also considered ULT. 
*/ mask |= BIT(INTEL_SUBPLATFORM_ULT); + DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D); } } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { + DISPLAY_RUNTIME_INFO(i915)->port_mask |= BIT(PORT_F); mask = BIT(INTEL_SUBPLATFORM_PORTF); } else if (find_devid(devid, subplatform_uy_ids, ARRAY_SIZE(subplatform_uy_ids))) { @@ -380,13 +364,6 @@ void intel_device_info_runtime_init_early(struct drm_i915_private *i915) intel_device_info_subplatform_init(i915); } -/* FIXME: Remove this, and make device info a const pointer to rodata. */ -static struct intel_device_info * -mkwrite_device_info(struct drm_i915_private *i915) -{ - return (struct intel_device_info *)INTEL_INFO(i915); -} - static const struct intel_display_device_info no_display = {}; /** @@ -407,7 +384,6 @@ static const struct intel_display_device_info no_display = {}; */ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) { - struct intel_device_info *info = mkwrite_device_info(dev_priv); struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv); if (HAS_DISPLAY(dev_priv)) @@ -417,7 +393,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) { dev_priv->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); - info->display = &no_display; + dev_priv->display.info.__device_info = &no_display; } /* Disable nuclear pageflip by default on pre-g4x */ @@ -447,26 +423,24 @@ void intel_device_info_driver_create(struct drm_i915_private *i915, u16 device_id, const struct intel_device_info *match_info) { - struct intel_device_info *info; struct intel_runtime_info *runtime; u16 ver, rel, step; - /* Setup the write-once "constant" device info */ - info = mkwrite_device_info(i915); - memcpy(info, match_info, sizeof(*info)); + /* Setup INTEL_INFO() */ + i915->__info = match_info; /* Initialize initial runtime info from static const data and pdev. */ runtime = RUNTIME_INFO(i915); memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime)); /* Probe display support */ - info->display = intel_display_device_probe(i915, info->has_gmd_id, - &ver, &rel, &step); + i915->display.info.__device_info = intel_display_device_probe(i915, HAS_GMD_ID(i915), + &ver, &rel, &step); memcpy(DISPLAY_RUNTIME_INFO(i915), &DISPLAY_INFO(i915)->__runtime_defaults, sizeof(*DISPLAY_RUNTIME_INFO(i915))); - if (info->has_gmd_id) { + if (HAS_GMD_ID(i915)) { DISPLAY_RUNTIME_INFO(i915)->ip.ver = ver; DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel; DISPLAY_RUNTIME_INFO(i915)->ip.step = step; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 069291b3bd37..dbfe6443457b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -29,8 +29,6 @@ #include "intel_step.h" -#include "display/intel_display_device.h" - #include "gt/intel_engine_types.h" #include "gt/intel_context_types.h" #include "gt/intel_sseu.h" @@ -212,8 +210,6 @@ struct intel_runtime_info { u16 device_id; - intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ - u32 rawclk_freq; struct intel_step_info step; @@ -223,8 +219,6 @@ struct intel_runtime_info { enum intel_ppgtt_type ppgtt_type; unsigned int ppgtt_size; /* log2, e.g. 
31/32/48 bits */ - u32 memory_regions; /* regions supported by the HW */ - bool has_pooled_eu; }; @@ -237,12 +231,13 @@ struct intel_device_info { u8 gt; /* GT number, 0 if undefined */ + intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ + u32 memory_regions; /* regions supported by the HW */ + #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); #undef DEFINE_FLAG - const struct intel_display_device_info *display; - /* * Initial runtime info. Do not access outside of i915_driver_create(). */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 796ebfe6c550..dfefad5a5fec 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1925,25 +1925,31 @@ __unclaimed_previous_reg_debug(struct intel_uncore *uncore, i915_mmio_reg_offset(reg)); } -static inline void -unclaimed_reg_debug(struct intel_uncore *uncore, - const i915_reg_t reg, - const bool read, - const bool before) +static inline bool __must_check +unclaimed_reg_debug_header(struct intel_uncore *uncore, + const i915_reg_t reg, const bool read) { if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug) - return; + return false; /* interrupts are disabled and re-enabled around uncore->lock usage */ lockdep_assert_held(&uncore->lock); - if (before) { - spin_lock(&uncore->debug->lock); - __unclaimed_previous_reg_debug(uncore, reg, read); - } else { - __unclaimed_reg_debug(uncore, reg, read); - spin_unlock(&uncore->debug->lock); - } + spin_lock(&uncore->debug->lock); + __unclaimed_previous_reg_debug(uncore, reg, read); + + return true; +} + +static inline void +unclaimed_reg_debug_footer(struct intel_uncore *uncore, + const i915_reg_t reg, const bool read) +{ + /* interrupts are disabled and re-enabled around uncore->lock usage */ + lockdep_assert_held(&uncore->lock); + + __unclaimed_reg_debug(uncore, reg, read); + spin_unlock(&uncore->debug->lock); } #define __vgpu_read(x) \ @@ -2001,13 +2007,15 @@ __gen2_read(64) #define GEN6_READ_HEADER(x) \ u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ + bool unclaimed_reg_debug; \ u##x val = 0; \ assert_rpm_wakelock_held(uncore->rpm); \ spin_lock_irqsave(&uncore->lock, irqflags); \ - unclaimed_reg_debug(uncore, reg, true, true) + unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true) #define GEN6_READ_FOOTER \ - unclaimed_reg_debug(uncore, reg, true, false); \ + if (unclaimed_reg_debug) \ + unclaimed_reg_debug_footer(uncore, reg, true); \ spin_unlock_irqrestore(&uncore->lock, irqflags); \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ return val @@ -2105,13 +2113,15 @@ __gen2_write(32) #define GEN6_WRITE_HEADER \ u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ + bool unclaimed_reg_debug; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ assert_rpm_wakelock_held(uncore->rpm); \ spin_lock_irqsave(&uncore->lock, irqflags); \ - unclaimed_reg_debug(uncore, reg, false, true) + unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false) #define GEN6_WRITE_FOOTER \ - unclaimed_reg_debug(uncore, reg, false, false); \ + if (unclaimed_reg_debug) \ + unclaimed_reg_debug_footer(uncore, reg, false); \ spin_unlock_irqrestore(&uncore->lock, irqflags) #define __gen6_write(x) \ diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 9ea1f4864a3a..f419c311a0de 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -496,6 +496,11 @@ static 
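Splitting unclaimed_reg_debug() into a header that conditionally takes the debug lock, returning whether it did, plus a footer run only in that case keeps the mmio_debug check out of the footer path and makes the lock pairing explicit in the GEN6_READ/WRITE macros. The shape of the pattern:

#include <linux/spinlock.h>

struct dbg { bool enabled; spinlock_t lock; };

static bool __must_check debug_begin(struct dbg *d)
{
	if (likely(!d->enabled))
		return false;		/* fast path: no lock, no footer */
	spin_lock(&d->lock);
	/* ... report anything left over from the previous access ... */
	return true;
}

static void debug_end(struct dbg *d)
{
	/* ... report anything this access tripped ... */
	spin_unlock(&d->lock);
}

/* usage:
 *	bool dbg = debug_begin(d);
 *	... mmio access ...
 *	if (dbg)
 *		debug_end(d);
 */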
inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, return (reg_val & mask) != expected_val ? -EINVAL : 0; } +static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore) +{ + return uncore->regs; +} + /* * The raw_reg_{read,write} macros are intended as a micro-optimization for * interrupt handlers so that the pointer indirection on uncore->regs can diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index bb2e15329f34..38ec754d0ec8 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -162,8 +162,8 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9 * for HuC authentication. For now, its limited to DG2. */ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) && - intel_huc_is_loaded_by_gsc(&i915->gt0.uc.huc) && intel_uc_uses_huc(&i915->gt0.uc)) - return &i915->gt0; + intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc)) + return to_gt(i915); return NULL; } @@ -188,8 +188,8 @@ static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_p * Else we rely on mei-pxp module but only on legacy platforms * prior to having separate media GTs and has a valid VDBOX. */ - if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(&i915->gt0)) - return &i915->gt0; + if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915))) + return to_gt(i915); return NULL; } diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c index f13890ec7db1..c7df47364013 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c @@ -197,7 +197,7 @@ bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) * are out of order) will suffice. 
*/ if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) && - intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc)) + intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc, true)) return true; return false; diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index d4608b220123..403134a7acec 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -168,7 +168,7 @@ static int write_timestamp(struct i915_request *rq, int slot) return PTR_ERR(cs); len = 5; - if (GRAPHICS_VER(rq->engine->i915) >= 8) + if (GRAPHICS_VER(rq->i915) >= 8) len++; *cs++ = GFX_OP_PIPE_CONTROL(len); diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 39da0fb0d6d2..ee79e0809a6d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -24,6 +24,8 @@ #include <linux/random.h> #include "gt/intel_gt_pm.h" +#include "gt/uc/intel_gsc_fw.h" + #include "i915_driver.h" #include "i915_drv.h" #include "i915_selftest.h" @@ -127,6 +129,31 @@ static void set_default_test_all(struct selftest *st, unsigned int count) st[i].enabled = true; } +static bool +__gsc_proxy_init_progressing(struct intel_gsc_uc *gsc) +{ + return intel_gsc_uc_fw_proxy_get_status(gsc) == -EAGAIN; +} + +static void +__wait_gsc_proxy_completed(struct drm_i915_private *i915) +{ + bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY) && + i915->media_gt && + HAS_ENGINE(i915->media_gt, GSC0) && + intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw)); + /* + * The gsc proxy component depends on the kernel component driver load ordering + * and in corner cases (the first time after an IFWI flash), init-completion + * firmware flows take longer. 
+ */ + unsigned long timeout_ms = 8000; + + if (need_to_wait && wait_for(!__gsc_proxy_init_progressing(&i915->media_gt->uc.gsc), + timeout_ms)) + pr_warn(DRIVER_NAME "Timed out waiting for gsc_proxy_completion!\n"); +} + static int __run_selftests(const char *name, struct selftest *st, unsigned int count, @@ -206,6 +233,8 @@ int i915_live_selftests(struct pci_dev *pdev) if (!i915_selftest.live) return 0; + __wait_gsc_proxy_completed(pdev_to_i915(pdev)); + err = run_selftests(live, pdev_to_i915(pdev)); if (err) { i915_selftest.live = err; @@ -227,6 +256,8 @@ int i915_perf_selftests(struct pci_dev *pdev) if (!i915_selftest.perf) return 0; + __wait_gsc_proxy_completed(pdev_to_i915(pdev)); + err = run_selftests(perf, pdev_to_i915(pdev)); if (err) { i915_selftest.perf = err; diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 618d9386d554..3c5e0952f1b8 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -159,15 +159,15 @@ igt_spinner_create_request(struct igt_spinner *spin, batch = spin->batch; - if (GRAPHICS_VER(rq->engine->i915) >= 8) { + if (GRAPHICS_VER(rq->i915) >= 8) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = upper_32_bits(hws_address(hws, rq)); - } else if (GRAPHICS_VER(rq->engine->i915) >= 6) { + } else if (GRAPHICS_VER(rq->i915) >= 6) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = 0; *batch++ = hws_address(hws, rq); - } else if (GRAPHICS_VER(rq->engine->i915) >= 4) { + } else if (GRAPHICS_VER(rq->i915) >= 4) { *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *batch++ = 0; *batch++ = hws_address(hws, rq); @@ -179,11 +179,11 @@ igt_spinner_create_request(struct igt_spinner *spin, *batch++ = arbitration_command; - if (GRAPHICS_VER(rq->engine->i915) >= 8) + if (GRAPHICS_VER(rq->i915) >= 8) *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1; - else if (IS_HASWELL(rq->engine->i915)) + else if (IS_HASWELL(rq->i915)) *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW; - else if (GRAPHICS_VER(rq->engine->i915) >= 6) + else if (GRAPHICS_VER(rq->i915) >= 6) *batch++ = MI_BATCH_BUFFER_START; else *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; @@ -201,7 +201,7 @@ igt_spinner_create_request(struct igt_spinner *spin, } flags = 0; - if (GRAPHICS_VER(rq->engine->i915) <= 5) + if (GRAPHICS_VER(rq->i915) <= 5) flags |= I915_DISPATCH_SECURE; err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags); diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index e4281508d580..03ea75cd84dd 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -210,7 +210,7 @@ static int live_forcewake_ops(void *arg) for_each_engine(engine, gt, id) { i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset); - u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset; + u32 __iomem *reg = intel_uncore_regs(uncore) + engine->mmio_base + r->offset; enum forcewake_domains fw_domains; u32 val; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 4de6a4e8280d..da0b269606c5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -114,7 +114,7 @@ static struct dev_pm_domain pm_domain = { static void mock_gt_probe(struct drm_i915_private *i915) { - i915->gt[0] = &i915->gt0; + i915->gt[0] = to_gt(i915); 
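wait_for() above is i915's polling helper: re-evaluate a condition until it holds or a timeout in milliseconds expires, returning non-zero on timeout. A free-standing approximation using jiffies:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Returns 0 once cond(data) holds, -ETIMEDOUT otherwise. */
static int poll_until(bool (*cond)(void *), void *data,
		      unsigned long timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if (cond(data))
			return 0;
		if (time_after(jiffies, end))
			return cond(data) ? 0 : -ETIMEDOUT;
		usleep_range(500, 1000);	/* be kind to the status reg */
	}
}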
i915->gt[0]->name = "Mock GT"; } @@ -123,8 +123,8 @@ static const struct intel_device_info mock_info = { .__runtime.page_sizes = (I915_GTT_PAGE_SIZE_4K | I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M), - .__runtime.memory_regions = REGION_SMEM, - .__runtime.platform_engine_mask = BIT(0), + .memory_regions = REGION_SMEM, + .platform_engine_mask = BIT(0), /* simply use legacy cache level for mock device */ .max_pat_index = 3, diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 9f0651d48d41..15492b69f698 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -704,7 +704,7 @@ void intel_dram_edram_detect(struct drm_i915_private *i915) if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9)) return; - edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP); + edram_cap = intel_uncore_read_fw(&i915->uncore, HSW_EDRAM_CAP); /* NB: We can't write IDICR yet because we don't have gt funcs set up */ diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c index 6d0204942f7a..49c7fb16e934 100644 --- a/drivers/gpu/drm/i915/soc/intel_gmch.c +++ b/drivers/gpu/drm/i915/soc/intel_gmch.c @@ -47,11 +47,9 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915) mchbar_addr = ((u64)temp_hi << 32) | temp_lo; /* If ACPI doesn't have it, assume we need to allocate it ourselves */ -#ifdef CONFIG_PNP - if (mchbar_addr && + if (IS_ENABLED(CONFIG_PNP) && mchbar_addr && pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) return 0; -#endif /* Get some space for it */ i915->gmch.mch_res.name = "i915 MCHBAR"; diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index c1bbfbe28bda..93acb0e42bd6 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -68,8 +68,8 @@ typedef struct { } atom_exec_context; int atom_debug = 0; -static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); -int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); +static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params); +int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); static uint32_t atom_arg_mask[8] = { 0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000, @@ -1156,7 +1156,7 @@ static struct { atom_op_shr, ATOM_ARG_MC}, { atom_op_debug, 0},}; -static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) +static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int len, ws, ps, ptr; @@ -1216,7 +1216,7 @@ free: return ret; } -int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params) +int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params) { int r; @@ -1237,7 +1237,7 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin return r; } -int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) { int r; mutex_lock(&ctx->scratch_mutex); @@ -1359,8 +1359,8 @@ void atom_destroy(struct atom_context *ctx) } bool atom_parse_data_header(struct atom_context *ctx, int index, - uint16_t * size, uint8_t * frev, uint8_t * crev, - uint16_t * data_start) + uint16_t *size, uint8_t *frev, uint8_t *crev, + uint16_t *data_start) { int 
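The intel_gmch.c hunk above replaces an #ifdef CONFIG_PNP block with IS_ENABLED(CONFIG_PNP) in the condition, so the PNP branch is always parsed and type-checked and simply folds away as dead code when the option is off; pnp_range_reserved() keeps a stub declaration for that configuration. In general:

#include <linux/kconfig.h>
#include <linux/pnp.h>

#define MCHBAR_SIZE (4 * 4096)	/* as in the driver's gmch code */

static bool mchbar_already_reserved(u64 addr)
{
	/*
	 * Unlike #ifdef, IS_ENABLED() keeps this branch visible to the
	 * compiler in every configuration; with CONFIG_PNP=n it becomes
	 * a constant-false test and the call is eliminated.
	 */
	return IS_ENABLED(CONFIG_PNP) && addr &&
	       pnp_range_reserved(addr, addr + MCHBAR_SIZE);
}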
offset = index * 2 + 4; int idx = CU16(ctx->data_table + offset); @@ -1379,8 +1379,8 @@ bool atom_parse_data_header(struct atom_context *ctx, int index, return true; } -bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, - uint8_t * crev) +bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, + uint8_t *crev) { int offset = index * 2 + 4; int idx = CU16(ctx->cmd_table + offset); diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h index 356219c6c7f2..7da8418704fe 100644 --- a/drivers/gpu/drm/radeon/clearstate_si.h +++ b/drivers/gpu/drm/radeon/clearstate_si.h @@ -23,8 +23,7 @@ #include "clearstate_defs.h" -static const u32 si_SECT_CONTEXT_def_1[] = -{ +static const u32 si_SECT_CONTEXT_def_1[] = { 0x00000000, // DB_RENDER_CONTROL 0x00000000, // DB_COUNT_CONTROL 0x00000000, // DB_DEPTH_VIEW diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 9c1a92fa2af6..25201b9a5aae 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -249,7 +249,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) /* Sub pixel 1/12 so we can have 4K rendering according to doc */ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); - switch(rdev->num_gb_pipes) { + switch (rdev->num_gb_pipes) { case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; @@ -638,7 +638,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track = (struct r100_cs_track *)p->track; idx_value = radeon_get_ib_value(p, idx); - switch(reg) { + switch (reg) { case AVIVO_D1MODE_VLINE_START_END: case RADEON_CRTC_GUI_TRIG_VLINE: r = r100_cs_packet_parse_vline(p); @@ -1180,7 +1180,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, ib = p->ib.ptr; idx = pkt->idx + 1; track = (struct r100_cs_track *)p->track; - switch(pkt->opcode) { + switch (pkt->opcode) { case PACKET3_3D_LOAD_VBPNTR: r = r100_packet3_load_vbpntr(p, pkt, idx); if (r) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index bf3c411a55c5..85c4bb186203 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1389,7 +1389,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); - ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) + ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *) ((u8 *)&ss_info->asSS_Info[0]); for (i = 0; i < num_indices; i++) { if (ss_assign->ucSS_Id == id) { @@ -1402,7 +1402,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, ss->refdiv = ss_assign->ucRecommendedRef_Div; return true; } - ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) + ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *) ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT)); } } @@ -3406,7 +3406,7 @@ static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT { u32 size = le16_to_cpu(v2->sHeader.usStructureSize); u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]); - u8 *start = (u8*)v2; + u8 *start = (u8 *)v2; while (offset < size) { ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset); @@ -3423,7 +3423,7 @@ static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT { u32 size = le16_to_cpu(v3->sHeader.usStructureSize); u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, 
asVoltageObj[0]); - u8 *start = (u8*)v3; + u8 *start = (u8 *)v3; while (offset < size) { ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index d0b450a06506..fb4d931fdf18 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -94,6 +94,8 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, union acpi_object atpx_arg_elements[2]; struct acpi_object_list atpx_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_device *adev = container_of(handle, struct acpi_device, handle); + struct device *dev = &adev->dev; atpx_arg.count = 2; atpx_arg.pointer = &atpx_arg_elements[0]; @@ -115,8 +117,8 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, /* Fail only if calling the method fails and ATPX is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk("failed to evaluate ATPX got %s\n", - acpi_format_exception(status)); + dev_err(dev, "failed to evaluate ATPX got %s\n", + acpi_format_exception(status)); kfree(buffer.pointer); return NULL; } @@ -157,6 +159,8 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas static int radeon_atpx_validate(struct radeon_atpx *atpx) { u32 valid_bits = 0; + struct acpi_device *adev = container_of(atpx->handle, struct acpi_device, handle); + struct device *dev = &adev->dev; if (atpx->functions.px_params) { union acpi_object *info; @@ -171,7 +175,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) size = *(u16 *) info->buffer.pointer; if (size < 10) { - printk("ATPX buffer is too small: %zu\n", size); + dev_err(dev, "ATPX buffer is too small: %zu\n", size); kfree(info); return -EINVAL; } @@ -202,7 +206,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { - printk("ATPX Hybrid Graphics\n"); + dev_info(dev, "ATPX Hybrid Graphics\n"); /* * Disable legacy PM methods only when pcie port PM is usable, * otherwise the device might fail to power off or power on. @@ -239,7 +243,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) size = *(u16 *) info->buffer.pointer; if (size < 8) { - printk("ATPX buffer is too small: %zu\n", size); + pr_err("ATPX buffer is too small: %zu\n", size); err = -EINVAL; goto out; } @@ -248,8 +252,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) memcpy(&output, info->buffer.pointer, size); /* TODO: check version? */ - printk("ATPX version %u, functions 0x%08x\n", - output.version, output.function_bits); + pr_info("ATPX version %u, functions 0x%08x\n", + output.version, output.function_bits); radeon_atpx_parse_functions(&atpx->functions, output.function_bits); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 07193cd0c417..d2f02c3dfce2 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -198,8 +198,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", connector->name, bpc); } - } - else if (bpc > 8) { + } else if (bpc > 8) { /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. 
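To print through dev_err(), the ATPX code above steps from the acpi_handle back to its enclosing acpi_device with container_of(), that is, pointer-to-member arithmetic: subtract the member's offset to recover the containing object (which relies on the handle really being the embedded member of an acpi_device). The mechanics on a toy type:

#include <stddef.h>

struct holder {
	int	id;
	void	*handle;	/* embedded member */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* @handle_field must point at the 'handle' member itself */
static struct holder *holder_from_handle(void **handle_field)
{
	return container_of(handle_field, struct holder, handle);
}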
Using 8 bpc.\n", connector->name); @@ -334,10 +333,8 @@ static void radeon_connector_free_edid(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); - if (radeon_connector->edid) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; - } + kfree(radeon_connector->edid); + radeon_connector->edid = NULL; } static int radeon_ddc_get_modes(struct drm_connector *connector) @@ -1372,7 +1369,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) /* assume digital unless load detected otherwise */ radeon_connector->use_digital = true; lret = encoder_funcs->detect(encoder, connector); - DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret); + DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret); if (lret == connector_status_connected) radeon_connector->use_digital = false; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 39cdede460b5..fa531493b111 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -113,59 +113,32 @@ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 50 #define KMS_DRIVER_PATCHLEVEL 0 -int radeon_suspend_kms(struct drm_device *dev, bool suspend, - bool fbcon, bool freeze); -int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); -extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, - unsigned int flags, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode); -extern bool radeon_is_px(struct drm_device *dev); -int radeon_mode_dumb_mmap(struct drm_file *filp, - struct drm_device *dev, - uint32_t handle, uint64_t *offset_p); -int radeon_mode_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args); - -/* atpx handler */ -#if defined(CONFIG_VGA_SWITCHEROO) -void radeon_register_atpx_handler(void); -void radeon_unregister_atpx_handler(void); -bool radeon_has_atpx_dgpu_power_cntl(void); -bool radeon_is_atpx_hybrid(void); -#else -static inline void radeon_register_atpx_handler(void) {} -static inline void radeon_unregister_atpx_handler(void) {} -static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } -static inline bool radeon_is_atpx_hybrid(void) { return false; } -#endif int radeon_no_wb; int radeon_modeset = -1; int radeon_dynclks = -1; -int radeon_r4xx_atom = 0; +int radeon_r4xx_atom; int radeon_agpmode = -1; -int radeon_vram_limit = 0; +int radeon_vram_limit; int radeon_gart_size = -1; /* auto */ -int radeon_benchmarking = 0; -int radeon_testing = 0; -int radeon_connector_table = 0; +int radeon_benchmarking; +int radeon_testing; +int radeon_connector_table; int radeon_tv = 1; int radeon_audio = -1; -int radeon_disp_priority = 0; -int radeon_hw_i2c = 0; +int radeon_disp_priority; +int radeon_hw_i2c; int radeon_pcie_gen2 = -1; int radeon_msi = -1; int radeon_lockup_timeout = 10000; -int radeon_fastfb = 0; +int radeon_fastfb; int radeon_dpm = -1; int radeon_aspm = -1; int radeon_runtime_pm = -1; -int radeon_hard_reset = 0; +int radeon_hard_reset; int radeon_vm_size = 8; int radeon_vm_block_size = -1; -int radeon_deep_color = 0; +int radeon_deep_color; int radeon_use_pflipirq = 2; int radeon_bapm = -1; int radeon_backlight = -1; @@ -384,6 +357,7 @@ radeon_pci_shutdown(struct pci_dev *pdev) static int radeon_pmops_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + return radeon_suspend_kms(drm_dev, 
true, true, false); } @@ -404,12 +378,14 @@ static int radeon_pmops_resume(struct device *dev) static int radeon_pmops_freeze(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + return radeon_suspend_kms(drm_dev, false, true, true); } static int radeon_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + return radeon_resume_kms(drm_dev, false, true); } @@ -494,6 +470,7 @@ long radeon_drm_ioctl(struct file *filp, struct drm_file *file_priv = filp->private_data; struct drm_device *dev; long ret; + dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) { diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 2ffe0975ee54..34a1c73d3938 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -124,4 +124,17 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void radeon_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); +/* atpx handler */ +#if defined(CONFIG_VGA_SWITCHEROO) +void radeon_register_atpx_handler(void); +void radeon_unregister_atpx_handler(void); +bool radeon_has_atpx_dgpu_power_cntl(void); +bool radeon_is_atpx_hybrid(void); +#else +static inline void radeon_register_atpx_handler(void) {} +static inline void radeon_unregister_atpx_handler(void) {} +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool radeon_is_atpx_hybrid(void) { return false; } +#endif + #endif /* __RADEON_DRV_H__ */ diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index b3518a8f95a0..9cb6401fe97e 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -58,6 +58,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) count = -1; list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); + count++; if (clone_encoder == encoder) @@ -108,9 +109,10 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8 if (ASIC_IS_AVIVO(rdev)) ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1; else { - /*if (rdev->family == CHIP_R200) - ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; - else*/ + /* if (rdev->family == CHIP_R200) + * ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; + * else + */ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; } break; @@ -234,6 +236,7 @@ void radeon_encoder_set_active_device(struct drm_encoder *encoder) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); + radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices; DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n", radeon_encoder->active_device, radeon_encoder->devices, @@ -320,12 +323,12 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct drm_display_mode *native_mode = &radeon_encoder->native_mode; - unsigned hblank = native_mode->htotal - native_mode->hdisplay; - unsigned vblank = native_mode->vtotal - native_mode->vdisplay; - unsigned hover = native_mode->hsync_start - native_mode->hdisplay; - unsigned vover = native_mode->vsync_start - native_mode->vdisplay; - unsigned hsync_width = native_mode->hsync_end - 
native_mode->hsync_start; - unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start; + unsigned int hblank = native_mode->htotal - native_mode->hdisplay; + unsigned int vblank = native_mode->vtotal - native_mode->vdisplay; + unsigned int hover = native_mode->hsync_start - native_mode->hdisplay; + unsigned int vover = native_mode->vsync_start - native_mode->vdisplay; + unsigned int hsync_width = native_mode->hsync_end - native_mode->hsync_start; + unsigned int vsync_width = native_mode->vsync_end - native_mode->vsync_start; adjusted_mode->clock = native_mode->clock; adjusted_mode->flags = native_mode->flags; @@ -424,6 +427,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, bool radeon_encoder_is_digital(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_TMDS1: diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 04109a2a6fd7..4bb242437ff6 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -74,9 +74,9 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) ptr = dma_alloc_coherent(&rdev->pdev->dev, rdev->gart.table_size, &rdev->gart.table_addr, GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) return -ENOMEM; - } + #ifdef CONFIG_X86 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { @@ -99,9 +99,9 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) */ void radeon_gart_table_ram_free(struct radeon_device *rdev) { - if (rdev->gart.ptr == NULL) { + if (!rdev->gart.ptr) return; - } + #ifdef CONFIG_X86 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { @@ -133,9 +133,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->gart.table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &rdev->gart.robj); - if (r) { + if (r) return r; - } } return 0; } @@ -197,9 +196,9 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev) { int r; - if (rdev->gart.robj == NULL) { + if (!rdev->gart.robj) return; - } + r = radeon_bo_reserve(rdev->gart.robj, false); if (likely(r == 0)) { radeon_bo_kunmap(rdev->gart.robj); @@ -220,9 +219,9 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev) */ void radeon_gart_table_vram_free(struct radeon_device *rdev) { - if (rdev->gart.robj == NULL) { + if (!rdev->gart.robj) return; - } + radeon_bo_unref(&rdev->gart.robj); } @@ -239,11 +238,10 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) * Unbinds the requested pages from the gart page table and * replaces them with the dummy page (all asics). */ -void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, +void radeon_gart_unbind(struct radeon_device *rdev, unsigned int offset, int pages) { - unsigned t; - unsigned p; + unsigned int t, p; int i, j; if (!rdev->gart.ready) { @@ -284,12 +282,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, * (all asics). * Returns 0 for success, -EINVAL for failure. 
*/ -int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, +int radeon_gart_bind(struct radeon_device *rdev, unsigned int offset, int pages, struct page **pagelist, dma_addr_t *dma_addr, uint32_t flags) { - unsigned t; - unsigned p; + unsigned int t, p; uint64_t page_base, page_entry; int i, j; @@ -307,9 +304,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { page_entry = radeon_gart_get_page_entry(page_base, flags); rdev->gart.pages_entry[t] = page_entry; - if (rdev->gart.ptr) { + if (rdev->gart.ptr) radeon_gart_set_page(rdev, t, page_entry); - } + page_base += RADEON_GPU_PAGE_SIZE; } } @@ -332,9 +329,9 @@ int radeon_gart_init(struct radeon_device *rdev) { int r, i; - if (rdev->gart.pages) { + if (rdev->gart.pages) return 0; - } + /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */ if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) { DRM_ERROR("Page size is smaller than GPU page size!\n"); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d0119c5f7eb3..358d19242f4b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -316,7 +316,7 @@ int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, { /* TODO: implement */ DRM_ERROR("unimplemented %s\n", __func__); - return -ENOSYS; + return -EOPNOTSUPP; } int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, @@ -324,7 +324,7 @@ int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, { /* TODO: implement */ DRM_ERROR("unimplemented %s\n", __func__); - return -ENOSYS; + return -EOPNOTSUPP; } int radeon_gem_create_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index e0214cf1b43b..a16590c6247f 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -444,7 +444,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); return -EINVAL; } - value = (uint32_t*)&value64; + value = (uint32_t *)&value64; value_size = sizeof(uint64_t); value64 = radeon_get_gpu_clock_counter(rdev); break; @@ -543,18 +543,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) *value = rdev->vce.fb_version; break; case RADEON_INFO_NUM_BYTES_MOVED: - value = (uint32_t*)&value64; + value = (uint32_t *)&value64; value_size = sizeof(uint64_t); value64 = atomic64_read(&rdev->num_bytes_moved); break; case RADEON_INFO_VRAM_USAGE: - value = (uint32_t*)&value64; + value = (uint32_t *)&value64; value_size = sizeof(uint64_t); man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); value64 = ttm_resource_manager_usage(man); break; case RADEON_INFO_GTT_USAGE: - value = (uint32_t*)&value64; + value = (uint32_t *)&value64; value_size = sizeof(uint64_t); man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT); value64 = ttm_resource_manager_usage(man); @@ -614,7 +614,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; } - if (copy_to_user(value_ptr, (char*)value, value_size)) { + if (copy_to_user(value_ptr, (char *)value, value_size)) { DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); return -EFAULT; } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 12e180b119ac..7883e9ec0bae 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ 
b/drivers/gpu/drm/radeon/radeon_legacy_tv.c @@ -724,12 +724,14 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, } for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) { - if ((tv_dac->tv.h_code_timing[i] = hor_timing[i]) == 0) + tv_dac->tv.h_code_timing[i] = hor_timing[i]; + if (tv_dac->tv.h_code_timing[i] == 0) break; } for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) { - if ((tv_dac->tv.v_code_timing[i] = vert_timing[i]) == 0) + tv_dac->tv.v_code_timing[i] = vert_timing[i]; + if (tv_dac->tv.v_code_timing[i] == 0) break; } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index a5e1d2139e80..c9fef9b61ced 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -156,10 +156,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) i, *vram_start, gtt_start, (unsigned long long) (gtt_addr - rdev->mc.gtt_start + - (void*)gtt_start - gtt_map), + (void *)gtt_start - gtt_map), (unsigned long long) (vram_addr - rdev->mc.vram_start + - (void*)gtt_start - gtt_map)); + (void *)gtt_start - gtt_map)); radeon_bo_kunmap(vram_obj); goto out_lclean_unpin; } @@ -207,10 +207,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) i, *gtt_start, vram_start, (unsigned long long) (vram_addr - rdev->mc.vram_start + - (void*)vram_start - vram_map), + (void *)vram_start - vram_map), (unsigned long long) (gtt_addr - rdev->mc.gtt_start + - (void*)vram_start - vram_map)); + (void *)vram_start - vram_map)); radeon_bo_kunmap(gtt_obj[i]); goto out_lclean_unpin; } diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index ca4a36464340..d1871af967d4 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -95,7 +95,7 @@ int radeon_vce_init(struct radeon_device *rdev) size = rdev->vce_fw->size - strlen(fw_version) - 9; c = rdev->vce_fw->data; - for (;size > 0; --size, ++c) + for (; size > 0; --size, ++c) if (strncmp(c, fw_version, strlen(fw_version)) == 0) break; @@ -110,7 +110,7 @@ int radeon_vce_init(struct radeon_device *rdev) size = rdev->vce_fw->size - strlen(fb_version) - 3; c = rdev->vce_fw->data; - for (;size > 0; --size, ++c) + for (; size > 0; --size, ++c) if (strncmp(c, fb_version, strlen(fb_version)) == 0) break; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 26fa9b095514..9ce12fa3c356 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -136,8 +136,7 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return 0; } -static const u32 r7xx_golden_registers[] = -{ +static const u32 r7xx_golden_registers[] = { 0x8d00, 0xffffffff, 0x0e0e0074, 0x8d04, 0xffffffff, 0x013a2b34, 0x9508, 0xffffffff, 0x00000002, @@ -152,8 +151,7 @@ static const u32 r7xx_golden_registers[] = 0x7300, 0xffffffff, 0x001000f0 }; -static const u32 r7xx_golden_dyn_gpr_registers[] = -{ +static const u32 r7xx_golden_dyn_gpr_registers[] = { 0x8db0, 0xffffffff, 0x98989898, 0x8db4, 0xffffffff, 0x98989898, 0x8db8, 0xffffffff, 0x98989898, @@ -165,8 +163,7 @@ static const u32 r7xx_golden_dyn_gpr_registers[] = 0x88c4, 0xffffffff, 0x00000082 }; -static const u32 rv770_golden_registers[] = -{ +static const u32 rv770_golden_registers[] = { 0x562c, 0xffffffff, 0, 0x3f90, 0xffffffff, 0, 0x9148, 0xffffffff, 0, @@ -175,8 +172,7 @@ static const u32 rv770_golden_registers[] = 0x9698, 0x18000000, 0x18000000 }; -static const u32 rv770ce_golden_registers[] = -{ +static const u32 
rv770ce_golden_registers[] = { 0x562c, 0xffffffff, 0, 0x3f90, 0xffffffff, 0x00cc0000, 0x9148, 0xffffffff, 0x00cc0000, @@ -187,8 +183,7 @@ static const u32 rv770ce_golden_registers[] = 0x9698, 0x18000000, 0x18000000 }; -static const u32 rv770_mgcg_init[] = -{ +static const u32 rv770_mgcg_init[] = { 0x8bcc, 0xffffffff, 0x130300f9, 0x5448, 0xffffffff, 0x100, 0x55e4, 0xffffffff, 0x100, @@ -345,8 +340,7 @@ static const u32 rv770_mgcg_init[] = 0x92a4, 0xffffffff, 0x00080007 }; -static const u32 rv710_golden_registers[] = -{ +static const u32 rv710_golden_registers[] = { 0x3f90, 0x00ff0000, 0x00fc0000, 0x9148, 0x00ff0000, 0x00fc0000, 0x3f94, 0x00ff0000, 0x00fc0000, @@ -355,8 +349,7 @@ static const u32 rv710_golden_registers[] = 0xa180, 0xffffffff, 0x00003f3f }; -static const u32 rv710_mgcg_init[] = -{ +static const u32 rv710_mgcg_init[] = { 0x8bcc, 0xffffffff, 0x13030040, 0x5448, 0xffffffff, 0x100, 0x55e4, 0xffffffff, 0x100, @@ -414,8 +407,7 @@ static const u32 rv710_mgcg_init[] = 0x9150, 0xffffffff, 0x4d940000 }; -static const u32 rv730_golden_registers[] = -{ +static const u32 rv730_golden_registers[] = { 0x3f90, 0x00ff0000, 0x00f00000, 0x9148, 0x00ff0000, 0x00f00000, 0x3f94, 0x00ff0000, 0x00f00000, @@ -425,8 +417,7 @@ static const u32 rv730_golden_registers[] = 0xa180, 0xffffffff, 0x00003f3f }; -static const u32 rv730_mgcg_init[] = -{ +static const u32 rv730_mgcg_init[] = { 0x8bcc, 0xffffffff, 0x130300f9, 0x5448, 0xffffffff, 0x100, 0x55e4, 0xffffffff, 0x100, @@ -547,8 +538,7 @@ static const u32 rv730_mgcg_init[] = 0x92a4, 0xffffffff, 0x00000005 }; -static const u32 rv740_golden_registers[] = -{ +static const u32 rv740_golden_registers[] = { 0x88c4, 0xffffffff, 0x00000082, 0x28a50, 0xfffffffc, 0x00000004, 0x2650, 0x00040000, 0, @@ -584,8 +574,7 @@ static const u32 rv740_golden_registers[] = 0x9698, 0x18000000, 0x18000000 }; -static const u32 rv740_mgcg_init[] = -{ +static const u32 rv740_mgcg_init[] = { 0x8bcc, 0xffffffff, 0x13030100, 0x5448, 0xffffffff, 0x100, 0x55e4, 0xffffffff, 0x100, diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c index 45575c0d0a1d..09fa7f5e7c41 100644 --- a/drivers/gpu/drm/radeon/rv770_smc.c +++ b/drivers/gpu/drm/radeon/rv770_smc.c @@ -34,8 +34,7 @@ #define FIRST_SMC_INT_VECT_REG 0xFFD8 #define FIRST_INT_VECT_S19 0xFFC0 -static const u8 rv770_smc_int_vectors[] = -{ +static const u8 rv770_smc_int_vectors[] = { 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, @@ -54,8 +53,7 @@ static const u8 rv770_smc_int_vectors[] = 0x03, 0x51, 0x03, 0x51 }; -static const u8 rv730_smc_int_vectors[] = -{ +static const u8 rv730_smc_int_vectors[] = { 0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15, @@ -74,8 +72,7 @@ static const u8 rv730_smc_int_vectors[] = 0x03, 0x56, 0x03, 0x56 }; -static const u8 rv710_smc_int_vectors[] = -{ +static const u8 rv710_smc_int_vectors[] = { 0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04, @@ -94,8 +91,7 @@ static const u8 rv710_smc_int_vectors[] = 0x03, 0x51, 0x03, 0x51 }; -static const u8 rv740_smc_int_vectors[] = -{ +static const u8 rv740_smc_int_vectors[] = { 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10, @@ -114,8 +110,7 @@ static const u8 rv740_smc_int_vectors[] = 0x03, 0x51, 0x03, 0x51 }; -static const u8 cedar_smc_int_vectors[] = -{ +static const u8 cedar_smc_int_vectors[] = { 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, @@ -134,8 +129,7 @@ static const u8 cedar_smc_int_vectors[] = 0x04, 0xF6, 
0x04, 0xF6 }; -static const u8 redwood_smc_int_vectors[] = -{ +static const u8 redwood_smc_int_vectors[] = { 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, @@ -154,8 +148,7 @@ static const u8 redwood_smc_int_vectors[] = 0x04, 0xF6, 0x04, 0xF6 }; -static const u8 juniper_smc_int_vectors[] = -{ +static const u8 juniper_smc_int_vectors[] = { 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, @@ -174,8 +167,7 @@ static const u8 juniper_smc_int_vectors[] = 0x04, 0xF6, 0x04, 0xF6 }; -static const u8 cypress_smc_int_vectors[] = -{ +static const u8 cypress_smc_int_vectors[] = { 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, @@ -194,8 +186,7 @@ static const u8 cypress_smc_int_vectors[] = 0x04, 0xF6, 0x04, 0xF6 }; -static const u8 barts_smc_int_vectors[] = -{ +static const u8 barts_smc_int_vectors[] = { 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, @@ -214,8 +205,7 @@ static const u8 barts_smc_int_vectors[] = 0x05, 0x0A, 0x05, 0x0A }; -static const u8 turks_smc_int_vectors[] = -{ +static const u8 turks_smc_int_vectors[] = { 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, @@ -234,8 +224,7 @@ static const u8 turks_smc_int_vectors[] = 0x05, 0x0A, 0x05, 0x0A }; -static const u8 caicos_smc_int_vectors[] = -{ +static const u8 caicos_smc_int_vectors[] = { 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, @@ -254,8 +243,7 @@ static const u8 caicos_smc_int_vectors[] = 0x05, 0x0A, 0x05, 0x0A }; -static const u8 cayman_smc_int_vectors[] = -{ +static const u8 cayman_smc_int_vectors[] = { 0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05, diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h index 4ea1cb2e45a3..4b7dee3cf58b 100644 --- a/drivers/gpu/drm/radeon/sislands_smc.h +++ b/drivers/gpu/drm/radeon/sislands_smc.h @@ -89,8 +89,7 @@ struct PP_SIslands_PAPMStatus }; typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus; -struct PP_SIslands_PAPMParameters -{ +struct PP_SIslands_PAPMParameters { uint32_t NearTDPLimitTherm; uint32_t NearTDPLimitPAPM; uint32_t PlatformPowerLimit; @@ -100,8 +99,7 @@ struct PP_SIslands_PAPMParameters }; typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters; -struct SISLANDS_SMC_SCLK_VALUE -{ +struct SISLANDS_SMC_SCLK_VALUE { uint32_t vCG_SPLL_FUNC_CNTL; uint32_t vCG_SPLL_FUNC_CNTL_2; uint32_t vCG_SPLL_FUNC_CNTL_3; @@ -113,8 +111,7 @@ struct SISLANDS_SMC_SCLK_VALUE typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE; -struct SISLANDS_SMC_MCLK_VALUE -{ +struct SISLANDS_SMC_MCLK_VALUE { uint32_t vMPLL_FUNC_CNTL; uint32_t vMPLL_FUNC_CNTL_1; uint32_t vMPLL_FUNC_CNTL_2; @@ -129,8 +126,7 @@ struct SISLANDS_SMC_MCLK_VALUE typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE; -struct SISLANDS_SMC_VOLTAGE_VALUE -{ +struct SISLANDS_SMC_VOLTAGE_VALUE { uint16_t value; uint8_t index; uint8_t phase_settings; @@ -138,8 +134,7 @@ struct SISLANDS_SMC_VOLTAGE_VALUE typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE; -struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ +struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL { uint8_t ACIndex; uint8_t displayWatermark; uint8_t gen2PCIE; @@ -180,8 +175,7 @@ struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL; -struct SISLANDS_SMC_SWSTATE -{ +struct SISLANDS_SMC_SWSTATE { uint8_t flags; uint8_t levelCount; uint8_t padding2; @@ -205,8 +199,7 @@ 
struct SISLANDS_SMC_SWSTATE_SINGLE { #define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 #define SISLANDS_SMC_VOLTAGEMASK_MAX 4 -struct SISLANDS_SMC_VOLTAGEMASKTABLE -{ +struct SISLANDS_SMC_VOLTAGEMASKTABLE { uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX]; }; @@ -214,8 +207,7 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE; #define SISLANDS_MAX_NO_VREG_STEPS 32 -struct SISLANDS_SMC_STATETABLE -{ +struct SISLANDS_SMC_STATETABLE { uint8_t thermalProtectType; uint8_t systemFlags; uint8_t maxVDDCIndexInPPTable; @@ -254,8 +246,7 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 -struct PP_SIslands_FanTable -{ +struct PP_SIslands_FanTable { uint8_t fdo_mode; uint8_t padding; int16_t temp_min; @@ -285,8 +276,7 @@ typedef struct PP_SIslands_FanTable PP_SIslands_FanTable; #define SMC_SISLANDS_SCALE_I 7 #define SMC_SISLANDS_SCALE_R 12 -struct PP_SIslands_CacConfig -{ +struct PP_SIslands_CacConfig { uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES]; uint32_t lkge_lut_V0; uint32_t lkge_lut_Vstep; @@ -308,23 +298,20 @@ typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig; #define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16 #define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 -struct SMC_SIslands_MCRegisterAddress -{ +struct SMC_SIslands_MCRegisterAddress { uint16_t s0; uint16_t s1; }; typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress; -struct SMC_SIslands_MCRegisterSet -{ +struct SMC_SIslands_MCRegisterSet { uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; }; typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet; -struct SMC_SIslands_MCRegisters -{ +struct SMC_SIslands_MCRegisters { uint8_t last; uint8_t reserved[3]; SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; @@ -333,8 +320,7 @@ struct SMC_SIslands_MCRegisters typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters; -struct SMC_SIslands_MCArbDramTimingRegisterSet -{ +struct SMC_SIslands_MCArbDramTimingRegisterSet { uint32_t mc_arb_dram_timing; uint32_t mc_arb_dram_timing2; uint8_t mc_arb_rfsh_rate; @@ -344,8 +330,7 @@ struct SMC_SIslands_MCArbDramTimingRegisterSet typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet; -struct SMC_SIslands_MCArbDramTimingRegisters -{ +struct SMC_SIslands_MCArbDramTimingRegisters { uint8_t arb_current; uint8_t reserved[3]; SMC_SIslands_MCArbDramTimingRegisterSet data[16]; @@ -353,8 +338,7 @@ struct SMC_SIslands_MCArbDramTimingRegisters typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters; -struct SMC_SISLANDS_SPLL_DIV_TABLE -{ +struct SMC_SISLANDS_SPLL_DIV_TABLE { uint32_t freq[256]; uint32_t ss[256]; }; @@ -374,8 +358,7 @@ typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE; #define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16 -struct Smc_SIslands_DTE_Configuration -{ +struct Smc_SIslands_DTE_Configuration { uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; uint32_t K; diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 3b87eebddc97..407bc07cec69 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1094,8 +1094,8 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp 
*disp, "%s%u", dma_names[layer->id], i); dma->chan = dma_request_chan(disp->dev, dma_channel_name); if (IS_ERR(dma->chan)) { - dev_err(disp->dev, "failed to request dma channel\n"); - ret = PTR_ERR(dma->chan); + ret = dev_err_probe(disp->dev, PTR_ERR(dma->chan), + "failed to request dma channel\n"); dma->chan = NULL; return ret; } @@ -1228,7 +1228,6 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub) { struct platform_device *pdev = to_platform_device(dpsub->dev); struct zynqmp_disp *disp; - struct resource *res; int ret; disp = kzalloc(sizeof(*disp), GFP_KERNEL); @@ -1238,22 +1237,19 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub) disp->dev = &pdev->dev; disp->dpsub = dpsub; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend"); - disp->blend.base = devm_ioremap_resource(disp->dev, res); + disp->blend.base = devm_platform_ioremap_resource_byname(pdev, "blend"); if (IS_ERR(disp->blend.base)) { ret = PTR_ERR(disp->blend.base); goto error; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf"); - disp->avbuf.base = devm_ioremap_resource(disp->dev, res); + disp->avbuf.base = devm_platform_ioremap_resource_byname(pdev, "av_buf"); if (IS_ERR(disp->avbuf.base)) { ret = PTR_ERR(disp->avbuf.base); goto error; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud"); - disp->audio.base = devm_ioremap_resource(disp->dev, res); + disp->audio.base = devm_platform_ioremap_resource_byname(pdev, "aud"); if (IS_ERR(disp->audio.base)) { ret = PTR_ERR(disp->audio.base); goto error; } diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 0a7b466446fb..a0606fab0e22 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -784,7 +784,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) } /** - * zynqmp_dp_link_train - Train the link + * zynqmp_dp_train - Train the link * @dp: DisplayPort IP core structure * * Return: 0 if all trains are done successfully, or corresponding error code. diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index 9d64a8d98372..88eb33acd5f0 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -227,7 +227,9 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) dpsub->dev = &pdev->dev; platform_set_drvdata(pdev, dpsub); - dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT)); + ret = dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT)); + if (ret) + return ret; /* Try the reserved memory. Proceed if there's none. */ of_reserved_mem_device_init(&pdev->dev);
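
For readers unfamiliar with the helpers adopted in the zynqmp_disp and zynqmp_dpsub hunks above, here is a minimal sketch of the same pattern in a hypothetical platform driver. The driver name "foo", the resource name "regs", and struct foo_dev are illustrative only; devm_platform_ioremap_resource_byname(), dev_err_probe(), and dma_set_mask() are the actual kernel APIs being demonstrated.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical device state; "foo" is not a real kernel driver. */
struct foo_dev {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo;
	int ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/*
	 * One call looks up the "regs" region by name, maps it, and ties
	 * its lifetime to the device -- replacing the older two-step
	 * platform_get_resource_byname() + devm_ioremap_resource() pair
	 * that the zynqmp_disp_probe() hunk removes.
	 */
	foo->base = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(foo->base))
		/*
		 * dev_err_probe() logs the failure (quietly recording the
		 * reason on -EPROBE_DEFER) and returns the error code in
		 * a single expression.
		 */
		return dev_err_probe(&pdev->dev, PTR_ERR(foo->base),
				     "failed to map registers\n");

	/* Act on the dma_set_mask() result, as the zynqmp_dpsub fix does. */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, foo);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Sketch of devm_platform_ioremap_resource_byname()/dev_err_probe() usage");
MODULE_LICENSE("GPL");

The design point behind both helpers is the same: collapse a recurring multi-statement error path into one call so probe functions stay short and the error handling stays uniform across drivers.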