Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
7 files changed, 399 insertions, 341 deletions
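Most of the churn in amdgpu_dm.c below comes from converting the amd_ip_funcs callbacks (dm_early_init, dm_sw_init, dm_sw_fini, dm_hw_init, dm_hw_fini, dm_suspend, dm_resume, and the soft-reset hooks) from an untyped void *handle argument to a typed struct amdgpu_ip_block *ip_block, reaching the device through ip_block->adev instead of a cast. A minimal standalone sketch of that pattern follows; the stand-in types are simplified and are not the real amdgpu structures.

/*
 * Standalone illustration (not kernel code) of the void-handle ->
 * typed-ip_block conversion applied throughout this patch.  The
 * struct names below are simplified stand-ins for amdgpu_device
 * and amdgpu_ip_block.
 */
#include <stdio.h>

struct device_ctx {                     /* stands in for struct amdgpu_device */
        const char *name;
};

struct ip_block {                       /* stands in for struct amdgpu_ip_block */
        struct device_ctx *adev;
};

/* Old style: the base driver handle is passed as an untyped pointer
 * and every callback casts it back to the device. */
static int hw_init_old(void *handle)
{
        struct device_ctx *adev = (struct device_ctx *)handle;

        printf("init (old handle): %s\n", adev->name);
        return 0;
}

/* New style: the per-instance ip_block is passed and the device is
 * reached through ip_block->adev, so no cast is needed. */
static int hw_init_new(struct ip_block *ip_block)
{
        struct device_ctx *adev = ip_block->adev;

        printf("init (new ip_block): %s\n", adev->name);
        return 0;
}

int main(void)
{
        struct device_ctx dev = { .name = "dm" };
        struct ip_block block = { .adev = &dev };

        hw_init_old(&dev);
        hw_init_new(&block);
        return 0;
}
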
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6b5e2206e687..f0a6816709ca 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -320,18 +320,18 @@ static bool dm_is_idle(void *handle) return true; } -static int dm_wait_for_idle(void *handle) +static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; } -static bool dm_check_soft_reset(void *handle) +static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block) { return false; } -static int dm_soft_reset(void *handle) +static int dm_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; @@ -968,7 +968,7 @@ static int dm_set_powergating_state(void *handle, } /* Prototypes of private functions */ -static int dm_early_init(void *handle); +static int dm_early_init(struct amdgpu_ip_block *ip_block); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) @@ -1307,6 +1307,29 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) DRM_INFO("DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); + /* Keeping sanity checks off if + * DCN31 >= 4.0.59.0 + * DCN314 >= 8.0.16.0 + * Otherwise, turn on sanity checks + */ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59)) + adev->dm.dc->debug.sanity_checks = true; + break; + case IP_VERSION(3, 1, 4): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16)) + adev->dm.dc->debug.sanity_checks = true; + break; + default: + break; + } + return 0; } @@ -1696,6 +1719,26 @@ dm_allocate_gpu_mem( return da->cpu_ptr; } +void +dm_free_gpu_mem( + struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *pvMem) +{ + struct dal_allocation *da; + + /* walk the da list in DM */ + list_for_each_entry(da, &adev->dm.da_list, list) { + if (pvMem == da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } + +} + static enum dmub_status dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, enum dmub_gpint_command command_code, @@ -1762,16 +1805,20 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * /* Send the chunk */ ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); if (ret != DMUB_STATUS_OK) - /* No need to free bb here since it shall be done in dm_sw_fini() */ - return NULL; + goto free_bb; } /* Now ask DMUB to copy the bb */ ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); if (ret != DMUB_STATUS_OK) - return NULL; + goto free_bb; return bb; + +free_bb: + dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); + return NULL; + } static enum dmub_ips_disable_type dm_get_default_ips_mode( @@ -1886,7 +1933,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) else init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); } else { - init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) + init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); + else + 
init_data.flags.gpu_vm_support = + (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); } adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; @@ -2115,9 +2166,9 @@ error: return -EINVAL; } -static int amdgpu_dm_early_fini(void *handle) +static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_audio_fini(adev); @@ -2509,9 +2560,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return 0; } -static int dm_sw_init(void *handle) +static int dm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->dm.cgs_device = amdgpu_cgs_create_device(adev); @@ -2531,9 +2582,9 @@ static int dm_sw_init(void *handle) return load_dmcu_fw(adev); } -static int dm_sw_fini(void *handle) +static int dm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dal_allocation *da; list_for_each_entry(da, &adev->dm.da_list, list) { @@ -2541,11 +2592,11 @@ static int dm_sw_fini(void *handle) amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); list_del(&da->list); kfree(da); + adev->dm.bb_from_dmub = NULL; break; } } - adev->dm.bb_from_dmub = NULL; kfree(adev->dm.dmub_fb_info); adev->dm.dmub_fb_info = NULL; @@ -2598,9 +2649,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) return ret; } -static int dm_late_init(void *handle) +static int dm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dmcu_iram_parameters params; unsigned int linear_lut[16]; @@ -2790,7 +2841,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. @@ -2808,9 +2859,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * - Vblank support * - Debug FS entries, if enabled */ -static int dm_hw_init(void *handle) +static int dm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* Create DAL display manager */ @@ -2824,15 +2875,15 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. This involves cleaning up the DRM device, DC, and any modules that * were loaded. Also flush IRQ workqueues and disable them. 
*/ -static int dm_hw_fini(void *handle) +static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_hpd_fini(adev); @@ -2936,9 +2987,9 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } -static int dm_suspend(void *handle) +static int dm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; @@ -3125,9 +3176,9 @@ cleanup: kfree(bundle); } -static int dm_resume(void *handle) +static int dm_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct drm_device *ddev = adev_to_drm(adev); struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; @@ -3142,8 +3193,7 @@ static int dm_resume(void *handle) struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; - int i, r, j, ret; - bool need_hotplug = false; + int i, r, j; struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { @@ -3332,23 +3382,16 @@ static int dm_resume(void *handle) aconnector->mst_root) continue; - ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); - - if (ret < 0) { - dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, - aconnector->dc_link); - need_hotplug = true; - } + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); } drm_connector_list_iter_end(&iter); - if (need_hotplug) - drm_kms_helper_hotplug_event(ddev); - amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); + drm_kms_helper_hotplug_event(ddev); + return 0; } @@ -3379,8 +3422,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .soft_reset = dm_soft_reset, .set_clockgating_state = dm_set_clockgating_state, .set_powergating_state = dm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version dm_ip_block = { @@ -3495,7 +3536,7 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, - aconnector->edid); + aconnector->drm_edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); if (!aconnector->dc_sink) { @@ -3554,18 +3595,19 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { - aconnector->edid = NULL; + aconnector->drm_edid = NULL; if (aconnector->dc_link->aux_mode) { - drm_dp_cec_unset_edid( - &aconnector->dm_dp_aux.aux); + drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); } } else { - aconnector->edid = - (struct edid *)sink->dc_edid.raw_edid; + const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; + + aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); + drm_edid_connector_update(connector, aconnector->drm_edid); if (aconnector->dc_link->aux_mode) - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, + connector->display_info.source_physical_address); } if (!aconnector->timing_requested) { @@ -3576,17 +3618,16 @@ void amdgpu_dm_update_connector_after_detect( "failed to create aconnector->requested_timing\n"); } - 
drm_connector_update_edid_property(connector, aconnector->edid); - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); update_connector_ext_caps(aconnector); } else { drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); - drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + drm_edid_free(aconnector->drm_edid); + aconnector->drm_edid = NULL; kfree(aconnector->timing_requested); aconnector->timing_requested = NULL; /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ @@ -4622,7 +4663,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, if (!rc) DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); } else { - rc = dc_link_set_backlight_level(link, brightness, 0); + struct set_backlight_level_params backlight_level_params = { 0 }; + + backlight_level_params.backlight_pwm_u16_16 = brightness; + backlight_level_params.transition_time_in_ms = 0; + + rc = dc_link_set_backlight_level(link, &backlight_level_params); if (!rc) DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } @@ -5177,15 +5223,20 @@ static ssize_t s3_debug_store(struct device *device, int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); + if (!ip_block) + return -EINVAL; ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { - dm_resume(adev); + dm_resume(ip_block); drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else - dm_suspend(adev); + dm_suspend(ip_block); } return ret == 0 ? count : 0; @@ -5257,9 +5308,9 @@ static int dm_init_microcode(struct amdgpu_device *adev) return r; } -static int dm_early_init(void *handle) +static int dm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_mode_info *mode_info = &adev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); @@ -6762,7 +6813,7 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); - aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; + aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } finish: @@ -7122,32 +7173,24 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; - struct edid *edid; - struct i2c_adapter *ddc; + const struct drm_edid *drm_edid; - if (dc_link && dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; - - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. 
- */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - aconnector->edid = edid; - + aconnector->drm_edid = drm_edid; /* Update emulated (virtual) sink's EDID */ if (dc_em_sink && dc_link) { + // FIXME: Get rid of drm_edid_raw() + const struct edid *edid = drm_edid_raw(drm_edid); + memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); - memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); + memmove(dc_em_sink->dc_edid.raw_edid, edid, + (edid->extensions + 1) * EDID_LENGTH); dm_helpers_parse_edid_caps( dc_link, &dc_em_sink->dc_edid, @@ -7177,36 +7220,26 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; - struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; - struct edid *edid; - struct i2c_adapter *ddc; - - if (dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; + const struct drm_edid *drm_edid; + const struct edid *edid; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - if (drm_detect_hdmi_monitor(edid)) + if (connector->display_info.is_hdmi) init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() aconnector->dc_em_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)edid, @@ -7313,10 +7346,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; + uint8_t bpc_limit = 6; if (!dm_state) return NULL; + if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + bpc_limit = 8; + do { stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, @@ -7337,11 +7375,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", + DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, - dc_result, + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth), dc_status_to_str(dc_result)); dc_stream_release(stream); @@ -7349,10 +7388,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, requested_bpc -= 2; /* lower bpc to retry validation */ } - } while (stream == NULL && requested_bpc >= 6); + } while (stream == NULL && requested_bpc >= bpc_limit); - if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); + if ((dc_result == DC_FAIL_ENC_VALIDATE || + dc_result == DC_EXCEED_DONGLE_CAP) && + !aconnector->force_yuv420_output) { + DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", + __func__, __LINE__); aconnector->force_yuv420_output = true; stream = create_validate_stream_for_sink(aconnector, drm_mode, @@ -7893,16 +7935,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) } static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (edid) { + if (drm_edid) { /* empty probed_modes */ INIT_LIST_HEAD(&connector->probed_modes); amdgpu_dm_connector->num_modes = - drm_add_edid_modes(connector, edid); + drm_edid_connector_add_modes(connector); /* sorting the probed modes before calling function * amdgpu_dm_get_native_mode() since EDID can have @@ -7916,10 +7958,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, amdgpu_dm_get_native_mode(connector); /* Freesync capabilities are reset by calling - * drm_add_edid_modes() and need to be + * drm_edid_connector_add_modes() and need to be * restored here. 
*/ - amdgpu_dm_update_freesync_caps(connector, edid); + amdgpu_dm_update_freesync_caps(connector, drm_edid); } else { amdgpu_dm_connector->num_modes = 0; } @@ -8015,12 +8057,12 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) } static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!(amdgpu_freesync_vid_mode && drm_edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8033,24 +8075,24 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct drm_encoder *encoder; - struct edid *edid = amdgpu_dm_connector->edid; + const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; struct dc_link_settings *verified_link_cap = &amdgpu_dm_connector->dc_link->verified_link_cap; const struct dc *dc = amdgpu_dm_connector->dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); - if (!drm_edid_is_valid(edid)) { + if (!drm_edid) { amdgpu_dm_connector->num_modes = drm_add_modes_noedid(connector, 640, 480); if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); } else { - amdgpu_dm_connector_ddc_get_modes(connector, edid); + amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); if (encoder) amdgpu_dm_connector_add_common_modes(encoder, connector); - amdgpu_dm_connector_add_freesync_modes(connector, edid); + amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } amdgpu_dm_fbc_init(connector); @@ -8374,7 +8416,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0) || acrtc_state->stream->link->psr_settings.psr_version < - DC_PSR_VERSION_UNSUPPORTED) { + DC_PSR_VERSION_UNSUPPORTED || + !(adev->flags & AMD_IS_APU)) { timing = &acrtc_state->stream->timing; /* at least 2 frames */ @@ -8874,6 +8917,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane, } } +static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, + const struct dm_crtc_state *acrtc_state, + const u64 current_ts) +{ + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; + + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (pr->config.replay_supported && !pr->replay_feature_enabled) + amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); + else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && + !psr->psr_feature_enabled) + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + } + + /* Decrement skip count when SR is enabled and we're doing fast updates. */ + if (acrtc_state->update_type == UPDATE_TYPE_FAST && + (psr->psr_feature_enabled || pr->config.replay_supported)) { + if (aconn->sr_skip_count > 0) + aconn->sr_skip_count--; + + /* Allow SR when skip count is 0. */ + acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; + + /* + * If sink supports PSR SU/Panel Replay, there is no need to rely on + * a vblank event disable request to enable PSR/RP. 
PSR SU/RP + * can be enabled immediately once OS demonstrates an + * adequate number of fast atomic commits to notify KMD + * of update events. See `vblank_control_worker()`. + */ + if (acrtc_attach->dm_irq_params.allow_sr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif + (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { + if (pr->replay_feature_enabled && !pr->replay_allow_active) + amdgpu_dm_replay_enable(acrtc_state->stream, true); + if (psr->psr_version >= DC_PSR_VERSION_SU_1 && + !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) + amdgpu_dm_psr_enable(acrtc_state->stream); + } + } else { + acrtc_attach->dm_irq_params.allow_sr_entry = false; + } +} + static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_device *dev, struct amdgpu_display_manager *dm, @@ -9027,7 +9120,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * during the PSR-SU was disabled. */ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && - acrtc_attach->dm_irq_params.allow_psr_entry && + acrtc_attach->dm_irq_params.allow_sr_entry && #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif @@ -9202,9 +9295,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.abm_level = &acrtc_state->abm_level; mutex_lock(&dm->dc_lock); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(acrtc_state->stream); + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (acrtc_state->stream->link->replay_settings.replay_allow_active) + amdgpu_dm_replay_disable(acrtc_state->stream); + if (acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + } mutex_unlock(&dm->dc_lock); /* @@ -9245,57 +9341,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dm_update_pflip_irq_state(drm_to_adev(dev), acrtc_attach); - if (acrtc_state->update_type > UPDATE_TYPE_FAST) { - if (acrtc_state->stream->link->replay_settings.config.replay_supported && - !acrtc_state->stream->link->replay_settings.replay_feature_enabled) { - struct amdgpu_dm_connector *aconn = - (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; - amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); - } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - !acrtc_state->stream->link->psr_settings.psr_feature_enabled) { - - struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *) - acrtc_state->stream->dm_stream_context; - - if (!aconn->disallow_edp_enter_psr) - amdgpu_dm_link_setup_psr(acrtc_state->stream); - } - } - - /* Decrement skip count when PSR is enabled and we're doing fast updates. */ - if (acrtc_state->update_type == UPDATE_TYPE_FAST && - acrtc_state->stream->link->psr_settings.psr_feature_enabled) { - struct amdgpu_dm_connector *aconn = - (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; - - if (aconn->psr_skip_count > 0) - aconn->psr_skip_count--; - - /* Allow PSR when skip count is 0. */ - acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; - - /* - * If sink supports PSR SU, there is no need to rely on - * a vblank event disable request to enable PSR. 
PSR SU - * can be enabled immediately once OS demonstrates an - * adequate number of fast atomic commits to notify KMD - * of update events. See `vblank_control_worker()`. - */ - if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && - acrtc_attach->dm_irq_params.allow_psr_entry && -#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY - !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && -#endif - !acrtc_state->stream->link->psr_settings.psr_allow_active && - !aconn->disallow_edp_enter_psr && - (timestamp_ns - - acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > - 500000000) - amdgpu_dm_psr_enable(acrtc_state->stream); - } else { - acrtc_attach->dm_irq_params.allow_psr_entry = false; - } - + amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); mutex_unlock(&dm->dc_lock); } @@ -9428,6 +9474,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -9547,6 +9594,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -9574,7 +9622,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) + if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) dc_allow_idle_optimizations(dm->dc, true); mutex_unlock(&dm->dc_lock); @@ -9598,6 +9646,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->otg_inst = status->primary_otg_inst; } } + + /* During boot up and resume the DC layer will reset the panel brightness + * to fix a flicker issue. + * It will cause the dm->actual_brightness is not the current panel brightness + * level. 
(the dm->brightness is the correct panel level) + * So we set the backlight level with dm->brightness value after set mode + */ + if (set_backlight_level) { + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + } } static void dm_set_writeback(struct amdgpu_display_manager *dm, @@ -10105,6 +10166,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); + + trace_amdgpu_dm_atomic_commit_tail_finish(state); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -12005,7 +12068,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, } static void parse_edid_displayid_vrr(struct drm_connector *connector, - struct edid *edid) + const struct edid *edid) { u8 *edid_ext = NULL; int i; @@ -12048,7 +12111,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector, } static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12064,7 +12127,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, break; } - while (j < EDID_LENGTH) { + while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) { struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); @@ -12083,7 +12146,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, + struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12117,7 +12181,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * amdgpu_dm_update_freesync_caps - Update Freesync capabilities * * @connector: Connector to query. - * @edid: EDID from monitor + * @drm_edid: DRM EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by @@ -12125,19 +12189,16 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * FreeSync parameters. 
*/ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { int i = 0; - struct detailed_timing *timing; - struct detailed_non_pixel *data; - struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + const struct edid *edid; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; @@ -12150,13 +12211,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->dc_sink : amdgpu_dm_connector->dc_em_sink; - if (!edid || !sink) { + drm_edid_connector_update(connector, drm_edid); + + if (!drm_edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; - connector->display_info.monitor_range.min_vfreq = 0; - connector->display_info.monitor_range.max_vfreq = 0; freesync_capable = false; goto update; @@ -12167,6 +12228,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + /* Some eDP panels only have the refresh rate range info in DisplayID */ if ((connector->display_info.monitor_range.min_vfreq == 0 || connector->display_info.monitor_range.max_vfreq == 0)) @@ -12174,67 +12237,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { - bool edid_check_required = false; - - if (amdgpu_dm_connector->dc_link && - amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { - if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { - amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) - freesync_capable = true; - } else { - edid_check_required = edid->version > 1 || - (edid->version == 1 && - edid->revision > 1); - } - } - - if (edid_check_required) { - for (i = 0; i < 4; i++) { - - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. 
- * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; - - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; - - if (edid->revision >= 4) { - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) - connector->display_info.monitor_range.min_vfreq += 255; - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) - connector->display_info.monitor_range.max_vfreq += 255; - } - - amdgpu_dm_connector->min_vfreq = - connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = - connector->display_info.monitor_range.max_vfreq; - - break; - } - - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { - - freesync_capable = true; - } - } + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (vsdb_info.replay_mode) { @@ -12243,12 +12249,9 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; } - } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 15d4690c74d6..6464a8378387 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -673,7 +673,7 @@ struct amdgpu_dm_connector { /* we need to mind the EDID between detect and get modes due to analog/digital/tvencoder */ - struct edid *edid; + const struct drm_edid *drm_edid; /* shared with amdgpu */ struct amdgpu_hpd hpd; @@ -727,7 +727,7 @@ struct amdgpu_dm_connector { /* Cached display modes */ struct drm_display_mode freesync_vid_base; - int psr_skip_count; + int sr_skip_count; bool disallow_edp_enter_psr; /* Record progress status of mst*/ @@ -951,7 +951,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid); + const struct drm_edid *drm_edid); void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); @@ -1004,6 +1004,9 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev, enum dc_gpu_mem_alloc_type type, size_t size, long long *addr); +void dm_free_gpu_mem(struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *addr); bool amdgpu_dm_is_headless(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index a2cf2c066a76..64a041c2af05 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -35,8 +35,8 @@ #include "amdgpu_dm_trace.h" #include "amdgpu_dm_debugfs.h" 
-#define HPD_DETECTION_PERIOD_uS 5000000 -#define HPD_DETECTION_TIME_uS 1000 +#define HPD_DETECTION_PERIOD_uS 2000000 +#define HPD_DETECTION_TIME_uS 100000 void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) { @@ -154,6 +154,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature( amdgpu_dm_psr_enable(vblank_work->stream); if (dm->idle_workqueue && + (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) && dm->dc->idle_optimizations_allowed && dm->idle_workqueue->enable && !dm->idle_workqueue->running) @@ -251,10 +252,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - if (dm->active_vblank_irq_count > 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n"); + if (dm->active_vblank_irq_count > 0) dc_allow_idle_optimizations(dm->dc, false); - } /* * Control PSR based on vblank requirements from OS @@ -266,17 +265,14 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) * where the SU region is the full hactive*vactive region. See * fill_dc_dirty_rects(). */ - if (vblank_work->stream && vblank_work->stream->link) { + if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) { amdgpu_dm_crtc_set_panel_sr_feature( vblank_work, vblank_work->enable, - vblank_work->acrtc->dm_irq_params.allow_psr_entry || - vblank_work->stream->link->replay_settings.replay_feature_enabled); + vblank_work->acrtc->dm_irq_params.allow_sr_entry); } - if (dm->active_vblank_irq_count == 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n"); + if (dm->active_vblank_irq_count == 0) dc_allow_idle_optimizations(dm->dc, true); - } mutex_unlock(&dm->dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index db56b0aa5454..6a97bb2d9160 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1529,7 +1529,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1543,8 +1542,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1558,10 +1555,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_clock_en); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1719,7 +1715,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1733,8 +1728,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1748,10 +1741,9 @@ static ssize_t 
dp_dsc_slice_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1907,7 +1899,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1921,8 +1912,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1936,10 +1925,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2091,7 +2079,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2105,8 +2092,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2120,10 +2105,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_bits_per_pixel); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2270,7 +2254,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2284,8 +2267,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2299,10 +2280,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2328,7 +2308,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2342,8 +2321,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = 
&aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2357,10 +2334,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2401,7 +2377,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2415,8 +2390,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2430,10 +2403,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_chunk_size); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2474,7 +2446,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2488,8 +2459,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2503,10 +2472,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_bpg_offset); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 069e0195e50a..b0fea0856866 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -23,6 +23,8 @@ * */ +#include <acpi/video.h> + #include <linux/string.h> #include <linux/acpi.h> #include <linux/i2c.h> @@ -44,6 +46,7 @@ #include "dm_helpers.h" #include "ddc_service_types.h" +#include "clk_mgr.h" static u32 edid_extract_panel_id(struct edid *edid) { @@ -641,6 +644,8 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, // write rc data memmove(rc_data, data, length); ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); + if (ret < 0) + goto err; } // write rc offset @@ -649,20 +654,21 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); + if (ret < 0) + goto err; // write rc length rc_length[0] = (unsigned char) length & 0xFF; rc_length[1] = (unsigned char) (length >> 8) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); + 
if (ret < 0) + goto err; // write rc cmd rc_cmd = cmd | 0x80; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); - - if (ret < 0) { - DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); - return false; - } + if (ret < 0) + goto err; // poll until active is 0 for (i = 0; i < 10; i++) { @@ -685,6 +691,10 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dbg_dp(aux->drm_dev, "success = %d\n", success); return success; + +err: + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); + return false; } static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) @@ -891,6 +901,60 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) return dp_sink_present; } +static int +dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct drm_connector *connector = data; + struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); + unsigned char start = block * EDID_LENGTH; + void *edid; + int r; + + if (!acpidev) + return -ENODEV; + + /* fetch the entire edid from BIOS */ + r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid); + if (r < 0) { + drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r); + return r; + } + if (len > r || start > r || start + len > r) { + r = -EINVAL; + goto cleanup; + } + + memcpy(buf, edid + start, len); + r = 0; + +cleanup: + kfree(edid); + + return r; +} + +static const struct drm_edid * +dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + + if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID) + return NULL; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + break; + default: + return NULL; + } + + if (connector->force == DRM_FORCE_OFF) + return NULL; + + return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); +} + enum dc_edid_status dm_helpers_read_local_edid( struct dc_context *ctx, struct dc_link *link, @@ -901,7 +965,8 @@ enum dc_edid_status dm_helpers_read_local_edid( struct i2c_adapter *ddc; int retry = 3; enum dc_edid_status edid_status; - struct edid *edid; + const struct drm_edid *drm_edid; + const struct edid *edid; if (link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; @@ -912,26 +977,31 @@ enum dc_edid_status dm_helpers_read_local_edid( * do check sum and retry to make sure read correct edid. 
*/ do { - - edid = drm_get_edid(&aconnector->base, ddc); + drm_edid = dm_helpers_read_acpi_edid(aconnector); + if (drm_edid) + drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name); + else + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); /* DP Compliance Test 4.2.2.6 */ if (link->aux_mode && connector->edid_corrupt) drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); - if (!edid && connector->edid_corrupt) { + if (!drm_edid && connector->edid_corrupt) { connector->edid_corrupt = false; return EDID_BAD_CHECKSUM; } - if (!edid) + if (!drm_edid) return EDID_NO_RESPONSE; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); /* We don't need the original edid anymore */ - kfree(edid); + drm_edid_free(drm_edid); edid_status = dm_helpers_parse_edid_caps( link, @@ -1054,17 +1124,8 @@ void dm_helpers_free_gpu_mem( void *pvMem) { struct amdgpu_device *adev = ctx->driver_context; - struct dal_allocation *da; - - /* walk the da list in DM */ - list_for_each_entry(da, &adev->dm.da_list, list) { - if (pvMem == da->cpu_ptr) { - amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); - list_del(&da->list); - kfree(da); - break; - } - } + + dm_free_gpu_mem(adev, type, pvMem); } bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) @@ -1121,6 +1182,8 @@ bool dm_helpers_dp_handle_test_pattern_request( struct pipe_ctx *pipe_ctx = NULL; struct amdgpu_dm_connector *aconnector = link->priv; struct drm_device *dev = aconnector->base.dev; + struct dc_state *dc_state = ctx->dc->current_state; + struct clk_mgr *clk_mgr = ctx->dc->clk_mgr; int i; for (i = 0; i < MAX_PIPES; i++) { @@ -1221,6 +1284,16 @@ bool dm_helpers_dp_handle_test_pattern_request( pipe_ctx->stream->test_pattern.type = test_pattern; pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + /* Temp W/A for compliance test failure */ + dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false; + dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ? + clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz; + dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz; + ctx->dc->clk_mgr->funcs->update_clocks( + ctx->dc->clk_mgr, + dc_state, + false); + dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, @@ -1301,4 +1374,4 @@ bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream { // TODO return false; -}
\ No newline at end of file +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index 5c9303241aeb..6a7ecc1e4602 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -33,7 +33,7 @@ struct dm_irq_params { struct mod_vrr_params vrr_params; struct dc_stream_state *stream; int active_planes; - bool allow_psr_entry; + bool allow_sr_entry; struct mod_freesync_config freesync_config; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index a08e8a0b696c..6e4359490613 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -129,7 +129,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) dc_sink_release(aconnector->dc_sink); } - kfree(aconnector->edid); + drm_edid_free(aconnector->drm_edid); drm_connector_cleanup(connector); drm_dp_mst_put_port_malloc(aconnector->mst_output_port); @@ -182,7 +182,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) dc_sink_release(dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; } @@ -302,16 +302,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector) return drm_add_edid_modes(connector, NULL); - if (!aconnector->edid) { - struct edid *edid; + if (!aconnector->drm_edid) { + const struct drm_edid *drm_edid; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); + drm_edid = drm_dp_mst_edid_read(connector, + &aconnector->mst_root->mst_mgr, + aconnector->mst_output_port); - if (!edid) { + if (!drm_edid) { amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, false); - drm_connector_update_edid_property( + drm_edid_connector_update( &aconnector->base, NULL); @@ -345,7 +347,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, true); } @@ -360,10 +362,13 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + const struct edid *edid; + + edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw() dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)aconnector->edid, - (aconnector->edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)edid, + (edid->extensions + 1) * EDID_LENGTH, &init_params); if (!dc_sink) { @@ -405,7 +410,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); + connector, aconnector->drm_edid); #if defined(CONFIG_DRM_AMD_DC_FP) if (!validate_dsc_caps_on_connector(aconnector)) @@ -419,10 +424,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } } - drm_connector_update_edid_property( - &aconnector->base, aconnector->edid); + drm_edid_connector_update(&aconnector->base, aconnector->drm_edid); - ret = drm_add_edid_modes(connector, aconnector->edid); + ret = drm_edid_connector_add_modes(connector); return ret; } @@ -500,7 +504,7 
@@ dm_dp_mst_detect(struct drm_connector *connector, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; @@ -1120,6 +1124,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int i, k, ret; bool debugfs_overwrite = false; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + struct drm_connector_state *new_conn_state; memset(params, 0, sizeof(params)); @@ -1127,7 +1132,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return PTR_ERR(mst_state); /* Set up params */ - DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count); + DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count); for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -1143,6 +1148,14 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (!aconnector->mst_output_port) continue; + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n", + __func__, __LINE__, stream); + continue; + } + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; @@ -1175,6 +1188,8 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count); + if (count == 0) { ASSERT(0); return 0; @@ -1302,7 +1317,7 @@ static bool is_dsc_need_re_compute( continue; aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; - if (!aconnector || !aconnector->dsc_aux) + if (!aconnector) continue; stream_on_link[new_stream_on_link_num] = aconnector; |
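One easy-to-miss fix above is the tightened loop bound in parse_amd_vsdb(): the scan for an AMD vendor-specific data block now stops at EDID_LENGTH - sizeof(struct amd_vsdb_block), so the last candidate record can no longer straddle the end of the 128-byte extension block. A minimal standalone illustration of that bounds pattern follows; the record layout here is made up and is not the real struct amd_vsdb_block.

/*
 * Standalone illustration (not kernel code) of the bounds fix in
 * parse_amd_vsdb(): when scanning fixed-size records inside a
 * 128-byte EDID extension block, the loop limit must leave room for
 * a whole record, otherwise the final iteration reads past the block.
 */
#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH 128

struct vsdb_block {                     /* made-up layout, not amd_vsdb_block */
        uint8_t ieee_id[3];
        uint8_t payload[5];
};

static int scan_blocks(const uint8_t *ext, uint32_t wanted_id)
{
        size_t j;

        /* Old bound: j < EDID_LENGTH, so a record starting near offset
         * 127 would extend beyond the buffer.
         * New bound: j < EDID_LENGTH - sizeof(struct vsdb_block). */
        for (j = 0; j < EDID_LENGTH - sizeof(struct vsdb_block); j++) {
                const struct vsdb_block *b =
                        (const struct vsdb_block *)&ext[j];
                uint32_t id = (b->ieee_id[2] << 16) |
                              (b->ieee_id[1] << 8) |
                               b->ieee_id[0];

                if (id == wanted_id)
                        return (int)j;  /* offset of the matching record */
        }
        return -1;
}

int main(void)
{
        uint8_t ext[EDID_LENGTH] = { 0 };

        /* Place a record whose 24-bit IEEE ID is 0x00001A at offset 40. */
        ext[40] = 0x1A;
        printf("found at offset %d\n", scan_blocks(ext, 0x00001A));
        return 0;
}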