Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core/dc.c')
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 405
1 file changed, 130 insertions(+), 275 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1c218c526650..52564b93f7eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -53,7 +53,6 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "dc_link.h"
#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -74,6 +73,8 @@
#include "dc_trace.h"
+#include "hw_sequencer_private.h"
+
#include "dce/dmub_outbox.h"
#define CTX \
@@ -147,7 +148,7 @@ static void destroy_links(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
if (NULL != dc->links[i])
- link_destroy(&dc->links[i]);
+ dc->link_srv->destroy_link(&dc->links[i]);
}
}
@@ -216,7 +217,7 @@ static bool create_links(
link_init_params.connector_index = i;
link_init_params.link_index = dc->link_count;
link_init_params.dc = dc;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
@@ -238,7 +239,7 @@ static bool create_links(
link_init_params.dc = dc;
link_init_params.is_dpia_link = true;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
link->dc = dc;
@@ -399,6 +400,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
+ /*
+ * Don't adjust DRR while there are bandwidth optimizations pending to
+ * avoid conflicting with firmware updates.
+ */
+ if (dc->ctx->dce_version > DCE_VERSION_MAX)
+ if (dc->optimized_required || dc->wm_optimized_required)
+ return false;
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -814,6 +823,9 @@ static void dc_destruct(struct dc *dc)
dc_destroy_resource_pool(dc);
+ if (dc->link_srv)
+ link_destroy_link_service(&dc->link_srv);
+
if (dc->ctx->gpio_service)
dal_gpio_service_destroy(&dc->ctx->gpio_service);
@@ -876,6 +888,10 @@ static bool dc_construct_ctx(struct dc *dc,
dc->ctx = dc_ctx;
+ dc->link_srv = link_create_link_service();
+ if (!dc->link_srv)
+ return false;
+
return true;
}
@@ -984,7 +1000,7 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
if (dc->res_pool->funcs->update_bw_bounding_box) {
@@ -1057,6 +1073,53 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
}
}
+static void phantom_pipe_blank(
+ struct dc *dc,
+ struct timing_generator *tg,
+ int width,
+ int height)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+ uint32_t i;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ otg_active_width = width;
+ otg_active_height = height;
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+ opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (tg->funcs->is_tg_enabled(tg))
+ hws->funcs.wait_for_blank_complete(opp);
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -1115,8 +1178,14 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* again for different use.
*/
if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (tg->funcs->enable_crtc)
+ if (tg->funcs->enable_crtc) {
+ int main_pipe_width, main_pipe_height;
+
+ main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
+ }
}
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1199,7 +1268,7 @@ static void disable_vbios_mode_if_required(
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
if (pix_clk_100hz != requested_pix_clk_100hz) {
- link_set_dpms_off(pipe);
+ dc->link_srv->set_dpms_off(pipe);
pipe->stream->dpms_off = false;
}
}
@@ -1298,7 +1367,7 @@ static void detect_edp_presence(struct dc *dc)
int i;
int edp_num;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (!edp_num)
return;
@@ -1324,16 +1393,12 @@ void dc_hardware_init(struct dc *dc)
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
dc->ctx->cp_psp = init_params->cp_psp;
-#endif
}
void dc_deinit_callbacks(struct dc *dc)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
-#endif
}
void dc_destroy(struct dc **dc)
@@ -1658,7 +1723,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- if (link_is_edp_ilr_optimization_required(link, crtc_timing)) {
+ if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
}
@@ -2001,53 +2066,6 @@ context_alloc_fail:
return res;
}
-/* TODO: When the transition to the new commit sequence is done, remove this
- * function in favor of dc_commit_streams. */
-bool dc_commit_state(struct dc *dc, struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
- int i;
-
- /* TODO: Since change commit sequence can have a huge impact,
- * we decided to only enable it for DCN3x. However, as soon as
- * we get more confident about this change we'll need to enable
- * the new sequence for all ASICs. */
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- result = dc_commit_streams(dc, context->streams, context->stream_count);
- return result == DC_OK;
- }
-
- if (!streams_changed(dc, context->streams, context->stream_count)) {
- return DC_OK;
- }
-
- DC_LOG_DC("%s: %d streams\n",
- __func__, context->stream_count);
-
- for (i = 0; i < context->stream_count; i++) {
- struct dc_stream_state *stream = context->streams[i];
-
- dc_stream_log(dc, stream);
- }
-
- /*
- * Previous validation was perfomred with fast_validation = true and
- * the full DML state required for hardware programming was skipped.
- *
- * Re-validate here to calculate these parameters / watermarks.
- */
- result = dc_validate_global_state(dc, context, false);
- if (result != DC_OK) {
- DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
- dc_status_to_str(result), result);
- return result;
- }
-
- result = dc_commit_state_no_check(dc, context);
-
- return (result == DC_OK);
-}
-
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
@@ -2134,27 +2152,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
- if (dc->ctx->dce_version >= DCE_VERSION_MAX)
- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- else
+ /*
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+
+ if (dc->ctx->dce_version < DCE_VERSION_MAX)
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ else {
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- if (is_flip_pending_in_pipes(dc, context))
- return;
+ if (is_flip_pending_in_pipes(dc, context))
+ return;
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == NULL ||
- context->res_ctx.pipe_ctx[i].plane_state == NULL) {
- context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
- }
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+ context->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ }
- process_deferred_updates(dc);
+ process_deferred_updates(dc);
- dc->hwss.optimize_bandwidth(dc, context);
+ dc->hwss.optimize_bandwidth(dc, context);
- if (dc->debug.enable_double_buffered_dsc_pg_support)
- dc->hwss.update_dsc_pg(dc, context, true);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
dc->optimized_required = false;
dc->wm_optimized_required = false;
@@ -3173,7 +3197,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
}
if (stream_update->hdr_static_metadata &&
@@ -3209,13 +3235,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dsc_config)
- link_update_dsc_config(pipe_ctx);
+ dc->link_srv->update_dsc_config(pipe_ctx);
if (stream_update->mst_bw_update) {
if (stream_update->mst_bw_update->is_increase)
- link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ dc->link_srv->increase_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
else
- link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ dc->link_srv->reduce_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
}
if (stream_update->pending_test_pattern) {
@@ -3229,7 +3257,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
/* for dpms, keep acquired resources*/
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -3239,7 +3267,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
- link_set_dpms_on(dc->current_state, pipe_ctx);
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
}
@@ -3466,22 +3494,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
- if (update_type != UPDATE_TYPE_FAST) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
- subvp_prev_use) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
- }
- }
- }
-
// Stream updates
if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -3510,14 +3522,9 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- }
-
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
+ NULL, subvp_prev_use);
return;
}
@@ -3702,6 +3709,9 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
+
if (subvp_prev_use && !subvp_curr_use) {
/* If disabling subvp, disable phantom streams after front end
* programming has completed (we turn on phantom OTG in order
@@ -3711,15 +3721,8 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (update_type != UPDATE_TYPE_FAST)
- dc->hwss.post_unlock_program_front_end(dc, context);
- if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
-
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
-
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
@@ -4083,24 +4086,30 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_context *dc_ctx = dc->ctx;
int i, j;
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
dc_update_planes_and_stream(dc, srf_updates,
surface_count, stream,
stream_update);
return;
}
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
-
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -4123,12 +4132,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
- } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+ } else if (update_type == UPDATE_TYPE_FAST) {
/*
* Previous frame finished and HW is ready for optimization.
- *
- * Only relevant for DCN behavior where we can guarantee the optimization
- * is safe to apply - retain the legacy behavior for DCE.
*/
dc_post_update_surfaces_to_stream(dc);
}
@@ -4305,7 +4311,7 @@ void dc_resume(struct dc *dc)
uint32_t i;
for (i = 0; i < dc->link_count; i++)
- link_resume(dc->links[i]);
+ dc->link_srv->resume(dc->links[i]);
}
bool dc_is_dmcu_initialized(struct dc *dc)
@@ -4317,157 +4323,6 @@ bool dc_is_dmcu_initialized(struct dc *dc)
return false;
}
-bool dc_is_oem_i2c_device_present(
- struct dc *dc,
- size_t slave_address)
-{
- if (dc->res_pool->oem_device)
- return dce_i2c_oem_device_present(
- dc->res_pool,
- dc->res_pool->oem_device,
- slave_address);
-
- return false;
-}
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd)
-{
-
- struct dc_link *link = dc->links[link_index];
- struct ddc_service *ddc = link->ddc;
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-}
-
-bool dc_submit_i2c_oem(
- struct dc *dc,
- struct i2c_command *cmd)
-{
- struct ddc_service *ddc = dc->res_pool->oem_device;
- if (ddc)
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-
- return false;
-}
-
-static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
-{
- if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- dc_sink_retain(sink);
-
- dc_link->remote_sinks[dc_link->sink_count] = sink;
- dc_link->sink_count++;
-
- return true;
-}
-
-/*
- * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
- *
- * EDID length is in bytes
- */
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data)
-{
- struct dc_sink *dc_sink;
- enum dc_edid_status edid_status;
-
- if (len > DC_MAX_EDID_BUFFER_SIZE) {
- dm_error("Max EDID buffer size breached!\n");
- return NULL;
- }
-
- if (!init_data) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- if (!init_data->link) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- dc_sink = dc_sink_create(init_data);
-
- if (!dc_sink)
- return NULL;
-
- memmove(dc_sink->dc_edid.raw_edid, edid, len);
- dc_sink->dc_edid.length = len;
-
- if (!link_add_remote_sink_helper(
- link,
- dc_sink))
- goto fail_add_sink;
-
- edid_status = dm_helpers_parse_edid_caps(
- link,
- &dc_sink->dc_edid,
- &dc_sink->edid_caps);
-
- /*
- * Treat device as no EDID device if EDID
- * parsing fails
- */
- if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
- dc_sink->dc_edid.length = 0;
- dm_error("Bad EDID, status%d!\n", edid_status);
- }
-
- return dc_sink;
-
-fail_add_sink:
- dc_sink_release(dc_sink);
- return NULL;
-}
-
-/*
- * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
- *
- * Note that this just removes the struct dc_sink - it doesn't
- * program hardware or alter other members of dc_link
- */
-void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
-{
- int i;
-
- if (!link->sink_count) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- for (i = 0; i < link->sink_count; i++) {
- if (link->remote_sinks[i] == sink) {
- dc_sink_release(sink);
- link->remote_sinks[i] = NULL;
-
- /* shrink array to remove empty place */
- while (i < link->sink_count - 1) {
- link->remote_sinks[i] = link->remote_sinks[i+1];
- i++;
- }
- link->remote_sinks[i] = NULL;
- link->sink_count--;
- return;
- }
- }
-}
-
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -4990,7 +4845,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
return;
}
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
/* Determine panel inst */
for (i = 0; i < edp_num; i++) {