author	Wayne Lin <wayne.lin@amd.com>	2022-03-09 17:05:05 +0800
committer	Alex Deucher <alexander.deucher@amd.com>	2023-07-18 14:32:37 -0400
commit	4f6d9e38c4d244ad106eb9ebd8c0e1215e866f35 (patch)
tree	c54babc5adbc069651388a7b766c2f87137be625 /drivers/gpu
parent	87279fdf5ee0ad1360765ef70389d1c4d0f81bb6 (diff)
drm/amd/display: Add polling method to handle MST reply packet
[Why]
A specific TBT4 dock doesn't send out a short HPD pulse to notify the
source that the IRQ event DOWN_REP_MSG_RDY is set. This violates the
spec and prevents the source from sending out streams to MST sinks.

[How]
To cover this misbehavior, add an additional polling method to detect
that DOWN_REP_MSG_RDY is set. The HPD-driven handling method is still
kept; the new handler is simply hooked up to drm mgr->cbs->poll_hpd_irq().

Cc: Mario Limonciello <mario.limonciello@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
Reviewed-by: Jerry Zuo <jerry.zuo@amd.com>
Acked-by: Alan Liu <haoping.liu@amd.com>
Signed-off-by: Wayne Lin <wayne.lin@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
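As a rough sketch of the mechanism described above (illustrative only, not part of the patch): the driver publishes its polling handler through the topology manager's callback table, and the MST core can then invoke it while a sideband reply is outstanding, so a dock that never raises the short HPD pulse no longer stalls DOWN_REP processing. The core-side loop and the reply_pending() helper below are assumptions for illustration; the callback hookup mirrors the hunk in amdgpu_dm_mst_types.c further down.

#include <linux/delay.h>
#include <drm/display/drm_dp_mst_helper.h>

/* Driver side (mirrors this patch): route poll_hpd_irq to the same
 * DOWN_REP_MSG_RDY handler used by the HPD IRQ path. */
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};

/* Hypothetical core-side caller (assumed shape, for illustration only):
 * while a sideband transaction awaits its reply, poll the driver instead
 * of relying solely on a short-HPD interrupt the dock may never send. */
static void example_wait_for_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	while (reply_pending(mgr)) {	/* reply_pending() is hypothetical */
		if (mgr->cbs->poll_hpd_irq)
			mgr->cbs->poll_hpd_irq(mgr); /* reads ESI, handles DOWN_REP_MSG_RDY */
		msleep(50);		/* poll interval: assumed value */
	}
}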
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c            | 117
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h            |   7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  | 110
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h  |  11
4 files changed, 159 insertions, 86 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 948d61845efd..0fa739fd6a9c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1347,6 +1347,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
if (amdgpu_in_reset(adev))
goto skip;
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
@@ -3232,87 +3241,6 @@ static void handle_hpd_irq(void *param)
}
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
- u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- u8 dret;
- bool new_irq_handled = false;
- int dpcd_addr;
- int dpcd_bytes_to_read;
-
- const int max_process_count = 30;
- int process_count = 0;
-
- const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
- if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
- dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
- /* DPCD 0x200 - 0x201 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT;
- } else {
- dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
- /* DPCD 0x2002 - 0x2005 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT_ESI;
- }
-
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- while (dret == dpcd_bytes_to_read &&
- process_count < max_process_count) {
- u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
- u8 retry;
-
- dret = 0;
-
- process_count++;
-
- DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
- /* handle HPD short pulse irq */
- if (aconnector->mst_mgr.mst_state)
- drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
- esi,
- ack,
- &new_irq_handled);
-
- if (new_irq_handled) {
- /* ACK at DPCD to notify down stream */
- for (retry = 0; retry < 3; retry++) {
- ssize_t wret;
-
- wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
- dpcd_addr + 1,
- ack[1]);
- if (wret == 1)
- break;
- }
-
- if (retry == 3) {
- DRM_ERROR("Failed to ack MST event.\n");
- return;
- }
-
- drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
- /* check if there is new irq to be handled */
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- new_irq_handled = false;
- } else {
- break;
- }
- }
-
- if (process_count == max_process_count)
- DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
@@ -3374,7 +3302,23 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- dm_handle_mst_sideband_msg(aconnector);
+ bool skip = false;
+
+ /*
+ * DOWN_REP_MSG_RDY is also handled by polling method
+ * mgr->cbs->poll_hpd_irq()
+ */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
goto out;
}
@@ -3483,11 +3427,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
- aconnector;
}
+
+ if (adev->dm.hpd_rx_offload_wq)
+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ aconnector;
}
}
@@ -7296,6 +7240,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
/*
* configure support HPD hot plug connector->polled default value is 0
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 4561f55afa99..9fb5bb3a75a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -195,6 +195,11 @@ struct hpd_rx_irq_offload_work_queue {
*/
bool is_handling_link_loss;
/**
+ * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+ * ready event when we're already handling mst message ready event
+ */
+ bool is_handling_mst_msg_rdy_event;
+ /**
* @aconnector: The aconnector that this work queue is attached to
*/
struct amdgpu_dm_connector *aconnector;
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ struct mutex handle_mst_msg_ready;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 46d0a8f57e55..888e80f498e9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return connector;
}
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type)
+{
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ uint8_t dpcd_bytes_to_read;
+ const uint8_t max_process_count = 30;
+ uint8_t process_count = 0;
+ u8 retry;
+ struct amdgpu_dm_connector *aconnector =
+ container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+
+ const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ mutex_lock(&aconnector->handle_mst_msg_ready);
+
+ while (process_count < max_process_count) {
+ u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+ process_count++;
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ if (dret != dpcd_bytes_to_read) {
+ DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+ switch (msg_rdy_type) {
+ case DOWN_REP_MSG_RDY_EVENT:
+ /* Only handle DOWN_REP_MSG_RDY case*/
+ esi[1] &= DP_DOWN_REP_MSG_RDY;
+ break;
+ case UP_REQ_MSG_RDY_EVENT:
+ /* Only handle UP_REQ_MSG_RDY case*/
+ esi[1] &= DP_UP_REQ_MSG_RDY;
+ break;
+ default:
+ /* Handle both cases*/
+ esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+ break;
+ }
+
+ if (!esi[1])
+ break;
+
+ /* handle MST irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+ esi,
+ ack,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify down stream */
+ for (retry = 0; retry < 3; retry++) {
+ ssize_t wret;
+
+ wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ ack[1]);
+ if (wret == 1)
+ break;
+ }
+
+ if (retry == 3) {
+ DRM_ERROR("Failed to ack MST event.\n");
+ return;
+ }
+
+ drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+ dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
+ .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 1e4ede1e57ab..37c820ab0fdb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -49,6 +49,13 @@
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+enum mst_msg_ready_type {
+ NONE_MSG_RDY_EVENT = 0,
+ DOWN_REP_MSG_RDY_EVENT = 1,
+ UP_REQ_MSG_RDY_EVENT = 2,
+ DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type);
+
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;