Message-Id: <20251024191707.2310589-4-sean.anderson@linux.dev>
Date: Fri, 24 Oct 2025 15:17:07 -0400
From: Sean Anderson <sean.anderson@...ux.dev>
To: Laurent Pinchart <laurent.pinchart@...asonboard.com>,
Tomi Valkeinen <tomi.valkeinen@...asonboard.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
dri-devel@...ts.freedesktop.org
Cc: linux-kernel@...r.kernel.org,
David Airlie <airlied@...il.com>,
Hyun Kwon <hyun.kwon@...inx.com>,
Simona Vetter <simona@...ll.ch>,
Michal Simek <michal.simek@....com>,
linux-arm-kernel@...ts.infradead.org,
Sean Anderson <sean.anderson@...ux.dev>
Subject: [PATCH 3/3] drm: zynqmp_dp: Retrain link after HPD if necessary
Section 5.1.4 of the v1.2 DisplayPort standard says:
> The Source device shall respond to Hot Plug event/Hot Re-plug event by
> first reading DPCD Link/Sink Device Status registers at DPCD 00200h
> through 00205h.... If the link is unstable or lost, the Source device
> then reads the DPCD Receiver Capabilities registers at DPCD 00000h
> through 0000Fh to determine the appropriate information needed to
> train the link. The Source device shall then initiate link training.
However, zynqmp_dp_hpd_work_func does not check the link status. This
may prevent the sink from detecting the source if, for example, the user
disconnects the cable and then reconnects it. I encountered this problem
when testing a mini DP connector (although I had no problem when using a
full-size connector with the existing driver).
Follow the spec by checking the link status after an HPD event and
retraining if necessary.
Fixes: d76271d22694 ("drm: xlnx: DRM/KMS driver for Xilinx ZynqMP DisplayPort Subsystem")
Signed-off-by: Sean Anderson <sean.anderson@...ux.dev>
---
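For reference, here is a minimal sketch of the check the spec asks for,
written against the generic DRM DP helpers. It is illustrative only and
not part of this patch: the function name example_link_needs_retraining
is made up, and it uses drm_dp_dpcd_read_link_status() rather than the
single DP_SINK_COUNT read done below, so it does not also look at
DP_LINK_STATUS_UPDATED:

#include <drm/display/drm_dp_helper.h>

/* Sketch only: true if DP v1.2 section 5.1.4 would require retraining. */
static bool example_link_needs_retraining(struct drm_dp_aux *aux, int lane_cnt)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Read the lane/alignment status registers, DPCD 00202h-00207h */
	if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
		return false;	/* AUX read failed; nothing to act on */

	/* Retrain if clock recovery or channel equalization was lost */
	return !drm_dp_clock_recovery_ok(link_status, lane_cnt) ||
	       !drm_dp_channel_eq_ok(link_status, lane_cnt);
}
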
drivers/gpu/drm/xlnx/zynqmp_dp.c | 37 ++++++++++++++++++++------------
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index caf2e0ce3644..a90bc0e406f6 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1677,6 +1677,24 @@ static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge,
return 0;
}
+static bool zynqmp_hpd_needs_retrain(struct zynqmp_dp *dp)
+{
+ u8 status[DP_LINK_STATUS_SIZE + 2];
+ int err;
+
+ err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (err < 0) {
+ dev_dbg_ratelimited(dp->dev,
+ "could not read sink status: %d\n", err);
+ return false;
+ }
+
+ return status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt);
+}
+
static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp)
{
struct zynqmp_dp_link_config *link_config = &dp->link_config;
@@ -1698,6 +1716,9 @@ static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp)
if (state & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD) {
WRITE_ONCE(dp->status, connector_status_connected);
+ if (!zynqmp_hpd_needs_retrain(dp))
+ return connector_status_connected;
+
ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
sizeof(dp->dpcd));
if (ret < 0) {
@@ -2335,25 +2356,13 @@ static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
{
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp,
hpd_irq_work);
- u8 status[DP_LINK_STATUS_SIZE + 2];
- int err;
guard(mutex)(&dp->lock);
if (dp->ignore_hpd)
return;
- err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
- DP_LINK_STATUS_SIZE + 2);
- if (err < 0) {
- dev_dbg_ratelimited(dp->dev,
- "could not read sink status: %d\n", err);
- } else {
- if (status[4] & DP_LINK_STATUS_UPDATED ||
- !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
- !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
- zynqmp_dp_train_loop(dp);
- }
- }
+ if (zynqmp_hpd_needs_retrain(dp))
+ zynqmp_dp_train_loop(dp);
}
static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
--
2.35.1.1320.gc452695387.dirty