Message-Id: <20240422184553.3573009-9-sean.anderson@linux.dev>
Date: Mon, 22 Apr 2024 14:45:48 -0400
From: Sean Anderson <sean.anderson@...ux.dev>
To: Laurent Pinchart <laurent.pinchart@...asonboard.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
dri-devel@...ts.freedesktop.org
Cc: David Airlie <airlied@...il.com>,
Daniel Vetter <daniel@...ll.ch>,
linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
Michal Simek <michal.simek@....com>,
Sean Anderson <sean.anderson@...ux.dev>
Subject: [PATCH v3 08/13] drm: zynqmp_dp: Don't retrain the link in our IRQ
Retraining the link can take a while and may involve waiting for DPCD
reads/writes to complete, which is not something we can do in a
non-threaded IRQ handler. In preparation for unthreading the IRQ
handler, move the link-status check and retraining into their own work
function.
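
As a rough sketch of the resulting pattern (illustrative names only,
not this driver's symbols): the handler only queues work, and the slow
path runs in process context where it may sleep:

  #include <linux/interrupt.h>
  #include <linux/workqueue.h>
  #include <linux/mutex.h>

  struct foo_dev {
  	int irq;
  	struct work_struct slow_work;
  	struct mutex lock;	/* serializes link state */
  };

  /* Runs in process context, so sleeping (AUX transfers, locks) is fine. */
  static void foo_slow_work_func(struct work_struct *work)
  {
  	struct foo_dev *foo = container_of(work, struct foo_dev, slow_work);

  	mutex_lock(&foo->lock);
  	/* ... do DPCD reads/writes, retrain the link ... */
  	mutex_unlock(&foo->lock);
  }

  /* Hard IRQ context: no sleeping, so only queue the work. */
  static irqreturn_t foo_irq_handler(int irq, void *data)
  {
  	struct foo_dev *foo = data;

  	schedule_work(&foo->slow_work);
  	return IRQ_HANDLED;
  }

(INIT_WORK() is done at probe time and cancel_work_sync() at remove
time, mirroring the hunks below.)
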
Signed-off-by: Sean Anderson <sean.anderson@...ux.dev>
---

(no changes since v2)

Changes in v2:
- Document hpd_irq_work
- Split this off from the locking changes

 drivers/gpu/drm/xlnx/zynqmp_dp.c | 45 ++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 16 deletions(-)

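One ordering detail worth noting in the zynqmp_dp_remove() hunk below:
the interrupt is masked and disable_irq() is called before
cancel_work_sync(), so the handler cannot run again and re-queue
hpd_irq_work after it has been canceled. As a generic sketch of that
teardown order (placeholder names again, not this driver's):

  disable_irq(foo->irq);              /* handler can no longer run */
  cancel_work_sync(&foo->slow_work);  /* flush any in-flight work */
  /* now safe to tear down resources the work function uses */
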
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 5eb926f050de..8ad8db01f136 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -288,6 +288,7 @@ struct zynqmp_dp_config {
  * @phy: PHY handles for DP lanes
  * @num_lanes: number of enabled phy lanes
  * @hpd_work: hot plug detection worker
+ * @hpd_irq_work: hot plug detection IRQ worker
  * @status: connection status
  * @enabled: flag to indicate if the device is enabled
  * @dpcd: DP configuration data from currently connected sink device
@@ -303,6 +304,7 @@ struct zynqmp_dp {
 	struct drm_dp_aux aux;
 	struct drm_bridge bridge;
 	struct work_struct hpd_work;
+	struct work_struct hpd_irq_work;
 	struct mutex lock;
 
 	struct drm_bridge *next_bridge;
@@ -1626,6 +1628,29 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work)
 	drm_bridge_hpd_notify(&dp->bridge, status);
 }
 
+static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
+{
+	struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp,
+					    hpd_irq_work);
+	u8 status[DP_LINK_STATUS_SIZE + 2];
+	int err;
+
+	mutex_lock(&dp->lock);
+	err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+			       DP_LINK_STATUS_SIZE + 2);
+	if (err < 0) {
+		dev_dbg_ratelimited(dp->dev,
+				    "could not read sink status: %d\n", err);
+	} else {
+		if (status[4] & DP_LINK_STATUS_UPDATED ||
+		    !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+		    !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
+			zynqmp_dp_train_loop(dp);
+		}
+	}
+	mutex_unlock(&dp->lock);
+}
+
 static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
 {
 	struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
@@ -1650,23 +1675,9 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
 
 	if (status & ZYNQMP_DP_INT_HPD_EVENT)
 		schedule_work(&dp->hpd_work);
 
-	if (status & ZYNQMP_DP_INT_HPD_IRQ) {
-		int ret;
-		u8 status[DP_LINK_STATUS_SIZE + 2];
+	if (status & ZYNQMP_DP_INT_HPD_IRQ)
+		schedule_work(&dp->hpd_irq_work);
 
-		ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
-				       DP_LINK_STATUS_SIZE + 2);
-		if (ret < 0)
-			goto handled;
-
-		if (status[4] & DP_LINK_STATUS_UPDATED ||
-		    !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
-		    !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
-			zynqmp_dp_train_loop(dp);
-		}
-	}
-
-handled:
 	return IRQ_HANDLED;
 }
@@ -1692,6 +1703,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
 
 	mutex_init(&dp->lock);
 	INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
+	INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func);
 
 	/* Acquire all resources (IOMEM, IRQ and PHYs). */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
@@ -1791,6 +1803,7 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
 	zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL);
 	disable_irq(dp->irq);
 
+	cancel_work_sync(&dp->hpd_irq_work);
 	cancel_work_sync(&dp->hpd_work);
 
 	zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0);
--
2.35.1.1320.gc452695387.dirty