Message-Id: <20240503192922.2172314-10-sean.anderson@linux.dev>
Date: Fri, 3 May 2024 15:29:21 -0400
From: Sean Anderson <sean.anderson@...ux.dev>
To: Laurent Pinchart <laurent.pinchart@...asonboard.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
dri-devel@...ts.freedesktop.org
Cc: linux-arm-kernel@...ts.infradead.org,
David Airlie <airlied@...il.com>,
linux-kernel@...r.kernel.org,
Daniel Vetter <daniel@...ll.ch>,
Tomi Valkeinen <tomi.valkeinen@...asonboard.com>,
Michal Simek <michal.simek@....com>,
Sean Anderson <sean.anderson@...ux.dev>
Subject: [PATCH v5 09/10] drm: zynqmp_dp: Take dp->lock in zynqmp_dp_hpd_work_func

Add a non-locking version of zynqmp_dp_bridge_detect and use it in
zynqmp_dp_hpd_work_func so we can take the lock explicitly. This will
make it easier to check for hpd_ignore when we add debugfs support.

Signed-off-by: Sean Anderson <sean.anderson@...ux.dev>
---
(no changes since v3)
Changes in v3:
- New
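
For readers who want the locking pattern at a glance, below is a minimal,
self-contained sketch of the locked-wrapper/unlocked-helper split this patch
introduces. The struct is reduced to just the fields needed for the
illustration, and the names carry a _sketch suffix so they are not mistaken
for the driver's real zynqmp_dp_bridge_detect()/__zynqmp_dp_bridge_detect():

#include <linux/lockdep.h>
#include <linux/mutex.h>

#include <drm/drm_connector.h>

/* Reduced stand-in for the real struct zynqmp_dp in zynqmp_dp.c. */
struct zynqmp_dp_sketch {
	struct mutex lock;		/* protects status and HPD handling */
	enum drm_connector_status status;
};

/* Unlocked helper: callers must already hold dp->lock. */
static enum drm_connector_status
__zynqmp_dp_detect_sketch(struct zynqmp_dp_sketch *dp)
{
	lockdep_assert_held(&dp->lock);

	/* ... read the link state and update dp->status under the lock ... */
	return dp->status;
}

/* Locking wrapper for callers that do not hold dp->lock themselves. */
static enum drm_connector_status
zynqmp_dp_detect_sketch(struct zynqmp_dp_sketch *dp)
{
	enum drm_connector_status status;

	mutex_lock(&dp->lock);
	status = __zynqmp_dp_detect_sketch(dp);
	mutex_unlock(&dp->lock);

	return status;
}

The point of the split is that zynqmp_dp_hpd_work_func() (and, later, the
debugfs code that wants to check hpd_ignore) can take dp->lock once, perform
any extra checks it needs, and then call the unlocked helper, instead of
duplicating the detect logic or taking the mutex a second time.
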
drivers/gpu/drm/xlnx/zynqmp_dp.c | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 91767ddbe1ce..6f3792dcac28 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1567,14 +1567,13 @@ static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp)
{
- struct zynqmp_dp *dp = bridge_to_dp(bridge);
struct zynqmp_dp_link_config *link_config = &dp->link_config;
u32 state, i;
int ret;
- mutex_lock(&dp->lock);
+ lockdep_assert_held(&dp->lock);
/*
* This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to
@@ -1603,16 +1602,26 @@ static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *brid
dp->num_lanes);
dp->status = connector_status_connected;
- mutex_unlock(&dp->lock);
return connector_status_connected;
}
disconnected:
dp->status = connector_status_disconnected;
- mutex_unlock(&dp->lock);
return connector_status_disconnected;
}
+static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ enum drm_connector_status ret;
+
+ mutex_lock(&dp->lock);
+ ret = __zynqmp_dp_bridge_detect(dp);
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -1696,7 +1705,10 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work)
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work);
enum drm_connector_status status;
- status = zynqmp_dp_bridge_detect(&dp->bridge);
+ mutex_lock(&dp->lock);
+ status = __zynqmp_dp_bridge_detect(dp);
+ mutex_unlock(&dp->lock);
+
drm_bridge_hpd_notify(&dp->bridge, status);
}
--
2.35.1.1320.gc452695387.dirty