Message-Id: <20180308232421.14049-4-lyude@redhat.com>
Date:   Thu,  8 Mar 2018 18:24:17 -0500
From:   Lyude Paul <lyude@...hat.com>
To:     intel-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org
Cc:     Manasi Navare <manasi.d.navare@...el.com>,
        Ville Syrjälä <ville.syrjala@...ux.intel.com>,
        Jani Nikula <jani.nikula@...ux.intel.com>,
        Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
        Rodrigo Vivi <rodrigo.vivi@...el.com>,
        David Airlie <airlied@...ux.ie>, linux-kernel@...r.kernel.org
Subject: [PATCH 3/6] drm/i915: Only use one link bw config for MST topologies

When a DP MST link needs retraining, sometimes the hub will detect that
the current link bw config is impossible and will update its RX caps in
the DPCD to reflect the new maximum link rate. Currently, we assume that
the RX caps in the DPCD will never change like this. This means that if
the sink changes its RX caps after we've already set up an MST link and
we attempt to add or remove another sink from the topology, we could put
ourselves into an invalid state where we've tried to configure different
sinks on the same MST topology with different link rates. We could also
run into this situation if a sink reports a higher link rate after
suspend, usually because we trained it with a fallback bw configuration
before suspending.

So: "lock" the bw config by only using the max DP link rate/lane count
on the first modeset for an MST topology. For every modeset following,
we instead use the last configured link bw for this topology. We only
unlock the bw config when we've detected a new MST sink.

Signed-off-by: Lyude Paul <lyude@...hat.com>
Cc: Manasi Navare <manasi.d.navare@...el.com>
Cc: Ville Syrjälä <ville.syrjala@...ux.intel.com>
---
 drivers/gpu/drm/i915/intel_dp.c     | 11 +++++++++--
 drivers/gpu/drm/i915/intel_dp_mst.c | 22 +++++++++++++++-------
 drivers/gpu/drm/i915/intel_drv.h    |  6 ++++++
 3 files changed, 30 insertions(+), 9 deletions(-)
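
For reviewers skimming the diff: a quick sketch of how the lock behaves
across the three touch points (sink detection, compute_config, and
pre_enable). This is a reader's aid only, not driver code - the types
and helpers below are simplified stand-ins, and lane_count/link_rate are
folded into pre_enable() here even though the real driver records them
during link training.

/*
 * Reader's sketch (not part of the patch): the mst_bw_locked flow with
 * simplified stand-in types instead of the real i915 structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_intel_dp {
	bool is_mst;
	bool mst_bw_locked;
	int lane_count;		/* last trained lane count */
	int link_rate;		/* last trained link rate, in kHz */
};

/* 1) Detection: a newly detected MST sink unlocks the bw config. */
static void configure_mst(struct fake_intel_dp *dp, bool sink_is_mst)
{
	bool was_mst = dp->is_mst;

	dp->is_mst = sink_is_mst;
	if (dp->is_mst && !was_mst)
		dp->mst_bw_locked = false;
}

/* 2) compute_config: locked -> reuse last link bw, unlocked -> use max. */
static void compute_config(const struct fake_intel_dp *dp,
			   int max_lanes, int max_rate,
			   int *lanes, int *rate)
{
	if (dp->mst_bw_locked) {
		*lanes = dp->lane_count;
		*rate = dp->link_rate;
	} else {
		*lanes = max_lanes;
		*rate = max_rate;
	}
}

/* 3) pre_enable: the first modeset on the topology locks the choice. */
static void pre_enable(struct fake_intel_dp *dp, int lanes, int rate)
{
	dp->lane_count = lanes;
	dp->link_rate = rate;
	dp->mst_bw_locked = true;
}

int main(void)
{
	struct fake_intel_dp dp = { 0 };
	int lanes, rate;

	configure_mst(&dp, true);	/* new MST sink: unlocked */
	compute_config(&dp, 4, 540000, &lanes, &rate);
	pre_enable(&dp, lanes, rate);	/* first modeset locks 4 lanes @ 5.4 GHz */

	/*
	 * The hub later lowers its advertised caps; a second modeset on the
	 * same topology still reuses the locked config instead of re-reading
	 * the (now different) maximum.
	 */
	compute_config(&dp, 2, 270000, &lanes, &rate);
	printf("second modeset: %d lanes @ %d kHz\n", lanes, rate);
	return 0;
}

Run as-is, the second modeset still reports 4 lanes @ 540000 kHz, i.e.
the locked config survives the hub lowering its advertised caps.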

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5abf0c95725a..5645a194de92 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3871,18 +3871,25 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
 static void
 intel_dp_configure_mst(struct intel_dp *intel_dp)
 {
+	bool was_mst;
+
 	if (!i915_modparams.enable_dp_mst)
 		return;
 
 	if (!intel_dp->can_mst)
 		return;
 
+	was_mst = intel_dp->is_mst;
 	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
 
-	if (intel_dp->is_mst)
+	if (intel_dp->is_mst) {
 		DRM_DEBUG_KMS("Sink is MST capable\n");
-	else
+
+		if (!was_mst)
+			intel_dp->mst_bw_locked = false;
+	} else {
 		DRM_DEBUG_KMS("Sink is not MST capable\n");
+	}
 
 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
 					intel_dp->is_mst);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index c3de0918ee13..c0553456b18e 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -42,7 +42,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 		to_intel_connector(conn_state->connector);
 	struct drm_atomic_state *state = pipe_config->base.state;
 	int bpp;
-	int lane_count, slots;
+	int lane_count, link_rate, slots;
 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	int mst_pbn;
 	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
@@ -56,16 +56,22 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 			      bpp);
 	}
 	/*
-	 * for MST we always configure max link bw - the spec doesn't
-	 * seem to suggest we should do otherwise.
+	 * for MST we always configure max link bw if we don't know better -
+	 * the spec doesn't seem to suggest we should do otherwise. But,
+	 * ensure it always stays consistent with the rest of this hub's
+	 * state.
 	 */
-	lane_count = intel_dp_max_lane_count(intel_dp);
+	if (intel_dp->mst_bw_locked) {
+		lane_count = intel_dp->lane_count;
+		link_rate = intel_dp->link_rate;
+	} else {
+		lane_count = intel_dp_max_lane_count(intel_dp);
+		link_rate = intel_dp_max_link_rate(intel_dp);
+	}
 
 	pipe_config->lane_count = lane_count;
-
 	pipe_config->pipe_bpp = bpp;
-
-	pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
+	pipe_config->port_clock = link_rate;
 
 	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
 		pipe_config->has_audio = true;
@@ -221,6 +227,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 	connector->encoder = encoder;
 	intel_mst->connector = connector;
 
+	intel_dp->mst_bw_locked = true;
+
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
 	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3700fcfddb1f..e5d3ef6754a5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1110,6 +1110,12 @@ struct intel_dp {
 	bool can_mst; /* this port supports mst */
 	bool is_mst;
 	int active_mst_links;
+	/* Set when we've already decided on a link bw for mst, to prevent us
+	 * from setting different link bandwidths if the hub tries to confuse
+	 * us by changing it later
+	 */
+	bool mst_bw_locked;
+
 	/* connector directly attached - won't be use for modeset in mst world */
 	struct intel_connector *attached_connector;
 
-- 
2.14.3
