Message-ID: <20211130103353.0ab1a44f@canb.auug.org.au>
Date: Tue, 30 Nov 2021 10:33:53 +1100
From: Stephen Rothwell <sfr@...b.auug.org.au>
To: Dave Airlie <airlied@...ux.ie>,
Daniel Vetter <daniel.vetter@...ll.ch>,
Intel Graphics <intel-gfx@...ts.freedesktop.org>,
DRI <dri-devel@...ts.freedesktop.org>
Cc: Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Linux Next Mailing List <linux-next@...r.kernel.org>,
Maxime Ripard <maxime@...no.tech>
Subject: linux-next: manual merge of the drm tree with the drm-misc-fixes tree

Hi all,

Today's linux-next merge of the drm tree got a conflict in:

  drivers/gpu/drm/vc4/vc4_kms.c

between commits:

  f927767978d2 ("drm/vc4: kms: Fix return code check")
  d354699e2292 ("drm/vc4: kms: Don't duplicate pending commit")

from the drm-misc-fixes tree and commit:

  16e101051f32 ("drm/vc4: Increase the core clock based on HVS load")

from the drm tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging. You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
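
As background on the fix-up: the two drm-misc-fixes commits switch the
WARN_ON() checks in vc4_atomic_commit_tail() to IS_ERR(), which suggests
the HVS global-state getters report failure via ERR_PTR() rather than
NULL, so the resolution below keeps the IS_ERR() form on both the old
and new state lookups.  A minimal, generic sketch of that kernel pattern
(not vc4 code; foo_state and foo_get_state() are made-up names for
illustration):

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical state object, for illustration only. */
struct foo_state {
	int dummy;
};

/* A getter that encodes its errno in the returned pointer. */
static struct foo_state *foo_get_state(void)
{
	struct foo_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return ERR_PTR(-ENOMEM);

	return s;
}

static int foo_use_state(void)
{
	struct foo_state *s = foo_get_state();

	/* A plain !s test would miss ERR_PTR() values; check with IS_ERR(). */
	if (WARN_ON(IS_ERR(s)))
		return PTR_ERR(s);

	kfree(s);
	return 0;
}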
--
Cheers,
Stephen Rothwell
diff --cc drivers/gpu/drm/vc4/vc4_kms.c
index b61792d2aa65,79d4d9dd1394..000000000000
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@@ -337,12 -340,21 +340,21 @@@ static void vc4_atomic_commit_tail(stru
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
+ unsigned int channel;
int i;
+ old_hvs_state = vc4_hvs_get_old_global_state(state);
- if (WARN_ON(!old_hvs_state))
++ if (WARN_ON(IS_ERR(old_hvs_state)))
+ return;
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
- if (WARN_ON(!new_hvs_state))
++ if (WARN_ON(IS_ERR(new_hvs_state)))
+ return;
+
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state;
@@@ -353,32 -365,31 +365,36 @@@
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}
- old_hvs_state = vc4_hvs_get_old_global_state(state);
- if (IS_ERR(old_hvs_state))
- return;
+ if (vc4->hvs->hvs5) {
+ unsigned long core_rate = max_t(unsigned long,
+ 500000000,
+ new_hvs_state->core_clock_rate);
+
+ clk_set_min_rate(hvs->core_clk, core_rate);
+ }
- for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
- struct vc4_crtc_state *vc4_crtc_state =
- to_vc4_crtc_state(old_crtc_state);
- unsigned int channel = vc4_crtc_state->assigned_channel;
+ for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+ struct drm_crtc_commit *commit;
int ret;
- if (channel == VC4_HVS_CHANNEL_DISABLED)
+ if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- if (!old_hvs_state->fifo_state[channel].in_use)
+ commit = old_hvs_state->fifo_state[channel].pending_commit;
+ if (!commit)
continue;
- ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+ ret = drm_crtc_commit_wait(commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
+
+ drm_crtc_commit_put(commit);
+ old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
+ if (vc4->hvs->hvs5)
+ clk_set_min_rate(hvs->core_clk, 500000000);
+
drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state);
@@@ -667,11 -673,19 +678,13 @@@ vc4_hvs_channels_duplicate_state(struc
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+ state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
-
- if (!old_state->fifo_state[i].pending_commit)
- continue;
-
- state->fifo_state[i].pending_commit =
- drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
}
+ state->core_clock_rate = old_state->core_clock_rate;
+
return &state->base;
}
@@@ -826,6 -840,76 +839,76 @@@ static int vc4_pv_muxing_atomic_check(s
return 0;
}
+ static int
+ vc4_core_clock_atomic_check(struct drm_atomic_state *state)
+ {
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+ struct vc4_hvs_state *hvs_new_state;
+ struct vc4_load_tracker_state *load_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc *crtc;
+ unsigned int num_outputs;
+ unsigned long pixel_rate;
+ unsigned long cob_rate;
+ unsigned int i;
+
+ priv_state = drm_atomic_get_private_obj_state(state,
+ &vc4->load_tracker);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ load_state = to_vc4_load_tracker_state(priv_state);
+
+ hvs_new_state = vc4_hvs_get_global_state(state);
- if (!hvs_new_state)
- return -EINVAL;
++ if (IS_ERR(hvs_new_state))
++ return PTR_ERR(hvs_new_state);
+
+ for_each_oldnew_crtc_in_state(state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ i) {
+ if (old_crtc_state->active) {
+ struct vc4_crtc_state *old_vc4_state =
+ to_vc4_crtc_state(old_crtc_state);
+ unsigned int channel = old_vc4_state->assigned_channel;
+
+ hvs_new_state->fifo_state[channel].fifo_load = 0;
+ }
+
+ if (new_crtc_state->active) {
+ struct vc4_crtc_state *new_vc4_state =
+ to_vc4_crtc_state(new_crtc_state);
+ unsigned int channel = new_vc4_state->assigned_channel;
+
+ hvs_new_state->fifo_state[channel].fifo_load =
+ new_vc4_state->hvs_load;
+ }
+ }
+
+ cob_rate = 0;
+ num_outputs = 0;
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_new_state->fifo_state[i].in_use)
+ continue;
+
+ num_outputs++;
+ cob_rate += hvs_new_state->fifo_state[i].fifo_load;
+ }
+
+ pixel_rate = load_state->hvs_load;
+ if (num_outputs > 1) {
+ pixel_rate = (pixel_rate * 40) / 100;
+ } else {
+ pixel_rate = (pixel_rate * 60) / 100;
+ }
+
+ hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
+
+ return 0;
+ }
+
+
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
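
A note on the drm-tree side of the conflict (16e101051f32): the new
vc4_core_clock_atomic_check() above derives the requested core clock as
the maximum of the summed per-FIFO COB loads and the load tracker's HVS
pixel load, scaled to 40% when more than one output is active and 60%
otherwise.  The following standalone recomputation of that formula is an
illustration only (plain C with made-up load numbers; HVS_NUM_CHANNELS
is assumed to be 3 here):

#include <stdio.h>

#define HVS_NUM_CHANNELS 3	/* assumed channel count, for illustration */

static unsigned long core_clock_rate(const unsigned long fifo_load[HVS_NUM_CHANNELS],
				     const int in_use[HVS_NUM_CHANNELS],
				     unsigned long hvs_load)
{
	unsigned long cob_rate = 0, pixel_rate;
	unsigned int num_outputs = 0, i;

	/* Sum the COB load of every FIFO that is in use. */
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!in_use[i])
			continue;
		num_outputs++;
		cob_rate += fifo_load[i];
	}

	/* Scale the HVS pixel load: 40% with several outputs, 60% with one. */
	if (num_outputs > 1)
		pixel_rate = (hvs_load * 40) / 100;
	else
		pixel_rate = (hvs_load * 60) / 100;

	return cob_rate > pixel_rate ? cob_rate : pixel_rate;
}

int main(void)
{
	const unsigned long fifo_load[HVS_NUM_CHANNELS] = { 148500000, 0, 74250000 };
	const int in_use[HVS_NUM_CHANNELS] = { 1, 0, 1 };

	/* Two outputs in use: max(222750000, 40% of 600000000) = 240000000. */
	printf("%lu\n", core_clock_rate(fifo_load, in_use, 600000000UL));
	return 0;
}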