Message-Id: <20230109143846.1966301-12-mwen@igalia.com>
Date: Mon, 9 Jan 2023 13:38:39 -0100
From: Melissa Wen <mwen@...lia.com>
To: harry.wentland@....com, sunpeng.li@....com,
Rodrigo.Siqueira@....com, alexander.deucher@....com,
christian.koenig@....com, Xinhui.Pan@....com, airlied@...il.com,
daniel@...ll.ch
Cc: Joshua Ashton <joshua@...ggi.es>, alex.hung@....com,
nicholas.kazlauskas@....com, sungjoon.kim@....com,
seanpaul@...omium.org, bhawanpreet.lakha@....com,
Shashank Sharma <shashank.sharma@....com>,
ville.syrjala@...ux.intel.com, maarten.lankhorst@...ux.intel.com,
mripard@...nel.org, tzimmermann@...e.de, kernel-dev@...lia.com,
laurent.pinchart+renesas@...asonboard.com,
Melissa Wen <mwen@...lia.com>, amd-gfx@...ts.freedesktop.org,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org
Subject: [RFC PATCH v2 11/18] drm/amd/display: handle MPC 3D LUT resources for a given context

In the original dc_acquire_release_mpc_3dlut(), only the current context is
considered, which doesn't fit the steps for atomic checking of a new
context. Therefore, create a function that handles 3D LUT resources for a
given context, so that we can check resource availability at atomic_check
time and handle failures properly.

Signed-off-by: Melissa Wen <mwen@...lia.com>
---
drivers/gpu/drm/amd/display/dc/core/dc.c | 39 ++++++++++++++++++++++++
drivers/gpu/drm/amd/display/dc/dc.h | 8 +++++
2 files changed, 47 insertions(+)
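
For illustration only (not part of this patch): a minimal sketch of how a
DM-side atomic check could use the new helper to reserve a post-blending
3D LUT for a stream in the new context and hand it back on a later failure.
The caller name and the __set_output_tf() placeholder are assumptions made
up for this example; only dc_acquire_release_mpc_3dlut_for_ctx() and the dc
types come from this series.

	/* sketch only; assumes dc.h from this series is included */
	static int fill_stream_3dlut_sketch(struct dc *dc,
					    struct dc_state *new_ctx,
					    struct dc_stream_state *stream)
	{
		struct dc_3dlut *lut = NULL;
		struct dc_transfer_func *shaper = NULL;

		/* acquire == true: look up the pipe for 'stream' in new_ctx
		 * and reserve a post-blending 3D LUT + shaper pair for its
		 * mpcc_id */
		if (!dc_acquire_release_mpc_3dlut_for_ctx(dc, true, new_ctx,
							  stream, &lut, &shaper))
			return -EINVAL; /* pool exhausted: reject new state */

		/* __set_output_tf() is a made-up placeholder for whatever
		 * programs lut/shaper from the stream's color state */
		if (!__set_output_tf(stream, lut, shaper)) {
			/* acquire == false: hand the pair back to the pool */
			dc_acquire_release_mpc_3dlut_for_ctx(dc, false, new_ctx,
							     stream, &lut, &shaper);
			return -ENOMEM;
		}

		return 0;
	}

The point of the sketch is that acquire/release now operates on the res_ctx
of the state being checked, so a failed acquisition can simply fail
atomic_check instead of touching the current hardware context.
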
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 88f1130c3b83..76270d21286a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2085,6 +2085,45 @@ bool dc_acquire_release_mpc_3dlut(
return ret;
}
+bool
+dc_acquire_release_mpc_3dlut_for_ctx(struct dc *dc,
+ bool acquire,
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int pipe_idx;
+ bool ret = false;
+ bool found_pipe_idx = false;
+ const struct resource_pool *pool = dc->res_pool;
+ struct resource_context *res_ctx = &state->res_ctx;
+ int mpcc_id = 0;
+
+ if (pool && res_ctx) {
+ if (acquire) {
+ /*find pipe idx for the given stream*/
+ for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
+ if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
+ found_pipe_idx = true;
+ mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
+ break;
+ }
+ }
+ } else
+ found_pipe_idx = true;/*for release pipe_idx is not required*/
+
+ if (found_pipe_idx) {
+ if (acquire && pool->funcs->acquire_post_bldn_3dlut)
+ ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
+ else if (!acquire && pool->funcs->release_post_bldn_3dlut)
+ ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
+ }
+ }
+ return ret;
+}
+
+
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 72963617553e..a5abf7f308c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1345,6 +1345,14 @@ bool dc_acquire_release_mpc_3dlut(
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
+bool
+dc_acquire_release_mpc_3dlut_for_ctx(struct dc *dc,
+ bool acquire,
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper);
+
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx);
--
2.35.1