Message-ID: <20191203071036.14158-7-bibby.hsieh@mediatek.com>
Date: Tue, 3 Dec 2019 15:10:36 +0800
From: Bibby Hsieh <bibby.hsieh@...iatek.com>
To: David Airlie <airlied@...ux.ie>,
Matthias Brugger <matthias.bgg@...il.com>,
Daniel Vetter <daniel.vetter@...ll.ch>,
<dri-devel@...ts.freedesktop.org>,
<linux-mediatek@...ts.infradead.org>
CC: Philipp Zabel <p.zabel@...gutronix.de>,
YT Shen <yt.shen@...iatek.com>,
Thierry Reding <thierry.reding@...il.com>,
CK Hu <ck.hu@...iatek.com>,
<linux-arm-kernel@...ts.infradead.org>, <tfiga@...omium.org>,
<drinkcat@...omium.org>, <linux-kernel@...r.kernel.org>,
<srv_heupstream@...iatek.com>,
Bibby Hsieh <bibby.hsieh@...iatek.com>,
Yongqiang Niu <yongqiang.niu@...iatek.com>
Subject: [PATCH v2 6/6] drm/mediatek: apply CMDQ control flow
Unlike other SoCs, MT8183 does not have "shadow" registers for
performing an atomic video mode set or page flip at vblank/vsync.

The CMDQ (Command Queue) in MT8183 is therefore used to update all
relevant display controller registers within the critical timing
window.
Signed-off-by: YT Shen <yt.shen@...iatek.com>
Signed-off-by: CK Hu <ck.hu@...iatek.com>
Signed-off-by: Philipp Zabel <p.zabel@...gutronix.de>
Signed-off-by: Bibby Hsieh <bibby.hsieh@...iatek.com>
Signed-off-by: Yongqiang Niu <yongqiang.niu@...iatek.com>
---
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 163 +++++++++++++++++-------
1 file changed, 118 insertions(+), 45 deletions(-)
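For reviewers unfamiliar with the GCE/CMDQ helpers, here is a rough
sketch (not part of the patch; the function name example_cmdq_flush is
made up for illustration) of what one flush looks like on the CMDQ path,
i.e. what mtk_cmdq_acquire() + mtk_crtc_ddp_config() + mtk_cmdq_release()
in the hunks below do together. Error handling for cmdq_pkt_create() and
kmalloc() is trimmed here; see the real code below.

static void example_cmdq_flush(struct mtk_drm_crtc *mtk_crtc,
			       struct mtk_crtc_state *state)
{
	struct mtk_cmdq_cb_data *cb_data;

	mutex_lock(&mtk_crtc->chan_lock);

	/* Build a command buffer on this CRTC's GCE thread. */
	state->cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client,
					     PAGE_SIZE);

	/*
	 * Have the GCE wait for the CRTC's stream-done event before it
	 * touches any display controller registers.
	 */
	cmdq_pkt_clear_event(state->cmdq_handle, mtk_crtc->cmdq_event);
	cmdq_pkt_wfe(state->cmdq_handle, mtk_crtc->cmdq_event);

	/*
	 * mtk_crtc_ddp_config() appends the pending register writes to
	 * state->cmdq_handle instead of issuing them from the CPU.
	 */
	mtk_crtc_ddp_config(&mtk_crtc->base);

	/*
	 * Hand the packet to the mailbox; ddp_cmdq_cb() destroys it once
	 * the GCE has executed it.
	 */
	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
	cb_data->cmdq_handle = state->cmdq_handle;
	cmdq_pkt_flush_async(state->cmdq_handle, ddp_cmdq_cb, cb_data);

	mutex_unlock(&mtk_crtc->chan_lock);
}

Because the packet is flushed asynchronously and freed from the
callback, the atomic flush itself never blocks waiting for the
hardware.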
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 9b2f7935ba28..7b47fe8ccee8 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -12,6 +12,8 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <linux/of_address.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
@@ -42,11 +44,17 @@ struct mtk_drm_crtc {
unsigned int layer_nr;
bool pending_planes;
+ struct cmdq_client *cmdq_client;
+ u32 cmdq_event;
+
void __iomem *config_regs;
const struct mtk_mmsys_reg_data *mmsys_reg_data;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
+
+ /* lock for cmdq access */
+ struct mutex chan_lock;
};
struct mtk_crtc_state {
@@ -56,6 +64,11 @@ struct mtk_crtc_state {
unsigned int pending_width;
unsigned int pending_height;
unsigned int pending_vrefresh;
+ struct cmdq_pkt *cmdq_handle;
+};
+
+struct mtk_cmdq_cb_data {
+ struct cmdq_pkt *cmdq_handle;
};
static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
@@ -229,6 +242,50 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
return NULL;
}
+#ifdef CONFIG_MTK_CMDQ
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
+{
+ struct mtk_cmdq_cb_data *cb_data = data.data;
+
+ cmdq_pkt_destroy(cb_data->cmdq_handle);
+ kfree(cb_data);
+}
+
+static void mtk_cmdq_acquire(struct drm_crtc *crtc)
+{
+ struct mtk_crtc_state *mtk_crtc_state =
+ to_mtk_crtc_state(crtc->state);
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+
+ mutex_lock(&mtk_crtc->chan_lock);
+ mtk_crtc_state->cmdq_handle =
+ cmdq_pkt_create(mtk_crtc->cmdq_client,
+ PAGE_SIZE);
+ cmdq_pkt_clear_event(mtk_crtc_state->cmdq_handle,
+ mtk_crtc->cmdq_event);
+ cmdq_pkt_wfe(mtk_crtc_state->cmdq_handle, mtk_crtc->cmdq_event);
+}
+
+static void mtk_cmdq_release(struct drm_crtc *crtc)
+{
+ struct mtk_crtc_state *mtk_crtc_state =
+ to_mtk_crtc_state(crtc->state);
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_cmdq_cb_data *cb_data;
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data) {
+ DRM_DEV_ERROR(crtc->dev->dev, "Failed to alloc cb_data\n");
+ return;
+ }
+
+ cb_data->cmdq_handle = mtk_crtc_state->cmdq_handle;
+ cmdq_pkt_flush_async(mtk_crtc_state->cmdq_handle,
+ ddp_cmdq_cb, cb_data);
+ mutex_unlock(&mtk_crtc->chan_lock);
+}
+#endif
+
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -383,7 +440,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
if (state->pending_config) {
mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
- state->pending_vrefresh, 0, NULL);
+ state->pending_vrefresh, 0,
+ state->cmdq_handle);
state->pending_config = false;
}
@@ -403,13 +461,54 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
- plane_state, NULL);
+ plane_state,
+ state->cmdq_handle);
plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
}
+static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
+{
+ struct drm_crtc *crtc = &mtk_crtc->base;
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+ unsigned int pending_planes = 0;
+ int i;
+
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+ if (plane_state->pending.dirty) {
+ plane_state->pending.config = true;
+ plane_state->pending.dirty = false;
+ pending_planes |= BIT(i);
+ } else if (plane_state->pending.async_dirty) {
+ plane_state->pending.config = true;
+ plane_state->pending.async_update = false;
+ plane_state->pending.async_dirty = false;
+ pending_planes |= BIT(i);
+ }
+ }
+ if (pending_planes)
+ mtk_crtc->pending_planes = true;
+
+ if (priv->data->shadow_register) {
+ mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_crtc_ddp_config(crtc);
+ mtk_disp_mutex_release(mtk_crtc->mutex);
+ }
+#ifdef CONFIG_MTK_CMDQ
+ if (mtk_crtc->cmdq_client) {
+ mtk_cmdq_acquire(crtc);
+ mtk_crtc_ddp_config(crtc);
+ mtk_cmdq_release(crtc);
+ }
+#endif
+}
+
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state)
{
@@ -425,34 +524,15 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct mtk_drm_private *priv = crtc->dev->dev_private;
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
const struct drm_plane_helper_funcs *plane_helper_funcs =
plane->helper_private;
- int i;
if (!mtk_crtc->enabled)
return;
plane_helper_funcs->atomic_update(plane, new_state);
-
- for (i = 0; i < mtk_crtc->layer_nr; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.async_dirty) {
- plane_state->pending.config = true;
- plane_state->pending.async_update = false;
- plane_state->pending.async_dirty = false;
- }
- }
- mtk_crtc->pending_planes = true;
- if (priv->data->shadow_register) {
- mtk_disp_mutex_acquire(mtk_crtc->mutex);
- mtk_crtc_ddp_config(crtc);
- mtk_disp_mutex_release(mtk_crtc->mutex);
- }
+ mtk_drm_crtc_hw_config(mtk_crtc);
}
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -536,34 +616,14 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_drm_private *priv = crtc->dev->dev_private;
- unsigned int pending_planes = 0;
int i;
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < mtk_crtc->layer_nr; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.dirty) {
- plane_state->pending.config = true;
- plane_state->pending.dirty = false;
- pending_planes |= BIT(i);
- }
- }
- if (pending_planes)
- mtk_crtc->pending_planes = true;
if (crtc->state->color_mgmt_changed)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
-
- if (priv->data->shadow_register) {
- mtk_disp_mutex_acquire(mtk_crtc->mutex);
- mtk_crtc_ddp_config(crtc);
- mtk_disp_mutex_release(mtk_crtc->mutex);
- }
+ mtk_drm_crtc_hw_config(mtk_crtc);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -613,7 +673,7 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
- if (!priv->data->shadow_register)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
mtk_crtc_ddp_config(crtc);
mtk_drm_finish_page_flip(mtk_crtc);
@@ -755,6 +815,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
priv->num_pipes++;
-
+#ifdef CONFIG_MTK_CMDQ
+ mtk_crtc->cmdq_client =
+ cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+ 2000);
+ of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+ drm_crtc_index(&mtk_crtc->base),
+ &mtk_crtc->cmdq_event);
+ if (IS_ERR(mtk_crtc->cmdq_client)) {
+ dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mtk_crtc->cmdq_client = NULL;
+ }
+ mutex_init(&mtk_crtc->chan_lock);
+#endif
return 0;
}
--
2.18.0