Message-Id: <1427922770-13721-1-git-send-email-jilaiw@codeaurora.org>
Date: Wed, 1 Apr 2015 17:12:50 -0400
From: Jilai Wang <jilaiw@...eaurora.org>
To: dri-devel@...ts.freedesktop.org
Cc: linux-arm-msm@...r.kernel.org, linux-kernel@...r.kernel.org,
robdclark@...il.com, Jilai Wang <jilaiw@...eaurora.org>
Subject: [PATCH 2/3] drm/msm: Add initial writeback support
Add writeback support to the MSM KMS framework. The MDP5 writeback (WB)
interface is exposed to userspace as a V4L2 capture device: frames composed
by the display hardware are written into DMABUF buffers queued by the
client, and a virtual DRM connector signals hotplug when streaming starts
and stops.
Signed-off-by: Jilai Wang <jilaiw@...eaurora.org>
---
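Note for reviewers (not part of the patch): below is a rough userspace
sketch of how a client might drive the writeback V4L2 node added by this
series. The vb2 queue only accepts DMABUF memory, so the example exports a
DRM dumb buffer through PRIME and queues it for capture. The device paths
(/dev/dri/card0, /dev/video0), the buffer count and the RGB32 format are
assumptions for illustration only, and error handling is minimal. Build
against libdrm headers (pkg-config --cflags --libs libdrm).

/* wb_capture_sketch.c - hypothetical example, not part of this patch */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <xf86drm.h>

int main(void)
{
	int drm_fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */
	int v4l_fd = open("/dev/video0", O_RDWR);	/* assumed node */
	struct drm_mode_create_dumb creq = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct v4l2_format fmt;
	struct v4l2_requestbuffers req;
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct v4l2_buffer buf;
	int dmabuf_fd, type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

	if (drm_fd < 0 || v4l_fd < 0)
		return 1;

	/* allocate a dumb buffer and export it as a dmabuf fd */
	if (drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) ||
	    drmPrimeHandleToFD(drm_fd, creq.handle, DRM_CLOEXEC, &dmabuf_fd))
		return 1;

	/* pick one of the formats advertised by mdp_wb_v4l2.c */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	fmt.fmt.pix_mp.width = 640;
	fmt.fmt.pix_mp.height = 480;
	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_RGB32;
	fmt.fmt.pix_mp.num_planes = 1;
	if (ioctl(v4l_fd, VIDIOC_S_FMT, &fmt))
		return 1;

	/* the wb queue only supports V4L2_MEMORY_DMABUF */
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	req.memory = V4L2_MEMORY_DMABUF;
	if (ioctl(v4l_fd, VIDIOC_REQBUFS, &req))
		return 1;

	/* queue one dmabuf into slot 0 just to show the flow */
	memset(&buf, 0, sizeof(buf));
	memset(planes, 0, sizeof(planes));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = 0;
	buf.length = 1;
	buf.m.planes = planes;
	planes[0].m.fd = dmabuf_fd;
	planes[0].length = fmt.fmt.pix_mp.plane_fmt[0].sizeimage;
	if (ioctl(v4l_fd, VIDIOC_QBUF, &buf))
		return 1;

	/* STREAMON triggers the hotplug of the virtual WB connector */
	if (ioctl(v4l_fd, VIDIOC_STREAMON, &type))
		return 1;

	/* blocks until the encoder finishes writing a frame */
	if (ioctl(v4l_fd, VIDIOC_DQBUF, &buf) == 0)
		printf("captured a frame into dmabuf %d\n", dmabuf_fd);

	ioctl(v4l_fd, VIDIOC_STREAMOFF, &type);
	close(v4l_fd);
	close(drm_fd);
	return 0;
}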
drivers/gpu/drm/msm/Kconfig | 10 +
drivers/gpu/drm/msm/Makefile | 9 +-
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 10 +
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 1 +
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 19 +-
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 7 +
drivers/gpu/drm/msm/mdp/mdp5/mdp5_wb_encoder.c | 460 +++++++++++++++++++
drivers/gpu/drm/msm/mdp/mdp_kms.h | 2 +-
drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.c | 319 +++++++++++++
drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.h | 98 ++++
drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_connector.c | 157 +++++++
drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_v4l2.c | 522 ++++++++++++++++++++++
drivers/gpu/drm/msm/msm_drv.c | 2 +
drivers/gpu/drm/msm/msm_drv.h | 19 +-
drivers/gpu/drm/msm/msm_fbdev.c | 34 +-
drivers/gpu/drm/msm/msm_gem.c | 1 +
16 files changed, 1664 insertions(+), 6 deletions(-)
create mode 100644 drivers/gpu/drm/msm/mdp/mdp5/mdp5_wb_encoder.c
create mode 100644 drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.c
create mode 100644 drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.h
create mode 100644 drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_connector.c
create mode 100644 drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_v4l2.c
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 1e6a907..f6c7914 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -27,6 +27,16 @@ config DRM_MSM_FBDEV
support. Note that this support also provide the linux console
support on top of the MSM modesetting driver.
+config DRM_MSM_WB
+ bool "Enable writeback support for MSM modesetting driver"
+ depends on DRM_MSM
+ depends on VIDEO_V4L2
+ select VIDEOBUF2_CORE
+ default y
+ help
+ Choose this option if you need support for the writeback
+ connector.
+
config DRM_MSM_REGISTER_LOGGING
bool "MSM DRM register logging"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 674a132..e5bf334 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ccflags-$(CONFIG_DRM_MSM_WB) += -Idrivers/gpu/drm/msm/mdp/mdp_wb
msm-y := \
adreno/adreno_device.o \
@@ -51,4 +52,10 @@ msm-y := \
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_DRM_MSM_WB) += \
+ mdp/mdp5/mdp5_wb_encoder.o \
+ mdp/mdp_wb/mdp_wb.o \
+ mdp/mdp_wb/mdp_wb_connector.o \
+ mdp/mdp_wb/mdp_wb_v4l2.o
+
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 1fe7315..e87cf74 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -71,9 +71,14 @@ const struct mdp5_cfg_hw msm8x74_config = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
+ .wb = {
+ .count = 5,
+ .base = { 0x11100, 0x13100, 0x15100, 0x17100, 0x19100 },
+ },
.intfs = {
[0] = INTF_eDP,
[3] = INTF_HDMI,
+ [4] = INTF_WB,
},
.max_clk = 200000000,
};
@@ -135,9 +140,14 @@ const struct mdp5_cfg_hw apq8084_config = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
+ .wb = {
+ .count = 5,
+ .base = { 0x11100, 0x11500, 0x11900, 0x11d00, 0x12100 },
+ },
.intfs = {
[0] = INTF_eDP,
[3] = INTF_HDMI,
+ [4] = INTF_WB,
},
.max_clk = 320000000,
};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index f47328d..ccb6048c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -74,6 +74,7 @@ struct mdp5_cfg_hw {
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
struct mdp5_sub_block intf;
+ struct mdp5_sub_block wb;
u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ff9201b..1b1569d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -179,7 +179,11 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
.mode = intf_mode,
};
- encoder = mdp5_encoder_init(dev, &intf);
+ if (intf_type == INTF_WB)
+ encoder = mdp5_wb_encoder_init(dev, &intf);
+ else
+ encoder = mdp5_encoder_init(dev, &intf);
+
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
return encoder;
@@ -230,6 +234,19 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
break;
+ case INTF_WB:
+ if (!priv->wb)
+ break;
+
+ encoder = construct_encoder(mdp5_kms, INTF_WB, intf_num,
+ MDP5_INTF_WB_MODE_LINE);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_wb_modeset_init(priv->wb, dev, encoder);
+ break;
default:
dev_err(dev->dev, "unknown intf: %d\n", intf_type);
ret = -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 6efa5c6..f6d23cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -238,5 +238,12 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf);
+#ifdef CONFIG_DRM_MSM_WB
+struct drm_encoder *mdp5_wb_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf);
+#else
+static inline struct drm_encoder *mdp5_wb_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf) { return NULL; }
+#endif
#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_wb_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_wb_encoder.c
new file mode 100644
index 0000000..8ce3449
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_wb_encoder.c
@@ -0,0 +1,460 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp_wb.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_wb_encoder {
+ struct drm_encoder base;
+ struct mdp5_interface intf;
+ bool enabled;
+ uint32_t bsc;
+ struct mdp5_ctl *ctl;
+
+ /* irq handler for wb encoder */
+ struct mdp_irq wb_vblank;
+ /* wb id same as ctl id */
+ u32 wb_id;
+};
+#define to_mdp5_wb_encoder(x) container_of(x, struct mdp5_wb_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static struct msm_wb *get_wb(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return priv->wb;
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[0],
+}, {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_wb_encoder *mdp5_wb_encoder)
+{
+ mdp5_wb_encoder->bsc = msm_bus_scale_register_client(
+ &mdp_bus_scale_table);
+ DBG("bus scale client: %08x", mdp5_wb_encoder->bsc);
+}
+
+static void bs_fini(struct mdp5_wb_encoder *mdp5_wb_encoder)
+{
+ if (mdp5_wb_encoder->bsc) {
+ msm_bus_scale_unregister_client(mdp5_wb_encoder->bsc);
+ mdp5_wb_encoder->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp5_wb_encoder *mdp5_wb_encoder, int idx)
+{
+ if (mdp5_wb_encoder->bsc) {
+ DBG("set bus scaling: %d", idx);
+ /* HACK: scaling down, and then immediately back up
+ * seems to leave things broken (underflow).. so
+ * never disable:
+ */
+ idx = 1;
+ msm_bus_scale_client_update_request(mdp5_wb_encoder->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp5_wb_encoder *mdp5_wb_encoder) {}
+static void bs_fini(struct mdp5_wb_encoder *mdp5_wb_encoder) {}
+static void bs_set(struct mdp5_wb_encoder *mdp5_wb_encoder, int idx) {}
+#endif
+
+static void mdp5_wb_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_wb_encoder *mdp5_wb_encoder = to_mdp5_wb_encoder(encoder);
+ bs_fini(mdp5_wb_encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_wb_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_wb_encoder_funcs = {
+ .destroy = mdp5_wb_encoder_destroy,
+};
+
+static bool mdp5_wb_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void mdp5_wb_encoder_buf_prepare(struct msm_wb *wb, struct msm_wb_buffer *buf)
+{
+ struct drm_encoder *encoder = wb->encoder;
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ uint32_t nplanes = drm_format_num_planes(buf->pixel_format);
+ int i;
+
+ DBG("plane no %d", nplanes);
+ mdp5_enable(mdp5_kms);
+ for (i = 0; i < nplanes; i++) {
+ DBG("buf %d: plane %x", i, (int)buf->planes[i]);
+ msm_gem_get_iova(buf->planes[i], mdp5_kms->id, &buf->iova[i]);
+ buf->iova[i] += buf->offsets[i];
+ }
+ for (; i < MAX_PLANE; i++)
+ buf->iova[i] = 0;
+ mdp5_disable(mdp5_kms);
+}
+
+static void mdp5_wb_encoder_addr_setup(struct drm_encoder *encoder,
+ struct msm_wb_buffer *buf)
+{
+ struct mdp5_wb_encoder *mdp5_wb_encoder = to_mdp5_wb_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ u32 wb_id = mdp5_wb_encoder->wb_id;
+
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST0_ADDR(wb_id), buf->iova[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST1_ADDR(wb_id), buf->iova[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST2_ADDR(wb_id), buf->iova[2]);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST3_ADDR(wb_id), buf->iova[3]);
+ DBG("Program WB DST address %x %x %x %x", buf->iova[0],
+ buf->iova[1], buf->iova[2], buf->iova[3]);
+ /* Notify ctl that wb buffer is ready to trigger start */
+ mdp5_ctl_commit(mdp5_wb_encoder->ctl,
+ mdp_ctl_flush_mask_encoder(&mdp5_wb_encoder->intf));
+}
+
+static void wb_csc_setup(struct mdp5_kms *mdp5_kms, u32 wb_id,
+ struct csc_cfg *csc)
+{
+ uint32_t i;
+ uint32_t *matrix;
+
+ if (unlikely(!csc))
+ return;
+
+ matrix = csc->matrix;
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_MATRIX_COEFF_0(wb_id),
+ MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
+ MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(matrix[1]));
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_MATRIX_COEFF_1(wb_id),
+ MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
+ MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(matrix[3]));
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_MATRIX_COEFF_2(wb_id),
+ MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
+ MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(matrix[5]));
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_MATRIX_COEFF_3(wb_id),
+ MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
+ MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(matrix[7]));
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_MATRIX_COEFF_4(wb_id),
+ MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(matrix[8]));
+
+ for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
+ uint32_t *pre_clamp = csc->pre_clamp;
+ uint32_t *post_clamp = csc->post_clamp;
+
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_COMP_PRECLAMP(wb_id, i),
+ MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(pre_clamp[2*i+1]) |
+ MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(pre_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_COMP_POSTCLAMP(wb_id, i),
+ MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(post_clamp[2*i+1]) |
+ MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(post_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_COMP_PREBIAS(wb_id, i),
+ MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(csc->pre_bias[i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_WB_CSC_COMP_POSTBIAS(wb_id, i),
+ MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(csc->post_bias[i]));
+ }
+}
+
+static void mdp5_wb_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_wb_encoder *mdp5_wb_encoder = to_mdp5_wb_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct msm_kms *kms = &mdp5_kms->base.base;
+ const struct msm_format *msm_fmt;
+ const struct mdp_format *fmt;
+ struct msm_wb *wb = get_wb(encoder);
+ struct msm_wb_buf_format *wb_buf_fmt;
+ struct msm_wb_buffer *buf;
+ u32 wb_id;
+ u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+ u32 opmode = 0;
+
+ DBG("Wb2 encoder modeset");
+
+ /* now we can get the ctl from crtc and extract the wb_id from ctl */
+ if (!mdp5_wb_encoder->ctl)
+ mdp5_wb_encoder->ctl = mdp5_crtc_get_ctl(encoder->crtc);
+
+ wb_id = mdp5_ctl_get_ctl_id(mdp5_wb_encoder->ctl);
+ mdp5_wb_encoder->wb_id = wb_id;
+
+ /* get color_format from wb device */
+ wb_buf_fmt = msm_wb_get_buf_format(wb);
+ msm_fmt = kms->funcs->get_format(kms, wb_buf_fmt->pixel_format);
+ if (!msm_fmt) {
+ pr_err("%s: Unsupported Color Format %d\n", __func__,
+ wb_buf_fmt->pixel_format);
+ return;
+ }
+
+ fmt = to_mdp_format(msm_fmt);
+ chroma_samp = fmt->chroma_sample;
+
+ if (MDP_FORMAT_IS_YUV(fmt)) {
+ /* config csc */
+ DBG("YUV output %d, configure CSC",
+ fmt->base.pixel_format);
+ wb_csc_setup(mdp5_kms, mdp5_wb_encoder->wb_id,
+ mdp_get_default_csc_cfg(CSC_RGB2YUV));
+ opmode |= MDP5_WB_DST_OP_MODE_CSC_EN |
+ MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(
+ DATA_FORMAT_RGB) |
+ MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(
+ DATA_FORMAT_YUV);
+
+ switch (chroma_samp) {
+ case CHROMA_420:
+ case CHROMA_H2V1:
+ opmode |= MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN;
+ break;
+ case CHROMA_H1V2:
+ default:
+ pr_err("unsupported wb chroma samp=%d\n", chroma_samp);
+ return;
+ }
+ }
+
+ dst_format = MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(chroma_samp) |
+ MDP5_WB_DST_FORMAT_WRITE_PLANES(fmt->fetch_type) |
+ MDP5_WB_DST_FORMAT_DSTC3_OUT(fmt->bpc_a) |
+ MDP5_WB_DST_FORMAT_DSTC2_OUT(fmt->bpc_r) |
+ MDP5_WB_DST_FORMAT_DSTC1_OUT(fmt->bpc_b) |
+ MDP5_WB_DST_FORMAT_DSTC0_OUT(fmt->bpc_g) |
+ COND(fmt->unpack_tight, MDP5_WB_DST_FORMAT_PACK_TIGHT) |
+ MDP5_WB_DST_FORMAT_PACK_COUNT(fmt->unpack_count - 1) |
+ MDP5_WB_DST_FORMAT_DST_BPP(fmt->cpp - 1);
+
+ if (fmt->bpc_a || fmt->alpha_enable) {
+ dst_format |= MDP5_WB_DST_FORMAT_DSTC3_EN;
+ if (!fmt->alpha_enable)
+ dst_format |= MDP5_WB_DST_FORMAT_DST_ALPHA_X;
+ }
+
+ pattern = MDP5_WB_DST_PACK_PATTERN_ELEMENT3(fmt->unpack[3]) |
+ MDP5_WB_DST_PACK_PATTERN_ELEMENT2(fmt->unpack[2]) |
+ MDP5_WB_DST_PACK_PATTERN_ELEMENT1(fmt->unpack[1]) |
+ MDP5_WB_DST_PACK_PATTERN_ELEMENT0(fmt->unpack[0]);
+
+ /* get the stride info from WB device */
+ ystride0 = MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(wb_buf_fmt->pitches[0]) |
+ MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(wb_buf_fmt->pitches[1]);
+ ystride1 = MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(wb_buf_fmt->pitches[2]) |
+ MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(wb_buf_fmt->pitches[3]);
+
+ /* get the output resolution from WB device */
+ outsize = MDP5_WB_OUT_SIZE_DST_H(wb_buf_fmt->height) |
+ MDP5_WB_OUT_SIZE_DST_W(wb_buf_fmt->width);
+
+ mdp5_write(mdp5_kms, REG_MDP5_WB_ALPHA_X_VALUE(wb_id), 0xFF);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST_FORMAT(wb_id), dst_format);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST_OP_MODE(wb_id), opmode);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST_PACK_PATTERN(wb_id), pattern);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST_YSTRIDE0(wb_id), ystride0);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_DST_YSTRIDE1(wb_id), ystride1);
+ mdp5_write(mdp5_kms, REG_MDP5_WB_OUT_SIZE(wb_id), outsize);
+
+ mdp5_crtc_set_intf(encoder->crtc, &mdp5_wb_encoder->intf);
+
+ /* program the dst address */
+ buf = msm_wb_dequeue_buf(wb, MSM_WB_BUF_Q_FREE);
+ /*
+ * If no free buffer is available, the only possibility is that
+ * the WB connector has gone offline. The user app will be
+ * notified by a udev event and should stop rendering soon,
+ * so don't do anything here.
+ */
+ if (!buf) {
+ pr_warn("%s: No buffer available\n", __func__);
+ return;
+ }
+
+ /* Last step of mode set: set up dst address */
+ msm_wb_queue_buf(wb, buf, MSM_WB_BUF_Q_ACTIVE);
+ mdp5_wb_encoder_addr_setup(encoder, buf);
+}
+
+static void mdp5_wb_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_wb_encoder *mdp5_wb_encoder = to_mdp5_wb_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct msm_wb *wb = get_wb(encoder);
+ struct msm_wb_buffer *buf;
+
+ DBG("Disable wb encoder");
+
+ if (WARN_ON(!mdp5_wb_encoder->enabled))
+ return;
+
+ mdp5_ctl_set_encoder_state(ctl, false);
+
+ mdp_irq_unregister(&mdp5_kms->base,
+ &mdp5_wb_encoder->wb_vblank);
+
+ /* move any active buffers back to the free buffer queue */
+ while ((buf = msm_wb_dequeue_buf(wb, MSM_WB_BUF_Q_ACTIVE))
+ != NULL)
+ msm_wb_queue_buf(wb, buf, MSM_WB_BUF_Q_FREE);
+
+ msm_wb_update_encoder_state(wb, false);
+ bs_set(mdp5_wb_encoder, 0);
+
+ mdp5_wb_encoder->enabled = false;
+}
+
+static void mdp5_wb_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_wb_encoder *mdp5_wb_encoder = to_mdp5_wb_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct msm_wb *wb = get_wb(encoder);
+
+ DBG("Enable wb encoder");
+
+ if (WARN_ON(mdp5_wb_encoder->enabled))
+ return;
+
+ bs_set(mdp5_wb_encoder, 1);
+ mdp_irq_register(&mdp5_kms->base,
+ &mdp5_wb_encoder->wb_vblank);
+
+ mdp5_ctl_set_encoder_state(ctl, true);
+ msm_wb_update_encoder_state(wb, true);
+
+ mdp5_wb_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp5_wb_encoder_helper_funcs = {
+ .mode_fixup = mdp5_wb_encoder_mode_fixup,
+ .mode_set = mdp5_wb_encoder_mode_set,
+ .disable = mdp5_wb_encoder_disable,
+ .enable = mdp5_wb_encoder_enable,
+};
+
+static void mdp5_wb_encoder_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_wb_encoder *mdp5_wb_encoder =
+ container_of(irq, struct mdp5_wb_encoder, wb_vblank);
+ struct mdp5_kms *mdp5_kms = get_kms(&mdp5_wb_encoder->base);
+ struct msm_wb *wb = get_wb(&mdp5_wb_encoder->base);
+ u32 wb_id = mdp5_wb_encoder->wb_id;
+ struct msm_wb_buffer *new_buf, *buf;
+ u32 reg_val;
+
+ DBG("wb id %d", wb_id);
+
+ reg_val = mdp5_read(mdp5_kms, REG_MDP5_WB_DST0_ADDR(wb_id));
+ buf = msm_wb_dequeue_buf(wb, MSM_WB_BUF_Q_ACTIVE);
+ if (WARN_ON(!buf || (reg_val != buf->iova[0]))) {
+ if (!buf)
+ pr_err("%s: no active buffer\n", __func__);
+ else
+ pr_err("%s: current addr %x expect %x\n",
+ __func__, reg_val, buf->iova[0]);
+ return;
+ }
+
+ /* retrieve the free buffer */
+ new_buf = msm_wb_dequeue_buf(wb, MSM_WB_BUF_Q_FREE);
+ if (!new_buf) {
+ pr_info("%s: No buffer is available\n", __func__);
+ /* reuse current active buffer */
+ new_buf = buf;
+ } else {
+ msm_wb_buf_captured(wb, buf, false);
+ }
+
+ /* Update the address anyway to trigger the WB flush */
+ msm_wb_queue_buf(wb, new_buf, MSM_WB_BUF_Q_ACTIVE);
+ mdp5_wb_encoder_addr_setup(&mdp5_wb_encoder->base, new_buf);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp5_wb_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_wb_encoder *mdp5_wb_encoder;
+ int ret;
+
+ DBG("Init writeback encoder");
+
+ mdp5_wb_encoder = kzalloc(sizeof(*mdp5_wb_encoder), GFP_KERNEL);
+ if (!mdp5_wb_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memcpy(&mdp5_wb_encoder->intf, intf, sizeof(mdp5_wb_encoder->intf));
+ encoder = &mdp5_wb_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp5_wb_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+ drm_encoder_helper_add(encoder, &mdp5_wb_encoder_helper_funcs);
+
+ mdp5_wb_encoder->wb_vblank.irq = mdp5_wb_encoder_vblank_irq;
+ mdp5_wb_encoder->wb_vblank.irqmask = intf2vblank(0, intf);
+
+ bs_init(mdp5_wb_encoder);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_wb_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 5ae4039..2d3428c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -88,7 +88,7 @@ struct mdp_format {
uint8_t unpack[4];
bool alpha_enable, unpack_tight;
uint8_t cpp, unpack_count;
- enum mdp_sspp_fetch_type fetch_type;
+ enum mdp_fetch_type fetch_type;
enum mdp_chroma_samp_type chroma_sample;
};
#define to_mdp_format(x) container_of(x, struct mdp_format, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.c b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.c
new file mode 100644
index 0000000..5c40ec9
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.c
@@ -0,0 +1,319 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp_wb.h"
+#include "msm_kms.h"
+#include "../mdp_kms.h"
+
+struct msm_wb_priv_data {
+ bool streaming;
+
+ struct msm_wb_buf_format fmt;
+ /* buf queue */
+ struct msm_wb_buf_queue vidq;
+ spinlock_t vidq_lock;
+
+ /* wait queue to sync between v4l2 and drm during stream off */
+ bool encoder_on;
+ wait_queue_head_t encoder_state_wq;
+};
+
+void msm_wb_update_encoder_state(struct msm_wb *wb, bool enable)
+{
+ wb->priv_data->encoder_on = enable;
+ wake_up_all(&wb->priv_data->encoder_state_wq);
+}
+
+struct msm_wb_buf_format *msm_wb_get_buf_format(struct msm_wb *wb)
+{
+ return &wb->priv_data->fmt;
+}
+
+int msm_wb_set_buf_format(struct msm_wb *wb, u32 pixel_fmt,
+ u32 width, u32 height)
+{
+ struct msm_drm_private *priv = wb->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ const struct msm_format *msm_fmt;
+ const struct mdp_format *mdp_fmt;
+ struct msm_wb_buf_format *fmt = &wb->priv_data->fmt;
+
+ msm_fmt = kms->funcs->get_format(kms, pixel_fmt);
+ if (!msm_fmt) {
+ pr_err("%s: Unsupported Color Format %d\n", __func__,
+ pixel_fmt);
+ return -EINVAL;
+ }
+
+ mdp_fmt = to_mdp_format(msm_fmt);
+
+ fmt->pixel_format = pixel_fmt;
+ fmt->width = width;
+ fmt->height = height;
+ DBG("Set format %x width %d height %d", pixel_fmt, width, height);
+
+ switch (mdp_fmt->fetch_type) {
+ case MDP_PLANE_INTERLEAVED:
+ fmt->plane_num = 1;
+ fmt->pitches[0] = width * mdp_fmt->cpp;
+ break;
+ case MDP_PLANE_PLANAR:
+ fmt->plane_num = 3;
+ fmt->pitches[0] = width;
+ fmt->pitches[1] = width;
+ fmt->pitches[2] = width;
+ if (mdp_fmt->alpha_enable) {
+ fmt->plane_num = 4;
+ fmt->pitches[3] = width;
+ }
+ break;
+ case MDP_PLANE_PSEUDO_PLANAR:
+ fmt->plane_num = 2;
+ fmt->pitches[0] = width;
+ switch (mdp_fmt->chroma_sample) {
+ case CHROMA_H2V1:
+ case CHROMA_420:
+ fmt->pitches[1] = width/2;
+ break;
+ case CHROMA_H1V2:
+ fmt->pitches[1] = width;
+ break;
+ default:
+ pr_err("%s: Not supported fmt\n", __func__);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+void msm_wb_queue_buf(struct msm_wb *wb, struct msm_wb_buffer *wb_buf,
+ enum msm_wb_buf_queue_type type)
+{
+ unsigned long flags;
+ struct list_head *q;
+
+ if (type == MSM_WB_BUF_Q_FREE)
+ q = &wb->priv_data->vidq.free;
+ else
+ q = &wb->priv_data->vidq.active;
+
+ if (type == MSM_WB_BUF_Q_FREE)
+ mdp5_wb_encoder_buf_prepare(wb, wb_buf);
+
+ spin_lock_irqsave(&wb->priv_data->vidq_lock, flags);
+ list_add_tail(&wb_buf->list, q);
+ spin_unlock_irqrestore(&wb->priv_data->vidq_lock, flags);
+}
+
+struct msm_wb_buffer *msm_wb_dequeue_buf(struct msm_wb *wb,
+ enum msm_wb_buf_queue_type type)
+{
+ struct msm_wb_buffer *buf = NULL;
+ unsigned long flags;
+ struct list_head *q;
+
+ if (type == MSM_WB_BUF_Q_FREE)
+ q = &wb->priv_data->vidq.free;
+ else
+ q = &wb->priv_data->vidq.active;
+
+ spin_lock_irqsave(&wb->priv_data->vidq_lock, flags);
+ if (!list_empty(q)) {
+ buf = list_entry(q->next,
+ struct msm_wb_buffer, list);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&wb->priv_data->vidq_lock, flags);
+
+ return buf;
+}
+
+int msm_wb_start_streaming(struct msm_wb *wb)
+{
+ if (wb->priv_data->streaming) {
+ pr_err("%s: wb is streaming\n", __func__);
+ return -EBUSY;
+ }
+
+ DBG("Stream ON");
+ wb->priv_data->streaming = true;
+ msm_wb_connector_hotplug(wb, wb->priv_data->streaming);
+
+ return 0;
+}
+
+int msm_wb_stop_streaming(struct msm_wb *wb)
+{
+ int rc;
+ struct msm_wb_buffer *buf;
+
+ if (!wb->priv_data->streaming) {
+ pr_info("%s: wb is not streaming\n", __func__);
+ return -EINVAL;
+ }
+
+ DBG("Stream off");
+ wb->priv_data->streaming = false;
+ msm_wb_connector_hotplug(wb, wb->priv_data->streaming);
+
+ /* wait until drm encoder off */
+ rc = wait_event_timeout(wb->priv_data->encoder_state_wq,
+ !wb->priv_data->encoder_on, 10 * HZ);
+ if (!rc) {
+ pr_err("%s: wait encoder off timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* flush all active and free buffers */
+ while ((buf = msm_wb_dequeue_buf(wb, MSM_WB_BUF_Q_ACTIVE)) != NULL)
+ msm_wb_buf_captured(wb, buf, true);
+
+ while ((buf = msm_wb_dequeue_buf(wb, MSM_WB_BUF_Q_FREE)) != NULL)
+ msm_wb_buf_captured(wb, buf, true);
+
+ DBG("Stream turned off");
+
+ return 0;
+}
+
+int msm_wb_modeset_init(struct msm_wb *wb,
+ struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret;
+
+ wb->dev = dev;
+ wb->encoder = encoder;
+
+ wb->connector = msm_wb_connector_init(wb);
+ if (IS_ERR(wb->connector)) {
+ ret = PTR_ERR(wb->connector);
+ dev_err(dev->dev, "failed to create WB connector: %d\n", ret);
+ wb->connector = NULL;
+ goto fail;
+ }
+
+ priv->connectors[priv->num_connectors++] = wb->connector;
+
+ return 0;
+
+fail:
+ if (wb->connector) {
+ wb->connector->funcs->destroy(wb->connector);
+ wb->connector = NULL;
+ }
+
+ return ret;
+}
+
+static void msm_wb_destroy(struct msm_wb *wb)
+{
+ platform_set_drvdata(wb->pdev, NULL);
+}
+
+static struct msm_wb *msm_wb_init(struct platform_device *pdev)
+{
+ struct msm_wb *wb = NULL;
+
+ wb = devm_kzalloc(&pdev->dev, sizeof(*wb), GFP_KERNEL);
+ if (!wb)
+ return ERR_PTR(-ENOMEM);
+
+ wb->pdev = pdev;
+ wb->priv_data = devm_kzalloc(&pdev->dev, sizeof(*wb->priv_data),
+ GFP_KERNEL);
+ if (!wb->priv_data)
+ return ERR_PTR(-ENOMEM);
+
+ if (msm_wb_v4l2_init(wb)) {
+ pr_err("%s: wb v4l2 init failed\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ spin_lock_init(&wb->priv_data->vidq_lock);
+ INIT_LIST_HEAD(&wb->priv_data->vidq.active);
+ INIT_LIST_HEAD(&wb->priv_data->vidq.free);
+ init_waitqueue_head(&wb->priv_data->encoder_state_wq);
+
+ platform_set_drvdata(pdev, wb);
+
+ return wb;
+}
+
+static int msm_wb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_wb *wb;
+
+ wb = msm_wb_init(to_platform_device(dev));
+ if (IS_ERR(wb))
+ return PTR_ERR(wb);
+
+ priv->wb = wb;
+
+ return 0;
+}
+
+static void msm_wb_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+
+ if (priv->wb) {
+ msm_wb_destroy(priv->wb);
+ priv->wb = NULL;
+ }
+}
+
+static const struct component_ops msm_wb_ops = {
+ .bind = msm_wb_bind,
+ .unbind = msm_wb_unbind,
+};
+
+static int msm_wb_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &msm_wb_ops);
+}
+
+static int msm_wb_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &msm_wb_ops);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss_wb"},
+ {}
+};
+
+static struct platform_driver msm_wb_driver = {
+ .probe = msm_wb_dev_probe,
+ .remove = msm_wb_dev_remove,
+ .driver = {
+ .name = "wb_msm",
+ .of_match_table = dt_match,
+ },
+};
+
+void __init msm_wb_register(void)
+{
+ platform_driver_register(&msm_wb_driver);
+}
+
+void __exit msm_wb_unregister(void)
+{
+ platform_driver_unregister(&msm_wb_driver);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.h b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.h
new file mode 100644
index 0000000..4c75419
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb.h
@@ -0,0 +1,98 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WB_CONNECTOR_H__
+#define __WB_CONNECTOR_H__
+
+#include <linux/platform_device.h>
+#include "msm_kms.h"
+
+struct vb2_buffer;
+
+struct msm_wb_buffer {
+ struct list_head list;
+ struct drm_gem_object *planes[MAX_PLANE];
+ u32 pixel_format;
+ u32 offsets[MAX_PLANE];
+ u32 iova[MAX_PLANE];
+ struct vb2_buffer *vb; /* v4l2 buffer */
+};
+
+struct msm_wb_buf_format {
+ u32 pixel_format;
+ u32 width;
+ u32 height;
+ u32 plane_num;
+ u32 pitches[MAX_PLANE];
+};
+
+enum msm_wb_buf_queue_type {
+ MSM_WB_BUF_Q_FREE = 0,
+ MSM_WB_BUF_Q_ACTIVE,
+ MSM_WB_BUF_Q_NUM
+};
+
+struct msm_wb_buf_queue {
+ struct list_head free;
+ struct list_head active;
+};
+
+struct msm_wb_priv_data;
+struct msm_wb {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ void *wb_v4l2;
+
+ struct msm_wb_priv_data *priv_data;
+};
+
+int msm_wb_start_streaming(struct msm_wb *wb);
+int msm_wb_stop_streaming(struct msm_wb *wb);
+void mdp5_wb_encoder_buf_prepare(struct msm_wb *wb, struct msm_wb_buffer *buf);
+void msm_wb_connector_hotplug(struct msm_wb *wb, bool connected);
+int msm_wb_set_buf_format(struct msm_wb *wb, u32 pixel_fmt,
+ u32 width, u32 height);
+
+#ifdef CONFIG_DRM_MSM_WB
+struct msm_wb_buf_format *msm_wb_get_buf_format(struct msm_wb *wb);
+void msm_wb_queue_buf(struct msm_wb *wb, struct msm_wb_buffer *buf,
+ enum msm_wb_buf_queue_type type);
+struct msm_wb_buffer *msm_wb_dequeue_buf(struct msm_wb *wb,
+ enum msm_wb_buf_queue_type type);
+void msm_wb_update_encoder_state(struct msm_wb *wb, bool enable);
+void msm_wb_buf_captured(struct msm_wb *wb, struct msm_wb_buffer *buf,
+ bool discard);
+#else
+static inline struct msm_wb_buf_format *msm_wb_get_buf_format(
+ struct msm_wb *wb) { return NULL; }
+static inline void msm_wb_queue_buf(struct msm_wb *wb,
+ struct msm_wb_buffer *buf, enum msm_wb_buf_queue_type type) {}
+static inline struct msm_wb_buffer *msm_wb_dequeue_buf(struct msm_wb *wb,
+ enum msm_wb_buf_queue_type type) { return NULL; }
+static inline void msm_wb_update_encoder_state(struct msm_wb *wb,
+ bool enable) {}
+static inline void msm_wb_buf_captured(struct msm_wb *wb,
+ struct msm_wb_buffer *buf, bool discard) {}
+#endif
+
+int msm_wb_v4l2_init(struct msm_wb *wb);
+
+/*
+ * wb connector:
+ */
+struct drm_connector *msm_wb_connector_init(struct msm_wb *wb);
+
+#endif /* __WB_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_connector.c b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_connector.c
new file mode 100644
index 0000000..814dec9
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_connector.c
@@ -0,0 +1,157 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp_wb.h"
+
+struct msm_wb_connector {
+ struct drm_connector base;
+ struct msm_wb *wb;
+ struct work_struct hpd_work;
+ bool connected;
+};
+#define to_wb_connector(x) container_of(x, struct msm_wb_connector, base)
+
+static enum drm_connector_status msm_wb_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct msm_wb_connector *wb_connector = to_wb_connector(connector);
+
+ DBG("%s", wb_connector->connected ? "connected" : "disconnected");
+ return wb_connector->connected ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void msm_wb_hotplug_work(struct work_struct *work)
+{
+ struct msm_wb_connector *wb_connector =
+ container_of(work, struct msm_wb_connector, hpd_work);
+ struct drm_connector *connector = &wb_connector->base;
+
+ drm_kms_helper_hotplug_event(connector->dev);
+}
+
+void msm_wb_connector_hotplug(struct msm_wb *wb, bool connected)
+{
+ struct drm_connector *connector = wb->connector;
+ struct msm_wb_connector *wb_connector = to_wb_connector(connector);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+
+ wb_connector->connected = connected;
+ queue_work(priv->wq, &wb_connector->hpd_work);
+}
+
+static void msm_wb_connector_destroy(struct drm_connector *connector)
+{
+ struct msm_wb_connector *wb_connector = to_wb_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(wb_connector);
+}
+
+static int msm_wb_connector_get_modes(struct drm_connector *connector)
+{
+ struct msm_wb_connector *wb_connector = to_wb_connector(connector);
+ struct msm_wb *wb = wb_connector->wb;
+ struct msm_wb_buf_format *wb_buf_fmt;
+ struct drm_display_mode *mode = NULL;
+
+ wb_buf_fmt = msm_wb_get_buf_format(wb);
+ mode = drm_cvt_mode(connector->dev, wb_buf_fmt->width,
+ wb_buf_fmt->height, 60, false, false, false);
+
+ if (!mode) {
+ pr_err("%s: failed to create mode\n", __func__);
+ return -ENOTSUPP;
+ }
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static int msm_wb_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return 0;
+}
+
+static struct drm_encoder *
+msm_wb_connector_best_encoder(struct drm_connector *connector)
+{
+ struct msm_wb_connector *wb_connector = to_wb_connector(connector);
+
+ return wb_connector->wb->encoder;
+}
+
+static const struct drm_connector_funcs msm_wb_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = msm_wb_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = msm_wb_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+
+};
+
+static const struct drm_connector_helper_funcs msm_wb_connector_helper_funcs = {
+ .get_modes = msm_wb_connector_get_modes,
+ .mode_valid = msm_wb_connector_mode_valid,
+ .best_encoder = msm_wb_connector_best_encoder,
+};
+
+/* initialize connector */
+struct drm_connector *msm_wb_connector_init(struct msm_wb *wb)
+{
+ struct drm_connector *connector = NULL;
+ struct msm_wb_connector *wb_connector;
+ int ret;
+
+ wb_connector = kzalloc(sizeof(*wb_connector), GFP_KERNEL);
+ if (!wb_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ wb_connector->wb = wb;
+ connector = &wb_connector->base;
+
+ ret = drm_connector_init(wb->dev, connector, &msm_wb_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret)
+ goto fail;
+
+ drm_connector_helper_add(connector, &msm_wb_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_connector_register(connector);
+
+ ret = drm_mode_connector_attach_encoder(connector, wb->encoder);
+ if (ret)
+ goto fail;
+
+ INIT_WORK(&wb_connector->hpd_work, msm_wb_hotplug_work);
+
+ return connector;
+
+fail:
+ if (connector)
+ msm_wb_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_v4l2.c b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_v4l2.c
new file mode 100644
index 0000000..f6df4d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_wb/mdp_wb_v4l2.c
@@ -0,0 +1,522 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf2-core.h>
+
+#include "mdp_wb.h"
+
+#define MSM_WB_MODULE_NAME "msm_wb"
+#define MAX_WIDTH 2048
+#define MAX_HEIGHT 2048
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+#define dprintk(dev, level, fmt, arg...) \
+ v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
+
+struct msm_wb_fmt {
+ const char *name;
+ u32 fourcc; /* v4l2 format id */
+ u32 drm_fourcc; /* drm format id */
+ u8 depth;
+ u8 plane_cnt;
+ u32 plane_bpp[MAX_PLANE]; /* bits per pixel per plane */
+ bool is_yuv;
+};
+
+static const struct msm_wb_fmt formats[] = {
+ {
+ .name = "Y/CbCr 4:2:0",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .drm_fourcc = DRM_FORMAT_NV12,
+ .depth = 12,
+ .plane_cnt = 2,
+ .plane_bpp = {8, 4, 0, 0},
+ .is_yuv = true,
+ },
+ {
+ .name = "Y/CrCb 4:2:0",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .drm_fourcc = DRM_FORMAT_NV21,
+ .depth = 12,
+ .plane_cnt = 2,
+ .plane_bpp = {8, 4, 0, 0},
+ .is_yuv = true,
+ },
+ {
+ .name = "RGB24",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .drm_fourcc = DRM_FORMAT_RGB888,
+ .depth = 24,
+ .plane_cnt = 1,
+ .plane_bpp = {24, 0, 0, 0},
+ },
+ {
+ .name = "ARGB32",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .drm_fourcc = DRM_FORMAT_ARGB8888,
+ .depth = 32,
+ .plane_cnt = 1,
+ .plane_bpp = {32, 0, 0, 0},
+ },
+};
+
+/* buffer for one video frame */
+struct msm_wb_v4l2_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ struct msm_wb_buffer wb_buf;
+};
+
+struct msm_wb_v4l2_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+
+ struct mutex mutex;
+
+ /* video capture */
+ const struct msm_wb_fmt *fmt;
+ unsigned int width, height;
+
+ struct vb2_queue vb_vidq;
+
+ struct msm_wb *wb;
+};
+
+static const struct msm_wb_fmt *get_format(u32 fourcc)
+{
+ const struct msm_wb_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == fourcc)
+ break;
+ }
+
+ if (k == ARRAY_SIZE(formats))
+ return NULL;
+
+ return &formats[k];
+}
+
+void msm_wb_buf_captured(struct msm_wb *wb,
+ struct msm_wb_buffer *buf, bool discard)
+{
+ struct msm_wb_v4l2_buffer *v4l2_buf =
+ container_of(buf, struct msm_wb_v4l2_buffer, wb_buf);
+ enum vb2_buffer_state buf_state = discard ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE;
+
+ v4l2_get_timestamp(&v4l2_buf->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&v4l2_buf->vb, buf_state);
+}
+
+/* ------------------------------------------------------------------
+ DMA buffer operations
+ ------------------------------------------------------------------*/
+
+static int msm_wb_vb2_map_dmabuf(void *mem_priv)
+{
+ return 0;
+}
+
+static void msm_wb_vb2_unmap_dmabuf(void *mem_priv)
+{
+}
+
+static void *msm_wb_vb2_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct msm_wb_v4l2_dev *dev = alloc_ctx;
+ struct drm_device *drm_dev = dev->wb->dev;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&drm_dev->object_name_lock);
+ obj = drm_dev->driver->gem_prime_import(drm_dev, dbuf);
+ if (WARN_ON(!obj)) {
+ mutex_unlock(&drm_dev->object_name_lock);
+ v4l2_err(&dev->v4l2_dev, "Can't convert dmabuf to gem obj.\n");
+ return NULL;
+ }
+
+ if (obj->dma_buf) {
+ if (WARN_ON(obj->dma_buf != dbuf)) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma buf doesn't match.\n");
+ drm_gem_object_unreference_unlocked(obj);
+
+ obj = ERR_PTR(-EINVAL);
+ }
+ } else {
+ obj->dma_buf = dbuf;
+ }
+
+ mutex_unlock(&drm_dev->object_name_lock);
+
+ return obj;
+}
+
+static void msm_wb_vb2_detach_dmabuf(void *mem_priv)
+{
+ struct drm_gem_object *obj = mem_priv;
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+void *msm_wb_vb2_cookie(void *buf_priv)
+{
+ return buf_priv;
+}
+
+const struct vb2_mem_ops msm_wb_vb2_mem_ops = {
+ .map_dmabuf = msm_wb_vb2_map_dmabuf,
+ .unmap_dmabuf = msm_wb_vb2_unmap_dmabuf,
+ .attach_dmabuf = msm_wb_vb2_attach_dmabuf,
+ .detach_dmabuf = msm_wb_vb2_detach_dmabuf,
+ .cookie = msm_wb_vb2_cookie,
+};
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+#define MSM_WB_BUF_NUM_MIN 4
+
+static int msm_wb_vb2_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct msm_wb_v4l2_dev *dev = vb2_get_drv_priv(vq);
+ const struct msm_wb_fmt *wb_fmt = dev->fmt;
+ int i;
+
+ *nbuffers = MSM_WB_BUF_NUM_MIN;
+ *nplanes = wb_fmt->plane_cnt;
+
+ for (i = 0; i < *nplanes; i++) {
+ sizes[i] = (wb_fmt->plane_bpp[i] * dev->width *
+ dev->height) >> 3;
+ alloc_ctxs[i] = dev;
+ }
+
+ dprintk(dev, 1, "%s, count=%d, plane count=%d\n", __func__,
+ *nbuffers, *nplanes);
+
+ return 0;
+}
+
+static int msm_wb_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void msm_wb_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct msm_wb_v4l2_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct msm_wb_v4l2_buffer *buf =
+ container_of(vb, struct msm_wb_v4l2_buffer, vb);
+ struct msm_wb_buffer *wb_buf = &buf->wb_buf;
+ int i;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ /* pass the buffer to wb */
+ wb_buf->vb = vb;
+ wb_buf->pixel_format = dev->fmt->drm_fourcc;
+ for (i = 0; i < vb->num_planes; i++) {
+ wb_buf->offsets[i] = vb->v4l2_planes[i].data_offset;
+ wb_buf->planes[i] = vb2_plane_cookie(vb, i);
+ WARN_ON(!wb_buf->planes[i]);
+ }
+
+ msm_wb_queue_buf(dev->wb, wb_buf, MSM_WB_BUF_Q_FREE);
+}
+
+static int msm_wb_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct msm_wb_v4l2_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ return msm_wb_start_streaming(dev->wb);
+}
+
+/* abort streaming and wait for last buffer */
+static int msm_wb_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct msm_wb_v4l2_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ return msm_wb_stop_streaming(dev->wb);
+}
+
+static const struct vb2_ops msm_wb_vb2_ops = {
+ .queue_setup = msm_wb_vb2_queue_setup,
+ .buf_prepare = msm_wb_vb2_buf_prepare,
+ .buf_queue = msm_wb_vb2_buf_queue,
+ .start_streaming = msm_wb_vb2_start_streaming,
+ .stop_streaming = msm_wb_vb2_stop_streaming,
+};
+
+/* ------------------------------------------------------------------
+ IOCTL vidioc handling
+ ------------------------------------------------------------------*/
+static int msm_wb_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct msm_wb_v4l2_dev *dev = video_drvdata(file);
+
+ strcpy(cap->driver, "msm_wb");
+ strcpy(cap->card, "msm_wb");
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev->v4l2_dev.name);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int msm_wb_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct msm_wb_v4l2_dev *dev = video_drvdata(file);
+ const struct msm_wb_fmt *fmt;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ v4l2_err(&dev->v4l2_dev, "Invalid buf type %d.\n",
+ f->type);
+ return -EINVAL;
+ }
+
+ if (f->index >= ARRAY_SIZE(formats))
+ return -ERANGE;
+
+ fmt = &formats[f->index];
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int msm_wb_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct msm_wb_v4l2_dev *dev = video_drvdata(file);
+ int i;
+
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->fmt.pix_mp.width = dev->width;
+ f->fmt.pix_mp.height = dev->height;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.pixelformat = dev->fmt->fourcc;
+ f->fmt.pix_mp.num_planes = dev->fmt->plane_cnt;
+
+ for (i = 0; i < dev->fmt->plane_cnt; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline =
+ (dev->fmt->plane_bpp[i] * dev->width) >> 3;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage =
+ f->fmt.pix_mp.plane_fmt[i].bytesperline * dev->height;
+ }
+
+ if (dev->fmt->is_yuv)
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
+
+ return 0;
+}
+
+static int msm_wb_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct msm_wb_v4l2_dev *dev = video_drvdata(file);
+ const struct msm_wb_fmt *fmt;
+ int i;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ v4l2_err(&dev->v4l2_dev, "Invalid buf type %d.\n",
+ f->type);
+ return -EINVAL;
+ }
+
+ fmt = get_format(f->fmt.pix_mp.pixelformat);
+ if (!fmt) {
+ v4l2_err(&dev->v4l2_dev, "Fourcc format (0x%08x) unknown.\n",
+ f->fmt.pix_mp.pixelformat);
+ return -ENOTSUPP;
+ }
+
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ v4l_bound_align_image(&f->fmt.pix_mp.width, 48, MAX_WIDTH, 4,
+ &f->fmt.pix_mp.height, 32, MAX_HEIGHT, 4, 0);
+ f->fmt.pix_mp.num_planes = fmt->plane_cnt;
+
+ for (i = 0; i < dev->fmt->plane_cnt; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline =
+ (dev->fmt->plane_bpp[i] * f->fmt.pix_mp.width) >> 3;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage =
+ f->fmt.pix_mp.plane_fmt[i].bytesperline *
+ f->fmt.pix_mp.height;
+ }
+
+ if (fmt->is_yuv)
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
+
+ return 0;
+}
+
+static int msm_wb_vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct msm_wb_v4l2_dev *dev = video_drvdata(file);
+ struct msm_wb *wb = dev->wb;
+ struct vb2_queue *q = &dev->vb_vidq;
+ int rc;
+
+ rc = msm_wb_vidioc_try_fmt_vid_cap(file, priv, f);
+ if (rc < 0)
+ return rc;
+
+ if (vb2_is_busy(q)) {
+ v4l2_err(&dev->v4l2_dev, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ dev->fmt = get_format(f->fmt.pix_mp.pixelformat);
+ dev->width = f->fmt.pix_mp.width;
+ dev->height = f->fmt.pix_mp.height;
+
+ rc = msm_wb_set_buf_format(wb, dev->fmt->drm_fourcc,
+ dev->width, dev->height);
+ if (rc)
+ v4l2_err(&dev->v4l2_dev,
+ "Set format (0x%08x w:%x h:%x) failed.\n",
+ dev->fmt->drm_fourcc, dev->width, dev->height);
+
+ return rc;
+}
+
+static const struct v4l2_file_operations msm_wb_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops msm_wb_v4l2_ioctl_ops = {
+ .vidioc_querycap = msm_wb_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = msm_wb_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = msm_wb_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = msm_wb_vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = msm_wb_vidioc_s_fmt_vid_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct video_device msm_wb_v4l2_template = {
+ .name = "msm_wb",
+ .fops = &msm_wb_v4l2_fops,
+ .ioctl_ops = &msm_wb_v4l2_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+int msm_wb_v4l2_init(struct msm_wb *wb)
+{
+ struct msm_wb_v4l2_dev *dev;
+ struct video_device *vfd;
+ struct vb2_queue *q;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
+ "%s", MSM_WB_MODULE_NAME);
+ ret = v4l2_device_register(NULL, &dev->v4l2_dev);
+ if (ret)
+ goto free_dev;
+
+ /* default ARGB8888 640x480 */
+ dev->fmt = get_format(V4L2_PIX_FMT_RGB32);
+ dev->width = 640;
+ dev->height = 480;
+
+ /* initialize queue */
+ q = &dev->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_DMABUF;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct msm_wb_v4l2_buffer);
+ q->ops = &msm_wb_vb2_ops;
+ q->mem_ops = &msm_wb_vb2_mem_ops;
+ q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+
+ mutex_init(&dev->mutex);
+
+ vfd = &dev->vdev;
+ *vfd = msm_wb_v4l2_template;
+ vfd->debug = debug;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = q;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ goto unreg_dev;
+
+ dev->wb = wb;
+ wb->wb_v4l2 = dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(vfd));
+
+ return 0;
+
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+ kfree(dev);
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a72ed0a..61e76d0 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1032,6 +1032,7 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_wb_register();
msm_edp_register();
hdmi_register();
adreno_register();
@@ -1045,6 +1046,7 @@ static void __exit msm_drm_unregister(void)
hdmi_unregister();
adreno_unregister();
msm_edp_unregister();
+ msm_wb_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9e8d441..ceb551a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -53,6 +53,9 @@ struct msm_mmu;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct hdmi;
+struct msm_edp;
+struct msm_wb;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -82,6 +85,8 @@ struct msm_drm_private {
*/
struct msm_edp *edp;
+ struct msm_wb *wb;
+
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
@@ -224,18 +229,28 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
-struct hdmi;
int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
-struct msm_edp;
void __init msm_edp_register(void);
void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
+#ifdef CONFIG_DRM_MSM_WB
+void __init msm_wb_register(void);
+void __exit msm_wb_unregister(void);
+int msm_wb_modeset_init(struct msm_wb *wb, struct drm_device *dev,
+ struct drm_encoder *encoder);
+#else
+static inline void __init msm_wb_register(void) {}
+static inline void __exit msm_wb_unregister(void) {}
+static inline int msm_wb_modeset_init(struct msm_wb *wb, struct drm_device *dev,
+ struct drm_encoder *encoder) { return -EINVAL; }
+#endif
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index df60f65..9d553f2 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -212,6 +212,38 @@ static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
DBG("fbdev: get gamma");
}
+/* add all connectors to fb except wb connector */
+static int msm_drm_fb_add_connectors(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_VIRTUAL)
+ continue;
+
+ fb_helper_connector =
+ kzalloc(sizeof(*fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] =
+ fb_helper_connector;
+ }
+ return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i]);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
+}
+
static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
.gamma_set = msm_crtc_fb_gamma_set,
.gamma_get = msm_crtc_fb_gamma_get,
@@ -241,7 +273,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
goto fail;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
+ ret = msm_drm_fb_add_connectors(helper);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 49dea4f..11a1924 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -534,6 +534,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (msm_obj->pages)
drm_free_large(msm_obj->pages);
+ drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
vunmap(msm_obj->vaddr);
put_pages(obj);
--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project