Message-ID: <52330c2afbf6bab7c06fbdd2b5cb9b2a4e24319b@intel.com>
Date: Mon, 08 Sep 2025 14:03:45 +0300
From: Jani Nikula <jani.nikula@...ux.intel.com>
To: syyang <syyang@...tium.com>, robh@...nel.org, krzk+dt@...nel.org,
conor+dt@...nel.org, andrzej.hajda@...el.com, neil.armstrong@...aro.org,
rfoss@...nel.org
Cc: Laurent.pinchart@...asonboard.com, jonas@...boo.se,
jernej.skrabec@...il.com, devicetree@...r.kernel.org,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
yangsunyun1993@...il.com, syyang <syyang@...tium.com>
Subject: Re: [PATCH v1 2/2] This patch adds a new DRM bridge driver for the
Lontium LT9611C chip.
On Wed, 03 Sep 2025, syyang <syyang@...tium.com> wrote:
> +static int lt9611c_read_edid(struct lt9611c *lt9611c)
> +{
> + struct device *dev = lt9611c->dev;
> + int ret, i, bytes_to_copy, offset = 0;
> + u8 packets_num;
> + u8 read_edid_data_cmd[5] = {0x52, 0x48, 0x33, 0x3A, 0x00};
> + u8 return_edid_data[37];
> + u8 read_edid_byte_num_cmd[5] = {0x52, 0x48, 0x32, 0x3A, 0x00};
> + u8 return_edid_byte_num[6];
> +
> + ret = i2c_read_write_flow(lt9611c, read_edid_byte_num_cmd, 5, return_edid_byte_num, 6);
> + if (ret) {
> + dev_err(dev, "Failed to read EDID byte number\n");
> + lt9611c->edid_valid = false;
> + return ret;
> + }
> +
> + lt9611c->edid_len = (return_edid_byte_num[4] << 8) | return_edid_byte_num[5];
> +
> + if (!lt9611c->edid_buf || lt9611c->edid_len > (lt9611c->edid_valid ?
> + lt9611c->edid_len : 0)) {
> + kfree(lt9611c->edid_buf);
> + lt9611c->edid_buf = kzalloc(lt9611c->edid_len, GFP_KERNEL);
> + if (!lt9611c->edid_buf) {
> + dev_err(dev, "Failed to allocate EDID buffer\n");
> + lt9611c->edid_len = 0;
> + lt9611c->edid_valid = false;
> + return -ENOMEM;
> + }
> + }
If you want to do caching, store a struct drm_edid pointer at a higher
level, not dumb buffers at the low level. Might be easier to start off
without any caching.
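For reference, roughly what I have in mind, as an untested sketch (the
->drm_edid member name is made up for the example):

        /* in struct lt9611c, instead of edid_buf/edid_len/edid_valid */
        const struct drm_edid *drm_edid;        /* cached EDID, NULL when not read */

        /* in .edid_read */
        if (!lt9611c->drm_edid)
                lt9611c->drm_edid = drm_edid_read_custom(connector,
                                                         lt9611c_get_edid_block,
                                                         lt9611c);

        return lt9611c->drm_edid ? drm_edid_dup(lt9611c->drm_edid) : NULL;

        /* drop the cache on hot-unplug / remove */
        drm_edid_free(lt9611c->drm_edid);
        lt9611c->drm_edid = NULL;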
> +
> + packets_num = (lt9611c->edid_len % 32) ? (lt9611c->edid_len / 32 + 1) :
> + (lt9611c->edid_len / 32);
> + for (i = 0; i < packets_num; i++) {
> + read_edid_data_cmd[4] = (u8)i;
> + ret = i2c_read_write_flow(lt9611c, read_edid_data_cmd, 5, return_edid_data, 37);
> + if (ret) {
> + dev_err(dev, "Failed to read EDID packet %d\n", i);
> + lt9611c->edid_valid = false;
> + return -EIO;
> + }
> + offset = i * 32;
> + bytes_to_copy = min(32, lt9611c->edid_len - offset);
> + memcpy(lt9611c->edid_buf + offset, &return_edid_data[5], bytes_to_copy);
> + }
And really, you wouldn't have to implement the custom get edid block at
all, if you added a proper i2c adapter implementation and passed that to
drm_edid_read_ddc().
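Rough, untested sketch of what that could look like, serving DDC reads
from the firmware commands. ddc_offset is a member I made up for the
sketch, lt9611c_read_edid()/edid_buf are from your patch, and E-DDC
segment addressing for blocks >= 2 is not handled here:

static int lt9611c_ddc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
        struct lt9611c *lt9611c = i2c_get_adapdata(adap);
        int i, ret;

        guard(mutex)(&lt9611c->ocm_lock);

        for (i = 0; i < num; i++) {
                struct i2c_msg *msg = &msgs[i];

                if (msg->addr != DDC_ADDR)
                        return -EOPNOTSUPP;

                if (!(msg->flags & I2C_M_RD)) {
                        /* DDC "set offset" write */
                        if (msg->len != 1)
                                return -EOPNOTSUPP;
                        lt9611c->ddc_offset = msg->buf[0];
                        continue;
                }

                /* refresh the firmware EDID buffer at the start of a read */
                if (!lt9611c->ddc_offset) {
                        ret = lt9611c_read_edid(lt9611c);
                        if (ret)
                                return ret;
                }

                if (lt9611c->ddc_offset + msg->len > lt9611c->edid_len)
                        return -EIO;

                memcpy(msg->buf, lt9611c->edid_buf + lt9611c->ddc_offset, msg->len);
                lt9611c->ddc_offset += msg->len;
        }

        return num;
}

static u32 lt9611c_ddc_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C;
}

static const struct i2c_algorithm lt9611c_ddc_algo = {
        .master_xfer = lt9611c_ddc_xfer,
        .functionality = lt9611c_ddc_func,
};

Register the adapter with i2c_add_adapter() at probe, then .edid_read
becomes a drm_edid_read_ddc() call on it and the custom get edid block
hook goes away.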
> +
> + lt9611c->edid_valid = true;
> +
> + return ret;
> +}
> +
> +static int lt9611c_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
> +{
> + struct lt9611c *lt9611c = data;
> + struct device *dev = lt9611c->dev;
> + unsigned int total_blocks;
> + int ret;
> +
> + if (len > 128)
> + return -EINVAL;
> +
> + guard(mutex)(&lt9611c->ocm_lock);
> + if (block == 0 || !lt9611c->edid_valid) {
> + ret = lt9611c_read_edid(lt9611c);
> + if (ret) {
> + dev_err(dev, "EDID read failed\n");
> + return ret;
> + }
> + }
> +
> + total_blocks = lt9611c->edid_len / 128;
> + if (!total_blocks) {
> + dev_err(dev, "No valid EDID blocks\n");
> + return -EIO;
> + }
> +
> + if (block >= total_blocks) {
> + dev_err(dev, "Requested block %u exceeds total blocks %u\n",
> + block, total_blocks);
> + return -EINVAL;
> + }
> +
> + memcpy(buf, lt9611c->edid_buf + block * 128, len);
The get edid block hook is supposed to read *one* block. Can't you
implement reading one block? This now reads the entire edid for every
block.
Again, better yet, i2c adapter implementation.
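If you do keep the custom hook for now, it should at least fetch only
the 32-byte packets covering the requested block, e.g. something like
the below (untested; assumes the firmware accepts arbitrary packet
indices, and skips validating against the byte-count command for
brevity):

static int lt9611c_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
        struct lt9611c *lt9611c = data;
        u8 cmd[5] = {0x52, 0x48, 0x33, 0x3A, 0x00};
        u8 rsp[37];
        unsigned int first = block * EDID_LENGTH / 32;
        unsigned int i;
        int ret;

        if (len > EDID_LENGTH)
                return -EINVAL;

        guard(mutex)(&lt9611c->ocm_lock);

        for (i = 0; i < DIV_ROUND_UP(len, 32); i++) {
                cmd[4] = first + i;
                ret = i2c_read_write_flow(lt9611c, cmd, 5, rsp, sizeof(rsp));
                if (ret)
                        return ret;
                memcpy(buf + i * 32, &rsp[5], min_t(size_t, 32, len - i * 32));
        }

        return 0;
}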
> +
> + return 0;
> +}
> +
> +static const struct drm_edid *lt9611c_bridge_edid_read(struct drm_bridge *bridge,
> + struct drm_connector *connector)
> +{
> + struct lt9611c *lt9611c = bridge_to_lt9611c(bridge);
> +
> + usleep_range(10000, 20000);
> + return drm_edid_read_custom(connector, lt9611c_get_edid_block, lt9611c);
> +}
--
Jani Nikula, Intel