[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YmqwlbLcFgTDUpkX@orome>
Date: Thu, 28 Apr 2022 17:19:49 +0200
From: Thierry Reding <thierry.reding@...il.com>
To: Cai Huoqing <cai.huoqing@...ux.dev>
Cc: Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...ux.ie>, linux-kernel@...r.kernel.org,
Christian König <christian.koenig@....com>,
linaro-mm-sig@...ts.linaro.org, dri-devel@...ts.freedesktop.org,
Sumit Semwal <sumit.semwal@...aro.org>,
linux-media@...r.kernel.org
Subject: Re: [PATCH v2 2/4] drm/nvdla: Add driver support for NVDLA
On Thu, Apr 28, 2022 at 05:18:13PM +0200, Thierry Reding wrote:
> On Tue, Apr 26, 2022 at 02:07:59PM +0800, Cai Huoqing wrote:
> [...]
> > diff --git a/drivers/gpu/drm/nvdla/nvdla_drv.c b/drivers/gpu/drm/nvdla/nvdla_drv.c
>
> I'll look at this from an architectural level and leave it to other
> experts to review the more technical things.
>
> [...]
> > +static struct nvdla_config nvdla_config_os_initial = {
> > + .atom_size = 32,
> > + .bdma_enable = true,
> > + .rubik_enable = true,
> > + .weight_compress_support = true,
> > +};
> > +
> > +static struct nvdla_config nvdla_config_small = {
> > + //.atom_size = 8,
> > + .atom_size = 32, // nv_large config
> > + .bdma_enable = false,
> > + .rubik_enable = false,
> > + .weight_compress_support = false,
> > +};
> > +
> [...]
> > +static union nvdla_operation_container operation_desc[NVDLA_OP_NUM][NVDLA_NUM_GROUPS];
> > +static union nvdla_surface_container surface_desc[NVDLA_OP_NUM][NVDLA_NUM_GROUPS];
> > +
> > +static struct nvdla_task_desc global_task;
> > +
> > +static struct nvdla_engine engine = {
> > + .processors[NVDLA_OP_BDMA] = {
> > + .name = "BDMA",
> > + .op_type = NVDLA_OP_BDMA,
> > + .program = nvdla_bdma_program,
> > + .enable = nvdla_bdma_enable,
> > + .set_producer = nvdla_bdma_set_producer,
> > + .is_ready = nvdla_bdma_is_ready,
> > + .dump_config = nvdla_bdma_dump_config,
> > + .rdma_check = nvdla_bdma_rdma_check,
> > + .consumer_ptr = 0,
> > + .roi_index = 0,
> > + .group_status = 0,
> > + .rdma_status = 0,
> > + .last_group = 1,
> > + .groups[0] = {
> > + .id = 0,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_BDMA][0],
> > + .surface_desc = &surface_desc[NVDLA_OP_BDMA][0],
> > + },
> > + .groups[1] = {
> > + .id = 1,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_BDMA][1],
> > + .surface_desc = &surface_desc[NVDLA_OP_BDMA][1],
> > + },
> > + },
> > + .processors[NVDLA_OP_CONV] = {
> > + .name = "Convolution",
> > + .op_type = NVDLA_OP_CONV,
> > + .program = nvdla_conv_program,
> > + .enable = nvdla_conv_enable,
> > + .set_producer = nvdla_conv_set_producer,
> > + .is_ready = nvdla_conv_is_ready,
> > + .dump_config = nvdla_conv_dump_config,
> > + .rdma_check = nvdla_conv_rdma_check,
> > + .consumer_ptr = 0,
> > + .roi_index = 0,
> > + .group_status = 0,
> > + .rdma_status = 0,
> > + .last_group = 1,
> > + .groups[0] = {
> > + .id = 0,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_CONV][0],
> > + .surface_desc = &surface_desc[NVDLA_OP_CONV][0],
> > + },
> > + .groups[1] = {
> > + .id = 1,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_CONV][1],
> > + .surface_desc = &surface_desc[NVDLA_OP_CONV][1],
> > + },
> > + },
> > + .processors[NVDLA_OP_SDP] = {
> > + .name = "SDP",
> > + .op_type = NVDLA_OP_SDP,
> > + .program = nvdla_sdp_program,
> > + .enable = nvdla_sdp_enable,
> > + .set_producer = nvdla_sdp_set_producer,
> > + .is_ready = nvdla_sdp_is_ready,
> > + .dump_config = nvdla_sdp_dump_config,
> > + .rdma_check = nvdla_sdp_rdma_check,
> > + .consumer_ptr = 0,
> > + .roi_index = 0,
> > + .group_status = 0,
> > + .rdma_status = 0,
> > + .last_group = 1,
> > + .groups[0] = {
> > + .id = 0,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_SDP][0],
> > + .surface_desc = &surface_desc[NVDLA_OP_SDP][0],
> > + },
> > + .groups[1] = {
> > + .id = 1,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_SDP][1],
> > + .surface_desc = &surface_desc[NVDLA_OP_SDP][1],
> > + },
> > + },
> > + .processors[NVDLA_OP_PDP] = {
> > + .name = "PDP",
> > + .op_type = NVDLA_OP_PDP,
> > + .program = nvdla_pdp_program,
> > + .enable = nvdla_pdp_enable,
> > + .set_producer = nvdla_pdp_set_producer,
> > + .is_ready = nvdla_pdp_is_ready,
> > + .dump_config = nvdla_pdp_dump_config,
> > + .rdma_check = nvdla_pdp_rdma_check,
> > + .consumer_ptr = 0,
> > + .roi_index = 0,
> > + .group_status = 0,
> > + .rdma_status = 0,
> > + .last_group = 1,
> > + .groups[0] = {
> > + .id = 0,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_PDP][0],
> > + .surface_desc = &surface_desc[NVDLA_OP_PDP][0],
> > + },
> > + .groups[1] = {
> > + .id = 1,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_PDP][1],
> > + .surface_desc = &surface_desc[NVDLA_OP_PDP][1],
> > + },
> > + },
> > + .processors[NVDLA_OP_CDP] = {
> > + .name = "CDP",
> > + .op_type = NVDLA_OP_CDP,
> > + .program = nvdla_cdp_program,
> > + .enable = nvdla_cdp_enable,
> > + .set_producer = nvdla_cdp_set_producer,
> > + .is_ready = nvdla_cdp_is_ready,
> > + .dump_config = nvdla_cdp_dump_config,
> > + .rdma_check = nvdla_cdp_rdma_check,
> > + .consumer_ptr = 0,
> > + .roi_index = 0,
> > + .group_status = 0,
> > + .rdma_status = 0,
> > + .last_group = 1,
> > + .groups[0] = {
> > + .id = 0,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_CDP][0],
> > + .surface_desc = &surface_desc[NVDLA_OP_CDP][0],
> > + },
> > + .groups[1] = {
> > + .id = 1,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_CDP][1],
> > + .surface_desc = &surface_desc[NVDLA_OP_CDP][1],
> > + },
> > + },
> > +
> > + .processors[NVDLA_OP_RUBIK] = {
> > + .name = "RUBIK",
> > + .op_type = NVDLA_OP_RUBIK,
> > + .program = nvdla_rubik_program,
> > + .enable = nvdla_rubik_enable,
> > + .set_producer = nvdla_rubik_set_producer,
> > + .is_ready = nvdla_rubik_is_ready,
> > + .dump_config = nvdla_rubik_dump_config,
> > + .rdma_check = nvdla_rubik_rdma_check,
> > + .consumer_ptr = 0,
> > + .roi_index = 0,
> > + .group_status = 0,
> > + .rdma_status = 0,
> > + .last_group = 1,
> > + .groups[0] = {
> > + .id = 0,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_RUBIK][0],
> > + .surface_desc = &surface_desc[NVDLA_OP_RUBIK][0],
> > + },
> > + .groups[1] = {
> > + .id = 1,
> > + .rdma_id = 0,
> > + .active = 0,
> > + .events = 0,
> > + .roi_index = 0,
> > + .is_rdma_needed = 0,
> > + .lut_index = -1,
> > + .operation_desc = &operation_desc[NVDLA_OP_RUBIK][1],
> > + .surface_desc = &surface_desc[NVDLA_OP_RUBIK][1],
> > + },
> > + },
> > +
> > +};
>
> These global variables aren't going to work because Tegra234 (Tegra194's
> successor) has two instances of NVDLA.
Small correction: I just recalled that even Tegra194 has two DLA
instances, so if we want both supported, we'll need to get rid of those
global variables for that chip already.
Thierry
Download attachment "signature.asc" of type "application/pgp-signature" (834 bytes)
Powered by blists - more mailing lists