Message-ID: <b4777025-0220-b1e4-f6f3-00d75ec8f0be@linaro.org>
Date: Thu, 22 Dec 2022 12:32:28 +0100
From: Krzysztof Kozlowski <krzysztof.kozlowski@...aro.org>
To: Sumit Gupta <sumitg@...dia.com>, treding@...dia.com,
dmitry.osipenko@...labora.com, viresh.kumar@...aro.org,
rafael@...nel.org, jonathanh@...dia.com, robh+dt@...nel.org,
linux-kernel@...r.kernel.org, linux-tegra@...r.kernel.org,
linux-pm@...r.kernel.org, devicetree@...r.kernel.org
Cc: sanjayc@...dia.com, ksitaraman@...dia.com, ishah@...dia.com,
bbasu@...dia.com
Subject: Re: [Patch v1 01/10] memory: tegra: add interconnect support for DRAM
scaling in Tegra234
On 20/12/2022 17:02, Sumit Gupta wrote:
> Add interconnect framework support to dynamically set the DRAM
> bandwidth based on requests from different clients. Both the MC and
> EMC drivers are added as ICC providers. The path for any request is:
> MC-Client[1-n] -> MC -> EMC -> EMEM/DRAM
>
> MC clients will request bandwidth from the MC driver, which will pass
> the Tegra ICC node holding the current request info to the EMC driver.
> The EMC driver will send the BPMP client ID, client type and bandwidth
> request info to the BPMP-FW, which sets the final DRAM frequency needed
> to achieve the requested bandwidth based on the passed parameters.
>
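For context on the consumer side: a minimal, untested sketch of how a client
could request DRAM bandwidth along this path via the standard interconnect
API (the "dram" path name and the bandwidth numbers below are made up for
illustration, not taken from this series):

	#include <linux/interconnect.h>

	static int example_request_dram_bw(struct device *dev)
	{
		struct icc_path *path;

		/* "dram" is an assumed interconnect name in the consumer's DT node */
		path = devm_of_icc_get(dev, "dram");
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* average/peak bandwidth; the values are only examples */
		return icc_set_bw(path, MBps_to_icc(1000), MBps_to_icc(2000));
	}
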
> Signed-off-by: Sumit Gupta <sumitg@...dia.com>
> ---
> drivers/memory/tegra/mc.c | 18 ++-
> drivers/memory/tegra/tegra186-emc.c | 166 ++++++++++++++++++++++++++++
> drivers/memory/tegra/tegra234.c | 101 ++++++++++++++++-
> include/soc/tegra/mc.h | 7 ++
> include/soc/tegra/tegra-icc.h | 72 ++++++++++++
> 5 files changed, 362 insertions(+), 2 deletions(-)
> create mode 100644 include/soc/tegra/tegra-icc.h
>
> diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
> index 592907546ee6..ff887fb03bce 100644
> --- a/drivers/memory/tegra/mc.c
> +++ b/drivers/memory/tegra/mc.c
> @@ -17,6 +17,7 @@
> #include <linux/sort.h>
>
> #include <soc/tegra/fuse.h>
> +#include <soc/tegra/tegra-icc.h>
>
> #include "mc.h"
>
> @@ -779,6 +780,7 @@ const char *const tegra_mc_error_names[8] = {
> */
> static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
> {
> + struct tegra_icc_node *tnode;
> struct icc_node *node;
> unsigned int i;
> int err;
> @@ -792,7 +794,11 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
> mc->provider.data = &mc->provider;
> mc->provider.set = mc->soc->icc_ops->set;
> mc->provider.aggregate = mc->soc->icc_ops->aggregate;
> - mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
> + mc->provider.get_bw = mc->soc->icc_ops->get_bw;
> + if (mc->soc->icc_ops->xlate)
> + mc->provider.xlate = mc->soc->icc_ops->xlate;
> + if (mc->soc->icc_ops->xlate_extended)
> + mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
>
> err = icc_provider_add(&mc->provider);
> if (err)
> @@ -814,6 +820,10 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
> goto remove_nodes;
>
> for (i = 0; i < mc->soc->num_clients; i++) {
> + tnode = kzalloc(sizeof(*tnode), GFP_KERNEL);
> + if (!tnode)
> + return -ENOMEM;
> +
> /* create MC client node */
> node = icc_node_create(mc->soc->clients[i].id);
> if (IS_ERR(node)) {
> @@ -828,6 +838,12 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
> err = icc_link_create(node, TEGRA_ICC_MC);
> if (err)
> goto remove_nodes;
> +
> + node->data = tnode;
Where is it freed?
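If there is no later teardown path for this, one option (just a sketch,
untested) would be to tie the allocation to the MC device so it is released
automatically when the driver unbinds:

	tnode = devm_kzalloc(mc->dev, sizeof(*tnode), GFP_KERNEL);
	if (!tnode)
		return -ENOMEM;
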
(...)
>
> struct tegra_mc_ops {
> @@ -238,6 +243,8 @@ struct tegra_mc {
> struct {
> struct dentry *root;
> } debugfs;
> +
> + struct tegra_icc_node *curr_tnode;
> };
>
> int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
> diff --git a/include/soc/tegra/tegra-icc.h b/include/soc/tegra/tegra-icc.h
> new file mode 100644
> index 000000000000..3855d8571281
> --- /dev/null
> +++ b/include/soc/tegra/tegra-icc.h
Why not in include/linux/?
> @@ -0,0 +1,72 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2022-2023 NVIDIA CORPORATION. All rights reserved.
> + */
> +
> +#ifndef MEMORY_TEGRA_ICC_H
This does not match the path/name.
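A guard derived from the include path would match better, e.g. (assuming the
header stays under include/soc/tegra/):

	#ifndef SOC_TEGRA_ICC_H
	#define SOC_TEGRA_ICC_H
	...
	#endif /* SOC_TEGRA_ICC_H */
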
> +#define MEMORY_TEGRA_ICC_H
> +
> +enum tegra_icc_client_type {
> + TEGRA_ICC_NONE,
> + TEGRA_ICC_NISO,
> + TEGRA_ICC_ISO_DISPLAY,
> + TEGRA_ICC_ISO_VI,
> + TEGRA_ICC_ISO_AUDIO,
> + TEGRA_ICC_ISO_VIFAL,
> +};
> +
> +struct tegra_icc_node {
> + struct icc_node *node;
> + struct tegra_mc *mc;
> + u32 bpmp_id;
> + u32 type;
> +};
> +
> +/* ICC ID's for MC client's used in BPMP */
> +#define TEGRA_ICC_BPMP_DEBUG 1
> +#define TEGRA_ICC_BPMP_CPU_CLUSTER0 2
> +#define TEGRA_ICC_BPMP_CPU_CLUSTER1 3
> +#define TEGRA_ICC_BPMP_CPU_CLUSTER2 4
> +#define TEGRA_ICC_BPMP_GPU 5
> +#define TEGRA_ICC_BPMP_CACTMON 6
> +#define TEGRA_ICC_BPMP_DISPLAY 7
> +#define TEGRA_ICC_BPMP_VI 8
> +#define TEGRA_ICC_BPMP_EQOS 9
> +#define TEGRA_ICC_BPMP_PCIE_0 10
> +#define TEGRA_ICC_BPMP_PCIE_1 11
> +#define TEGRA_ICC_BPMP_PCIE_2 12
> +#define TEGRA_ICC_BPMP_PCIE_3 13
> +#define TEGRA_ICC_BPMP_PCIE_4 14
> +#define TEGRA_ICC_BPMP_PCIE_5 15
> +#define TEGRA_ICC_BPMP_PCIE_6 16
> +#define TEGRA_ICC_BPMP_PCIE_7 17
> +#define TEGRA_ICC_BPMP_PCIE_8 18
> +#define TEGRA_ICC_BPMP_PCIE_9 19
> +#define TEGRA_ICC_BPMP_PCIE_10 20
> +#define TEGRA_ICC_BPMP_DLA_0 21
> +#define TEGRA_ICC_BPMP_DLA_1 22
> +#define TEGRA_ICC_BPMP_SDMMC_1 23
> +#define TEGRA_ICC_BPMP_SDMMC_2 24
> +#define TEGRA_ICC_BPMP_SDMMC_3 25
> +#define TEGRA_ICC_BPMP_SDMMC_4 26
> +#define TEGRA_ICC_BPMP_NVDEC 27
> +#define TEGRA_ICC_BPMP_NVENC 28
> +#define TEGRA_ICC_BPMP_NVJPG_0 29
> +#define TEGRA_ICC_BPMP_NVJPG_1 30
> +#define TEGRA_ICC_BPMP_OFAA 31
> +#define TEGRA_ICC_BPMP_XUSB_HOST 32
> +#define TEGRA_ICC_BPMP_XUSB_DEV 33
> +#define TEGRA_ICC_BPMP_TSEC 34
> +#define TEGRA_ICC_BPMP_VIC 35
> +#define TEGRA_ICC_BPMP_APE 36
> +#define TEGRA_ICC_BPMP_APEDMA 37
> +#define TEGRA_ICC_BPMP_SE 38
> +#define TEGRA_ICC_BPMP_ISP 39
> +#define TEGRA_ICC_BPMP_HDA 40
> +#define TEGRA_ICC_BPMP_VIFAL 41
> +#define TEGRA_ICC_BPMP_VI2FAL 42
> +#define TEGRA_ICC_BPMP_VI2 43
> +#define TEGRA_ICC_BPMP_RCE 44
> +#define TEGRA_ICC_BPMP_PVA 45
> +
> +#endif /* MEMORY_TEGRA_ICC_H */
Best regards,
Krzysztof