[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <8942558.lOV4Wx5bFT@senjougahara>
Date: Thu, 13 Nov 2025 13:36:24 +0900
From: Mikko Perttunen <mperttunen@...dia.com>
To: Krzysztof Kozlowski <krzk@...nel.org>, Rob Herring <robh@...nel.org>,
Conor Dooley <conor+dt@...nel.org>, Thierry Reding <treding@...dia.com>,
Thierry Reding <thierry.reding@...il.com>,
Jonathan Hunter <jonathanh@...dia.com>,
Prashant Gaikwad <pgaikwad@...dia.com>,
Michael Turquette <mturquette@...libre.com>, Stephen Boyd <sboyd@...nel.org>,
Dmitry Osipenko <digetx@...il.com>, MyungJoo Ham <myungjoo.ham@...sung.com>,
Kyungmin Park <kyungmin.park@...sung.com>,
Chanwoo Choi <cw00.choi@...sung.com>, Svyatoslav Ryhel <clamor95@...il.com>
Cc: linux-kernel@...r.kernel.org, devicetree@...r.kernel.org,
linux-tegra@...r.kernel.org, linux-clk@...r.kernel.org,
linux-pm@...r.kernel.org
Subject:
Re: [PATCH v3 04/11] memory: tegra: implement EMEM regs and ICC ops for
Tegra114
On Monday, September 15, 2025 5:01 PM Svyatoslav Ryhel wrote:
> Prepare Internal Memory Controller for introduction of External Memory
> Controller.
>
> Signed-off-by: Svyatoslav Ryhel <clamor95@...il.com>
> ---
> drivers/memory/tegra/tegra114.c | 193 ++++++++++++++++++++++++++++++++
> 1 file changed, 193 insertions(+)
>
> diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
> index d03a5d162dbd..c615857f7fad 100644
> --- a/drivers/memory/tegra/tegra114.c
> +++ b/drivers/memory/tegra/tegra114.c
> @@ -3,6 +3,7 @@
> * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
> */
>
> +#include <linux/device.h>
> #include <linux/of.h>
> #include <linux/mm.h>
>
> @@ -1165,6 +1166,195 @@ static const struct tegra_mc_reset tegra114_mc_resets[] = {
> TEGRA114_MC_RESET(VI, 0x200, 0x204, 17),
> };
>
> +static void tegra114_mc_tune_client_latency(struct tegra_mc *mc,
> + const struct tegra_mc_client *client,
> + unsigned int bandwidth_mbytes_sec)
> +{
> + u32 arb_tolerance_compensation_nsec, arb_tolerance_compensation_div;
> + unsigned int fifo_size = client->fifo_size;
> + u32 arb_nsec, la_ticks, value;
> +
> + /* see 20.3.1.1 Client Configuration in Tegra4 TRM v01p */
> + if (bandwidth_mbytes_sec)
> + arb_nsec = fifo_size * NSEC_PER_USEC / bandwidth_mbytes_sec;
> + else
> + arb_nsec = U32_MAX;
> +
> +	/*
> +	 * Latency allowance should be set with consideration for the module's
> +	 * latency tolerance and internal buffering capabilities.
> + *
> +	 * Display memory clients use isochronous transfers and have very low
> +	 * tolerance to belated transfers. Hence we need to compensate the
> + * memory arbitration imperfection for them in order to prevent FIFO
> + * underflow condition when memory bus is busy.
> + *
> + * VI clients also need a stronger compensation.
> + */
> + switch (client->swgroup) {
> + case TEGRA_SWGROUP_MPCORE:
> + case TEGRA_SWGROUP_PTC:
> + /*
> + * We always want lower latency for these clients, hence
> + * don't touch them.
> + */
> + return;
> +
> + case TEGRA_SWGROUP_DC:
> + case TEGRA_SWGROUP_DCB:
> + arb_tolerance_compensation_nsec = 1050;
> + arb_tolerance_compensation_div = 2;
> + break;
> +
> + case TEGRA_SWGROUP_VI:
> + arb_tolerance_compensation_nsec = 1050;
> + arb_tolerance_compensation_div = 1;
> + break;
> +
> + default:
> + arb_tolerance_compensation_nsec = 150;
> + arb_tolerance_compensation_div = 1;
> + break;
> + }
> +
> + if (arb_nsec > arb_tolerance_compensation_nsec)
> + arb_nsec -= arb_tolerance_compensation_nsec;
> + else
> + arb_nsec = 0;
> +
> + arb_nsec /= arb_tolerance_compensation_div;
> +
> + /*
> + * Latency allowance is a number of ticks a request from a particular
> + * client may wait in the EMEM arbiter before it becomes a high-priority
> + * request.
> + */
> + la_ticks = arb_nsec / mc->tick;
> + la_ticks = min(la_ticks, client->regs.la.mask);
> +
> + value = mc_readl(mc, client->regs.la.reg);
> + value &= ~(client->regs.la.mask << client->regs.la.shift);
> + value |= la_ticks << client->regs.la.shift;
> + mc_writel(mc, value, client->regs.la.reg);
> +}
> +
> +static int tegra114_mc_icc_set(struct icc_node *src, struct icc_node *dst)
> +{
> + struct tegra_mc *mc = icc_provider_to_tegra_mc(src->provider);
> + const struct tegra_mc_client *client = &mc->soc->clients[src->id];
> + u64 peak_bandwidth = icc_units_to_bps(src->peak_bw);
> +
> + /*
> + * Skip pre-initialization that is done by icc_node_add(), which sets
> + * bandwidth to maximum for all clients before drivers are loaded.
> + *
> + * This doesn't make sense for us because we don't have drivers for all
> + * clients and it's okay to keep configuration left from bootloader
> + * during boot, at least for today.
> + */
> + if (src == dst)
> + return 0;
> +
> + /* convert bytes/sec to megabytes/sec */
> + do_div(peak_bandwidth, 1000000);
> +
> + tegra114_mc_tune_client_latency(mc, client, peak_bandwidth);
> +
> + return 0;
> +}
> +
> +static int tegra114_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
> + u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
> +{
> + /*
> + * ISO clients need to reserve extra bandwidth up-front because
> + * there could be high bandwidth pressure during initial filling
> + * of the client's FIFO buffers. Secondly, we need to take into
> + * account impurities of the memory subsystem.
> + */
> + if (tag & TEGRA_MC_ICC_TAG_ISO)
> + peak_bw = tegra_mc_scale_percents(peak_bw, 400);
> +
> + *agg_avg += avg_bw;
> + *agg_peak = max(*agg_peak, peak_bw);
> +
> + return 0;
> +}
> +
> +static struct icc_node_data *
> +tegra114_mc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
> +{
> + struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
> + const struct tegra_mc_client *client;
> + unsigned int i, idx = spec->args[0];
> + struct icc_node_data *ndata;
> + struct icc_node *node;
> +
> + list_for_each_entry(node, &mc->provider.nodes, node_list) {
> + if (node->id != idx)
> + continue;
> +
> + ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
> + if (!ndata)
> + return ERR_PTR(-ENOMEM);
> +
> + client = &mc->soc->clients[idx];
> + ndata->node = node;
> +
> + switch (client->swgroup) {
> + case TEGRA_SWGROUP_DC:
> + case TEGRA_SWGROUP_DCB:
> + case TEGRA_SWGROUP_PTC:
> + case TEGRA_SWGROUP_VI:
> + /* these clients are isochronous by default */
> + ndata->tag = TEGRA_MC_ICC_TAG_ISO;
> + break;
> +
> + default:
> + ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
> + break;
> + }
> +
> + return ndata;
> + }
> +
> + for (i = 0; i < mc->soc->num_clients; i++) {
> + if (mc->soc->clients[i].id == idx)
> + return ERR_PTR(-EPROBE_DEFER);
> + }
> +
> + dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
> +
> + return ERR_PTR(-EINVAL);
> +}
> +
> +static const struct tegra_mc_icc_ops tegra114_mc_icc_ops = {
> + .xlate_extended = tegra114_mc_of_icc_xlate_extended,
> + .aggregate = tegra114_mc_icc_aggreate,
> + .set = tegra114_mc_icc_set,
> +};
> +
> +static const unsigned long tegra114_mc_emem_regs[] = {
> + MC_EMEM_ARB_CFG,
> + MC_EMEM_ARB_OUTSTANDING_REQ,
> + MC_EMEM_ARB_TIMING_RCD,
> + MC_EMEM_ARB_TIMING_RP,
> + MC_EMEM_ARB_TIMING_RC,
> + MC_EMEM_ARB_TIMING_RAS,
> + MC_EMEM_ARB_TIMING_FAW,
> + MC_EMEM_ARB_TIMING_RRD,
> + MC_EMEM_ARB_TIMING_RAP2PRE,
> + MC_EMEM_ARB_TIMING_WAP2PRE,
> + MC_EMEM_ARB_TIMING_R2R,
> + MC_EMEM_ARB_TIMING_W2W,
> + MC_EMEM_ARB_TIMING_R2W,
> + MC_EMEM_ARB_TIMING_W2R,
> + MC_EMEM_ARB_DA_TURNS,
> + MC_EMEM_ARB_DA_COVERS,
> + MC_EMEM_ARB_MISC0,
> + MC_EMEM_ARB_RING1_THROTTLE,
> +};
> +
> const struct tegra_mc_soc tegra114_mc_soc = {
> .clients = tegra114_mc_clients,
> .num_clients = ARRAY_SIZE(tegra114_mc_clients),
> @@ -1172,10 +1362,13 @@ const struct tegra_mc_soc tegra114_mc_soc = {
> .atom_size = 32,
> .client_id_mask = 0x7f,
> .smmu = &tegra114_smmu_soc,
> + .emem_regs = tegra114_mc_emem_regs,
> + .num_emem_regs = ARRAY_SIZE(tegra114_mc_emem_regs),
> .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
> MC_INT_DECERR_EMEM,
> .reset_ops = &tegra_mc_reset_ops_common,
> .resets = tegra114_mc_resets,
> .num_resets = ARRAY_SIZE(tegra114_mc_resets),
> + .icc_ops = &tegra114_mc_icc_ops,
> .ops = &tegra30_mc_ops,
> };
>
Reviewed-by: Mikko Perttunen <mperttunen@...dia.com>
Powered by blists - more mailing lists