Message-ID: <7e9a44be-7360-44dc-8a6a-7b899514f979@intel.com>
Date: Wed, 10 Dec 2025 10:44:22 -0700
From: Dave Jiang <dave.jiang@...el.com>
To: Robert Richter <rrichter@....com>,
Alison Schofield <alison.schofield@...el.com>,
Vishal Verma <vishal.l.verma@...el.com>, Ira Weiny <ira.weiny@...el.com>,
Dan Williams <dan.j.williams@...el.com>,
Jonathan Cameron <jonathan.cameron@...wei.com>,
Davidlohr Bueso <dave@...olabs.net>
Cc: linux-cxl@...r.kernel.org, linux-kernel@...r.kernel.org,
Gregory Price <gourry@...rry.net>,
"Fabio M. De Francesco" <fabio.m.de.francesco@...ux.intel.com>,
Terry Bowman <terry.bowman@....com>, Joshua Hahn <joshua.hahnjy@...il.com>
Subject: Re: [PATCH v8 13/13] cxl: Disable HPA/SPA translation handlers for
Normalized addressing

On 12/9/25 11:06 AM, Robert Richter wrote:
> The root decoder implements the callbacks hpa_to_spa and spa_to_hpa to
> translate between Host Physical Addresses (HPA) and System Physical
> Addresses (SPA). The callbacks are needed in cases where HPA != SPA to
> convert addresses for tracing and error handling and to set up Poison
> injection. Currently this is used for XOR interleaving.
>
> In AMD Zen5 systems with Normalized addressing, the addresses of
> endpoints are not SPAs, so those callbacks need to be implemented.
>
> Now, since ACPI PRM translation could be expensive in tracing or error
> handling code paths, do not enable this direction of translation
> (hpa_to_spa) yet, to avoid heavy use of the PRM. Instead, mark the HPA
> invalid and return an error in this case.
>
> The spa_to_hpa callback will be used in region_offset_to_dpa_result()
> to determine the endpoint by its position in the interleaving
> chunk. With Normalized addressing, the order of endpoints in the
> interleaving chunk is implementation defined. Do not use this approach
> and return an error instead.
>
> Disable both HPA/SPA translation handlers for Normalized addressing by
> returning an error (ULLONG_MAX).
>
> Signed-off-by: Robert Richter <rrichter@....com>
Reviewed-by: Dave Jiang <dave.jiang@...el.com>
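
One note for readers skimming the thread: because these callbacks return
a u64 address, failure is signalled with the ULLONG_MAX sentinel rather
than a negative errno. A minimal standalone sketch of that convention
(the names below are hypothetical stand-ins, not the in-tree API):

#include <limits.h>
#include <stdio.h>

typedef unsigned long long u64;

/* Stand-in for a disabled hpa_to_spa handler: always fails. */
static u64 example_hpa_to_spa(u64 hpa)
{
        (void)hpa;
        return ULLONG_MAX;      /* "no valid translation" sentinel */
}

int main(void)
{
        u64 spa = example_hpa_to_spa(0x1000);

        /* Callers must treat ULLONG_MAX as "mark the address invalid". */
        if (spa == ULLONG_MAX)
                printf("translation unavailable, HPA marked invalid\n");
        else
                printf("spa = %#llx\n", spa);
        return 0;
}
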
> ---
>  drivers/cxl/core/atl.c    | 35 +++++++++++++++++++++++++++++++++++
>  drivers/cxl/core/region.c | 21 ++++++++++++++++++++-
>  drivers/cxl/cxl.h         |  1 +
>  3 files changed, 56 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/cxl/core/atl.c b/drivers/cxl/core/atl.c
> index 4bd2289b8bbb..9d05a83bde73 100644
> --- a/drivers/cxl/core/atl.c
> +++ b/drivers/cxl/core/atl.c
> @@ -57,6 +57,39 @@ static u64 prm_cxl_dpa_spa(struct pci_dev *pci_dev, u64 dpa)
>          return spa;
>  }
> 
> +static u64 atl_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
> +{
> +        /*
> +         * PRM translation could be expensive in tracing or error
> +         * handling code paths. Avoid this for now and return an
> +         * error instead.
> +         */
> +
> +        return ULLONG_MAX;
> +}
> +
> +static u64 atl_spa_to_hpa(struct cxl_root_decoder *cxlrd, u64 spa)
> +{
> +        /*
> +         * The callback will be used in region_offset_to_dpa_result()
> +         * to determine the endpoint by its position in the
> +         * interleaving chunk. With Normalized addressing, the order
> +         * of endpoints in the interleaving chunk is implementation
> +         * defined. Do not use this approach and return an error
> +         * instead.
> +         */
> +
> +        return ULLONG_MAX;
> +}
> +
> +static int cxl_prm_setup_region(struct cxl_region *cxlr)
> +{
> +        cxlr->cxlrd->ops.hpa_to_spa = atl_hpa_to_spa;
> +        cxlr->cxlrd->ops.spa_to_hpa = atl_spa_to_hpa;
> +
> +        return 0;
> +}
> +
>  static int cxl_prm_setup_root(struct cxl_root *cxl_root, void *data)
>  {
>          struct cxl_region_context *ctx = data;
> @@ -183,6 +216,8 @@ static int cxl_prm_setup_root(struct cxl_root *cxl_root, void *data)
>          ctx->interleave_ways = ways;
>          ctx->interleave_granularity = gran;
> 
> +        cxl_root->ops.translation_setup_region = cxl_prm_setup_region;
> +
>          dev_dbg(&cxld->dev,
>                  "address mapping found for %s (hpa -> spa): %#llx+%#llx -> %#llx+%#llx ways:%d granularity:%d\n",
>                  dev_name(cxlmd->dev.parent), base, len, hpa_range.start,
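
To restate the wiring in the hunk above in plain terms:
cxl_prm_setup_root() only publishes the region-setup hook; the actual
translation handlers are installed per root decoder once a region is
created. A standalone sketch of that two-stage pattern (simplified
stand-in types, not the CXL structures):

#include <stddef.h>
#include <stdio.h>

typedef unsigned long long u64;

struct decoder_ops {
        u64 (*hpa_to_spa)(u64 hpa);
        u64 (*spa_to_hpa)(u64 spa);
};

struct region {
        struct decoder_ops ops;         /* per-decoder translation ops */
};

struct root_ops {
        int (*translation_setup_region)(struct region *r);
};

static u64 disabled_translate(u64 addr)
{
        (void)addr;
        return ~0ULL;                   /* ULLONG_MAX: disabled */
}

/* Stage 2, analogous to cxl_prm_setup_region(): install the handlers. */
static int example_setup_region(struct region *r)
{
        r->ops.hpa_to_spa = disabled_translate;
        r->ops.spa_to_hpa = disabled_translate;
        return 0;
}

int main(void)
{
        /* Stage 1, analogous to cxl_prm_setup_root(): publish the hook. */
        struct root_ops root = {
                .translation_setup_region = example_setup_region,
        };
        struct region r = { { NULL, NULL } };

        if (root.translation_setup_region)
                root.translation_setup_region(&r);

        printf("handlers installed: %s\n",
               r.ops.hpa_to_spa ? "yes" : "no");
        return 0;
}
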
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index 2c070c7c7bfe..130af1cf95b7 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -623,6 +623,21 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
>  }
>  static DEVICE_ATTR_RO(mode);
> 
> +static int cxl_region_setup_translation(struct cxl_region *cxlr)
> +{
> +        struct cxl_port *root = to_cxl_port(cxlr->dev.parent->parent);
> +        struct cxl_root *cxl_root;
> +
> +        if (!root || !is_cxl_root(root))
> +                return 0;
> +
> +        cxl_root = to_cxl_root(root);
> +        if (!cxl_root || !cxl_root->ops.translation_setup_region)
> +                return 0;
> +
> +        return cxl_root->ops.translation_setup_region(cxlr);
> +}
> +
>  static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
>  {
>          struct cxl_root_decoder *cxlrd = cxlr->cxlrd;
> @@ -666,7 +681,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
>          p->res = res;
>          p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
> 
> -        return 0;
> +        return cxl_region_setup_translation(cxlr);
>  }
> 
>  static void cxl_region_iomem_release(struct cxl_region *cxlr)
> @@ -3663,6 +3678,10 @@ static int __construct_region(struct cxl_region *cxlr,
>          p->interleave_granularity = ctx->interleave_granularity;
>          p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
> 
> +        rc = cxl_region_setup_translation(cxlr);
> +        if (rc)
> +                return rc;
> +
>          rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
>          if (rc)
>                  return rc;
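
The guard chain in cxl_region_setup_translation() keeps the hook
strictly optional: roots that never install it (HPA == SPA platforms)
fall through with success, so both call sites above stay no-ops there.
Roughly, with simplified stand-in types:

#include <stdio.h>

struct region;                          /* opaque in this sketch */

struct root {
        int (*translation_setup_region)(struct region *r);
};

/* Mirrors the shape of cxl_region_setup_translation(): a missing
 * hook is success, not an error. */
static int example_setup_translation(struct root *root, struct region *r)
{
        if (!root || !root->translation_setup_region)
                return 0;               /* nothing to do */

        return root->translation_setup_region(r);
}

int main(void)
{
        /* No root ops installed: the call is a successful no-op. */
        printf("rc = %d\n", example_setup_translation(NULL, NULL));
        return 0;
}
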
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 20b0fd43fa7b..f4623728a815 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -653,6 +653,7 @@ struct cxl_root_ops {
>                           struct access_coordinate *coord, int entries,
>                           int *qos_class);
>          int (*translation_setup_root)(struct cxl_root *cxl_root, void *data);
> +        int (*translation_setup_region)(struct cxl_region *cxlr);
>  };
> 
>  /**
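
Worth noting on the cxl.h change: like translation_setup_root above it,
the new op is optional. Roots that never populate it leave
cxl_region_setup_translation() as a no-op, so existing HPA == SPA
platforms see no behavior change in the region setup path.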