Message-ID: <12a3ea6e-56c0-4d7b-9d75-43c13c03e9a9@amd.com>
Date: Thu, 28 Aug 2025 09:57:50 +0100
From: Alejandro Lucero Palau <alucerop@....com>
To: Terry Bowman <terry.bowman@....com>, dave@...olabs.net,
jonathan.cameron@...wei.com, dave.jiang@...el.com,
alison.schofield@...el.com, dan.j.williams@...el.com, bhelgaas@...gle.com,
shiju.jose@...wei.com, ming.li@...omail.com,
Smita.KoralahalliChannabasappa@....com, rrichter@....com,
dan.carpenter@...aro.org, PradeepVineshReddy.Kodamati@....com,
lukas@...ner.de, Benjamin.Cheatham@....com,
sathyanarayanan.kuppuswamy@...ux.intel.com, linux-cxl@...r.kernel.org,
ira.weiny@...el.com
Cc: linux-kernel@...r.kernel.org, linux-pci@...r.kernel.org
Subject: Re: [PATCH v11 05/23] cxl: Move CXL driver RCH error handling into
CONFIG_CXL_RCH_RAS conditional block
On 8/27/25 02:35, Terry Bowman wrote:
> Restricted CXL Host (RCH) protocol error handling uses a procedure distinct
> from the CXL Virtual Hierarchy (VH) handling. This is because of the
> differences in the RCH and VH topologies. Improve maintainability and add
> the ability to enable/disable RCH handling.
>
> Move and combine the RCH handling code into a single block that is
> conditionally compiled under the CONFIG_CXL_RCH_RAS kernel config option.
>
> Signed-off-by: Terry Bowman <terry.bowman@....com>
>
> ---
> v10->v11:
> - New patch
> ---
> drivers/cxl/core/ras.c | 175 +++++++++++++++++++++--------------------
> 1 file changed, 90 insertions(+), 85 deletions(-)
>
> diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c
> index 0875ce8116ff..f42f9a255ef8 100644
> --- a/drivers/cxl/core/ras.c
> +++ b/drivers/cxl/core/ras.c
> @@ -126,6 +126,7 @@ void cxl_ras_exit(void)
> cancel_work_sync(&cxl_cper_prot_err_work);
> }
>
> +#ifdef CONFIG_CXL_RCH_RAS
You are introducing CONFIG_CXL_RCH_RAS in the next patch. Is it correct
to use it here?
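
That said, it does still build at this point in the series: with no
Kconfig entry yet, the symbol is simply undefined, the preprocessor
selects the #else stubs, and the code above is compiled out until the
option lands in the next patch. A throwaway sketch of the mechanics (a
standalone file of my own, not driver code); build it with and without
-DCONFIG_CXL_RCH_RAS to see the two branches:

/* stub_pattern.c: illustrates the #ifdef/stub split used above */
#include <stdio.h>

#ifdef CONFIG_CXL_RCH_RAS
static void handle_rdport_errors(void)
{
	printf("real RCH handler\n");
}
#else
/* Symbol undefined: this stub is what actually gets compiled */
static inline void handle_rdport_errors(void)
{
	printf("stub, RCH handling disabled\n");
}
#endif

int main(void)
{
	handle_rdport_errors();
	return 0;
}

So my question is about the ordering and reviewability of the series
rather than about breaking the build or bisection.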
> static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
> {
> resource_size_t aer_phys;
> @@ -141,18 +142,6 @@ static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
> }
> }
>
> -static void cxl_dport_map_ras(struct cxl_dport *dport)
> -{
> - struct cxl_register_map *map = &dport->reg_map;
> - struct device *dev = dport->dport_dev;
> -
> - if (!map->component_map.ras.valid)
> - dev_dbg(dev, "RAS registers not found\n");
> - else if (cxl_map_component_regs(map, &dport->regs.component,
> - BIT(CXL_CM_CAP_CAP_ID_RAS)))
> - dev_dbg(dev, "Failed to map RAS capability.\n");
> -}
> -
> static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
> {
> void __iomem *aer_base = dport->regs.dport_aer;
> @@ -177,6 +166,95 @@ static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
> writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
> }
>
> +/*
> + * Copy the AER capability registers using 32 bit read accesses.
> + * This is necessary because RCRB AER capability is MMIO mapped. Clear the
> + * status after copying.
> + *
> + * @aer_base: base address of AER capability block in RCRB
> + * @aer_regs: destination for copying AER capability
> + */
> +static bool cxl_rch_get_aer_info(void __iomem *aer_base,
> + struct aer_capability_regs *aer_regs)
> +{
> + int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
> + u32 *aer_regs_buf = (u32 *)aer_regs;
> + int n;
> +
> + if (!aer_base)
> + return false;
> +
> + /* Use readl() to guarantee 32-bit accesses */
> + for (n = 0; n < read_cnt; n++)
> + aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
> +
> + writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
> + writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
> +
> + return true;
> +}
> +
> +/* Get AER severity. Return false if there is no error. */
> +static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
> + int *severity)
> +{
> + if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
> + if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
> + *severity = AER_FATAL;
> + else
> + *severity = AER_NONFATAL;
> + return true;
> + }
> +
> + if (aer_regs->cor_status & ~aer_regs->cor_mask) {
> + *severity = AER_CORRECTABLE;
> + return true;
> + }
> +
> + return false;
> +}
> +
> +static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
> +{
> + struct pci_dev *pdev = to_pci_dev(cxlds->dev);
> + struct aer_capability_regs aer_regs;
> + struct cxl_dport *dport;
> + int severity;
> +
> + struct cxl_port *port __free(put_cxl_port) =
> + cxl_pci_find_port(pdev, &dport);
> + if (!port)
> + return;
> +
> + if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
> + return;
> +
> + if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
> + return;
> +
> + pci_print_aer(pdev, severity, &aer_regs);
> + if (severity == AER_CORRECTABLE)
> + cxl_handle_cor_ras(cxlds, dport->regs.ras);
> + else
> + cxl_handle_ras(cxlds, dport->regs.ras);
> +}
> +#else
> +static inline void cxl_dport_map_rch_aer(struct cxl_dport *dport) { }
> +static inline void cxl_disable_rch_root_ints(struct cxl_dport *dport) { }
> +static inline void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
> +#endif
> +
> +static void cxl_dport_map_ras(struct cxl_dport *dport)
> +{
> + struct cxl_register_map *map = &dport->reg_map;
> + struct device *dev = dport->dport_dev;
> +
> + if (!map->component_map.ras.valid)
> + dev_dbg(dev, "RAS registers not found\n");
> + else if (cxl_map_component_regs(map, &dport->regs.component,
> + BIT(CXL_CM_CAP_CAP_ID_RAS)))
> + dev_dbg(dev, "Failed to map RAS capability.\n");
> +}
>
> /**
> * cxl_dport_init_ras_reporting - Setup CXL RAS report on this dport
> @@ -270,79 +348,6 @@ static bool cxl_handle_ras(struct cxl_dev_state *cxlds, void __iomem *ras_base)
> return true;
> }
>
> -/*
> - * Copy the AER capability registers using 32 bit read accesses.
> - * This is necessary because RCRB AER capability is MMIO mapped. Clear the
> - * status after copying.
> - *
> - * @aer_base: base address of AER capability block in RCRB
> - * @aer_regs: destination for copying AER capability
> - */
> -static bool cxl_rch_get_aer_info(void __iomem *aer_base,
> - struct aer_capability_regs *aer_regs)
> -{
> - int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
> - u32 *aer_regs_buf = (u32 *)aer_regs;
> - int n;
> -
> - if (!aer_base)
> - return false;
> -
> - /* Use readl() to guarantee 32-bit accesses */
> - for (n = 0; n < read_cnt; n++)
> - aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
> -
> - writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
> - writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
> -
> - return true;
> -}
> -
> -/* Get AER severity. Return false if there is no error. */
> -static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
> - int *severity)
> -{
> - if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
> - if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
> - *severity = AER_FATAL;
> - else
> - *severity = AER_NONFATAL;
> - return true;
> - }
> -
> - if (aer_regs->cor_status & ~aer_regs->cor_mask) {
> - *severity = AER_CORRECTABLE;
> - return true;
> - }
> -
> - return false;
> -}
> -
> -static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
> -{
> - struct pci_dev *pdev = to_pci_dev(cxlds->dev);
> - struct aer_capability_regs aer_regs;
> - struct cxl_dport *dport;
> - int severity;
> -
> - struct cxl_port *port __free(put_cxl_port) =
> - cxl_pci_find_port(pdev, &dport);
> - if (!port)
> - return;
> -
> - if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
> - return;
> -
> - if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
> - return;
> -
> - pci_print_aer(pdev, severity, &aer_regs);
> - if (severity == AER_CORRECTABLE)
> - cxl_handle_cor_ras(cxlds, dport->regs.ras);
> - else
> - cxl_handle_ras(cxlds, dport->regs.ras);
> -}
> -
> void cxl_cor_error_detected(struct pci_dev *pdev)
> {
> struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
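
For completeness, I assume the next patch adds an entry along these
lines (my guess at the shape and dependencies only; the actual entry
may differ):

config CXL_RCH_RAS
	bool "CXL Restricted CXL Host (RCH) protocol error handling"
	depends on CXL_PCI
	help
	  Handle CXL protocol errors on Restricted CXL Host (RCH),
	  i.e. CXL 1.1, topologies.

If so, a short note in the commit message saying the symbol is defined
by the following patch would help reviewers.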