[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250925223440.3539069-24-terry.bowman@amd.com>
Date: Thu, 25 Sep 2025 17:34:38 -0500
From: Terry Bowman <terry.bowman@....com>
To: <dave@...olabs.net>, <jonathan.cameron@...wei.com>,
<dave.jiang@...el.com>, <alison.schofield@...el.com>,
<dan.j.williams@...el.com>, <bhelgaas@...gle.com>, <shiju.jose@...wei.com>,
<ming.li@...omail.com>, <Smita.KoralahalliChannabasappa@....com>,
<rrichter@....com>, <dan.carpenter@...aro.org>,
<PradeepVineshReddy.Kodamati@....com>, <lukas@...ner.de>,
<Benjamin.Cheatham@....com>, <sathyanarayanan.kuppuswamy@...ux.intel.com>,
<linux-cxl@...r.kernel.org>, <alucerop@....com>, <ira.weiny@...el.com>
CC: <linux-kernel@...r.kernel.org>, <linux-pci@...r.kernel.org>,
<terry.bowman@....com>
Subject: [PATCH v12 23/25] CXL/PCI: Introduce CXL uncorrectable protocol error recovery
Populate the cxl_do_recovery() function with uncorrectable protocol error (UCE)
handling. Follow a design similar to that of the PCIe error driver's
pcie_do_recovery(). One difference is that cxl_do_recovery() will treat all UCEs
as fatal with a kernel panic. This is to prevent corruption of CXL memory.
Introduce cxl_walk_port(). Make this analogous to pci_walk_bridge() but walking
CXL ports instead. It iterates through the CXL topology from the device that
reported the error, down through the downstream CXL ports and endpoints.
Export pci_aer_clear_fatal_status() for CXL to use if a UCE is not found.
Signed-off-by: Terry Bowman <terry.bowman@....com>
---
Changes in v11->v12:
- Cleaned up port discovery in cxl_do_recovery() (Dave)
- Added PCI_EXP_TYPE_RC_END to type check in cxl_report_error_detected()
Changes in v10->v11:
- pci_ers_merge_results() - Move to earlier patch
---
drivers/cxl/core/ras.c | 111 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 111 insertions(+)
diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c
index 7e8d63c32d72..45f92defca64 100644
--- a/drivers/cxl/core/ras.c
+++ b/drivers/cxl/core/ras.c
@@ -443,8 +443,119 @@ void cxl_endpoint_port_init_ras(struct cxl_port *ep)
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_port_init_ras, "CXL");
+/*
+ * cxl_report_error_detected() - Per-device callback for cxl_walk_port().
+ * @dev: device to query; assumed to be a PCI device — TODO confirm the
+ *       walker only ever hands in PCI devices before to_pci_dev() below
+ * @data: pci_ers_result_t accumulator; each device's vote is merged in
+ *
+ * Always returns 0 so the topology walk visits every device.
+ */
+static int cxl_report_error_detected(struct device *dev, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ pci_ers_result_t vote, *result = data;
+
+ /* Hold the device lock while the bound driver reports its state */
+ guard(device)(dev);
+
+ if ((pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) ||
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)) {
+ /* Endpoints only vote when a CXL driver is bound */
+ if (!cxl_pci_drv_bound(pdev))
+ return 0;
+
+ vote = cxl_error_detected(dev);
+ } else {
+ /* Switch/root ports take the port error path */
+ vote = cxl_port_error_detected(dev);
+ }
+
+ /* Merge this device's vote into the aggregate recovery result */
+ *result = pci_ers_merge_result(*result, vote);
+
+ return 0;
+}
+
+/*
+ * match_port_by_parent_dport() - cxl_bus_type match callback.
+ * @dev: candidate device on the CXL bus
+ * @dport_dev: dport device to match against
+ *
+ * Returns nonzero when @dev is a cxl_port whose parent dport's device
+ * is @dport_dev.
+ */
+static int match_port_by_parent_dport(struct device *dev, const void *dport_dev)
+{
+	struct cxl_port *port;
+
+	if (!is_cxl_port(dev))
+		return 0;
+
+	port = to_cxl_port(dev);
+
+	/*
+	 * bus_find_device() iterates every device on the bus, including
+	 * the root CXL port which has no parent dport. Guard against the
+	 * NULL dereference and never match it.
+	 */
+	if (!port->parent_dport)
+		return 0;
+
+	return port->parent_dport->dport_dev == dport_dev;
+}
+
+/*
+ * cxl_walk_port() - Recursively visit a CXL port and all devices below it.
+ * @port_dev: device of the cxl_port to start from (may be NULL; no-op then)
+ * @cb: callback invoked for each visited device
+ * @userdata: opaque state passed through to @cb
+ *
+ * Analogous to pci_walk_bridge(): visits the port's upstream PCI device,
+ * then each dport device, recursing into the child port behind every
+ * dport. For an endpoint port the memdev's parent PCI device is visited
+ * last.
+ */
+static void cxl_walk_port(struct device *port_dev,
+			  int (*cb)(struct device *, void *),
+			  void *userdata)
+{
+	struct cxl_dport *dport = NULL;
+	struct cxl_port *port;
+	unsigned long index;
+
+	if (!port_dev)
+		return;
+
+	port = to_cxl_port(port_dev);
+	if (port->uport_dev && dev_is_pci(port->uport_dev))
+		cb(port->uport_dev, userdata);
+
+	xa_for_each(&port->dports, index, dport) {
+		struct device *child_port_dev __free(put_device) =
+			bus_find_device(&cxl_bus_type, &port->dev, dport->dport_dev,
+					match_port_by_parent_dport);
+
+		cb(dport->dport_dev, userdata);
+
+		/*
+		 * Recurse with the caller-supplied @cb rather than a
+		 * hardcoded callback so the walker stays generic.
+		 */
+		cxl_walk_port(child_port_dev, cb, userdata);
+	}
+
+	if (is_cxl_endpoint(port))
+		cb(port->uport_dev->parent, userdata);
+}
+
static void cxl_do_recovery(struct device *dev)
{
+ pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct cxl_port *port = NULL;
+
+ if ((pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) ||
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)) {
+ struct cxl_dport *dport;
+ struct cxl_port *rp_port __free(put_cxl_port) = find_cxl_port(&pdev->dev, &dport);
+
+ port = rp_port;
+
+ } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM) {
+ struct cxl_port *us_port __free(put_cxl_port) = find_cxl_port_by_uport(&pdev->dev);
+
+ port = us_port;
+
+ } else if ((pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) ||
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)) {
+ struct cxl_dev_state *cxlds;
+
+ if (!cxl_pci_drv_bound(pdev))
+ return;
+
+ cxlds = pci_get_drvdata(pdev);
+ port = cxlds->cxlmd->endpoint;
+ }
+
+ if (!port) {
+ dev_err(dev, "Failed to find the CXL device\n");
+ return;
+ }
+
+ cxl_walk_port(&port->dev, cxl_report_error_detected, &status);
+ if (status == PCI_ERS_RESULT_PANIC)
+ panic("CXL cachemem error.");
+
+ /*
+ * If we have native control of AER, clear error status in the device
+ * that detected the error. If the platform retained control of AER,
+ * it is responsible for clearing this status. In that case, the
+ * signaling device may not even be visible to the OS.
+ */
+ if (cxl_error_is_native(pdev)) {
+ pcie_clear_device_status(pdev);
+ pci_aer_clear_nonfatal_status(pdev);
+ pci_aer_clear_fatal_status(pdev);
+ }
}
static void cxl_handle_cor_ras(struct device *dev, u64 serial, void __iomem *ras_base)
--
2.34.1
Powered by blists - more mailing lists