[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260120122527.0000680a@huawei.com>
Date: Tue, 20 Jan 2026 12:25:27 +0000
From: Jonathan Cameron <Jonathan.Cameron@...wei.com>
To: <dan.j.williams@...el.com>
CC: Terry Bowman <terry.bowman@....com>, <dave@...olabs.net>,
<dave.jiang@...el.com>, <alison.schofield@...el.com>, <bhelgaas@...gle.com>,
<shiju.jose@...wei.com>, <ming.li@...omail.com>,
<Smita.KoralahalliChannabasappa@....com>, <rrichter@....com>,
<dan.carpenter@...aro.org>, <PradeepVineshReddy.Kodamati@....com>,
<lukas@...ner.de>, <Benjamin.Cheatham@....com>,
<sathyanarayanan.kuppuswamy@...ux.intel.com>, <linux-cxl@...r.kernel.org>,
<vishal.l.verma@...el.com>, <alucerop@....com>, <ira.weiny@...el.com>,
<linux-kernel@...r.kernel.org>, <linux-pci@...r.kernel.org>
Subject: Re: [PATCH v14 19/34] cxl/port: Fix devm resource leaks around with
dport management
On Mon, 19 Jan 2026 15:02:09 -0800
dan.j.williams@...el.com wrote:
> Jonathan Cameron wrote:
> [..]
> > Any thoughts on what I'm missing?
>
> Probably:
>
> cxl/port: Map Port component registers before switchport init
>
> ...which really should not be a distinct patch from the one that changes
> the ordering. I will send out a more patient series to settle this so
> Terry does not need to keep carrying it.
>
Ah. I stopped looking when I got to this patch :) Spot on - thanks!
Below is my suggestion of another approach. I think it ends up
a fair bit simpler, but you know this code a lot better than I do, so I may
be missing some problems! I like the simpler reaping. That approach looks
like it can be used elsewhere in this file.
Testing so far is one representative config only.
The patch you highlight above is squashed into this. At least one comment
I made on your patch applies here too (naughty me :)
From a95567d9e2e3809ca1953a9aec24324ae13f6145 Mon Sep 17 00:00:00 2001
From: Jonathan Cameron <Jonathan.Cameron@...wei.com>
Date: Tue, 20 Jan 2026 10:53:14 +0000
Subject: [PATCH] Alternative to Dan's approach for managing dport resources.
Create a devres group to manage a subset of resources.
On success, close the devres group, but also stash a copy in
struct cxl_dport so we can use it for dport reaping.
On error, release the group while it is still open.
Somewhat tested, but not what I'd consider exhaustive yet!
Used a 2 port switch, 2 EP test config.
1. Unbind endpoints, checked reap happened.
2. Unbind port1, check tear down happened.
3. Injected errors so that individual calls fail (stepping through each
   devres group that was created). In some cases setup still completes
   because the arrival of a different downstream device retriggers the
   flow and it succeeds on the retry, but that seems fine.
---
drivers/cxl/core/port.c | 76 ++++++++++++++++++++---------------
drivers/cxl/cxl.h | 2 +
drivers/cxl/cxlpci.h | 4 ++
tools/testing/cxl/test/mock.c | 1 +
4 files changed, 51 insertions(+), 32 deletions(-)
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index fef3aa0c6680..e3c53b2fc78f 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -778,7 +778,7 @@ static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map
return cxl_setup_regs(map);
}
-static int cxl_port_setup_regs(struct cxl_port *port,
+int cxl_port_setup_regs(struct cxl_port *port,
resource_size_t component_reg_phys)
{
if (dev_is_platform(port->uport_dev))
@@ -786,6 +786,7 @@ static int cxl_port_setup_regs(struct cxl_port *port,
return cxl_setup_comp_regs(&port->dev, &port->reg_map,
component_reg_phys);
}
+EXPORT_SYMBOL_NS_GPL(cxl_port_setup_regs, "CXL");
static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
resource_size_t component_reg_phys)
@@ -1065,7 +1066,12 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
dev_name(dup->dport_dev));
return -EBUSY;
}
-
+ //comment from original patch here.
+ if (!port->nr_dports) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc)
+ return rc;
+ }
rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
GFP_KERNEL);
if (rc)
@@ -1181,20 +1187,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (rc)
return ERR_PTR(rc);
- /*
- * Setup port register if this is the first dport showed up. Having
- * a dport also means that there is at least 1 active link.
- */
- if (port->nr_dports == 1 &&
- port->component_reg_phys != CXL_RESOURCE_NONE) {
- rc = cxl_port_setup_regs(port, port->component_reg_phys);
- if (rc) {
- xa_erase(&port->dports, (unsigned long)dport->dport_dev);
- return ERR_PTR(rc);
- }
- port->component_reg_phys = CXL_RESOURCE_NONE;
- }
-
get_device(dport_dev);
rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
if (rc)
@@ -1441,11 +1433,7 @@ static void delete_switch_port(struct cxl_port *port)
static void del_dport(struct cxl_dport *dport)
{
- struct cxl_port *port = dport->port;
-
- devm_release_action(&port->dev, cxl_dport_unlink, dport);
- devm_release_action(&port->dev, cxl_dport_remove, dport);
- devm_kfree(&port->dev, dport);
+ devres_release_group(&dport->port->dev, dport->devres_group);
}
static void del_dports(struct cxl_port *port)
@@ -1602,6 +1590,9 @@ static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
struct device *dport_dev)
{
struct cxl_dport *dport;
+ struct cxl_dport *new_dport;
+ struct device *host;
+ void *devres_group;
int rc;
device_lock_assert(&port->dev);
@@ -1615,21 +1606,31 @@ static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
return ERR_PTR(-EBUSY);
}
- struct cxl_dport *new_dport __free(del_cxl_dport) =
- devm_cxl_add_dport_by_dev(port, dport_dev);
- if (IS_ERR(new_dport))
- return new_dport;
+ if (is_cxl_root(port))
+ host = port->uport_dev;
+ else
+ host = &port->dev;
+
+ devres_group = devres_open_group(host, NULL, GFP_KERNEL);
+ if (!devres_group)
+ return ERR_PTR(-ENOMEM);
- cxl_switch_parse_cdat(new_dport);
+ if (port->nr_dports == 0) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc)
+ goto release_group;
- if (ida_is_empty(&port->decoder_ida)) {
rc = devm_cxl_switch_port_decoders_setup(port);
if (rc)
- return ERR_PTR(rc);
- dev_dbg(&port->dev, "first dport%d:%s added with decoders\n",
- new_dport->port_id, dev_name(dport_dev));
- return no_free_ptr(new_dport);
+ goto release_group;
+ }
+
+ new_dport = devm_cxl_add_dport_by_dev(port, dport_dev);
+ if (IS_ERR(new_dport)) {
+ rc = PTR_ERR(new_dport);
+ goto release_group;
}
+ cxl_switch_parse_cdat(new_dport);
/* New dport added, update the decoder targets */
device_for_each_child(&port->dev, new_dport, update_decoder_targets);
@@ -1637,7 +1638,18 @@ static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
dev_dbg(&port->dev, "dport%d:%s added\n", new_dport->port_id,
dev_name(dport_dev));
- return no_free_ptr(new_dport);
+ /*
+ * Stash the group for use during reaping when all downstream devices
+ * go away.
+ */
+ new_dport->devres_group = devres_group;
+ devres_close_group(host, devres_group);
+
+ return new_dport;
+
+release_group:
+ devres_release_group(host, devres_group);
+ return ERR_PTR(rc);
}
static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev,
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index c796c3db36e0..855f7629f5c4 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -688,6 +688,7 @@ struct cxl_rcrb_info {
* @coord: access coordinates (bandwidth and latency performance attributes)
* @link_latency: calculated PCIe downstream latency
* @gpf_dvsec: Cached GPF port DVSEC
+ * @devres_group: Used to simplify reaping ports.
*/
struct cxl_dport {
struct device *dport_dev;
@@ -700,6 +701,7 @@ struct cxl_dport {
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
long link_latency;
int gpf_dvsec;
+ void *devres_group;
};
/**
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 1d526bea8431..a3fbedb66dbb 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -132,4 +132,8 @@ void read_cdat_data(struct cxl_port *port);
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state);
+
+int cxl_port_setup_regs(struct cxl_port *port,
+ resource_size_t component_reg_phys);
+
#endif /* __CXL_PCI_H__ */
Powered by blists - more mailing lists