Message-ID: <73311003-6b8e-4140-935a-55bd63a723e6@intel.com>
Date: Tue, 16 Jul 2024 14:06:41 +0800
From: "Li, Ming4" <ming4.li@...el.com>
To: <alejandro.lucero-palau@....com>, <linux-cxl@...r.kernel.org>,
<netdev@...r.kernel.org>, <dan.j.williams@...el.com>,
<martin.habets@...inx.com>, <edward.cree@....com>, <davem@...emloft.net>,
<kuba@...nel.org>, <pabeni@...hat.com>, <edumazet@...gle.com>,
<richard.hughes@....com>
CC: Alejandro Lucero <alucerop@....com>
Subject: Re: [PATCH v2 09/15] cxl: define a driver interface for HPA free
space enumeration
On 7/16/2024 1:28 AM, alejandro.lucero-palau@....com wrote:
> From: Alejandro Lucero <alucerop@....com>
>
> CXL region creation involves allocating capacity from device DPA
> (device-physical-address space) and assigning it to decode a given HPA
> (host-physical-address space). Before determining how much DPA to
> allocate, the amount of available HPA must be determined. Also, not all
> HPA is created equal: some specifically targets RAM, some targets PMEM,
> some is prepared for device-memory flows like HDM-D and HDM-DB, and some
> is host-only (HDM-H).
>
> Wrap all of those concerns into an API that retrieves a root decoder
> (platform CXL window) that fits the specified constraints and the
> capacity available for a new region.
>
> Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f
>
> Signed-off-by: Alejandro Lucero <alucerop@....com>
> Co-developed-by: Dan Williams <dan.j.williams@...el.com>
> ---
> drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++
> drivers/cxl/cxl.h | 3 +
> drivers/cxl/cxlmem.h | 5 +
> drivers/net/ethernet/sfc/efx_cxl.c | 14 +++
> include/linux/cxl_accel_mem.h | 9 ++
> 5 files changed, 192 insertions(+)
>
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index 538ebd5a64fd..ca464bfef77b 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr)
> return 0;
> }
>
> +struct cxlrd_max_context {
> + struct device * const *host_bridges;
> + int interleave_ways;
> + unsigned long flags;
> + resource_size_t max_hpa;
> + struct cxl_root_decoder *cxlrd;
> +};
> +
> +static int find_max_hpa(struct device *dev, void *data)
> +{
> + struct cxlrd_max_context *ctx = data;
> + struct cxl_switch_decoder *cxlsd;
> + struct cxl_root_decoder *cxlrd;
> + struct resource *res, *prev;
> + struct cxl_decoder *cxld;
> + resource_size_t max;
> + int found;
> +
> + if (!is_root_decoder(dev))
> + return 0;
> +
> + cxlrd = to_cxl_root_decoder(dev);
> + cxld = &cxlrd->cxlsd.cxld;
> + if ((cxld->flags & ctx->flags) != ctx->flags) {
> + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n",
> + cxld->flags, ctx->flags);
> + return 0;
> + }
> +
> + /* A host bridge could have more interleave ways than an
> + * endpoint, couldn't it?
> + *
> + * What does interleave ways mean here in terms of the requestor?
> + * Why does the CFMWS have 0 interleave ways but the root port has 1?
> + */
> + if (cxld->interleave_ways != ctx->interleave_ways) {
> + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n");
> + return 0;
> + }
> +
> + cxlsd = &cxlrd->cxlsd;
> +
> + found = 0;
> + for (int i = 0; i < ctx->interleave_ways; i++)
> + for (int j = 0; j < ctx->interleave_ways; j++)
> + if (ctx->host_bridges[i] ==
> + cxlsd->target[j]->dport_dev) {
> + found++;
> + break;
> + }
> +
> + if (found != ctx->interleave_ways) {
> + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n");
> + return 0;
> + }
> +
> + /*
> + * Walk the root decoder resource range relying on cxl_region_rwsem to
> + * preclude sibling arrival/departure and find the largest free space
> + * gap.
> + */
> + lockdep_assert_held_read(&cxl_region_rwsem);
> + max = 0;
> + res = cxlrd->res->child;
> + if (!res)
> + max = resource_size(cxlrd->res);
> + else
> + max = 0;
> +
> + for (prev = NULL; res; prev = res, res = res->sibling) {
> + struct resource *next = res->sibling;
> + resource_size_t free = 0;
> +
> + if (!prev && res->start > cxlrd->res->start) {
> + free = res->start - cxlrd->res->start;
> + max = max(free, max);
> + }
> + if (prev && res->start > prev->end + 1) {
> + free = res->start - prev->end - 1;
> + max = max(free, max);
> + }
> + if (next && res->end + 1 < next->start) {
> + free = next->start - res->end - 1;
> + max = max(free, max);
> + }
> + if (!next && res->end + 1 < cxlrd->res->end + 1) {
> + free = cxlrd->res->end - res->end;
> + max = max(free, max);
> + }
> + }
> +
> + if (max > ctx->max_hpa) {
> + if (ctx->cxlrd)
> + put_device(CXLRD_DEV(ctx->cxlrd));
> + get_device(CXLRD_DEV(cxlrd));
> + ctx->cxlrd = cxlrd;
> + ctx->max_hpa = max;
> + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max);
> + }
> + return 0;
> +}
> +
> +/**
> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints
> + * @endpoint: an endpoint that is mapped by the returned decoder
> + * @interleave_ways: number of interleave ways (host bridges) the region will span
> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B]
> + * @max: output parameter of bytes available in the returned decoder
> + *
> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)'
> + * is a point-in-time snapshot. If the capacity has been reduced by the time
> + * the caller goes to use it, the caller needs to loop and retry.
> + *
> + * The returned root decoder has an elevated reference count that needs to be
> + * put with put_device(CXLRD_DEV(cxlrd)). The locking context is provided by
> + * cxl_{acquire,release}_endpoint(), which ensures removal of the root decoder
> + * does not race.
> + */
> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint,
> + int interleave_ways,
> + unsigned long flags,
> + resource_size_t *max)
> +{
> + struct cxlrd_max_context ctx = {
> + .host_bridges = &endpoint->host_bridge,
> + .interleave_ways = interleave_ways,
> + .flags = flags,
> + };
> + struct cxl_port *root_port;
> + struct cxl_root *root;
> +
> + if (!is_cxl_endpoint(endpoint)) {
> + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
> + return ERR_PTR(-EINVAL);
> + }
> +
> + root = find_cxl_root(endpoint);
Could use scope-based resource management __free() here to drop the put_device(&root_port->dev) below,
e.g. struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(endpoint);
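
Untested, but with the existing put_cxl_root() cleanup helper the tail of the
function could then look roughly like this (with the separate 'struct cxl_root
*root' declaration above dropped). The reference taken by find_cxl_root() is
released automatically on every return path, so the explicit
put_device(&root_port->dev) goes away:

        struct cxl_root *root __free(put_cxl_root) = find_cxl_root(endpoint);

        if (!root) {
                dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
                return ERR_PTR(-ENXIO);
        }

        root_port = &root->port;
        down_read(&cxl_region_rwsem);
        device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
        up_read(&cxl_region_rwsem);

        if (!ctx.cxlrd)
                return ERR_PTR(-ENOMEM);

        *max = ctx.max_hpa;
        return ctx.cxlrd;
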
> + if (!root) {
> + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
> + return ERR_PTR(-ENXIO);
> + }
> +
> + root_port = &root->port;
> + down_read(&cxl_region_rwsem);
> + device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
> + up_read(&cxl_region_rwsem);
> + put_device(&root_port->dev);
> +
> + if (!ctx.cxlrd)
> + return ERR_PTR(-ENOMEM);
> +
> + *max = ctx.max_hpa;
> + return ctx.cxlrd;
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL);
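
Not a blocker, but since the kernel-doc above asks callers to retry when the
snapshot goes stale and to drop the decoder reference, maybe worth spelling out
the expected caller pattern somewhere. Roughly something like below, where
accel_create_region() is just a placeholder for whatever the accel driver does
with the returned capacity:

        struct cxl_root_decoder *cxlrd;
        resource_size_t avail, wanted = EFX_CTPIO_BUFFER_SIZE;
        int rc;

        do {
                cxlrd = cxl_get_hpa_freespace(endpoint, 1,
                                              CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2,
                                              &avail);
                if (IS_ERR(cxlrd))
                        return PTR_ERR(cxlrd);
                if (avail < wanted) {
                        put_device(CXLRD_DEV(cxlrd));
                        return -ENOSPC;
                }
                /* placeholder: carve 'wanted' bytes out of this root decoder */
                rc = accel_create_region(cxlrd, wanted);
                put_device(CXLRD_DEV(cxlrd));
        } while (rc == -ENOSPC); /* capacity shrank since the snapshot, retry */
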
> +
> static ssize_t size_store(struct device *dev, struct device_attribute *attr,
> const char *buf, size_t len)
> {
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 9973430d975f..d3fdd2c1e066 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -770,6 +770,9 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev);
> struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
> struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
> struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
> +
> +#define CXLRD_DEV(cxlrd) (&(cxlrd)->cxlsd.cxld.dev)
> +
> bool is_root_decoder(struct device *dev);
> bool is_switch_decoder(struct device *dev);
> bool is_endpoint_decoder(struct device *dev);
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index 8f2a820bd92d..a0e0795ec064 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -877,4 +877,9 @@ struct cxl_hdm {
> struct seq_file;
> struct dentry *cxl_debugfs_create_dir(const char *dir);
> void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds);
> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint,
> + int interleave_ways,
> + unsigned long flags,
> + resource_size_t *max);
> +
> #endif /* __CXL_MEM_H__ */
> diff --git a/drivers/net/ethernet/sfc/efx_cxl.c b/drivers/net/ethernet/sfc/efx_cxl.c
> index 2cf4837ddfc1..6d49571ccff7 100644
> --- a/drivers/net/ethernet/sfc/efx_cxl.c
> +++ b/drivers/net/ethernet/sfc/efx_cxl.c
> @@ -22,6 +22,7 @@ void efx_cxl_init(struct efx_nic *efx)
> {
> struct pci_dev *pci_dev = efx->pci_dev;
> struct efx_cxl *cxl = efx->cxl;
> + resource_size_t max = 0;
> struct resource res;
> u16 dvsec;
>
> @@ -74,6 +75,19 @@ void efx_cxl_init(struct efx_nic *efx)
> if (IS_ERR(cxl->endpoint))
> pci_info(pci_dev, "CXL accel acquire endpoint failed");
>
> + cxl->cxlrd = cxl_get_hpa_freespace(cxl->endpoint, 1,
> + CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2,
> + &max);
> +
> + if (IS_ERR(cxl->cxlrd)) {
> + pci_info(pci_dev, "CXL accel get HPA failed\n");
> + goto out;
> + }
> +
> + if (max < EFX_CTPIO_BUFFER_SIZE)
> + pci_info(pci_dev, "CXL accel not enough free HPA space %pa < %u\n",
> + &max, EFX_CTPIO_BUFFER_SIZE);
> +out:
> cxl_release_endpoint(cxl->cxlmd, cxl->endpoint);
> }
>
> diff --git a/include/linux/cxl_accel_mem.h b/include/linux/cxl_accel_mem.h
> index 701910021df8..f3e77688ffe0 100644
> --- a/include/linux/cxl_accel_mem.h
> +++ b/include/linux/cxl_accel_mem.h
> @@ -6,6 +6,10 @@
> #ifndef __CXL_ACCEL_MEM_H
> #define __CXL_ACCEL_MEM_H
>
> +#define CXL_DECODER_F_RAM BIT(0)
> +#define CXL_DECODER_F_PMEM BIT(1)
> +#define CXL_DECODER_F_TYPE2 BIT(2)
> +
> enum accel_resource{
> CXL_ACCEL_RES_DPA,
> CXL_ACCEL_RES_RAM,
> @@ -32,4 +36,9 @@ struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
>
> struct cxl_port *cxl_acquire_endpoint(struct cxl_memdev *cxlmd);
> void cxl_release_endpoint(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
> +
> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint,
> + int interleave_ways,
> + unsigned long flags,
> + resource_size_t *max);
> #endif