Message-Id: <4c8f704b-5607-5ca0-c00e-01e412117f6b@linux.ibm.com>
Date: Tue, 25 Feb 2020 17:30:52 +0100
From: Frederic Barrat <fbarrat@...ux.ibm.com>
To: "Alastair D'Silva" <alastair@....ibm.com>, alastair@...ilva.org
Cc: "Aneesh Kumar K . V" <aneesh.kumar@...ux.ibm.com>,
"Oliver O'Halloran" <oohall@...il.com>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>,
Andrew Donnellan <ajd@...ux.ibm.com>,
Arnd Bergmann <arnd@...db.de>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Dan Williams <dan.j.williams@...el.com>,
Vishal Verma <vishal.l.verma@...el.com>,
Dave Jiang <dave.jiang@...el.com>,
Ira Weiny <ira.weiny@...el.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Mauro Carvalho Chehab <mchehab+samsung@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Rob Herring <robh@...nel.org>,
Anton Blanchard <anton@...abs.org>,
Krzysztof Kozlowski <krzk@...nel.org>,
Mahesh Salgaonkar <mahesh@...ux.vnet.ibm.com>,
Madhavan Srinivasan <maddy@...ux.vnet.ibm.com>,
Cédric Le Goater <clg@...d.org>,
Anju T Sudhakar <anju@...ux.vnet.ibm.com>,
Hari Bathini <hbathini@...ux.ibm.com>,
Thomas Gleixner <tglx@...utronix.de>,
Greg Kurz <groug@...d.org>,
Nicholas Piggin <npiggin@...il.com>,
Masahiro Yamada <yamada.masahiro@...ionext.com>,
Alexey Kardashevskiy <aik@...abs.ru>,
linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
linux-nvdimm@...ts.01.org, linux-mm@...ck.org
Subject: Re: [PATCH v3 06/27] ocxl: Tally up the LPC memory on a link & allow
it to be mapped

On 21/02/2020 at 04:26, Alastair D'Silva wrote:
> From: Alastair D'Silva <alastair@...ilva.org>
>
> Tally up the LPC memory on an OpenCAPI link & allow it to be mapped
>
> Signed-off-by: Alastair D'Silva <alastair@...ilva.org>
> ---
> drivers/misc/ocxl/core.c | 10 ++++++
> drivers/misc/ocxl/link.c | 53 +++++++++++++++++++++++++++++++
> drivers/misc/ocxl/ocxl_internal.h | 33 +++++++++++++++++++
> 3 files changed, 96 insertions(+)
>
> diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c
> index b7a09b21ab36..2531c6cf19a0 100644
> --- a/drivers/misc/ocxl/core.c
> +++ b/drivers/misc/ocxl/core.c
> @@ -230,8 +230,18 @@ static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
> if (rc)
> goto err_free_pasid;
>
> + if (afu->config.lpc_mem_size || afu->config.special_purpose_mem_size) {
> + rc = ocxl_link_add_lpc_mem(afu->fn->link, afu->config.lpc_mem_offset,
> + afu->config.lpc_mem_size +
> + afu->config.special_purpose_mem_size);
> + if (rc)
> + goto err_free_mmio;
> + }
> +
> return 0;
>
> +err_free_mmio:
> + unmap_mmio_areas(afu);
> err_free_pasid:
> reclaim_afu_pasid(afu);
> err_free_actag:
> diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
> index 58d111afd9f6..1e039cc5ebe5 100644
> --- a/drivers/misc/ocxl/link.c
> +++ b/drivers/misc/ocxl/link.c
> @@ -84,6 +84,11 @@ struct ocxl_link {
> int dev;
> atomic_t irq_available;
> struct spa *spa;
> + struct mutex lpc_mem_lock; /* protects lpc_mem & lpc_mem_sz */
> + u64 lpc_mem_sz; /* Total amount of LPC memory presented on the link */
> + u64 lpc_mem;
> + int lpc_consumers;
> +
> void *platform_data;
> };
> static struct list_head links_list = LIST_HEAD_INIT(links_list);
> @@ -396,6 +401,8 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
> if (rc)
> goto err_spa;
>
> + mutex_init(&link->lpc_mem_lock);
> +
> /* platform specific hook */
> rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
> &link->platform_data);
> @@ -711,3 +718,49 @@ void ocxl_link_free_irq(void *link_handle, int hw_irq)
> atomic_inc(&link->irq_available);
> }
> EXPORT_SYMBOL_GPL(ocxl_link_free_irq);
> +
> +int ocxl_link_add_lpc_mem(void *link_handle, u64 offset, u64 size)
> +{
> + struct ocxl_link *link = (struct ocxl_link *) link_handle;
> +
> + // Check for overflow
> + if (offset > (offset + size))
> + return -EINVAL;
> +
> + mutex_lock(&link->lpc_mem_lock);
> + link->lpc_mem_sz = max(link->lpc_mem_sz, offset + size);
> +
> + mutex_unlock(&link->lpc_mem_lock);
> +
> + return 0;
> +}
> +
> +u64 ocxl_link_lpc_map(void *link_handle, struct pci_dev *pdev)
> +{
> + struct ocxl_link *link = (struct ocxl_link *) link_handle;
> +
> + mutex_lock(&link->lpc_mem_lock);
> +
> + if (!link->lpc_mem)
> + link->lpc_mem = pnv_ocxl_platform_lpc_setup(pdev, link->lpc_mem_sz);
> +
> + if (link->lpc_mem)
> + link->lpc_consumers++;
> + mutex_unlock(&link->lpc_mem_lock);
> +
> + return link->lpc_mem;
> +}
> +
> +void ocxl_link_lpc_release(void *link_handle, struct pci_dev *pdev)
> +{
> + struct ocxl_link *link = (struct ocxl_link *) link_handle;
> +
> + mutex_lock(&link->lpc_mem_lock);
> + WARN_ON(--link->lpc_consumers < 0);
Here, we always decrement the lpc_consumers count. However, it was only
incremented if the mapping was set up correctly in opal.
We could arguably claim that ocxl_link_lpc_release() should only be
called if ocxl_link_lpc_map() succeeded, but error path handling would
be easier if we only decremented the lpc_consumers count when
link->lpc_mem is set: error paths could then simply call
ocxl_link_lpc_release() without having to worry about triggering the
WARN_ON message.
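Something along those lines is what I have in mind (untested sketch,
reusing the names from this patch):

void ocxl_link_lpc_release(void *link_handle, struct pci_dev *pdev)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;

	mutex_lock(&link->lpc_mem_lock);
	/* Only drop a reference if the mapping was actually set up,
	 * so that error paths can call this unconditionally
	 */
	if (link->lpc_mem) {
		WARN_ON(--link->lpc_consumers < 0);
		if (link->lpc_consumers == 0) {
			pnv_ocxl_platform_lpc_release(pdev);
			link->lpc_mem = 0;
		}
	}
	mutex_unlock(&link->lpc_mem_lock);
}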
Fred
> + if (link->lpc_consumers == 0) {
> + pnv_ocxl_platform_lpc_release(pdev);
> + link->lpc_mem = 0;
> + }
> +
> + mutex_unlock(&link->lpc_mem_lock);
> +}
> diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
> index 198e4e4bc51d..d0c8c4838f42 100644
> --- a/drivers/misc/ocxl/ocxl_internal.h
> +++ b/drivers/misc/ocxl/ocxl_internal.h
> @@ -142,4 +142,37 @@ int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset);
> u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id);
> void ocxl_afu_irq_free_all(struct ocxl_context *ctx);
>
> +/**
> + * ocxl_link_add_lpc_mem() - Increment the amount of memory required by an OpenCAPI link
> + *
> + * @link_handle: The OpenCAPI link handle
> + * @offset: The offset of the memory to add
> + * @size: The amount of memory to increment by
> + *
> + * Returns 0 on success, negative on overflow
> + */
> +int ocxl_link_add_lpc_mem(void *link_handle, u64 offset, u64 size);
> +
> +/**
> + * ocxl_link_lpc_map() - Map the LPC memory for an OpenCAPI device
> + * Since LPC memory belongs to a link, the whole LPC memory available
> + * on the link must be mapped in order to make it accessible to a device.
> + * @link_handle: The OpenCAPI link handle
> + * @pdev: A device that is on the link
> + *
> + * Returns the address of the mapped LPC memory, or 0 on error
> + */
> +u64 ocxl_link_lpc_map(void *link_handle, struct pci_dev *pdev);
> +
> +/**
> + * ocxl_link_lpc_release() - Release the LPC memory device for an OpenCAPI device
> + *
> + * Offlines LPC memory on an OpenCAPI link for a device. If this is the
> + * last device on the link to release the memory, unmap it from the link.
> + *
> + * @link_handle: The OpenCAPI link handle
> + * @pdev: A device that is on the link
> + */
> +void ocxl_link_lpc_release(void *link_handle, struct pci_dev *pdev);
> +
> #endif /* _OCXL_INTERNAL_H_ */
>
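PS: for context, the way I'd expect a consumer to use these two entry
points is roughly the following (hypothetical sketch, for illustration
only; the afu->fn->link handle comes from this patch, the error code is
an assumption):

	u64 lpc_addr;

	/* Map the whole LPC range of the link on behalf of this device */
	lpc_addr = ocxl_link_lpc_map(afu->fn->link, pdev);
	if (!lpc_addr)
		return -EINVAL;

	/* ... expose the memory range starting at lpc_addr ... */

	/* Drop our reference; the last consumer on the link unmaps it */
	ocxl_link_lpc_release(afu->fn->link, pdev);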