Message-ID: <99d4f018-0e2d-9ab4-b4d4-58af7e494ec3@kaod.org>
Date: Thu, 2 Apr 2020 10:08:05 +0200
From: Cédric Le Goater <clg@...d.org>
To: Haren Myneni <haren@...ux.ibm.com>, mpe@...erman.id.au
Cc: mikey@...ling.org, srikar@...ux.vnet.ibm.com,
frederic.barrat@...ibm.com, ajd@...ux.ibm.com,
linux-kernel@...r.kernel.org, npiggin@...il.com, hch@...radead.org,
oohall@...il.com, sukadev@...ux.vnet.ibm.com,
linuxppc-dev@...ts.ozlabs.org, herbert@...dor.apana.org.au
Subject: Re: [PATCH v10 03/14] powerpc/vas: Alloc and setup IRQ and trigger
port address
On 4/2/20 9:10 AM, Haren Myneni wrote:
>
> Allocate a XIVE IRQ on each chip with a VAS instance. The NX coprocessor
> raises a host CPU interrupt via VAS if it encounters a page fault on a
> user space request buffer. Subsequent patches register the trigger port
> with the NX coprocessor and create a VAS fault handler for this
> interrupt mapping.
Looks good!
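
For readers skimming the series: the new init path condenses to the four
steps below (lifted from the hunk further down, error handling omitted) -
allocate a XIVE interrupt source on the VAS chip, map it to a Linux virq,
and keep the trigger page address around so a later patch can hand it to
the NX engine:

  hwirq = xive_native_alloc_irq_on_chip(chipid);  /* per-chip XIVE source */
  vinst->virq = irq_create_mapping(NULL, hwirq);  /* Linux virq for it    */
  xd = irq_get_handler_data(vinst->virq);         /* XIVE per-IRQ data    */
  vinst->irq_port = xd->trig_page;                /* trigger page for NX  */
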
> Signed-off-by: Haren Myneni <haren@...ux.ibm.com>
Reviewed-by: Cédric Le Goater <clg@...d.org>
Thanks,
C.
> ---
> arch/powerpc/platforms/powernv/vas.c | 44 +++++++++++++++++++++++++++++++-----
> arch/powerpc/platforms/powernv/vas.h | 2 ++
> 2 files changed, 40 insertions(+), 6 deletions(-)
>
> diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
> index ed9cc6d..3303cfe 100644
> --- a/arch/powerpc/platforms/powernv/vas.c
> +++ b/arch/powerpc/platforms/powernv/vas.c
> @@ -15,6 +15,7 @@
> #include <linux/of_address.h>
> #include <linux/of.h>
> #include <asm/prom.h>
> +#include <asm/xive.h>
>
> #include "vas.h"
>
> @@ -25,10 +26,12 @@
>
> static int init_vas_instance(struct platform_device *pdev)
> {
> - int rc, cpu, vasid;
> - struct resource *res;
> - struct vas_instance *vinst;
> struct device_node *dn = pdev->dev.of_node;
> + struct vas_instance *vinst;
> + struct xive_irq_data *xd;
> + uint32_t chipid, hwirq;
> + struct resource *res;
> + int rc, cpu, vasid;
>
> rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
> if (rc) {
> @@ -36,6 +39,12 @@ static int init_vas_instance(struct platform_device *pdev)
> return -ENODEV;
> }
>
> + rc = of_property_read_u32(dn, "ibm,chip-id", &chipid);
> + if (rc) {
> + pr_err("No ibm,chip-id property for %s?\n", pdev->name);
> + return -ENODEV;
> + }
> +
> if (pdev->num_resources != 4) {
> pr_err("Unexpected DT configuration for [%s, %d]\n",
> pdev->name, vasid);
> @@ -69,9 +78,32 @@ static int init_vas_instance(struct platform_device *pdev)
>
> vinst->paste_win_id_shift = 63 - res->end;
>
> - pr_devel("Initialized instance [%s, %d], paste_base 0x%llx, "
> - "paste_win_id_shift 0x%llx\n", pdev->name, vasid,
> - vinst->paste_base_addr, vinst->paste_win_id_shift);
> + hwirq = xive_native_alloc_irq_on_chip(chipid);
> + if (!hwirq) {
> + pr_err("Inst%d: Unable to allocate global irq for chip %d\n",
> + vinst->vas_id, chipid);
> + return -ENOENT;
> + }
> +
> + vinst->virq = irq_create_mapping(NULL, hwirq);
> + if (!vinst->virq) {
> + pr_err("Inst%d: Unable to map global irq %d\n",
> + vinst->vas_id, hwirq);
> + return -EINVAL;
> + }
> +
> + xd = irq_get_handler_data(vinst->virq);
> + if (!xd) {
> + pr_err("Inst%d: Invalid virq %d\n",
> + vinst->vas_id, vinst->virq);
> + return -EINVAL;
> + }
> +
> + vinst->irq_port = xd->trig_page;
> + pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n",
> + pdev->name, vasid, vinst->paste_base_addr,
> + vinst->paste_win_id_shift, vinst->virq,
> + vinst->irq_port);
>
> for_each_possible_cpu(cpu) {
> if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
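
(Side note, not a request for change: I assume a later patch in the series
wires vinst->virq up with something along these lines; the handler and
name below are made up just to show where virq plugs in:

  /* hypothetical sketch of a follow-up patch, not this one */
  rc = request_irq(vinst->virq, vas_fault_handler, 0,
                   "vas-fault", vinst);

and registers vinst->irq_port with the NX engine, which then raises the
interrupt by storing to that trigger page.)
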
> diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
> index 5574aec..598608b 100644
> --- a/arch/powerpc/platforms/powernv/vas.h
> +++ b/arch/powerpc/platforms/powernv/vas.h
> @@ -313,6 +313,8 @@ struct vas_instance {
> u64 paste_base_addr;
> u64 paste_win_id_shift;
>
> + u64 irq_port;
> + int virq;
> struct mutex mutex;
> struct vas_window *rxwin[VAS_COP_TYPE_MAX];
> struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
>