lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAAhSdy3RaJEnUJEB+W+7iTJ46c8r8vWUh5HBNe5utpxFv-zr0Q@mail.gmail.com>
Date: Thu, 2 Jan 2025 21:24:11 +0530
From: Anup Patel <anup@...infault.org>
To: Vladimir Kondratiev <vladimir.kondratiev@...ileye.com>
Cc: Thomas Gleixner <tglx@...utronix.de>, Paul Walmsley <paul.walmsley@...ive.com>, 
	Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>, 
	Rob Herring <robh@...nel.org>, Krzysztof Kozlowski <krzk+dt@...nel.org>, Conor Dooley <conor+dt@...nel.org>, 
	linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] irqchip/riscv-aplic: add support for hart indexes

On Thu, Jan 2, 2025 at 3:11 PM Vladimir Kondratiev
<vladimir.kondratiev@...ileye.com> wrote:
>
> Risc-V APLIC specification defines "hart index" in [1]:
>
> Within a given interrupt domain, each of the domain’s harts has a
> unique index number in the range 0 to 2^14 − 1 (= 16,383). The index
> number a domain associates with a hart may or may not have any
> relationship to the unique hart identifier (“hart ID”) that the
> RISC-V Privileged Architecture assigns to the hart. Two different
> interrupt domains may employ entirely different index numbers for
> the same set of harts.

Rather than this, better cite the text in "4.5 Memory-mapped control
region for an interrupt domain".

>
> Support arbitrary hart indexes specified in optional APLIC property
> "riscv,hart-index" that should be array of u32 elements, one per
> interrupt target. If this property is not specified, the fallback is to use
> hart ids, with hart index for each APLIC to be (hartid - hartid0)
> where hartid0 is hart id for the 1-st target.

This is an incorrect assumption in the default case. The HART IDs
of the HARTs targeted by an APLIC domain can be non-contiguous.

>
> [1]: https://github.com/riscv/riscv-aia
>
> Signed-off-by: Vladimir Kondratiev <vladimir.kondratiev@...ileye.com>
> ---
>  drivers/irqchip/irq-riscv-aplic-direct.c | 25 ++++++++++++++++++++++--
>  1 file changed, 23 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
> index 7cd6b646774b..80c82e34e894 100644
> --- a/drivers/irqchip/irq-riscv-aplic-direct.c
> +++ b/drivers/irqchip/irq-riscv-aplic-direct.c
> @@ -221,12 +221,15 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
>
>  int aplic_direct_setup(struct device *dev, void __iomem *regs)
>  {
> +       static const char *prop_hart_index = "riscv,hart-index";
>         int i, j, rc, cpu, current_cpu, setup_count = 0;
>         struct aplic_direct *direct;
>         struct irq_domain *domain;
>         struct aplic_priv *priv;
>         struct aplic_idc *idc;
>         unsigned long hartid;
> +       unsigned long hartid0;
> +       u32 *hart_index = NULL;
>         u32 v, hwirq;
>
>         direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
> @@ -240,6 +243,22 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
>                 return rc;
>         }
>
> +       rc = device_property_count_u32(dev, prop_hart_index);
> +       if (rc == -ENODATA)
> +               rc = 0;
> +       if (rc > 0 && rc != priv->nr_idcs)
> +               rc = -EOVERFLOW;
> +       if (rc > 0) {
> +               hart_index = devm_kcalloc(dev, priv->nr_idcs, sizeof(*hart_index), GFP_KERNEL);
> +               if (!hart_index)
> +                       return -ENOMEM;
> +               rc = device_property_read_u32_array(dev, prop_hart_index,
> +                                                   hart_index, priv->nr_idcs);
> +       }
> +       if (rc < 0) {
> +               dev_err(dev, "APLIC property \"%s\" error %pe\n", prop_hart_index, ERR_PTR(rc));
> +               return rc;
> +       }

No need for this additional array allocation and reading.

>         /* Setup per-CPU IDC and target CPU mask */
>         current_cpu = get_cpu();
>         for (i = 0; i < priv->nr_idcs; i++) {
> @@ -249,6 +268,8 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
>                         continue;
>                 }
>
> +               if (i == 0)
> +                       hartid0 = hartid;
>                 /*
>                  * Skip interrupts other than external interrupts for
>                  * current privilege level.
> @@ -265,8 +286,8 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
>                 cpumask_set_cpu(cpu, &direct->lmask);
>
>                 idc = per_cpu_ptr(&aplic_idcs, cpu);
> -               idc->hart_index = i;
> -               idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
> +               idc->hart_index = hart_index ? hart_index[i] : hartid - hartid0;
> +               idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
>                 idc->direct = direct;
>
>                 aplic_idc_set_delivery(idc, true);
> --
> 2.43.0
>

How about this ?

diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c
b/drivers/irqchip/irq-riscv-aplic-direct.c
index 7cd6b646774b..bac88b7790ec 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -31,7 +31,7 @@ struct aplic_direct {
 };

 struct aplic_idc {
-    unsigned int        hart_index;
+    u32            hart_index;
     void __iomem        *regs;
     struct aplic_direct    *direct;
 };
@@ -219,6 +219,19 @@ static int aplic_direct_parse_parent_hwirq(struct
device *dev, u32 index,
     return 0;
 }

+static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index,
+                    u32 *hart_index)
+{
+    if (!is_of_node(dev->fwnode) ||
+        !of_property_present(to_of_node(dev->fwnode), "riscv,hart-indexes")) {
+        *hart_index = logical_index;
+        return 0;
+    }
+
+    return of_property_read_u32_index(to_of_node(dev->fwnode),
"riscv,hart-indexes",
+                      logical_index, hart_index);
+}
+
 int aplic_direct_setup(struct device *dev, void __iomem *regs)
 {
     int i, j, rc, cpu, current_cpu, setup_count = 0;
@@ -265,8 +278,13 @@ int aplic_direct_setup(struct device *dev, void
__iomem *regs)
         cpumask_set_cpu(cpu, &direct->lmask);

         idc = per_cpu_ptr(&aplic_idcs, cpu);
-        idc->hart_index = i;
-        idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+        rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index);
+        if (rc) {
+            dev_warn(dev, "hart index not found for IDC%d\n", i);
+            continue;
+        }
+
+        idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index *
APLIC_IDC_SIZE;
         idc->direct = direct;

         aplic_idc_set_delivery(idc, true);

Regards,
Anup

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ