Message-Id: <84ee1ae4bfc0ddbafdb38a21135f67ceb121c5e1.1644396407.git.geert@linux-m68k.org>
Date: Wed, 9 Feb 2022 09:54:01 +0100
From: Geert Uytterhoeven <geert@...ux-m68k.org>
To: Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>
Cc: Atish Patra <atishp@...osinc.com>,
Jessica Clarke <jrtc27@...c27.com>,
Anup Patel <anup@...infault.org>,
Emil Renner Berthing <kernel@...il.dk>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
Geert Uytterhoeven <geert@...ux-m68k.org>
Subject: [PATCH v2 3/3] RISC-V: Fix IPI/RFENCE hmask on non-monotonic hartid ordering
If the boot CPU does not have the lowest hartid, "hartid - hbase" can
become negative (the values are unsigned longs, so in practice the
subtraction wraps around), leading to an incorrect hmask and causing
userspace to crash with SEGV.  This is observed on e.g. Starlight Beta,
where cpuid 1 maps to hartid 0, and cpuid 0 maps to hartid 1.
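
For illustration only (not part of the patch): a minimal, hypothetical
userspace sketch of the failing arithmetic, assuming the usual
BIT()-style definition 1UL << (nr):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long hbase = 1;	/* boot hart, seen first */
	unsigned long hartid = 0;	/* lower hartid, seen later */
	unsigned long shift = hartid - hbase;

	/* The subtraction wraps instead of going negative... */
	printf("hartid - hbase = %lu (== ULONG_MAX: %d)\n",
	       shift, shift == ULONG_MAX);
	/*
	 * ...so BIT(hartid - hbase), i.e. 1UL << shift, would use a shift
	 * count of at least BITS_PER_LONG: undefined behaviour, and an
	 * hmask that does not select hart 0 at all.
	 */
	return 0;
}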

Fix this by detecting this case and, if possible, shifting the already
accumulated mask and updating hbase to the lower hartid.
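
Again for illustration only (not the kernel code): a stripped-down
model of the fixed accumulation step, with the htop bookkeeping and the
flush-to-SBI path omitted; the hartid order matches the Starlight
example above:

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* same shape as the kernel macro */

int main(void)
{
	/* cpuid 0 -> hartid 1 (boot hart), cpuid 1 -> hartid 0 */
	unsigned long hartids[] = { 1, 0 };
	unsigned long hmask = 0, hbase = 0;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long hartid = hartids[i];

		if (hmask && hartid < hbase) {
			/* shift the mask to fit lower hartid */
			hmask <<= hbase - hartid;
			hbase = hartid;
		}
		if (!hmask)
			hbase = hartid;
		hmask |= BIT(hartid - hbase);
	}

	/* prints hbase=0 hmask=0x3: both harts are selected */
	printf("hbase=%lu hmask=%#lx\n", hbase, hmask);
	return 0;
}

Without the rebasing branch, the second iteration would leave hbase at
1 and compute BIT(0 - 1), so hart 0 would never be targeted.
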
Fixes: 26fb751ca37846c9 ("RISC-V: Do not use cpumask data structure for hartid bitmap")
Signed-off-by: Geert Uytterhoeven <geert@...ux-m68k.org>
Reviewed-by: Atish Patra <atishp@...osinc.com>
Tested-by: Emil Renner Berthing <kernel@...il.dk>
Tested-by: Atish Patra <atishp@...osinc.com>
---
v2:
- Add Reviewed-by, Tested-by.
---
 arch/riscv/kernel/sbi.c | 57 ++++++++++++++++++++++++++++-------------
 1 file changed, 39 insertions(+), 18 deletions(-)

diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 22444cfcd56cc646..775d3322b422fe14 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -250,7 +250,7 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
 
 static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
 {
-	unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
 	struct sbiret ret = {0};
 	int result;
 
@@ -259,16 +259,27 @@ static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
 
 	for_each_cpu(cpuid, cpu_mask) {
 		hartid = cpuid_to_hartid_map(cpuid);
-		if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
-			ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
-					hmask, hbase, 0, 0, 0, 0);
-			if (ret.error)
-				goto ecall_failed;
-			hmask = 0;
-			hbase = 0;
+		if (hmask) {
+			if (hartid + BITS_PER_LONG <= htop ||
+			    hbase + BITS_PER_LONG <= hartid) {
+				ret = sbi_ecall(SBI_EXT_IPI,
+						SBI_EXT_IPI_SEND_IPI, hmask,
+						hbase, 0, 0, 0, 0);
+				if (ret.error)
+					goto ecall_failed;
+				hmask = 0;
+			} else if (hartid < hbase) {
+				/* shift the mask to fit lower hartid */
+				hmask <<= hbase - hartid;
+				hbase = hartid;
+			}
 		}
-		if (!hmask)
+		if (!hmask) {
 			hbase = hartid;
+			htop = hartid;
+		} else if (hartid > htop) {
+			htop = hartid;
+		}
 		hmask |= BIT(hartid - hbase);
 	}
 
@@ -345,7 +356,7 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
 			    unsigned long start, unsigned long size,
 			    unsigned long arg4, unsigned long arg5)
 {
-	unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
 	int result;
 
 	if (!cpu_mask || cpumask_empty(cpu_mask))
@@ -353,16 +364,26 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
 
 	for_each_cpu(cpuid, cpu_mask) {
 		hartid = cpuid_to_hartid_map(cpuid);
-		if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
-			result = __sbi_rfence_v02_call(fid, hmask, hbase,
-						       start, size, arg4, arg5);
-			if (result)
-				return result;
-			hmask = 0;
-			hbase = 0;
+		if (hmask) {
+			if (hartid + BITS_PER_LONG <= htop ||
+			    hbase + BITS_PER_LONG <= hartid) {
+				result = __sbi_rfence_v02_call(fid, hmask,
+						hbase, start, size, arg4, arg5);
+				if (result)
+					return result;
+				hmask = 0;
+			} else if (hartid < hbase) {
+				/* shift the mask to fit lower hartid */
+				hmask <<= hbase - hartid;
+				hbase = hartid;
+			}
 		}
-		if (!hmask)
+		if (!hmask) {
 			hbase = hartid;
+			htop = hartid;
+		} else if (hartid > htop) {
+			htop = hartid;
+		}
 		hmask |= BIT(hartid - hbase);
 	}
 
--
2.25.1