Message-Id: <20260131025800.1550692-1-xujiakai2025@iscas.ac.cn>
Date: Sat, 31 Jan 2026 02:58:00 +0000
From: Jiakai Xu <xujiakai2025@...as.ac.cn>
To: linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org,
kvm@...r.kernel.org,
kvm-riscv@...ts.infradead.org
Cc: Alexandre Ghiti <alex@...ti.fr>,
Albert Ou <aou@...s.berkeley.edu>,
Palmer Dabbelt <palmer@...belt.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Atish Patra <atish.patra@...ux.dev>,
Anup Patel <anup@...infault.org>,
Jiakai Xu <xujiakai2025@...as.ac.cn>,
Jiakai Xu <jiakaiPeanut@...il.com>
Subject: [PATCH] RISC-V: KVM: Change imsic->vsfile_lock from rwlock_t to raw_spinlock_t

The per-vCPU IMSIC context uses vsfile_lock to protect access to the
VS-file. Currently this lock is an rwlock_t, taken with
read_lock_irqsave()/write_lock_irqsave() in multiple places in
arch/riscv/kvm/aia_imsic.c.
During fuzz testing of KVM ioctl sequences, a "[BUG: Invalid wait
context]" lockdep splat was observed while taking vsfile_lock in
certain vCPU scheduling paths, for example in
kvm_riscv_vcpu_aia_imsic_put(). The report shows that at this point
the task holds vcpu->mutex and the scheduler runqueue lock (a
raw_spinlock_t), so acquiring an rwlock_t (which becomes a sleeping
lock on PREEMPT_RT) with read_lock_irqsave() in that context is
invalid.

The splat looks like this:
[ BUG: Invalid wait context ]
(&imsic->vsfile_lock){....}-{3:3}, at:
kvm_riscv_vcpu_aia_imsic_put arch/riscv/kvm/aia_imsic.c:728
...
2 locks held by syz.4.4541/8252:
#0: (&vcpu->mutex), at: kvm_vcpu_ioctl virt/kvm/kvm_main.c:4460
#1: (&rq->__lock), at: raw_spin_rq_lock_nested kernel/sched/core.c:639
#1: (&rq->__lock), at: raw_spin_rq_lock kernel/sched/sched.h:1580
#1: (&rq->__lock), at: rq_lock kernel/sched/sched.h:1907
#1: (&rq->__lock), at: __schedule kernel/sched/core.c:6772
...
Call Trace:
_raw_read_lock_irqsave kernel/locking/spinlock.c:236
kvm_riscv_vcpu_aia_imsic_put arch/riscv/kvm/aia_imsic.c:716
kvm_riscv_vcpu_aia_put arch/riscv/kvm/aia.c:154
kvm_arch_vcpu_put arch/riscv/kvm/vcpu.c:650
kvm_sched_out virt/kvm/kvm_main.c:6421
__fire_sched_out_preempt_notifiers kernel/sched/core.c:4835
fire_sched_out_preempt_notifiers kernel/sched/core.c:4843
prepare_task_switch kernel/sched/core.c:5050
context_switch kernel/sched/core.c:5205
__schedule kernel/sched/core.c:6867
__schedule_loop kernel/sched/core.c:6949
schedule kernel/sched/core.c:6964
kvm_riscv_check_vcpu_requests arch/riscv/kvm/vcpu.c:699
kvm_arch_vcpu_ioctl_run arch/riscv/kvm/vcpu.c:920
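
In short, the nesting lockdep complains about is (an illustrative
call chain pieced together from the trace above, not code from the
tree):

  __schedule()
    raw_spin_rq_lock()                      /* rq->__lock: raw_spinlock_t */
      prepare_task_switch()
        __fire_sched_out_preempt_notifiers()
          kvm_sched_out()
            kvm_arch_vcpu_put()
              kvm_riscv_vcpu_aia_imsic_put()
                read_lock_irqsave(&imsic->vsfile_lock, flags);  /* rwlock_t */

With lockdep's wait-context checking enabled, taking a spinlock_t or
rwlock_t (both of which become sleeping locks on PREEMPT_RT) while a
raw_spinlock_t such as rq->__lock is held is reported as an invalid
wait context.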
Therefore, replace vsfile_lock with a raw_spinlock_t and convert all
acquire/release calls to
raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore(). The sections
protected by vsfile_lock already run with interrupts disabled and do
not sleep, so a raw spinlock is a suitable replacement.

Fixes: db8b7e97d6137a ("RISC-V: KVM: Add in-kernel virtualization of AIA IMSIC")
Signed-off-by: Jiakai Xu <xujiakai2025@...as.ac.cn>
Signed-off-by: Jiakai Xu <jiakaiPeanut@...il.com>
---
arch/riscv/kvm/aia_imsic.c | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index fda0346f0ea1f..8730229442a26 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -47,7 +47,7 @@ struct imsic {
*/
/* IMSIC VS-file */
- rwlock_t vsfile_lock;
+ raw_spinlock_t vsfile_lock;
int vsfile_cpu;
int vsfile_hgei;
void __iomem *vsfile_va;
@@ -597,13 +597,13 @@ static void imsic_vsfile_cleanup(struct imsic *imsic)
* VCPU is being destroyed.
*/
- write_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
old_vsfile_hgei = imsic->vsfile_hgei;
old_vsfile_cpu = imsic->vsfile_cpu;
imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
imsic->vsfile_va = NULL;
imsic->vsfile_pa = 0;
- write_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
memset(imsic->swfile, 0, sizeof(*imsic->swfile));
@@ -688,10 +688,10 @@ bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu)
* only check for interrupt when IMSIC VS-file is being used.
*/
- read_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
if (imsic->vsfile_cpu > -1)
ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei));
- read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
return ret;
}
@@ -713,10 +713,10 @@ void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu)
if (!kvm_vcpu_is_blocking(vcpu))
return;
- read_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
if (imsic->vsfile_cpu > -1)
csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei));
- read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
}
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
@@ -727,13 +727,13 @@ void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
/* Read and clear IMSIC VS-file details */
- write_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
old_vsfile_hgei = imsic->vsfile_hgei;
old_vsfile_cpu = imsic->vsfile_cpu;
imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
imsic->vsfile_va = NULL;
imsic->vsfile_pa = 0;
- write_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
/* Do nothing, if no IMSIC VS-file to release */
if (old_vsfile_cpu < 0)
@@ -786,10 +786,10 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
return 1;
/* Read old IMSIC VS-file details */
- read_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
old_vsfile_hgei = imsic->vsfile_hgei;
old_vsfile_cpu = imsic->vsfile_cpu;
- read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
/* Do nothing if we are continuing on same CPU */
if (old_vsfile_cpu == vcpu->cpu)
@@ -839,12 +839,12 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
/* TODO: Update the IOMMU mapping ??? */
/* Update new IMSIC VS-file details in IMSIC context */
- write_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
imsic->vsfile_hgei = new_vsfile_hgei;
imsic->vsfile_cpu = vcpu->cpu;
imsic->vsfile_va = new_vsfile_va;
imsic->vsfile_pa = new_vsfile_pa;
- write_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
/*
* At this point, all interrupt producers have been moved
@@ -943,7 +943,7 @@ int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
imsic = vcpu->arch.aia_context.imsic_state;
- read_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
rc = 0;
vsfile_hgei = imsic->vsfile_hgei;
@@ -958,7 +958,7 @@ int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
isel, val, 0, 0);
}
- read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
if (!rc && vsfile_cpu >= 0)
rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
@@ -1015,7 +1015,7 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
if (imsic->nr_msis <= iid)
return -EINVAL;
- read_lock_irqsave(&imsic->vsfile_lock, flags);
+ raw_spin_lock_irqsave(&imsic->vsfile_lock, flags);
if (imsic->vsfile_cpu >= 0) {
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
@@ -1025,7 +1025,7 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
imsic_swfile_extirq_update(vcpu);
}
- read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+ raw_spin_unlock_irqrestore(&imsic->vsfile_lock, flags);
return 0;
}
@@ -1081,7 +1081,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
/* Setup IMSIC context */
imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
- rwlock_init(&imsic->vsfile_lock);
+ raw_spin_lock_init(&imsic->vsfile_lock);
imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
imsic->vsfile_hgei = imsic->vsfile_cpu = -1;
--
2.34.1