Message-ID: <8757342e-26e6-467c-a469-0ac1120bcdd9@arm.com>
Date: Wed, 9 Jul 2025 16:29:39 +0100
From: Steven Price <steven.price@....com>
To: Joey Gouly <joey.gouly@....com>
Cc: kvm@...r.kernel.org, kvmarm@...ts.linux.dev,
Catalin Marinas <catalin.marinas@....com>, Marc Zyngier <maz@...nel.org>,
Will Deacon <will@...nel.org>, James Morse <james.morse@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Suzuki K Poulose <suzuki.poulose@....com>, Zenghui Yu
<yuzenghui@...wei.com>, linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org, Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>, Fuad Tabba <tabba@...gle.com>,
linux-coco@...ts.linux.dev,
Ganapatrao Kulkarni <gankulkarni@...amperecomputing.com>,
Gavin Shan <gshan@...hat.com>, Shanker Donthineni <sdonthineni@...dia.com>,
Alper Gun <alpergun@...gle.com>, "Aneesh Kumar K . V"
<aneesh.kumar@...nel.org>, Emi Kisanuki <fj0570is@...itsu.com>
Subject: Re: [PATCH v9 14/43] KVM: arm64: Support timers in realm RECs
Hi Joey,
On 09/07/2025 15:49, Joey Gouly wrote:
> Hi Steven,
>
> On Wed, Jun 11, 2025 at 11:48:11AM +0100, Steven Price wrote:
>> The RMM keeps track of the timer while the realm REC is running, but on
>> exit to the normal world KVM is responsible for handling the timers.
>>
>> The RMM doesn't provide a mechanism to set the counter offset, so don't
>> expose KVM_CAP_COUNTER_OFFSET for a realm VM.
>>
>> A later patch adds the support for propagating the timer values from the
>> exit data structure and calling kvm_realm_timers_update().
>>
>> Reviewed-by: Suzuki K Poulose <suzuki.poulose@....com>
>> Signed-off-by: Steven Price <steven.price@....com>
>> ---
>> Changes since v7:
>> * Hide KVM_CAP_COUNTER_OFFSET for realm guests.
>> ---
>> arch/arm64/kvm/arch_timer.c | 48 +++++++++++++++++++++++++++++++++---
>> arch/arm64/kvm/arm.c | 2 +-
>> include/kvm/arm_arch_timer.h | 2 ++
>> 3 files changed, 47 insertions(+), 5 deletions(-)
>>
>> diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
>> index fdbc8beec930..7f8705d6fdf5 100644
>> --- a/arch/arm64/kvm/arch_timer.c
>> +++ b/arch/arm64/kvm/arch_timer.c
>> @@ -148,6 +148,13 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
>>
>> static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
>> {
>> + struct kvm_vcpu *vcpu = ctxt->vcpu;
>> +
>> + if (kvm_is_realm(vcpu->kvm)) {
>> + WARN_ON(offset);
>> + return;
>> + }
>> +
>> if (!ctxt->offset.vm_offset) {
>> WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
>> return;
>> @@ -462,6 +469,21 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
>> timer_ctx);
>> }
>>
>> +void kvm_realm_timers_update(struct kvm_vcpu *vcpu)
>> +{
>> + struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
>> + int i;
>> +
>> + for (i = 0; i < NR_KVM_EL0_TIMERS; i++) {
>> + struct arch_timer_context *timer = &arch_timer->timers[i];
>> + bool status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT;
>> + bool level = kvm_timer_irq_can_fire(timer) && status;
>> +
>> + if (level != timer->irq.level)
>> + kvm_timer_update_irq(vcpu, level, timer);
>> + }
>> +}
>> +
>> /* Only called for a fully emulated timer */
>> static void timer_emulate(struct arch_timer_context *ctx)
>> {
>> @@ -870,6 +892,8 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
>> if (unlikely(!timer->enabled))
>> return;
>>
>> + kvm_timer_unblocking(vcpu);
>> +
>> get_timer_map(vcpu, &map);
>>
>> if (static_branch_likely(&has_gic_active_state)) {
>> @@ -883,8 +907,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
>> kvm_timer_vcpu_load_nogic(vcpu);
>> }
>>
>> - kvm_timer_unblocking(vcpu);
>> -
>
> The change here to move kvm_timer_unblocking() looks unnecessary: the change
> adding an early return to kvm_timer_enable() means timer->enabled is never
> set to 1 for a realm vCPU, so kvm_timer_vcpu_load() returns early before it
> ever reaches this call to kvm_timer_unblocking().
Good spot.

Looking through the code, it makes sense to keep the timer disabled for
realms, because that also disables all the code for loading/unloading the
timer state, which we don't want anyway (the RMM deals with that).

So I think just reverting the change that moves kvm_timer_unblocking() is
the correct approach.
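
In other words, v10 would simply drop both kvm_timer_vcpu_load() hunks from
this patch, leaving the function as it is today. Roughly (a sketch, with
the GIC-active-state handling elided):

	if (unlikely(!timer->enabled))
		return;		/* never set for a realm vCPU, so we stop here */

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		/* ... existing direct/mapped timer handling ... */
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	/* unchanged: stays after the GIC handling rather than moving up */
	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);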
Thanks,
Steve
> Thanks,
> Joey
>
>> timer_restore_state(map.direct_vtimer);
>> if (map.direct_ptimer)
>> timer_restore_state(map.direct_ptimer);
>> @@ -1065,7 +1087,9 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
>>
>> ctxt->vcpu = vcpu;
>>
>> - if (timerid == TIMER_VTIMER)
>> + if (kvm_is_realm(vcpu->kvm))
>> + ctxt->offset.vm_offset = NULL;
>> + else if (timerid == TIMER_VTIMER)
>> ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
>> else
>> ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;
>> @@ -1087,13 +1111,19 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
>> void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
>> {
>> struct arch_timer_cpu *timer = vcpu_timer(vcpu);
>> + u64 cntvoff;
>>
>> for (int i = 0; i < NR_KVM_TIMERS; i++)
>> timer_context_init(vcpu, i);
>>
>> + if (kvm_is_realm(vcpu->kvm))
>> + cntvoff = 0;
>> + else
>> + cntvoff = kvm_phys_timer_read();
>> +
>> /* Synchronize offsets across timers of a VM if not already provided */
>> if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
>> - timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
>> + timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
>> timer_set_offset(vcpu_ptimer(vcpu), 0);
>> }
>>
>> @@ -1633,6 +1663,13 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
>> return -EINVAL;
>> }
>>
>> + /*
>> + * We don't use mapped IRQs for Realms because the RMI doesn't allow
>> + * us setting the LR.HW bit in the VGIC.
>> + */
>> + if (vcpu_is_rec(vcpu))
>> + return 0;
>> +
>> get_timer_map(vcpu, &map);
>>
>> ret = kvm_vgic_map_phys_irq(vcpu,
>> @@ -1764,6 +1801,9 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
>> if (offset->reserved)
>> return -EINVAL;
>>
>> + if (kvm_is_realm(kvm))
>> + return -EINVAL;
>> +
>> mutex_lock(&kvm->lock);
>>
>> if (!kvm_trylock_all_vcpus(kvm)) {
>> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
>> index 0cdcc2ca4a88..6a5c9be4af2d 100644
>> --- a/arch/arm64/kvm/arm.c
>> +++ b/arch/arm64/kvm/arm.c
>> @@ -350,10 +350,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>> case KVM_CAP_PTP_KVM:
>> case KVM_CAP_ARM_SYSTEM_SUSPEND:
>> case KVM_CAP_IRQFD_RESAMPLE:
>> - case KVM_CAP_COUNTER_OFFSET:
>> case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
>> r = 1;
>> break;
>> + case KVM_CAP_COUNTER_OFFSET:
>> case KVM_CAP_SET_GUEST_DEBUG:
>> r = !kvm_is_realm(kvm);
>> break;
>> diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
>> index 681cf0c8b9df..f64e317c091b 100644
>> --- a/include/kvm/arm_arch_timer.h
>> +++ b/include/kvm/arm_arch_timer.h
>> @@ -113,6 +113,8 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
>> int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
>> int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
>>
>> +void kvm_realm_timers_update(struct kvm_vcpu *vcpu);
>> +
>> u64 kvm_phys_timer_read(void);
>>
>> void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
>> --
>> 2.43.0
>>