Message-ID: <a3eea263-de38-e3d7-f188-93eb5148a73a@linux.ibm.com>
Date: Tue, 8 Feb 2022 15:36:01 +0100
From: Janis Schoetterl-Glausch <scgl@...ux.ibm.com>
To: Christian Borntraeger <borntraeger@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>
Cc: Alexander Gordeev <agordeev@...ux.ibm.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>,
David Hildenbrand <david@...hat.com>,
Jonathan Corbet <corbet@....net>, kvm@...r.kernel.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-s390@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>,
Sven Schnelle <svens@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>
Subject: Re: [PATCH v2 02/11] KVM: s390: Honor storage keys when accessing
guest memory
On 2/8/22 15:02, Christian Borntraeger wrote:
> On 07.02.22 at 17:59, Janis Schoetterl-Glausch wrote:
>> Storage key checking had not been implemented for instructions emulated
>> by KVM. Implement it by enhancing the functions used for guest access,
>> in particular those making use of access_guest which has been renamed
>> to access_guest_with_key.
>> Accesses via access_guest_real should not be key checked.
>>
>> For actual accesses, key checking is done by
>> copy_from/to_user_key (which internally uses MVCOS/MVCP/MVCS).
>> In cases where accessibility is checked without an actual access,
>> this is performed by getting the storage key and checking if the access
>> key matches. In both cases, if applicable, storage and fetch protection
>> override are honored.
>>
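
As an aside, for anyone reading along who is less familiar with key-controlled
protection: the "access key matches storage key" check mentioned above boils
down to roughly the following. This is a simplified sketch only, not code from
this patch, and key_allows_access is a made-up name; fetch protection override
and storage protection override are then applied on top of this basic check.

/*
 * Simplified sketch of key-controlled protection (illustrative only).
 * A storage key byte holds the access-control bits (ACC, bits 0-3)
 * and the fetch-protection bit (F, bit 4).
 */
static bool key_allows_access(u8 storage_key, u8 access_key, bool fetch)
{
        u8 acc = storage_key >> 4;            /* access-control bits */
        bool fp = storage_key & 0x08;         /* fetch-protection bit */

        /* access key 0 matches any storage key */
        if (access_key == 0 || access_key == acc)
                return true;
        /* on a key mismatch, fetches are denied only if F is set */
        return fetch && !fp;
}
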
>> Signed-off-by: Janis Schoetterl-Glausch <scgl@...ux.ibm.com>
>> Reviewed-by: Janosch Frank <frankja@...ux.ibm.com>
>
> Reviewed-by: Christian Borntraeger <borntraeger@...ux.ibm.com>
>
>> ---
>>  arch/s390/include/asm/ctl_reg.h |   2 +
>>  arch/s390/include/asm/page.h    |   2 +
>>  arch/s390/kvm/gaccess.c         | 187 ++++++++++++++++++++++++++++++--
>>  arch/s390/kvm/gaccess.h         |  77 +++++++++++--
>>  arch/s390/kvm/intercept.c       |  12 +-
>>  arch/s390/kvm/kvm-s390.c        |   4 +-
>>  6 files changed, 253 insertions(+), 31 deletions(-)
>>
>> diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
>> index 04dc65f8901d..c800199a376b 100644
>> --- a/arch/s390/include/asm/ctl_reg.h
>> +++ b/arch/s390/include/asm/ctl_reg.h
>> @@ -12,6 +12,8 @@
>>  #define CR0_CLOCK_COMPARATOR_SIGN       BIT(63 - 10)
>>  #define CR0_LOW_ADDRESS_PROTECTION      BIT(63 - 35)
>> +#define CR0_FETCH_PROTECTION_OVERRIDE   BIT(63 - 38)
>> +#define CR0_STORAGE_PROTECTION_OVERRIDE BIT(63 - 39)
>>  #define CR0_EMERGENCY_SIGNAL_SUBMASK    BIT(63 - 49)
>>  #define CR0_EXTERNAL_CALL_SUBMASK       BIT(63 - 50)
>>  #define CR0_CLOCK_COMPARATOR_SUBMASK    BIT(63 - 52)
>> diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
>> index d98d17a36c7b..cfc4d6fb2385 100644
>> --- a/arch/s390/include/asm/page.h
>> +++ b/arch/s390/include/asm/page.h
>> @@ -20,6 +20,8 @@
>>  #define PAGE_SIZE        _PAGE_SIZE
>>  #define PAGE_MASK        _PAGE_MASK
>>  #define PAGE_DEFAULT_ACC 0
>> +/* storage-protection override */
>> +#define PAGE_SPO_ACC     9
>>  #define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
>>
>>  #define HPAGE_SHIFT      20
>> diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
>> index 4460808c3b9a..7fca0cff4c12 100644
>> --- a/arch/s390/kvm/gaccess.c
>> +++ b/arch/s390/kvm/gaccess.c
>> @@ -10,6 +10,7 @@
>>  #include <linux/mm_types.h>
>>  #include <linux/err.h>
>>  #include <linux/pgtable.h>
>> +#include <linux/bitfield.h>
>>  #include <asm/gmap.h>
>>  #include "kvm-s390.h"
>> @@ -794,6 +795,79 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
>>          return 1;
>>  }
>>
>> +static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
>> +                                           union asce asce)
>> +{
>> +        psw_t *psw = &vcpu->arch.sie_block->gpsw;
>> +        unsigned long override;
>> +
>> +        if (mode == GACC_FETCH || mode == GACC_IFETCH) {
>> +                /* check if fetch protection override enabled */
>> +                override = vcpu->arch.sie_block->gcr[0];
>> +                override &= CR0_FETCH_PROTECTION_OVERRIDE;
>> +                /* not applicable if subject to DAT && private space */
>> +                override = override && !(psw_bits(*psw).dat && asce.p);
>> +                return override;
>> +        }
>> +        return false;
>> +}
>> +
>> +static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
>> +{
>> +        return ga < 2048 && ga + len <= 2048;
>> +}
>> +
>> +static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
>> +{
>> +        /* check if storage protection override enabled */
>> +        return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
>> +}
>> +
>> +static bool storage_prot_override_applies(u8 access_control)
>> +{
>> +        /* matches special storage protection override key (9) -> allow */
>> +        return access_control == PAGE_SPO_ACC;
>> +}
>> +
[...]
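
(In case anyone wonders about fetch_prot_override_applies() above: the
ga < 2048 check is not redundant, it also covers the corner case where
ga + len wraps around.)
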
>> +int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
>> +                          void *data, unsigned long len, enum gacc_mode mode,
>> +                          u8 access_key)
>>  {
>>          psw_t *psw = &vcpu->arch.sie_block->gpsw;
>>          unsigned long nr_pages, idx;
>>          unsigned long gpa_array[2];
>>          unsigned int fragment_len;
>>          unsigned long *gpas;
>> +        enum prot_type prot;
>>          int need_ipte_lock;
>>          union asce asce;
>> +        bool try_storage_prot_override;
>> +        bool try_fetch_prot_override;
> These are used only once, so we could get rid of those. On the other hand this
> variant might be slightly more readable, so I am fine either way.

I don't know whether the compiler would manage to cache the calls across loop
iterations, but the functions just perform a few checks, so it shouldn't matter
much either way.
I'm inclined to keep the variables: they move a bit of code out of the loop
body and, as you say, that might help a bit with readability, even if not much.
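
To illustrate the trade-off with a made-up standalone example (all names here
are hypothetical, not from the patch): hoisting the loop-invariant predicate
keeps the loop body flat, at the cost of an extra local.

        /* the helper result cannot change across iterations,
         * so evaluate it once, before the loop */
        bool try_fast_path = fast_path_applicable(ctx);

        for (i = 0; i < n; i++) {
                if (try_fast_path && fast_path_applies(&items[i]))
                        handle_fast(&items[i]);
                else
                        handle_slow(&items[i]);
        }
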
>
>
>>          int rc;
>>
>>          if (!len)
>> @@ -904,16 +1022,47 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
>>          gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
>>          if (!gpas)
>>                  return -ENOMEM;
>> +        try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
>> +        try_storage_prot_override = storage_prot_override_applicable(vcpu);
>>          need_ipte_lock = psw_bits(*psw).dat && !asce.r;
>>          if (need_ipte_lock)
>>                  ipte_lock(vcpu);
>> -        rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode);
>> -        for (idx = 0; idx < nr_pages && !rc; idx++) {
>> +        /*
>> +         * Since we do the access further down ultimately via a move instruction
>> +         * that does key checking and returns an error in case of a protection
>> +         * violation, we don't need to do the check during address translation.
>> +         * Skip it by passing access key 0, which matches any storage key,
>> +         * obviating the need for any further checks. As a result the check is
>> +         * handled entirely in hardware on access; we only need to take care to
>> +         * forego key protection checking if fetch protection override applies or
>> +         * retry with the special key 9 in case of storage protection override.
>> +         */
>> +        rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
>> +        if (rc)
>> +                goto out_unlock;
>> +        for (idx = 0; idx < nr_pages; idx++) {
>>                  fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
>> -                rc = access_guest_page(vcpu->kvm, mode, gpas[idx], data, fragment_len);
>> +                if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
>> +                        rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
>> +                                               data, fragment_len);
>> +                } else {
>> +                        rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
>> +                                                        data, fragment_len, access_key);
>> +                }
>> +                if (rc == PGM_PROTECTION && try_storage_prot_override)
>> +                        rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
>> +                                                        data, fragment_len, PAGE_SPO_ACC);
>> +                if (rc == PGM_PROTECTION)
>> +                        prot = PROT_TYPE_KEYC;
>> +                if (rc)
>> +                        break;
>>                  len -= fragment_len;
>>                  data += fragment_len;
>> +                ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
>>          }
>> +        if (rc > 0)
>> +                rc = trans_exc(vcpu, rc, ga, ar, mode, prot);
>> +out_unlock:
>>          if (need_ipte_lock)
>>                  ipte_unlock(vcpu);
>>          if (nr_pages > ARRAY_SIZE(gpa_array))
[...]