Date:   Thu, 21 Apr 2022 15:31:37 -0400
From:   Peter Xu <peterx@...hat.com>
To:     Ben Gardon <bgardon@...gle.com>
Cc:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
        Paolo Bonzini <pbonzini@...hat.com>,
        Sean Christopherson <seanjc@...gle.com>,
        David Matlack <dmatlack@...gle.com>,
        Jim Mattson <jmattson@...gle.com>,
        David Dunn <daviddunn@...gle.com>,
        Jing Zhang <jingzhangos@...gle.com>,
        Junaid Shahid <junaids@...gle.com>
Subject: Re: [PATCH v6 10/10] KVM: selftests: Test disabling NX hugepages on
 a VM

On Wed, Apr 20, 2022 at 10:35:13AM -0700, Ben Gardon wrote:
> Add an argument to the NX huge pages test to test disabling the feature
> on a VM using the new capability.
> 
> Reviewed-by: David Matlack <dmatlack@...gle.com>
> Signed-off-by: Ben Gardon <bgardon@...gle.com>
> ---
>  .../selftests/kvm/include/kvm_util_base.h     |  2 +
>  tools/testing/selftests/kvm/lib/kvm_util.c    | 16 +++-
>  .../selftests/kvm/x86_64/nx_huge_pages_test.c | 75 +++++++++++++++----
>  .../kvm/x86_64/nx_huge_pages_test.sh          | 39 ++++++----
>  4 files changed, 104 insertions(+), 28 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index 1dac3c6607f1..8f6aad253392 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -414,4 +414,6 @@ uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name);
>  
>  uint32_t guest_get_vcpuid(void);
>  
> +int vm_disable_nx_huge_pages(struct kvm_vm *vm);
> +
>  #endif /* SELFTEST_KVM_UTIL_BASE_H */
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 392abd3c323d..96faa14f4f32 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -112,6 +112,11 @@ int vm_check_cap(struct kvm_vm *vm, long cap)
>  	return ret;
>  }
>  
> +static int __vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
> +{
> +	return ioctl(vm->fd, KVM_ENABLE_CAP, cap);
> +}
> +
>  /* VM Enable Capability
>   *
>   * Input Args:
> @@ -128,7 +133,7 @@ int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
>  {
>  	int ret;
>  
> -	ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
> +	ret = __vm_enable_cap(vm, cap);
>  	TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
>  		"  rc: %i errno: %i", ret, errno);
>  
> @@ -2756,3 +2761,12 @@ uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
>  		    stat_name, ret);
>  	return data;
>  }
> +
> +int vm_disable_nx_huge_pages(struct kvm_vm *vm)
> +{
> +	struct kvm_enable_cap cap = { 0 };
> +
> +	cap.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES;
> +	cap.args[0] = 0;
> +	return __vm_enable_cap(vm, &cap);
> +}

Nitpick: I think it's nicer if we name vm_disable_nx_huge_pages() as
__vm_disable_nx_huge_pages() to show that its retval is not checked,
compared to most of the existing vm_*() helpers, where we do check it.
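
For example (sketch only, reusing the hunk quoted above):

  /* Double underscore to hint that the return value is not asserted on. */
  int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
  {
  	struct kvm_enable_cap cap = { 0 };

  	cap.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES;
  	cap.args[0] = 0;
  	return __vm_enable_cap(vm, &cap);
  }

The caller would then do its own TEST_ASSERT() on the return value, as the
test below already does.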

> diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> index 1c14368500b7..41b228b8cac3 100644
> --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> @@ -13,6 +13,8 @@
>  #include <fcntl.h>
>  #include <stdint.h>
>  #include <time.h>
> +#include <linux/reboot.h>
> +#include <sys/syscall.h>
>  
>  #include <test_util.h>
>  #include "kvm_util.h"
> @@ -86,18 +88,45 @@ static void check_split_count(struct kvm_vm *vm, int expected_splits)
>  		    expected_splits, actual_splits);
>  }
>  
> -int main(int argc, char **argv)
> +void run_test(bool disable_nx_huge_pages)
>  {
>  	struct kvm_vm *vm;
>  	struct timespec ts;
> +	uint64_t pages;
>  	void *hva;
> -
> -	if (argc != 2 || strtol(argv[1], NULL, 0) != MAGIC_TOKEN) {
> -		printf("This test must be run through nx_huge_pages_test.sh");
> -		return KSFT_SKIP;
> +	int r;
> +
> +	pages = vm_pages_needed(VM_MODE_DEFAULT, 1, DEFAULT_GUEST_PHY_PAGES,
> +				0, 0);
> +	vm = vm_create_without_vcpus(VM_MODE_DEFAULT, pages);
> +
> +	if (disable_nx_huge_pages) {
> +		kvm_check_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES);

Nit: try to fail gracefully on old kernels?
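
For example, something like this (just a sketch; print_skip()/KSFT_SKIP are
what the other selftests use for this):

  	/* Skip, rather than fail, on kernels without the new cap. */
  	if (disable_nx_huge_pages &&
  	    !kvm_check_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES)) {
  		print_skip("KVM_CAP_VM_DISABLE_NX_HUGE_PAGES not supported");
  		exit(KSFT_SKIP);
  	}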

> +
> +		/*
> +		 * Check if this process has the reboot permissions needed to
> +		 * disable NX huge pages on a VM.
> +		 *
> +		 * The reboot call below will never have any effect because
> +		 * the magic values are not set correctly, however the
> +		 * permission check is done before the magic value check.
> +		 */
> +		r = syscall(SYS_reboot, 0, 0, 0, NULL);

This looks fine, but instead of depending on the reboot syscall magics not
being set, how about we simply pass an extra arg to this test program
indicating whether we have the correct permissions?

So:

  $NXECUTABLE 887563923 1

Means we have permissions and should proceed with disabling NX huge
pages, whereas

  $NXECUTABLE 887563923 0

means we don't.  I just think it's not necessary to add that layer of
dependency.  What do you think?
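
Then the test would parse that second argument itself, e.g. (sketch only;
run_test() would grow a matching parameter, naming is up to you):

  int main(int argc, char **argv)
  {
  	bool reboot_permissions;

  	if (argc != 3 || strtol(argv[1], NULL, 0) != MAGIC_TOKEN) {
  		printf("This test must be run through nx_huge_pages_test.sh");
  		return KSFT_SKIP;
  	}

  	/* The wrapper script tells us whether it granted cap_sys_boot. */
  	reboot_permissions = strtol(argv[2], NULL, 0);

  	run_test(false, reboot_permissions);
  	run_test(true, reboot_permissions);

  	return 0;
  }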

> +		if (r && errno == EPERM) {
> +			r = vm_disable_nx_huge_pages(vm);
> +			TEST_ASSERT(errno == EPERM,
> +				    "This process should not have permission to disable NX huge pages");

I just remembered one thing: in the previous patch that handles enabling
this new cap, we checked args[0] before perm.  Maybe we want to check perm
before args[0] there to look even safer.  This is also a super-nit. :)
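
I.e. something along these lines in the KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
handler (sketch only, going from memory of the earlier patch, so please
double-check the field names there):

  	case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
  		/* Reject unprivileged callers before looking at the args. */
  		r = -EPERM;
  		if (!capable(CAP_SYS_BOOT))
  			break;

  		r = -EINVAL;
  		if (cap->args[0])
  			break;

  		kvm->arch.disable_nx_huge_pages = true;
  		r = 0;
  		break;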

> +			return;
> +		}
> +
> +		TEST_ASSERT(r && errno == EINVAL,
> +			    "Reboot syscall should fail with -EINVAL");
> +
> +		r = vm_disable_nx_huge_pages(vm);
> +		TEST_ASSERT(!r, "Disabling NX huge pages should succeed if process has reboot permissions");
>  	}
>  
> -	vm = vm_create_default(0, 0, guest_code);
> +	vm_vcpu_add_default(vm, 0, guest_code);
>  
>  	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
>  				    HPAGE_GPA, HPAGE_SLOT,
> @@ -130,23 +159,27 @@ int main(int argc, char **argv)
>  	/*
>  	 * Next, the guest will execute from the first huge page, causing it
>  	 * to be remapped at 4k.
> +	 *
> +	 * If NX huge pages are disabled, this should have no effect.
>  	 */
>  	vcpu_run(vm, 0);
> -	check_2m_page_count(vm, 1);
> -	check_split_count(vm, 1);
> +	check_2m_page_count(vm, disable_nx_huge_pages ? 2 : 1);
> +	check_split_count(vm, disable_nx_huge_pages ? 0 : 1);
>  
>  	/*
>  	 * Executing from the third huge page (previously unaccessed) will
>  	 * cause part to be mapped at 4k.
> +	 *
> +	 * If NX huge pages are disabled, it should be mapped at 2M.
>  	 */
>  	vcpu_run(vm, 0);
> -	check_2m_page_count(vm, 1);
> -	check_split_count(vm, 2);
> +	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
> +	check_split_count(vm, disable_nx_huge_pages ? 0 : 2);
>  
>  	/* Reading from the first huge page again should have no effect. */
>  	vcpu_run(vm, 0);
> -	check_2m_page_count(vm, 1);
> -	check_split_count(vm, 2);
> +	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
> +	check_split_count(vm, disable_nx_huge_pages ? 0 : 2);
>  
>  	/*
>  	 * Give recovery thread time to run. The wrapper script sets
> @@ -158,8 +191,11 @@ int main(int argc, char **argv)
>  
>  	/*
>  	 * Now that the reclaimer has run, all the split pages should be gone.
> +	 *
> +	 * If NX huge pages are disabled, the reclaimer will not run, so
> +	 * nothing should change from here on.
>  	 */
> -	check_2m_page_count(vm, 1);
> +	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
>  	check_split_count(vm, 0);
>  
>  	/*
> @@ -167,10 +203,21 @@ int main(int argc, char **argv)
>  	 * reading from it causes a huge page mapping to be installed.
>  	 */
>  	vcpu_run(vm, 0);
> -	check_2m_page_count(vm, 2);
> +	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 2);
>  	check_split_count(vm, 0);
>  
>  	kvm_vm_free(vm);
> +}
> +
> +int main(int argc, char **argv)
> +{
> +	if (argc != 2 || strtol(argv[1], NULL, 0) != MAGIC_TOKEN) {
> +		printf("This test must be run through nx_huge_pages_test.sh");
> +		return KSFT_SKIP;
> +	}
> +
> +	run_test(false);
> +	run_test(true);
>  
>  	return 0;
>  }
> diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> index c2429ad8066a..b23993f3aab1 100755
> --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> @@ -4,22 +4,35 @@
>  # tools/testing/selftests/kvm/nx_huge_page_test.sh
>  # Copyright (C) 2022, Google LLC.
>  
> -NX_HUGE_PAGES=$(cat /sys/module/kvm/parameters/nx_huge_pages)
> -NX_HUGE_PAGES_RECOVERY_RATIO=$(cat /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio)
> -NX_HUGE_PAGES_RECOVERY_PERIOD=$(cat /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms)
> -HUGE_PAGES=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
> +NX_HUGE_PAGES=$(sudo cat /sys/module/kvm/parameters/nx_huge_pages)
> +NX_HUGE_PAGES_RECOVERY_RATIO=$(sudo cat /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio)
> +NX_HUGE_PAGES_RECOVERY_PERIOD=$(sudo cat /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms)
> +HUGE_PAGES=$(sudo cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
>  
> -echo 1 > /sys/module/kvm/parameters/nx_huge_pages
> -echo 1 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
> -echo 100 > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
> -echo 200 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
> +sudo echo 1 > /sys/module/kvm/parameters/nx_huge_pages
> +sudo echo 1 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
> +sudo echo 100 > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
> +sudo echo 200 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages

Shall we use these "sudo"s directly in the patch where the new test is
introduced?

Thanks,

> +
> +NXECUTABLE="$(dirname $0)/nx_huge_pages_test"
> +
> +# Test with reboot permissions
> +sudo setcap cap_sys_boot+ep $NXECUTABLE
> +$NXECUTABLE 887563923
>  
> -"$(dirname $0)"/nx_huge_pages_test 887563923
>  RET=$?
>  
> -echo $NX_HUGE_PAGES > /sys/module/kvm/parameters/nx_huge_pages
> -echo $NX_HUGE_PAGES_RECOVERY_RATIO > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
> -echo $NX_HUGE_PAGES_RECOVERY_PERIOD > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
> -echo $HUGE_PAGES > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
> +if [ $RET -eq 0 ]; then
> +	# Test without reboot permissions
> +	sudo setcap cap_sys_boot-ep $NXECUTABLE
> +	$NXECUTABLE 887563923
> +
> +	RET=$?
> +fi
> +
> +sudo echo $NX_HUGE_PAGES > /sys/module/kvm/parameters/nx_huge_pages
> +sudo echo $NX_HUGE_PAGES_RECOVERY_RATIO > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
> +sudo echo $NX_HUGE_PAGES_RECOVERY_PERIOD > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
> +sudo echo $HUGE_PAGES > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
>  
>  exit $RET
> -- 
> 2.36.0.rc0.470.gd361397f0d-goog
> 

-- 
Peter Xu
