Message-ID: <1fe44cd4-4ea9-ad68-2690-54c78dd4f5ad@linux.ibm.com>
Date: Thu, 31 Mar 2022 15:25:31 +0200
From: Janosch Frank <frankja@...ux.ibm.com>
To: Claudio Imbrenda <imbrenda@...ux.ibm.com>, kvm@...r.kernel.org
Cc: borntraeger@...ibm.com, thuth@...hat.com, pasic@...ux.ibm.com,
david@...hat.com, linux-s390@...r.kernel.org,
linux-kernel@...r.kernel.org, scgl@...ux.ibm.com,
mimu@...ux.ibm.com, nrb@...ux.ibm.com
Subject: Re: [PATCH v9 04/18] KVM: s390: pv: refactor s390_reset_acc
On 3/30/22 14:25, Claudio Imbrenda wrote:
> Refactor s390_reset_acc so that it can be reused in upcoming patches.
>
> We don't want to hold all the locks used in a walk_page_range for too
> long, and the destroy page UVC does take some time to complete.
> Therefore we quickly gather the pages to destroy, and then destroy them
> without holding all the locks.
>
> The new refactored function optionally allows returning early without
> completing if a fatal signal is pending (returning an appropriate
> error code). Two wrappers are provided to call the new function.
>
> Signed-off-by: Claudio Imbrenda <imbrenda@...ux.ibm.com>
> (dropping Janosch's Ack because of major changes to the patch)
Reviewed-by: Janosch Frank <frankja@...ux.ibm.com>
[...]
> +#define DESTROY_LOOP_THRESHOLD 32
A question out of curiosity:
Is there any particular reason for the number?
Have you tested other numbers and experienced a speedup/slowdown?
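For what it's worth, the on-stack footprint looks harmless either way:
with 32 entries the walk state is (2 + 32) * 8 = 272 bytes on 64-bit s390.
A quick back-of-the-envelope check (my own sketch, not part of the patch,
struct copied from the hunk below):

	#define DESTROY_LOOP_THRESHOLD 32

	struct reset_walk_state {
		unsigned long next;
		unsigned long count;
		unsigned long pfns[DESTROY_LOOP_THRESHOLD];
	};

	/* 2 * 8 + 32 * 8 = 272 bytes on 64-bit, fine for an on-stack variable */
	_Static_assert(sizeof(struct reset_walk_state) <= 512,
		       "batch state should stay small enough to live on the stack");

So bumping the threshold would still be cheap stack-wise; the more
interesting trade-off is probably how long the mmap_read_lock is held
per batch.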
> +
> +struct reset_walk_state {
> + unsigned long next;
> + unsigned long count;
> + unsigned long pfns[DESTROY_LOOP_THRESHOLD];
> +};
> +
> +static int s390_gather_pages(pte_t *ptep, unsigned long addr,
> + unsigned long next, struct mm_walk *walk)
> {
> + struct reset_walk_state *p = walk->private;
> pte_t pte = READ_ONCE(*ptep);
>
> - /* There is a reference through the mapping */
> - if (pte_present(pte))
> - WARN_ON_ONCE(uv_destroy_owned_page(pte_val(pte) & PAGE_MASK));
> -
> - return 0;
> + if (pte_present(pte)) {
> + /* we have a reference from the mapping, take an extra one */
> + get_page(phys_to_page(pte_val(pte)));
> + p->pfns[p->count] = phys_to_pfn(pte_val(pte));
> + p->next = next;
> + p->count++;
> + }
> + return p->count >= DESTROY_LOOP_THRESHOLD;
> }
>
> -static const struct mm_walk_ops reset_acc_walk_ops = {
> - .pte_entry = __s390_reset_acc,
> +static const struct mm_walk_ops gather_pages_ops = {
> + .pte_entry = s390_gather_pages,
> };
>
> -#include <linux/sched/mm.h>
> -void s390_reset_acc(struct mm_struct *mm)
> +/*
> + * Call the Destroy secure page UVC on each page in the given array of PFNs.
> + * Each page needs to have an extra reference, which will be released here.
> + */
> +void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
> {
> - if (!mm_is_protected(mm))
> - return;
> - /*
> - * we might be called during
> - * reset: we walk the pages and clear
> - * close of all kvm file descriptors: we walk the pages and clear
> - * exit of process on fd closure: vma already gone, do nothing
> - */
> - if (!mmget_not_zero(mm))
> - return;
> - mmap_read_lock(mm);
> - walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
> - mmap_read_unlock(mm);
> - mmput(mm);
> + unsigned long i;
> +
> + for (i = 0; i < count; i++) {
> + /* we always have an extra reference */
> + uv_destroy_owned_page(pfn_to_phys(pfns[i]));
> + /* get rid of the extra reference */
> + put_page(pfn_to_page(pfns[i]));
> + cond_resched();
> + }
> +}
> +EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns);
> +
> +/**
> + * __s390_uv_destroy_range - Walk the given range of the given address
> + * space, and call the destroy secure page UVC on each page.
> + * Optionally exit early if a fatal signal is pending.
> + * @mm: the mm to operate on
> + * @start: the start of the range
> + * @end: the end of the range
> + * @interruptible: if not 0, stop when a fatal signal is received
> + * Return: 0 on success, -EINTR if the function stopped before completing
> + */
> +int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
> + unsigned long end, bool interruptible)
> +{
> + struct reset_walk_state state = { .next = start };
> + int r = 1;
> +
> + while (r > 0) {
> + state.count = 0;
> + mmap_read_lock(mm);
> + r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
> + mmap_read_unlock(mm);
> + cond_resched();
> + s390_uv_destroy_pfns(state.count, state.pfns);
> + if (interruptible && fatal_signal_pending(current))
> + return -EINTR;
> + }
> + return 0;
> }
> -EXPORT_SYMBOL_GPL(s390_reset_acc);
> +EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
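The two wrappers mentioned in the commit message are elided by the [...]
above; just to make sure I read the interface right, I'd expect them to be
thin shims along these lines (names are my guess, not necessarily what the
patch uses):

	/* fire-and-forget variant: ignore signals, cannot fail for the caller */
	static inline void hypothetical_uv_destroy_range(struct mm_struct *mm,
							 unsigned long start,
							 unsigned long end)
	{
		(void)__s390_uv_destroy_range(mm, start, end, false);
	}

	/* interruptible variant: propagate -EINTR if a fatal signal stops the walk */
	static inline int hypothetical_uv_destroy_range_interruptible(struct mm_struct *mm,
								      unsigned long start,
								      unsigned long end)
	{
		return __s390_uv_destroy_range(mm, start, end, true);
	}

i.e. the interruptible variant is what the fatal_signal_pending() check in
the loop above is there for.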
>
> /**
> * s390_remove_old_asce - Remove the topmost level of page tables from the