Message-ID: <9e008ea0-e715-4c04-842a-824ea37790a2@amazon.com>
Date: Tue, 14 Jan 2025 16:08:00 +0000
From: Nikita Kalyazin <kalyazin@...zon.com>
To: <michael.day@....com>, <willy@...radead.org>, <pbonzini@...hat.com>,
<linux-fsdevel@...r.kernel.org>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>
CC: <david@...hat.com>, <jthoughton@...gle.com>, <michael.roth@....com>,
<ackerleytng@...gle.com>, <graf@...zon.de>, <jgowans@...zon.com>,
<roypat@...zon.co.uk>, <derekmn@...zon.com>, <nsaenz@...zon.es>,
<xmarcalx@...zon.com>
Subject: Re: [RFC PATCH 2/2] KVM: guest_memfd: use filemap_grab_folios in
write
On 10/01/2025 21:08, Mike Day wrote:
> On 1/10/25 09:46, Nikita Kalyazin wrote:
>> The write syscall on guest_memfd makes use of filemap_grab_folios to
>> grab folios in batches. This speeds up population by 8.3% due to the
>> reduction in locking and tree walking when adding folios to the
>> pagecache.
>>
>> Signed-off-by: Nikita Kalyazin <kalyazin@...zon.com>
>> ---
>> virt/kvm/guest_memfd.c | 176 +++++++++++++++++++++++++++++++++--------
>> 1 file changed, 143 insertions(+), 33 deletions(-)
>>
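For context, the path being optimized is driven from userspace roughly
like this (a minimal sketch: KVM_CREATE_GUEST_MEMFD is the existing KVM
ioctl, while pwrite() on the returned guest_memfd fd relies on the write
support this series introduces; error handling is trimmed):

#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int populate_gmem(int vm_fd, const void *payload, size_t size)
{
	struct kvm_create_guest_memfd gmem = {
		.size = size,	/* must be page-aligned */
	};
	int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);

	if (gmem_fd < 0)
		return -1;

	/* Offset and count must both be page-aligned; a short write
	 * is possible, e.g. when a signal is pending. */
	if (pwrite(gmem_fd, payload, size, 0) != (ssize_t)size) {
		close(gmem_fd);
		return -1;
	}

	return gmem_fd;
}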
>> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
>> index e80566ef56e9..ccfadc3a7389 100644
>> --- a/virt/kvm/guest_memfd.c
>> +++ b/virt/kvm/guest_memfd.c
>> @@ -102,17 +102,134 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
>> return filemap_grab_folio(inode->i_mapping, index);
>> }
>>
>> +/*
>> + * Returns locked folios on success. The caller is responsible for
>> + * setting the up-to-date flag before the memory is mapped into the guest.
>> + * There is no backing storage for the memory, so the folios will remain
>> + * up-to-date until they're removed.
>> + *
>> + * Ignore accessed, referenced, and dirty flags. The memory is
>> + * unevictable and there is no storage to write back to.
>> + */
>> +static int kvm_gmem_get_folios(struct inode *inode, pgoff_t index,
>> + struct folio **folios, int num)
>> +{
>> + return filemap_grab_folios(inode->i_mapping, index, folios, num);
>> +}
>> +
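To make the locking contract above concrete, a caller consumes the
batch roughly as follows (illustrative sketch only; the real consumer
is kvm_kmem_gmem_write_inner() below):

static void consume_batch(struct inode *inode, pgoff_t index)
{
	struct folio *folios[FILEMAP_GET_FOLIOS_BATCH_SIZE];
	int i, num;

	num = kvm_gmem_get_folios(inode, index, folios,
				  FILEMAP_GET_FOLIOS_BATCH_SIZE);
	if (num < 0)
		return;

	for (i = 0; i < num; i++) {
		/* ... populate the folio contents here ... */
		folio_mark_uptodate(folios[i]);	/* caller's responsibility */
		folio_unlock(folios[i]);	/* folios come back locked */
		folio_put(folios[i]);		/* drop the grab reference */
	}
}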
>> #if defined(CONFIG_KVM_GENERIC_PRIVATE_MEM) && !defined(CONFIG_KVM_AMD_SEV)
>> +static int kvm_kmem_gmem_write_inner(struct inode *inode, pgoff_t index,
>> + const void __user *buf,
>> + struct folio **folios, int num)
>> +{
>> + int ret = 0, i, num_grabbed, num_written;
>> +
>> + num_grabbed = kvm_gmem_get_folios(inode, index, folios, num);
>> + if (num_grabbed < 0)
>> + return num_grabbed;
>> +
>> + for (i = 0; i < num_grabbed; i++) {
>> + struct folio *folio = folios[i];
>> + void *vaddr;
>> +
>> + if (folio_test_hwpoison(folio)) {
>> + folio_unlock(folio);
>> + folio_put(folio);
>> + ret = -EFAULT;
>> + break;
>> + }
>> +
>> + if (folio_test_uptodate(folio)) {
>> + folio_unlock(folio);
>> + folio_put(folio);
>> + ret = -ENOSPC;
>> + break;
>> + }
>> +
>> + folio_unlock(folio);
>> +
>> + vaddr = kmap_local_folio(folio, 0);
>> + ret = copy_from_user(vaddr, buf + (i << PAGE_SHIFT), PAGE_SIZE);
>> + if (ret)
>> + ret = -EINVAL;
>> + kunmap_local(vaddr);
>> +
>> + if (ret) {
>> + folio_put(folio);
>> + break;
>> + } else {
>> + kvm_gmem_mark_prepared(folio);
>> + folio_put(folio);
>> + }
>> + }
>> +
>> + num_written = i;
>> +
>> + for (i = num_written; i < num_grabbed; i++) {
>> + folio_unlock(folios[i]);
>> + folio_put(folios[i]);
>> + }
>> +
>> + return num_written ?: ret;
>> +}
>> +
>> +static struct folio *kvm_kmem_gmem_write_folio(struct inode *inode, pgoff_t index,
>> + const char __user *buf)
>>
>
> This could probably be rewritten as:
>
> struct folio *p_folio;
> int ret;
>
> ret = kvm_kmem_gmem_write_inner(inode, index, buf, &p_folio, 1);
>
> if (ret == 1)
> return p_folio;
> else
> return ERR_PTR(ret);
>
> Would remove a few lines of duplicated code and use only one prototype.
Indeed! Thanks for the suggestion, will apply in the next revision.
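Folded together, the wrapper then reduces to something like this
(sketch of the suggestion above; note that a 0 return from
kvm_kmem_gmem_write_inner() would also need mapping to an error, since
ERR_PTR(0) is NULL):

static struct folio *kvm_kmem_gmem_write_folio(struct inode *inode,
					       pgoff_t index,
					       const char __user *buf)
{
	struct folio *folio;
	int ret;

	ret = kvm_kmem_gmem_write_inner(inode, index, buf, &folio, 1);

	return ret == 1 ? folio : ERR_PTR(ret);
}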
>
> Mike
>
>> +{
>> + struct folio *folio;
>> + void *vaddr;
>> + int ret = 0;
>> +
>> + folio = kvm_gmem_get_folio(inode, index);
>> + if (IS_ERR(folio))
>> + return ERR_PTR(-EFAULT);
>> +
>> + if (folio_test_hwpoison(folio)) {
>> + ret = -EFAULT;
>> + goto out_unlock_put;
>> + }
>> +
>> + if (folio_test_uptodate(folio)) {
>> + ret = -ENOSPC;
>> + goto out_unlock_put;
>> + }
>> +
>> + folio_unlock(folio);
>> +
>> + vaddr = kmap_local_folio(folio, 0);
>> + ret = copy_from_user(vaddr, buf, PAGE_SIZE);
>> + if (ret)
>> + ret = -EINVAL;
>> + kunmap_local(vaddr);
>> +
>> + if (ret) {
>> + folio_put(folio);
>> + goto out_err;
>> + }
>> +
>> + kvm_gmem_mark_prepared(folio);
>> + folio_put(folio);
>> +
>> + return folio;
>> +
>> +out_unlock_put:
>> + folio_unlock(folio);
>> + folio_put(folio);
>> +out_err:
>> + return ERR_PTR(ret);
>> +}
>> +
>> static ssize_t kvm_kmem_gmem_write(struct file *file, const char __user *buf,
>> size_t count, loff_t *offset)
>> {
>> + struct inode *inode = file_inode(file);
>> + int ret = 0, batch_size = FILEMAP_GET_FOLIOS_BATCH_SIZE;
>> pgoff_t start, end, index;
>> - ssize_t ret = 0;
>>
>> if (!PAGE_ALIGNED(*offset) || !PAGE_ALIGNED(count))
>> return -EINVAL;
>>
>> - if (*offset + count > i_size_read(file_inode(file)))
>> + if (*offset + count > i_size_read(inode))
>> return -EINVAL;
>>
>> if (!buf)
>> @@ -123,9 +240,8 @@ static ssize_t kvm_kmem_gmem_write(struct file *file, const char __user *buf,
>>
>> filemap_invalidate_lock(file->f_mapping);
>>
>> - for (index = start; index < end; ) {
>> - struct folio *folio;
>> - void *vaddr;
>> + for (index = start; index + batch_size - 1 < end; ) {
>> + struct folio *folios[FILEMAP_GET_FOLIOS_BATCH_SIZE] = { NULL };
>> pgoff_t buf_offset = (index - start) << PAGE_SHIFT;
>>
>> if (signal_pending(current)) {
>> @@ -133,46 +249,40 @@ static ssize_t kvm_kmem_gmem_write(struct file *file, const char __user *buf,
>> goto out;
>> }
>>
>> - folio = kvm_gmem_get_folio(file_inode(file), index);
>> - if (IS_ERR(folio)) {
>> - ret = -EFAULT;
>> + ret = kvm_kmem_gmem_write_inner(inode, index, buf + buf_offset, folios, batch_size);
>> + if (ret < 0)
>> goto out;
>> - }
>>
>> - if (folio_test_hwpoison(folio)) {
>> - folio_unlock(folio);
>> - folio_put(folio);
>> - ret = -EFAULT;
>> + index += ret;
>> + if (ret < batch_size)
>> + break;
>> + }
>> +
>> + for (; index < end; index++) {
>> + struct folio *folio;
>> + pgoff_t buf_offset = (index - start) << PAGE_SHIFT;
>> +
>> + if (signal_pending(current)) {
>> + ret = -EINTR;
>> goto out;
>> }
>>
>> - if (folio_test_uptodate(folio)) {
>> - folio_unlock(folio);
>> - folio_put(folio);
>> - ret = -ENOSPC;
>> + folio = kvm_kmem_gmem_write_folio(inode, index,
>> + buf + buf_offset);
>> + if (IS_ERR(folio)) {
>> + ret = PTR_ERR(folio);
>> goto out;
>> }
>> -
>> - folio_unlock(folio);
>> -
>> - vaddr = kmap_local_folio(folio, 0);
>> - ret = copy_from_user(vaddr, buf + buf_offset, PAGE_SIZE);
>> - if (ret)
>> - ret = -EINVAL;
>> - kunmap_local(vaddr);
>> -
>> - kvm_gmem_mark_prepared(folio);
>> - folio_put(folio);
>> -
>> - index = folio_next_index(folio);
>> - *offset += PAGE_SIZE;
>> }
>>
>> out:
>> filemap_invalidate_unlock(file->f_mapping);
>> + if (index > start) {
>> + *offset += (index - start) << PAGE_SHIFT;
>> + return (index - start) << PAGE_SHIFT;
>> + }
>>
>> - return ret && start == (*offset >> PAGE_SHIFT) ?
>> - ret : *offset - (start << PAGE_SHIFT);
>> + return ret;
>> }
>> #endif
>>
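Since the syscall now reports partial progress (the number of bytes
written so far) instead of failing the whole request, a userspace
caller would typically retry short writes, e.g. (illustrative sketch;
pwrite() returns -1 with errno set, e.g. EINTR, on failure):

#include <unistd.h>

static ssize_t gmem_pwrite_all(int gmem_fd, const char *buf, size_t len,
			       off_t off)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = pwrite(gmem_fd, buf + done, len - done,
				   off + done);

		if (n <= 0)
			return done ? (ssize_t)done : n;
		done += n;
	}

	return done;
}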