Message-ID: <Ys6p2MvQdL/lx7RE@li-4a3a4a4c-28e5-11b2-a85c-a8d192c6f089.ibm.com>
Date: Wed, 13 Jul 2022 13:17:44 +0200
From: Alexander Gordeev <agordeev@...ux.ibm.com>
To: Matthew Wilcox <willy@...radead.org>
Cc: Alexander Egorenkov <egorenar@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>, Baoquan He <bhe@...hat.com>,
Christoph Hellwig <hch@....de>, linux-kernel@...r.kernel.org,
linux-s390@...r.kernel.org
Subject: Re: [PATCH v2 1/1] s390/crash: allow multi-segment iterators
On Thu, Jul 07, 2022 at 01:54:22PM +0100, Matthew Wilcox wrote:
> On Thu, Jul 07, 2022 at 08:01:15AM +0200, Alexander Gordeev wrote:
> > Rework copy_oldmem_page() to allow multi-segment iterators.
> > Reuse existing iterate_iovec macro as is and only relevant
> > bits from __iterate_and_advance macro.
> Or do it properly?
>
> You should probably put a mutex around all of this because if you have two
> threads accessing the hsa at the same time, they'll use the same buffer.
> But that's a pre-existing problem. I also fixed the pre-existing bug
> where you were using 'count' when you meant to use 'len'.
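Right - since concurrent readers would end up using the same HSA bounce
buffer, the serialization you describe would look roughly like this
(sketch only; hsa_buf_mutex and do_memcpy_hsa() are made-up names, not
existing symbols):

/*
 * Sketch only: serialize use of the shared HSA bounce buffer.
 * hsa_buf_mutex and do_memcpy_hsa() are illustrative placeholders.
 */
static DEFINE_MUTEX(hsa_buf_mutex);

static int memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	int rc;

	mutex_lock(&hsa_buf_mutex);
	rc = do_memcpy_hsa(iter, src, count);	/* fills and drains the shared buffer */
	mutex_unlock(&hsa_buf_mutex);
	return rc;
}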
Thank you, Matthew!
Would you mind being credited with Suggested-by on the fix(es)?
> Uncompiled. You might need to include <linux/uio.h> somewhere.
The problem with your suggestion is that memcpy()/copyout() might fail,
since the pages to be copied are not guaranteed to be mapped on s390.
For that we use the s390-specific memcpy_real() routine, which takes
only physical addresses and bypasses paging altogether (I added some
comments to the removed code, just in case you are interested).
Yet, I am still going to reuse and extend your approach and will
hopefully come up with something soon.
As a side note, there is an intention to bring this code more in line
with other architectures, but not just yet.
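Roughly, the direction I have in mind looks like this (sketch only, not
compiled; the helper name and the bounce-buffer chunking are illustrative,
and the static buffer would need the locking discussed above):

/*
 * Sketch only: copy from (possibly unmapped) old memory into an
 * iov_iter by going through memcpy_real(), which takes a physical
 * source address and bypasses paging, using a small bounce buffer.
 */
static int copy_oldmem_real_iter(struct iov_iter *iter, unsigned long src,
				 size_t count)
{
	static char buf[PAGE_SIZE];	/* shared, needs serialization */
	size_t len;

	while (count) {
		len = min(count, sizeof(buf));
		if (memcpy_real(buf, src, len))
			return -EFAULT;
		if (copy_to_iter(buf, len, iter) != len)
			return -EFAULT;
		src += len;
		count -= len;
	}
	return 0;
}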
> diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
> index 236b34b75ddb..d8b4c526e0f0 100644
> --- a/arch/s390/include/asm/sclp.h
> +++ b/arch/s390/include/asm/sclp.h
> @@ -143,7 +143,7 @@ int sclp_ap_configure(u32 apid);
> int sclp_ap_deconfigure(u32 apid);
> int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
> int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
> -int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
> +int memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count);
> void sclp_ocf_cpc_name_copy(char *dst);
>
> static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
> diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
> index 28124d0fa1d5..6e4dde377f8e 100644
> --- a/arch/s390/kernel/crash_dump.c
> +++ b/arch/s390/kernel/crash_dump.c
> @@ -130,53 +130,11 @@ static inline void *load_real_addr(void *addr)
> return (void *)real_addr;
> }
>
> -/*
> - * Copy memory of the old, dumped system to a kernel space virtual address
> - */
> -int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
> -{
> - unsigned long len;
> - void *ra;
> - int rc;
> -
> - while (count) {
> - if (!oldmem_data.start && src < sclp.hsa_size) {
> - /* Copy from zfcp/nvme dump HSA area */
> - len = min(count, sclp.hsa_size - src);
> - rc = memcpy_hsa_kernel(dst, src, len);
> - if (rc)
> - return rc;
> - } else {
> - /* Check for swapped kdump oldmem areas */
> - if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
> - src -= oldmem_data.start;
> - len = min(count, oldmem_data.size - src);
> - } else if (oldmem_data.start && src < oldmem_data.size) {
> - len = min(count, oldmem_data.size - src);
> - src += oldmem_data.start;
> - } else {
> - len = count;
> - }
> - if (is_vmalloc_or_module_addr(dst)) {
There is no 1:1 match between virtual and physical addresses for
vmalloc/module areas.
> - ra = load_real_addr(dst);
load_real_addr() obtains the physical address from a virtual one, so
that it can be passed to memcpy_real().
> - len = min(PAGE_SIZE - offset_in_page(ra), len);
> - } else {
> - ra = dst;
Here the virtual address matches the physical one, so it can be used as-is.
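Conceptually, resolving the destination for memcpy_real() amounts to
something like this (illustration only, not the actual helper; the real
load_real_addr() does the lookup in an s390-specific way via the lra
instruction):

/*
 * Illustration only: resolve a kernel virtual destination to a physical
 * address for memcpy_real(). vmalloc/module mappings need a page lookup;
 * for the linear mapping the virtual address already equals the physical one.
 */
static void *resolve_real_dst(void *dst)
{
	struct page *page;

	if (!is_vmalloc_or_module_addr(dst))
		return dst;			/* identity-mapped */
	page = vmalloc_to_page(dst);
	if (!page)
		return NULL;
	return (void *)(page_to_phys(page) + offset_in_page(dst));
}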
> - }
> - if (memcpy_real(ra, src, len))
> - return -EFAULT;
As the source address might be unmapped, copy_to_iter()->memcpy()
would not be safe to use here.
> - }
> - dst += len;
> - src += len;
> - count -= len;
> - }
> - return 0;
> -}
> -
> /*
> * Copy memory of the old, dumped system to a user space virtual address
> */
> -static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
> +static int copy_oldmem_iter(struct iov_iter *iter, unsigned long src,
> + size_t count)
> {
> unsigned long len;
> int rc;
> @@ -185,7 +143,7 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
> if (!oldmem_data.start && src < sclp.hsa_size) {
> /* Copy from zfcp/nvme dump HSA area */
> len = min(count, sclp.hsa_size - src);
> - rc = memcpy_hsa_user(dst, src, len);
> + rc = memcpy_hsa_iter(iter, src, len);
> if (rc)
> return rc;
> } else {
> @@ -199,8 +157,8 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
> } else {
> len = count;
> }
> - rc = copy_to_user_real(dst, src, count);
> - if (rc)
> + rc = copy_to_iter(iter, src, len);
As the source address might be unmapped, copy_to_iter()->raw_copy_to_user()
is not safe to use here.
> + if (rc != len)
> return rc;
> }
> dst += len;
Thanks!