Message-ID: <87a6m6fmhf.fsf@disp2133>
Date: Wed, 28 Jul 2021 11:10:20 -0500
From: ebiederm@...ssion.com (Eric W. Biederman)
To: Arnd Bergmann <arnd@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Arnd Bergmann <arnd@...db.de>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
"James E.J. Bottomley" <James.Bottomley@...senPartnership.com>,
Helge Deller <deller@....de>,
Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ibm.com>,
"David S. Miller" <davem@...emloft.net>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Feng Tang <feng.tang@...el.com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-mips@...r.kernel.org, linux-parisc@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-s390@...r.kernel.org,
sparclinux@...r.kernel.org, linux-arch@...r.kernel.org,
linux-api@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH v5 2/6] kexec: avoid compat_alloc_user_space

Arnd Bergmann <arnd@...nel.org> writes:
> From: Arnd Bergmann <arnd@...db.de>
>
> kimage_alloc_init() expects a __user pointer, so compat_sys_kexec_load()
> uses compat_alloc_user_space() to convert the layout and copy it back
> onto the caller's user space stack.
>
> Moving the user space access directly into the syscall handlers actually
> makes the code simpler, as the conversion for compat mode can now be
> done on kernel memory.
Acked-by: "Eric W. Biederman" <ebiederm@...ssion.com>
>
> Co-developed-by: Eric Biederman <ebiederm@...ssion.com>
> Co-developed-by: Christoph Hellwig <hch@...radead.org>
> Link: https://lore.kernel.org/lkml/YPbtsU4GX6PL7%2F42@infradead.org/
> Link: https://lore.kernel.org/lkml/m1y2cbzmnw.fsf@fess.ebiederm.org/
> Signed-off-by: Arnd Bergmann <arnd@...db.de>
> ---
> kernel/kexec.c | 61 +++++++++++++++++++++-----------------------------
> 1 file changed, 25 insertions(+), 36 deletions(-)
>
> diff --git a/kernel/kexec.c b/kernel/kexec.c
> index 9c7aef8f4bb6..b5e40f069768 100644
> --- a/kernel/kexec.c
> +++ b/kernel/kexec.c
> @@ -19,26 +19,9 @@
>
> #include "kexec_internal.h"
>
> -static int copy_user_segment_list(struct kimage *image,
> - unsigned long nr_segments,
> - struct kexec_segment __user *segments)
> -{
> - int ret;
> - size_t segment_bytes;
> -
> - /* Read in the segments */
> - image->nr_segments = nr_segments;
> - segment_bytes = nr_segments * sizeof(*segments);
> - ret = copy_from_user(image->segment, segments, segment_bytes);
> - if (ret)
> - ret = -EFAULT;
> -
> - return ret;
> -}
> -
> static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
> unsigned long nr_segments,
> - struct kexec_segment __user *segments,
> + struct kexec_segment *segments,
> unsigned long flags)
> {
> int ret;
> @@ -58,10 +41,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
> return -ENOMEM;
>
> image->start = entry;
> -
> - ret = copy_user_segment_list(image, nr_segments, segments);
> - if (ret)
> - goto out_free_image;
> + image->nr_segments = nr_segments;
> + memcpy(image->segment, segments, nr_segments * sizeof(*segments));
>
> if (kexec_on_panic) {
> /* Enable special crash kernel control page alloc policy. */
> @@ -104,7 +85,7 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
> }
>
> static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
> - struct kexec_segment __user *segments, unsigned long flags)
> + struct kexec_segment *segments, unsigned long flags)
> {
> struct kimage **dest_image, *image;
> unsigned long i;
> @@ -250,7 +231,8 @@ static inline int kexec_load_check(unsigned long nr_segments,
> SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
> struct kexec_segment __user *, segments, unsigned long, flags)
> {
> - int result;
> + struct kexec_segment *ksegments;
> + unsigned long result;
>
> result = kexec_load_check(nr_segments, flags);
> if (result)
> @@ -261,7 +243,12 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
> ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
> return -EINVAL;
>
> - result = do_kexec_load(entry, nr_segments, segments, flags);
> + ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
> + if (IS_ERR(ksegments))
> + return PTR_ERR(ksegments);
> +
> + result = do_kexec_load(entry, nr_segments, ksegments, flags);
> + kfree(ksegments);
>
> return result;
> }
> @@ -273,7 +260,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
> compat_ulong_t, flags)
> {
> struct compat_kexec_segment in;
> - struct kexec_segment out, __user *ksegments;
> + struct kexec_segment *ksegments;
> unsigned long i, result;
>
> result = kexec_load_check(nr_segments, flags);
> @@ -286,24 +273,26 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
> if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
> return -EINVAL;
>
> - ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
> + ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
> + GFP_KERNEL);
> + if (!ksegments)
> + return -ENOMEM;
> +
> for (i = 0; i < nr_segments; i++) {
> result = copy_from_user(&in, &segments[i], sizeof(in));
> if (result)
> - return -EFAULT;
> + goto fail;
>
> - out.buf = compat_ptr(in.buf);
> - out.bufsz = in.bufsz;
> - out.mem = in.mem;
> - out.memsz = in.memsz;
> -
> - result = copy_to_user(&ksegments[i], &out, sizeof(out));
> - if (result)
> - return -EFAULT;
> + ksegments[i].buf = compat_ptr(in.buf);
> + ksegments[i].bufsz = in.bufsz;
> + ksegments[i].mem = in.mem;
> + ksegments[i].memsz = in.memsz;
> }
>
> result = do_kexec_load(entry, nr_segments, ksegments, flags);
>
> +fail:
> + kfree(ksegments);
> return result;
> }
> #endif
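
For context, the user-visible ABI is unchanged by the patch quoted above:
callers still pass an array of struct kexec_segment (or
compat_kexec_segment), which the syscall handler now copies into kernel
memory once, with memdup_user()/kmalloc_array(), instead of bouncing it
through compat_alloc_user_space(). Below is a rough, illustrative
userspace sketch of such a caller; the entry point, load address and
payload are placeholders, not values a real loader (e.g. kexec-tools)
would use as-is.

/* Illustrative only: invoke the raw kexec_load syscall with one segment.
 * Addresses and buffer contents are placeholders; a real loader computes
 * them from the kernel image being loaded. Requires CAP_SYS_BOOT. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

int main(void)
{
	static char payload[4096];               /* placeholder segment data */
	struct kexec_segment seg = {
		.buf   = payload,
		.bufsz = sizeof(payload),
		.mem   = (void *)0x100000,       /* placeholder physical address */
		.memsz = sizeof(payload),        /* page-aligned size */
	};

	/* nr_segments = 1; the handler copies this array into kernel memory */
	if (syscall(SYS_kexec_load, 0x100000UL, 1UL, &seg, KEXEC_ARCH_DEFAULT))
		perror("kexec_load");
	return 0;
}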