Message-ID: <Yvbxgvk+5HqEl7J2@MiWiFi-R3L-srv>
Date: Sat, 13 Aug 2022 08:34:10 +0800
From: Baoquan He <bhe@...hat.com>
To: Eric DeVolder <eric.devolder@...cle.com>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
kexec@...ts.infradead.org, ebiederm@...ssion.com,
dyoung@...hat.com, vgoyal@...hat.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com,
hpa@...or.com, nramas@...ux.microsoft.com, thomas.lendacky@....com,
robh@...nel.org, efault@....de, rppt@...nel.org, david@...hat.com,
sourabhjain@...ux.ibm.com, konrad.wilk@...cle.com,
boris.ostrovsky@...cle.com
Subject: Re: [PATCH v10 8/8] x86/crash: Add x86 crash hotplug support
On 07/21/22 at 02:17pm, Eric DeVolder wrote:
...snip....
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index e58798f636d4..bb59596c8bea 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -2065,6 +2065,17 @@ config CRASH_DUMP
> (CONFIG_RELOCATABLE=y).
> For more details see Documentation/admin-guide/kdump/kdump.rst
>
> +config CRASH_MAX_MEMORY_RANGES
> + depends on CRASH_DUMP && KEXEC_FILE && (HOTPLUG_CPU || MEMORY_HOTPLUG)
> + int
> + default 32768
Do we need to enforce page alignment and a minimum size for this value?
I checked crash_load_segments() in arch/x86/kernel/crash.c: the page
size alignment is done in kexec_add_buffer(). And in
load_crashdump_segments() of
kexec-tools/kexec/arch/i386/crashdump-x86.c, the elfcorehdr is created
by the code below with an align of 1024, while the generic add_buffer()
implementation enforces that memsz is page aligned and bumps the passed
align up to page alignment.

	elfcorehdr = add_buffer(info, tmp, bufsz, memsz, align, min_base,
				max_addr, -1);

Maybe we should at least mention this in the help text to make people
aware of it.
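For reference, the generic kexec_add_buffer() in kernel/kexec_file.c
does roughly the following (quoting from memory, so the exact code may
differ a bit):

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

So whatever size CRASH_MAX_MEMORY_RANGES contributes to memsz ends up
page aligned anyway; the question is only whether the help text should
say so.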
> + help
> + For the kexec_file_load path, specify the maximum number of
> + memory regions, eg. as represented by the 'System RAM' entries
> + in /proc/iomem, that the elfcorehdr buffer/segment can accommodate.
> + This value is combined with NR_CPUS and multiplied by Elf64_Phdr
> + size to determine the final buffer size.
> +
> config KEXEC_JUMP
> bool "kexec jump"
> depends on KEXEC && HIBERNATION
> diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
> index 8b6bd63530dc..96051d8e4b45 100644
> --- a/arch/x86/include/asm/crash.h
> +++ b/arch/x86/include/asm/crash.h
> @@ -9,4 +9,24 @@ int crash_setup_memmap_entries(struct kimage *image,
> struct boot_params *params);
> void crash_smp_send_stop(void);
>
> +void *arch_map_crash_pages(unsigned long paddr, unsigned long size);
> +#define arch_map_crash_pages arch_map_crash_pages
> +
> +void arch_unmap_crash_pages(void **ptr);
> +#define arch_unmap_crash_pages arch_unmap_crash_pages
> +
> +void arch_crash_handle_hotplug_event(struct kimage *image,
> + unsigned int hp_action, unsigned int cpu);
> +#define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event
> +
> +#ifdef CONFIG_HOTPLUG_CPU
> +static inline int crash_hotplug_cpu_support(void) { return 1; }
> +#define crash_hotplug_cpu_support crash_hotplug_cpu_support
> +#endif
> +
> +#ifdef CONFIG_MEMORY_HOTPLUG
> +static inline int crash_hotplug_memory_support(void) { return 1; }
> +#define crash_hotplug_memory_support crash_hotplug_memory_support
> +#endif
> +
> #endif /* _ASM_X86_CRASH_H */
> diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
> index 9ceb93c176a6..55dda4fcde6e 100644
> --- a/arch/x86/kernel/crash.c
> +++ b/arch/x86/kernel/crash.c
> @@ -25,6 +25,7 @@
> #include <linux/slab.h>
> #include <linux/vmalloc.h>
> #include <linux/memblock.h>
> +#include <linux/highmem.h>
>
> #include <asm/processor.h>
> #include <asm/hardirq.h>
> @@ -397,7 +398,17 @@ int crash_load_segments(struct kimage *image)
> image->elf_headers = kbuf.buffer;
> image->elf_headers_sz = kbuf.bufsz;
>
> +#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
> + /* Ensure elfcorehdr segment large enough for hotplug changes */
> + kbuf.memsz = (CONFIG_NR_CPUS_DEFAULT + CONFIG_CRASH_MAX_MEMORY_RANGES) * sizeof(Elf64_Phdr);
Do we need to break this line to keep it within 80 characters?
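E.g. something like this, just to illustrate the wrapping:

	kbuf.memsz = (CONFIG_NR_CPUS_DEFAULT + CONFIG_CRASH_MAX_MEMORY_RANGES) *
		     sizeof(Elf64_Phdr);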
> + /* For marking as usable to crash kernel */
> + image->elf_headers_sz = kbuf.memsz;
Do we need this code comment?
> + /* Record the index of the elfcorehdr segment */
> + image->elfcorehdr_index = image->nr_segments;
And the comment here?
> + image->elfcorehdr_index_valid = true;
> +#else
> kbuf.memsz = kbuf.bufsz;
> +#endif
> kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
> kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
> ret = kexec_add_buffer(&kbuf);
> @@ -412,3 +423,107 @@ int crash_load_segments(struct kimage *image)
> return ret;
> }
> #endif /* CONFIG_KEXEC_FILE */
> +
> +#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
> +void *arch_map_crash_pages(unsigned long paddr, unsigned long size)
> +{
> + /*
> + * NOTE: The addresses and sizes passed to this routine have
> + * already been fully aligned on page boundaries. There is no
> + * need for massaging the address or size.
> + */
Can we move the code comment above the function interface?
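I.e. move it to something like:

	/*
	 * NOTE: The addresses and sizes passed to this routine have
	 * already been fully aligned on page boundaries. There is no
	 * need for massaging the address or size.
	 */
	void *arch_map_crash_pages(unsigned long paddr, unsigned long size)
	{
		void *ptr = NULL;
		...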
> + void *ptr = NULL;
> +
> + /* NOTE: requires arch_kexec_[un]protect_crashkres() for write access */
Do we need this code comment? On an arch where the protection is done,
we surely need to do the protect/unprotect.
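Because on such an arch the caller would have to wrap the update with
the protect/unprotect pair anyway, e.g. (just a sketch of what I have
in mind, not tested):

	/* sketch: in the generic hotplug handler, around the arch callback */
	arch_kexec_unprotect_crashkres();
	arch_crash_handle_hotplug_event(image, hp_action, cpu);
	arch_kexec_protect_crashkres();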
> + if (size > 0) {
> + struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
> +
> + ptr = kmap_local_page(page);
> + }
> +
> + return ptr;
> +}
> +
> +void arch_unmap_crash_pages(void **ptr)
> +{
> + if (ptr) {
> + if (*ptr)
> + kunmap_local(*ptr);
> + *ptr = NULL;
> + }
> +}
> +
> +/**
> + * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes
> + * @image: the active struct kimage
> + * @hp_action: the hot un/plug action being handled
> + * @cpu: when KEXEC_CRASH_HP_ADD/REMOVE_CPU, the cpu affected
> + *
> + * To accurately reflect hot un/plug changes, the elfcorehdr (which
> + * is passed to the crash kernel via the elfcorehdr= parameter)
> + * must be updated with the new list of CPUs and memories. The new
> + * elfcorehdr is prepared in a kernel buffer, and then it is
> + * written on top of the existing/old elfcorehdr.
> + *
> + * For hotplug changes to elfcorehdr to work, two conditions are
> + * needed:
> + * First, the segment containing the elfcorehdr must be large enough
> + * to permit a growing number of resources. See the
> + * CONFIG_CRASH_MAX_MEMORY_RANGES description.
> + * Second, purgatory must explicitly exclude the elfcorehdr from the
> + * list of segments it checks (since the elfcorehdr changes and thus
> + * would require an update to purgatory itself to update the digest).
Isn't this a generic concept for crash hotplug? Should we move it out to
some generic place?
> + *
> + */
> +void arch_crash_handle_hotplug_event(struct kimage *image,
> + unsigned int hp_action, unsigned int cpu)
The passed-in 'cpu' is not used at all; what is it added for? I didn't
see any explanation about it.
> +{
> + struct kexec_segment *ksegment;
> + unsigned char *ptr = NULL;
> + unsigned long elfsz = 0;
> + void *elfbuf = NULL;
> + unsigned long mem, memsz;
> +
> + /*
> + * Elfcorehdr_index_valid checked in crash_core:handle_hotplug_event()
> + */
> + ksegment = &image->segment[image->elfcorehdr_index];
> + mem = ksegment->mem;
> + memsz = ksegment->memsz;
> +
> + /*
> + * Create the new elfcorehdr reflecting the changes to CPU and/or
> + * memory resources.
> + */
> + if (prepare_elf_headers(image, &elfbuf, &elfsz)) {
> + pr_err("crash hp: unable to prepare elfcore headers");
> + goto out;
> + }
> + if (elfsz > memsz) {
> + pr_err("crash hp: update elfcorehdr elfsz %lu > memsz %lu",
> + elfsz, memsz);
> + goto out;
> + }
> +
> + /*
> + * At this point, we are all but assured of success.
> + * Copy new elfcorehdr into destination.
> + */
> + ptr = arch_map_crash_pages(mem, memsz);
> + if (ptr) {
> + /*
> + * Temporarily invalidate the crash image while the
> + * elfcorehdr is updated.
> + */
> + xchg(&kexec_crash_image, NULL);
> + memcpy_flushcache((void *)ptr, elfbuf, elfsz);
> + xchg(&kexec_crash_image, image);
> + }
> + arch_unmap_crash_pages((void **)&ptr);
> + pr_debug("crash hp: re-loaded elfcorehdr at 0x%lx\n", mem);
> +
> +out:
> + if (elfbuf)
> + vfree(elfbuf);
> +}
> +#endif
> --
> 2.31.1
>