[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <aAWoyExy0UMyxaoI@fedora>
Date: Mon, 21 Apr 2025 10:09:12 +0800
From: Baoquan He <bhe@...hat.com>
To: steven chen <chenste@...ux.microsoft.com>
Cc: zohar@...ux.ibm.com, stefanb@...ux.ibm.com,
roberto.sassu@...weicloud.com, roberto.sassu@...wei.com,
eric.snowberg@...cle.com, ebiederm@...ssion.com,
paul@...l-moore.com, code@...icks.com, bauermann@...abnow.com,
linux-integrity@...r.kernel.org, kexec@...ts.infradead.org,
linux-security-module@...r.kernel.org, linux-kernel@...r.kernel.org,
madvenka@...ux.microsoft.com, nramas@...ux.microsoft.com,
James.Bottomley@...senpartnership.com, vgoyal@...hat.com,
dyoung@...hat.com
Subject: Re: [PATCH v12 3/9] kexec: define functions to map and unmap segments
On 04/20/25 at 05:30am, steven chen wrote:
> On 4/17/2025 9:36 PM, Baoquan He wrote:
> > On 04/15/25 at 07:10pm, steven chen wrote:
> > > From: Steven Chen <chenste@...ux.microsoft.com>
> > ^^^^^^
> > > Implement kimage_map_segment() to enable IMA to map the measurement log
> > > list to the kimage structure during the kexec 'load' stage. This function
> > > gathers the source pages within the specified address range, and maps them
> > > to a contiguous virtual address range.
> > >
> > > This is a preparation for later usage.
> > >
> > > Implement kimage_unmap_segment() for unmapping segments using vunmap().
> > >
> > > From: Tushar Sugandhi <tusharsu@...ux.microsoft.com>
> > ^^^^^^
> > > Signed-off-by: Tushar Sugandhi <tusharsu@...ux.microsoft.com>
> > ^^^^^^^
> > > Cc: Eric Biederman <ebiederm@...ssion.com>
> > > Cc: Baoquan He <bhe@...hat.com>
> > > Cc: Vivek Goyal <vgoyal@...hat.com>
> > > Cc: Dave Young <dyoung@...hat.com>
> > > Signed-off-by: steven chen <chenste@...ux.microsoft.com>
> > ^^^^^
> >
> > The signing on this patch is a little confusing. I can't see who is the
> > real author, who is the co-author, between you and Tushar. You may need
> > to refer to Documentation/process/5.Posting.rst to make that clear.
>
> Hi Baoquan,
>
> From my understanding, if there is no change from the original author's
> patch, both the From tag and the Signed-off-by tag need to be added;
> otherwise, if there are changes, the Signed-off-by tag can be used.
If you don't change a patch, you can add your Signed-off-by when
posting. However, the From tag decides who the real author is. There's no
way to have two From tags on one patch. This is my personal understanding.
>
> Steven
>
> > > Acked-by: Baoquan He <bhe@...hat.com>
> > > ---
> > > include/linux/kexec.h | 6 +++++
> > > kernel/kexec_core.c | 54 +++++++++++++++++++++++++++++++++++++++++++
> > > 2 files changed, 60 insertions(+)
> > >
> > > diff --git a/include/linux/kexec.h b/include/linux/kexec.h
> > > index f0e9f8eda7a3..7d6b12f8b8d0 100644
> > > --- a/include/linux/kexec.h
> > > +++ b/include/linux/kexec.h
> > > @@ -467,13 +467,19 @@ extern bool kexec_file_dbg_print;
> > > #define kexec_dprintk(fmt, arg...) \
> > > do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
> > > +extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
> > > +extern void kimage_unmap_segment(void *buffer);
> > > #else /* !CONFIG_KEXEC_CORE */
> > > struct pt_regs;
> > > struct task_struct;
> > > +struct kimage;
> > > static inline void __crash_kexec(struct pt_regs *regs) { }
> > > static inline void crash_kexec(struct pt_regs *regs) { }
> > > static inline int kexec_should_crash(struct task_struct *p) { return 0; }
> > > static inline int kexec_crash_loaded(void) { return 0; }
> > > +static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
> > > +{ return NULL; }
> > > +static inline void kimage_unmap_segment(void *buffer) { }
> > > #define kexec_in_progress false
> > > #endif /* CONFIG_KEXEC_CORE */
> > > diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
> > > index c0bdc1686154..a5e378e1dc7f 100644
> > > --- a/kernel/kexec_core.c
> > > +++ b/kernel/kexec_core.c
> > > @@ -867,6 +867,60 @@ int kimage_load_segment(struct kimage *image,
> > > return result;
> > > }
> > > +void *kimage_map_segment(struct kimage *image,
> > > + unsigned long addr, unsigned long size)
> > > +{
> > > + unsigned long src_page_addr, dest_page_addr = 0;
> > > + unsigned long eaddr = addr + size;
> > > + kimage_entry_t *ptr, entry;
> > > + struct page **src_pages;
> > > + unsigned int npages;
> > > + void *vaddr = NULL;
> > > + int i;
> > > +
> > > + /*
> > > + * Collect the source pages and map them in a contiguous VA range.
> > > + */
> > > + npages = PFN_UP(eaddr) - PFN_DOWN(addr);
> > > + src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
> > > + if (!src_pages) {
> > > + pr_err("Could not allocate ima pages array.\n");
> > > + return NULL;
> > > + }
> > > +
> > > + i = 0;
> > > + for_each_kimage_entry(image, ptr, entry) {
> > > + if (entry & IND_DESTINATION) {
> > > + dest_page_addr = entry & PAGE_MASK;
> > > + } else if (entry & IND_SOURCE) {
> > > + if (dest_page_addr >= addr && dest_page_addr < eaddr) {
> > > + src_page_addr = entry & PAGE_MASK;
> > > + src_pages[i++] =
> > > + virt_to_page(__va(src_page_addr));
> > > + if (i == npages)
> > > + break;
> > > + dest_page_addr += PAGE_SIZE;
> > > + }
> > > + }
> > > + }
> > > +
> > > + /* Sanity check. */
> > > + WARN_ON(i < npages);
> > > +
> > > + vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
> > > + kfree(src_pages);
> > > +
> > > + if (!vaddr)
> > > + pr_err("Could not map ima buffer.\n");
> > > +
> > > + return vaddr;
> > > +}
> > > +
> > > +void kimage_unmap_segment(void *segment_buffer)
> > > +{
> > > + vunmap(segment_buffer);
> > > +}
> > > +
> > > struct kexec_load_limit {
> > > /* Mutex protects the limit count. */
> > > struct mutex mutex;
> > > --
> > > 2.43.0
> > >
>
Powered by blists - more mailing lists