Message-ID: <CANpmjNP+nLfMKLj-4L4wXBfQpO5N0Y6q_TEkxjM+Z0WXxPvVxg@mail.gmail.com>
Date: Tue, 28 Mar 2023 13:55:14 +0200
From: Marco Elver <elver@...gle.com>
To: Muchun Song <songmuchun@...edance.com>
Cc: glider@...gle.com, dvyukov@...gle.com, akpm@...ux-foundation.org,
jannh@...gle.com, sjpark@...zon.de, muchun.song@...ux.dev,
kasan-dev@...glegroups.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/6] mm: kfence: simplify kfence pool initialization
On Tue, 28 Mar 2023 at 11:58, Muchun Song <songmuchun@...edance.com> wrote:
>
> There are three similar loops that initialize the kfence pool. Merge
> them into one loop to simplify the code and make it more efficient.
>
> Signed-off-by: Muchun Song <songmuchun@...edance.com>
Reviewed-by: Marco Elver <elver@...gle.com>
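
For anyone skimming the thread: the key point of the merge is that a
page is now marked PG_slab only after kfence_protect() has succeeded
for that object, so a mid-loop failure can simply return addr instead
of running the old reset_slab undo pass over the whole pool. A toy
userspace sketch of that merge-and-early-return pattern (illustrative
only; all names here are invented and this is not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_OBJECTS 8

	struct meta { int id; bool marked; };
	static struct meta metadata[NUM_OBJECTS];

	/* Stand-in for kfence_protect(); always succeeds in this toy. */
	static bool protect(int i) { (void)i; return true; }

	/*
	 * One pass: initialize, try to protect, and only then mark.
	 * A failure returns the index; nothing set up later needs undoing.
	 */
	static int init_pool(void)
	{
		for (int i = 0; i < NUM_OBJECTS; i++) {
			metadata[i].id = i;		/* was: separate metadata loop */
			if (!protect(i))
				return i;		/* was: goto reset_slab + undo loop */
			metadata[i].marked = true;	/* was: separate marking loop */
		}
		return -1;				/* success */
	}

	int main(void)
	{
		printf("init_pool() returned %d\n", init_pool());
		return 0;
	}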
> ---
>  mm/kfence/core.c | 47 ++++++-----------------------------------------
>  1 file changed, 6 insertions(+), 41 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 7d01a2c76e80..de62a84d4830 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -539,35 +539,10 @@ static void rcu_guarded_free(struct rcu_head *h)
>  static unsigned long kfence_init_pool(void)
>  {
>  	unsigned long addr = (unsigned long)__kfence_pool;
> -	struct page *pages;
>  	int i;
>
>  	if (!arch_kfence_init_pool())
>  		return addr;
> -
> -	pages = virt_to_page(__kfence_pool);
> -
> -	/*
> -	 * Set up object pages: they must have PG_slab set, to avoid freeing
> -	 * these as real pages.
> -	 *
> -	 * We also want to avoid inserting kfence_free() in the kfree()
> -	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
> -	 * enters __slab_free() slow-path.
> -	 */
> -	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> -		struct slab *slab = page_slab(nth_page(pages, i));
> -
> -		if (!i || (i % 2))
> -			continue;
> -
> -		__folio_set_slab(slab_folio(slab));
> -#ifdef CONFIG_MEMCG
> -		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
> -				   MEMCG_DATA_OBJCGS;
> -#endif
> -	}
> -
>  	/*
>  	 * Protect the first 2 pages. The first page is mostly unnecessary, and
>  	 * merely serves as an extended guard page. However, adding one
> @@ -581,8 +556,9 @@ static unsigned long kfence_init_pool(void)
>  		addr += PAGE_SIZE;
>  	}
>
> -	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
> +	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
>  		struct kfence_metadata *meta = &kfence_metadata[i];
> +		struct slab *slab = page_slab(virt_to_page(addr));
>
>  		/* Initialize metadata. */
>  		INIT_LIST_HEAD(&meta->list);
> @@ -593,26 +569,15 @@ static unsigned long kfence_init_pool(void)
>
>  		/* Protect the right redzone. */
>  		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
> -			goto reset_slab;
> -
> -		addr += 2 * PAGE_SIZE;
> -	}
> -
> -	return 0;
> -
> -reset_slab:
> -	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> -		struct slab *slab = page_slab(nth_page(pages, i));
> +			return addr;
>
> -		if (!i || (i % 2))
> -			continue;
> +		__folio_set_slab(slab_folio(slab));
>  #ifdef CONFIG_MEMCG
> -		slab->memcg_data = 0;
> +		slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
>  #endif
> -		__folio_clear_slab(slab_folio(slab));
>  	}
>
> -	return addr;
> +	return 0;
>  }
>
>  static bool __init kfence_init_pool_early(void)
> --
> 2.11.0
>