Message-ID: <87k0nd1wgx.wl-maz@kernel.org>
Date: Tue, 01 Jun 2021 15:38:22 +0100
From: Marc Zyngier <maz@...nel.org>
To: Quentin Perret <qperret@...gle.com>
Cc: will@...nel.org, james.morse@....com, alexandru.elisei@....com,
catalin.marinas@....com, suzuki.poulose@....com,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
kernel-team@...roid.com, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/7] KVM: arm64: Remove list_head from hyp_page
On Thu, 27 May 2021 13:51:30 +0100,
Quentin Perret <qperret@...gle.com> wrote:
>
> The list_head member of struct hyp_page is only needed when the page is
> attached to a free-list, which by definition implies the page is free.
> As such, nothing prevents us from using the page itself to store the
> list_head, hence reducing the size of the vmemmap.
>
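The trick here is the classic intrusive free list: a free page has no
owner, so its first bytes can double as the list node, and zeroing the
node on removal keeps the page fully zeroed. A minimal self-contained
sketch of the idea (made-up names, nothing hyp-specific):

/* Illustration only: an intrusive free list living in the free pages. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE       4096

struct fnode {
        struct fnode *next, *prev;
};

/* Empty circular list. */
static struct fnode free_list = { &free_list, &free_list };

/* The page is free, so reuse its first bytes as the list node. */
static void put_page(void *page)
{
        struct fnode *n = page;

        n->next = free_list.next;
        n->prev = &free_list;
        free_list.next->prev = n;
        free_list.next = n;
}

static void *get_page(void)
{
        struct fnode *n = free_list.next;

        if (n == &free_list)
                return NULL;
        n->prev->next = n->next;
        n->next->prev = n->prev;
        memset(n, 0, sizeof(*n));       /* hand back a fully zeroed page */
        return n;
}

int main(void)
{
        static _Alignas(struct fnode) unsigned char page[PAGE_SIZE];

        put_page(page);
        printf("got %p back\n", get_page());
        return 0;
}

The memset() on the removal path mirrors what page_remove_from_list()
does below.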
> Signed-off-by: Quentin Perret <qperret@...gle.com>
> ---
> arch/arm64/kvm/hyp/include/nvhe/memory.h | 1 -
> arch/arm64/kvm/hyp/nvhe/page_alloc.c | 39 ++++++++++++++++++++----
> 2 files changed, 33 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> index fd78bde939ee..7691ab495eb4 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> @@ -12,7 +12,6 @@ struct hyp_page {
> unsigned int refcount;
> unsigned int order;
> struct hyp_pool *pool;
> - struct list_head node;
> };
>
> extern u64 __hyp_vmemmap;
> diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> index 7ee882f36767..ce7379f1480b 100644
> --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> @@ -62,6 +62,34 @@ static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
>
> }
>
> +/*
> + * Pages that are available for allocation are tracked in free-lists, so we use
> + * the pages themselves to store the list nodes to avoid wasting space. As the
> + * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
> + * path to optimize allocation speed), we also need to clean up the list node in
> + * each page when we take it out of the list.
> + */
> +static inline void page_remove_from_list(struct hyp_page *p)
> +{
> + struct list_head *node = (struct list_head *)hyp_page_to_virt(p);
Nit: How about changing hyp_page_to_virt() so that it returns a
convenient 'void *', and getting rid of the ugly casts?
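Something like this, perhaps (completely untested, and assuming I have
the memory.h helper names right):

static inline void *hyp_page_to_virt(struct hyp_page *page)
{
        return hyp_phys_to_virt(hyp_page_to_phys(page));
}

after which the casts simply disappear:

        struct list_head *node = hyp_page_to_virt(p);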
> +
> + __list_del_entry(node);
> + memset(node, 0, sizeof(*node));
> +}
> +
> +static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
> +{
> + struct list_head *node = (struct list_head *)hyp_page_to_virt(p);
> +
> + INIT_LIST_HEAD(node);
> + list_add_tail(node, head);
> +}
> +
> +static inline struct hyp_page *node_to_page(struct list_head *node)
> +{
> + return (struct hyp_page *)hyp_virt_to_page(node);
Why is this cast necessary? If I'm not mistaken, hyp_vmemmap is
already cast as a 'struct hyp_page *', so hyp_virt_to_page() should
return the same type.
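If so, the following should be enough (untested):

static inline struct hyp_page *node_to_page(struct list_head *node)
{
        return hyp_virt_to_page(node);
}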
> +}
> +
> static void __hyp_attach_page(struct hyp_pool *pool,
> struct hyp_page *p)
> {
> @@ -83,14 +111,14 @@ static void __hyp_attach_page(struct hyp_pool *pool,
> break;
>
> /* Take the buddy out of its list, and coalesce with @p */
> - list_del_init(&buddy->node);
> + page_remove_from_list(buddy);
> buddy->order = HYP_NO_ORDER;
> p = min(p, buddy);
> }
>
> /* Mark the new head, and insert it */
> p->order = order;
> - list_add_tail(&p->node, &pool->free_area[order]);
> + page_add_to_list(p, &pool->free_area[order]);
> }
>
> static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
> @@ -99,7 +127,7 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
> {
> struct hyp_page *buddy;
>
> - list_del_init(&p->node);
> + page_remove_from_list(p);
> while (p->order > order) {
> /*
> * The buddy of order n - 1 currently has HYP_NO_ORDER as it
> @@ -110,7 +138,7 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
> p->order--;
> buddy = __find_buddy_nocheck(pool, p, p->order);
> buddy->order = p->order;
> - list_add_tail(&buddy->node, &pool->free_area[buddy->order]);
> + page_add_to_list(buddy, &pool->free_area[buddy->order]);
> }
>
> return p;
> @@ -158,7 +186,7 @@ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
> }
>
> /* Extract it from the tree at the right order */
> - p = list_first_entry(&pool->free_area[i], struct hyp_page, node);
> + p = node_to_page(pool->free_area[i].next);
> p = __hyp_extract_page(pool, p, order);
>
> hyp_set_page_refcounted(p);
> @@ -186,7 +214,6 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
> for (i = 0; i < nr_pages; i++) {
> p[i].pool = pool;
> p[i].order = 0;
> - INIT_LIST_HEAD(&p[i].node);
> hyp_set_page_refcounted(&p[i]);
> }
>
> --
> 2.31.1.818.g46aad6cb9e-goog
>
>
Thanks,
M.
--
Without deviation from the norm, progress is not possible.