Message-ID: <CAC_iWjKk4FPF4Pf-Lz15NrR9yWvrOehgFeSB+Wi6UzYDh8r=wQ@mail.gmail.com>
Date: Thu, 9 Nov 2023 10:13:22 +0200
From: Ilias Apalodimas <ilias.apalodimas@...aro.org>
To: Jakub Kicinski <kuba@...nel.org>
Cc: davem@...emloft.net, netdev@...r.kernel.org, edumazet@...gle.com,
pabeni@...hat.com, almasrymina@...gle.com, hawk@...nel.org
Subject: Re: [PATCH net-next 01/15] net: page_pool: split the page_pool_params
into fast and slow
On Tue, 24 Oct 2023 at 19:02, Jakub Kicinski <kuba@...nel.org> wrote:
>
> struct page_pool is rather performance critical and we use
> 16B of the first cache line to store 2 pointers used only
> by test code. Future patches will add more informational
> (non-fast path) attributes.
>
> It's convenient for the user of the API to not have to worry
> which fields are fast and which are slow path. Use struct
> groups to split the params into the two categories internally.
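
For anyone who hasn't run into struct_group_tagged() before: it wraps
the members in an anonymous union, so each member stays directly
accessible while the group as a whole also gets a named type that can
be used on its own. A hand-expanded, simplified sketch of what it
produces here (the real macro lives in include/linux/stddef.h and also
handles attributes; member lists trimmed for brevity):

/* Both union arms have identical layout, so the two sets of names
 * alias the same storage.
 */
#include <stdio.h>

struct pp_params_demo {
	union {
		struct {			/* direct access: p.flags */
			unsigned int flags;
			unsigned int order;
		};
		struct pp_params_demo_fast {	/* group access: p.fast */
			unsigned int flags;
			unsigned int order;
		} fast;
	};
	union {
		struct {
			void *init_arg;
		};
		struct pp_params_demo_slow {
			void *init_arg;
		} slow;
	};
};

int main(void)
{
	struct pp_params_demo p = { .flags = 1, .order = 2 };

	/* Same storage, reachable per-member or via the tagged group;
	 * the tag type (struct pp_params_demo_fast) can also be
	 * declared standalone, which is what lets struct page_pool
	 * embed only the fast half in its first cache line.
	 */
	printf("%u %u\n", p.flags, p.fast.flags);	/* prints "1 1" */
	return 0;
}
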
>
> Acked-by: Jesper Dangaard Brouer <hawk@...nel.org>
> Acked-by: Ilias Apalodimas <ilias.apalodimas@...aro.org>
> Reviewed-by: Mina Almasry <almasrymina@...gle.com>
> Signed-off-by: Jakub Kicinski <kuba@...nel.org>
> ---
> include/net/page_pool/types.h | 31 +++++++++++++++++++------------
> net/core/page_pool.c | 7 ++++---
> 2 files changed, 23 insertions(+), 15 deletions(-)
>
> diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
> index 6fc5134095ed..23950fcc4eca 100644
> --- a/include/net/page_pool/types.h
> +++ b/include/net/page_pool/types.h
> @@ -54,18 +54,22 @@ struct pp_alloc_cache {
> * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
> */
> struct page_pool_params {
> - unsigned int flags;
> - unsigned int order;
> - unsigned int pool_size;
> - int nid;
> - struct device *dev;
> - struct napi_struct *napi;
> - enum dma_data_direction dma_dir;
> - unsigned int max_len;
> - unsigned int offset;
> + struct_group_tagged(page_pool_params_fast, fast,
> + unsigned int flags;
> + unsigned int order;
> + unsigned int pool_size;
> + int nid;
> + struct device *dev;
> + struct napi_struct *napi;
> + enum dma_data_direction dma_dir;
> + unsigned int max_len;
> + unsigned int offset;
> + );
> + struct_group_tagged(page_pool_params_slow, slow,
> /* private: used by test code only */
> - void (*init_callback)(struct page *page, void *arg);
> - void *init_arg;
> + void (*init_callback)(struct page *page, void *arg);
> + void *init_arg;
> + );
> };
>
> #ifdef CONFIG_PAGE_POOL_STATS
> @@ -119,7 +123,7 @@ struct page_pool_stats {
> #endif
>
> struct page_pool {
> - struct page_pool_params p;
> + struct page_pool_params_fast p;
>
> long frag_users;
> struct page *frag_page;
> @@ -178,6 +182,9 @@ struct page_pool {
> refcount_t user_cnt;
>
> u64 destroy_cnt;
> +
> + /* Slow/Control-path information follows */
> + struct page_pool_params_slow slow;
> };
>
> struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 5e409b98aba0..5cae413de7cc 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -173,7 +173,8 @@ static int page_pool_init(struct page_pool *pool,
> {
> unsigned int ring_qsize = 1024; /* Default */
>
> - memcpy(&pool->p, params, sizeof(pool->p));
> + memcpy(&pool->p, &params->fast, sizeof(pool->p));
> + memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
>
> /* Validate only known flags were used */
> if (pool->p.flags & ~(PP_FLAG_ALL))
> @@ -384,8 +385,8 @@ static void page_pool_set_pp_info(struct page_pool *pool,
> * the overhead is negligible.
> */
> page_pool_fragment_page(page, 1);
> - if (pool->p.init_callback)
> - pool->p.init_callback(page, pool->p.init_arg);
> + if (pool->slow.init_callback)
> + pool->slow.init_callback(page, pool->slow.init_arg);
> }
>
> static void page_pool_clear_pp_info(struct page *page)
> --
> 2.41.0
>
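FWIW, callers shouldn't notice the split at all; drivers keep filling
in struct page_pool_params exactly as before, and the fast/slow
grouping stays internal. A minimal, hypothetical sketch
(demo_create_pool() and the sizes are made up, not from this patch):

#include <net/page_pool/types.h>

/* Hypothetical driver setup; all values are placeholders. */
static struct page_pool *demo_create_pool(struct device *dev)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,
		.order		= 0,
		.pool_size	= 1024,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	/* page_pool_init() now copies the fast and slow groups into
	 * struct page_pool separately, but the caller-visible API is
	 * unchanged.
	 */
	return page_pool_create(&pp);
}
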
I had time for a closer look; feel free to replace my ack with:
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@...aro.org>