[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAC_iWjJ6t5fhGyKXuGH+RrLJuLKyfuvGWXSQ3PpK2X=T4z3T_w@mail.gmail.com>
Date: Thu, 17 Aug 2023 12:35:59 +0300
From: Ilias Apalodimas <ilias.apalodimas@...aro.org>
To: Jakub Kicinski <kuba@...nel.org>
Cc: netdev@...r.kernel.org, hawk@...nel.org, aleksander.lobakin@...el.com,
linyunsheng@...wei.com, almasrymina@...gle.com
Subject: Re: [RFC net-next 01/13] net: page_pool: split the page_pool_params
into fast and slow
Hi Jakub,
On Thu, 17 Aug 2023 at 02:43, Jakub Kicinski <kuba@...nel.org> wrote:
>
> struct page_pool is rather performance critical and we use
> 16B of the first cache line to store 2 pointers used only
> by test code. Future patches will add more informational
> (non-fast path) attributes.
>
> It's convenient for the user of the API to not have to worry
> which fields are fast and which are slow path. Use struct
> groups to split the params into the two categories internally.
LGTM and valuable, since we've been struggling to explain where new
variables should be placed, in order to affect the cache line
placement as little as possible.
Acked-by: Ilias Apalodimas <ilias.apalodimas@...aro.org>
>
> Signed-off-by: Jakub Kicinski <kuba@...nel.org>
> ---
> include/net/page_pool/types.h | 31 +++++++++++++++++++------------
> net/core/page_pool.c | 7 ++++---
> 2 files changed, 23 insertions(+), 15 deletions(-)
>
> diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
> index 887e7946a597..1c16b95de62f 100644
> --- a/include/net/page_pool/types.h
> +++ b/include/net/page_pool/types.h
> @@ -56,18 +56,22 @@ struct pp_alloc_cache {
> * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
> */
> struct page_pool_params {
> - unsigned int flags;
> - unsigned int order;
> - unsigned int pool_size;
> - int nid;
> - struct device *dev;
> - struct napi_struct *napi;
> - enum dma_data_direction dma_dir;
> - unsigned int max_len;
> - unsigned int offset;
> + struct_group_tagged(page_pool_params_fast, fast,
> + unsigned int flags;
> + unsigned int order;
> + unsigned int pool_size;
> + int nid;
> + struct device *dev;
> + struct napi_struct *napi;
> + enum dma_data_direction dma_dir;
> + unsigned int max_len;
> + unsigned int offset;
> + );
> + struct_group_tagged(page_pool_params_slow, slow,
> /* private: used by test code only */
> - void (*init_callback)(struct page *page, void *arg);
> - void *init_arg;
> + void (*init_callback)(struct page *page, void *arg);
> + void *init_arg;
> + );
> };
>
> #ifdef CONFIG_PAGE_POOL_STATS
> @@ -121,7 +125,7 @@ struct page_pool_stats {
> #endif
>
> struct page_pool {
> - struct page_pool_params p;
> + struct page_pool_params_fast p;
>
> long frag_users;
> struct page *frag_page;
> @@ -180,6 +184,9 @@ struct page_pool {
> refcount_t user_cnt;
>
> u64 destroy_cnt;
> +
> + /* Slow/Control-path information follows */
> + struct page_pool_params_slow slow;
> };
>
> struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 77cb75e63aca..ffe7782d7fc0 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -173,7 +173,8 @@ static int page_pool_init(struct page_pool *pool,
> {
> unsigned int ring_qsize = 1024; /* Default */
>
> - memcpy(&pool->p, params, sizeof(pool->p));
> +       memcpy(&pool->p, &params->fast, sizeof(pool->p));
> +       memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
>
> /* Validate only known flags were used */
> if (pool->p.flags & ~(PP_FLAG_ALL))
> @@ -372,8 +373,8 @@ static void page_pool_set_pp_info(struct page_pool *pool,
> {
> page->pp = pool;
> page->pp_magic |= PP_SIGNATURE;
> - if (pool->p.init_callback)
> - pool->p.init_callback(page, pool->p.init_arg);
> + if (pool->slow.init_callback)
> + pool->slow.init_callback(page, pool->slow.init_arg);
> }
>
> static void page_pool_clear_pp_info(struct page *page)
> --
> 2.41.0
>
Powered by blists - more mailing lists