Message-ID: <a7bd1e6f-b854-4172-a29a-3f0662c6fd6e@gmail.com>
Date: Mon, 14 Jul 2025 12:30:12 +0100
From: Pavel Begunkov <asml.silence@...il.com>
To: Byungchul Park <byungchul@...com>
Cc: willy@...radead.org, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org, kernel_team@...ynix.com,
kuba@...nel.org, almasrymina@...gle.com, ilias.apalodimas@...aro.org,
harry.yoo@...cle.com, hawk@...nel.org, akpm@...ux-foundation.org,
davem@...emloft.net, john.fastabend@...il.com, andrew+netdev@...n.ch,
toke@...hat.com, tariqt@...dia.com, edumazet@...gle.com, pabeni@...hat.com,
saeedm@...dia.com, leon@...nel.org, ast@...nel.org, daniel@...earbox.net,
david@...hat.com, lorenzo.stoakes@...cle.com, Liam.Howlett@...cle.com,
vbabka@...e.cz, rppt@...nel.org, surenb@...gle.com, mhocko@...e.com,
horms@...nel.org, linux-rdma@...r.kernel.org, bpf@...r.kernel.org,
vishal.moola@...il.com, hannes@...xchg.org, ziy@...dia.com,
jackmanb@...gle.com
Subject: Re: [PATCH net-next v9 1/8] netmem: introduce struct netmem_desc
mirroring struct page
On 7/14/25 05:23, Byungchul Park wrote:
> On Sat, Jul 12, 2025 at 03:39:59PM +0100, Pavel Begunkov wrote:
>> On 7/10/25 09:28, Byungchul Park wrote:
>>> To simplify struct page, the page pool members of struct page should be
>>> moved elsewhere, allowing them to be removed from struct page.
>>>
>>> Introduce a network memory descriptor, struct netmem_desc, to store these
>>> members, and union it with the existing fields in struct net_iov, which
>>> also makes it possible to organize the fields of struct net_iov.
>>
>> FWIW, regardless of the memdesc business, I think it'd be great to have
>> this patch, as it'll help with some of the netmem casting ugliness and
>> shed some cycles as well. For example, we have a bunch of
>> niov -> netmem -> niov casts in various places.
>
> If Jakub agrees with this, I will re-post it as a separate patch so
> that work that requires this base can go ahead.
I think it'd be a good idea. It's needed to clean up netmem handling,
and I'll convert io_uring and get rid of the union in niov.

The diff below should give a rough idea of what I want to use it for.
It kills __netmem_clear_lsb() to avoid casting struct page * to niov,
and it saves some masking for zcrx, see page_pool_get_dma_addr_nmdesc();
there are more places like that.
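
To make that concrete, here is a rough, purely illustrative userspace
model of the tagging scheme (not kernel code; the layouts and the
dma_addr_nmdesc() helper are simplified stand-ins, only the low-bit
NET_IOV tag mirrors the real thing). It shows why an accessor that only
has a netmem_ref must mask and branch, while a caller that already
holds the niov can reach the shared descriptor directly:

/* cc -o netmem_model netmem_model.c && ./netmem_model
 * Purely illustrative: layouts and names are simplified stand-ins,
 * only the low-bit NET_IOV tag mirrors the kernel scheme.
 */
#include <stdio.h>
#include <stdlib.h>

#define NET_IOV 1UL			/* tag bit carried in a netmem_ref */

typedef unsigned long netmem_ref;	/* tagged pointer: page or net_iov */

struct netmem_desc {			/* fields shared by pages and net_iovs */
	void *pp;			/* owning page_pool (stubbed as void *) */
	unsigned long dma_addr;
	long pp_ref_count;
};

struct net_iov {
	struct netmem_desc desc;	/* union'ed with the legacy fields in the kernel */
};

/* netmem_ref -> desc: has to strip the tag and branch on the type,
 * like netmem_to_nmdesc() in the diff below
 */
static struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem)
{
	void *ptr = (void *)(netmem & ~NET_IOV);

	if (netmem & NET_IOV)
		return &((struct net_iov *)ptr)->desc;
	return (struct netmem_desc *)ptr;	/* pp_page_to_nmdesc() */
}

/* with the niov already in hand, no masking or branching at all */
static unsigned long dma_addr_nmdesc(struct netmem_desc *desc)
{
	return desc->dma_addr;
}

int main(void)
{
	struct net_iov *niov = calloc(1, sizeof(*niov));
	netmem_ref ref = (netmem_ref)niov | NET_IOV;	/* net_iov_to_netmem() */

	niov->desc.dma_addr = 0xdead000;

	/* old style: niov -> netmem -> (mask + branch) -> desc */
	printf("via netmem_ref: %lx\n", netmem_to_nmdesc(ref)->dma_addr);
	/* new style: straight to the descriptor */
	printf("via niov->desc: %lx\n", dma_addr_nmdesc(&niov->desc));

	free(niov);
	return 0;
}

io_zcrx_sync_for_device() already holds the niov, which is why passing
&niov->desc to page_pool_get_dma_addr_nmdesc() in the diff is cheaper
than going through net_iov_to_netmem() and back.
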
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 535cf17b9134..41f3a3fd6b6c 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -247,6 +247,8 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
return page_to_pfn(netmem_to_page(netmem));
}
+#define pp_page_to_nmdesc(page) ((struct netmem_desc *)(page))
+
/* __netmem_clear_lsb - convert netmem_ref to struct net_iov * for access to
* common fields.
* @netmem: netmem reference to extract as net_iov.
@@ -262,11 +264,18 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
*
* Return: the netmem_ref cast to net_iov* regardless of its underlying type.
*/
-static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
+static inline struct net_iov *__netmem_to_niov(netmem_ref netmem)
{
return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}
+static inline struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return &__netmem_to_niov(netmem)->desc;
+ return pp_page_to_nmdesc(__netmem_to_page(netmem));
+}
+
/**
* __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
* @netmem: netmem reference to get the pointer from
@@ -280,17 +289,17 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
*/
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
- return __netmem_to_page(netmem)->pp;
+ return pp_page_to_nmdesc(__netmem_to_page(netmem))->pp;
}
static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
- return __netmem_clear_lsb(netmem)->pp;
+ return netmem_to_nmdesc(netmem)->pp;
}
static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
- return &__netmem_clear_lsb(netmem)->pp_ref_count;
+ return &netmem_to_nmdesc(netmem)->pp_ref_count;
}
static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
@@ -355,7 +364,7 @@ static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
- return __netmem_clear_lsb(netmem)->dma_addr;
+ return netmem_to_nmdesc(netmem)->dma_addr;
}
void get_netmem(netmem_ref netmem);
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index db180626be06..002858f3bcb3 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -425,9 +425,9 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}
-static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
+static inline dma_addr_t page_pool_get_dma_addr_nmdesc(struct netmem_desc *desc)
{
- dma_addr_t ret = netmem_get_dma_addr(netmem);
+ dma_addr_t ret = desc->dma_addr;
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
ret <<= PAGE_SHIFT;
@@ -435,6 +435,13 @@ static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
return ret;
}
+static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
+{
+ struct netmem_desc *desc = netmem_to_nmdesc(netmem);
+
+ return page_pool_get_dma_addr_nmdesc(desc);
+}
+
/**
* page_pool_get_dma_addr() - Retrieve the stored DMA address.
* @page: page allocated from a page pool
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 085eeed8cd50..2e80692d9ee1 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -290,7 +290,7 @@ static void io_zcrx_sync_for_device(const struct page_pool *pool,
if (!dma_dev_need_sync(pool->p.dev))
return;
- dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
+ dma_addr = page_pool_get_dma_addr_nmdesc(&niov->desc);
__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
PAGE_SIZE, pool->p.dma_dir);
#endif
diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
index cd95394399b4..97d4beda9174 100644
--- a/net/core/netmem_priv.h
+++ b/net/core/netmem_priv.h
@@ -5,19 +5,21 @@
static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
{
- return __netmem_clear_lsb(netmem)->pp_magic & ~PP_DMA_INDEX_MASK;
+ return netmem_to_nmdesc(netmem)->pp_magic & ~PP_DMA_INDEX_MASK;
}
static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
{
- __netmem_clear_lsb(netmem)->pp_magic |= pp_magic;
+ netmem_to_nmdesc(netmem)->pp_magic |= pp_magic;
}
static inline void netmem_clear_pp_magic(netmem_ref netmem)
{
- WARN_ON_ONCE(__netmem_clear_lsb(netmem)->pp_magic & PP_DMA_INDEX_MASK);
+ struct netmem_desc *desc = netmem_to_nmdesc(netmem);
- __netmem_clear_lsb(netmem)->pp_magic = 0;
+ WARN_ON_ONCE(desc->pp_magic & PP_DMA_INDEX_MASK);
+
+ desc->pp_magic = 0;
}
static inline bool netmem_is_pp(netmem_ref netmem)
@@ -27,13 +29,13 @@ static inline bool netmem_is_pp(netmem_ref netmem)
static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
{
- __netmem_clear_lsb(netmem)->pp = pool;
+ netmem_to_nmdesc(netmem)->pp = pool;
}
static inline void netmem_set_dma_addr(netmem_ref netmem,
unsigned long dma_addr)
{
- __netmem_clear_lsb(netmem)->dma_addr = dma_addr;
+ netmem_to_nmdesc(netmem)->dma_addr = dma_addr;
}
static inline unsigned long netmem_get_dma_index(netmem_ref netmem)
@@ -43,7 +45,7 @@ static inline unsigned long netmem_get_dma_index(netmem_ref netmem)
if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
return 0;
- magic = __netmem_clear_lsb(netmem)->pp_magic;
+ magic = netmem_to_nmdesc(netmem)->pp_magic;
return (magic & PP_DMA_INDEX_MASK) >> PP_DMA_INDEX_SHIFT;
}
@@ -51,12 +53,12 @@ static inline unsigned long netmem_get_dma_index(netmem_ref netmem)
static inline void netmem_set_dma_index(netmem_ref netmem,
unsigned long id)
{
- unsigned long magic;
+ struct netmem_desc *desc;
if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
return;
- magic = netmem_get_pp_magic(netmem) | (id << PP_DMA_INDEX_SHIFT);
- __netmem_clear_lsb(netmem)->pp_magic = magic;
+ desc = netmem_to_nmdesc(netmem);
+ desc->pp_magic |= id << PP_DMA_INDEX_SHIFT;
}
#endif