Message-ID: <20240709132741.47751-6-linyunsheng@huawei.com>
Date: Tue, 9 Jul 2024 21:27:30 +0800
From: Yunsheng Lin <linyunsheng@...wei.com>
To: <davem@...emloft.net>, <kuba@...nel.org>, <pabeni@...hat.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>, Yunsheng Lin
<linyunsheng@...wei.com>, Alexander Duyck <alexander.duyck@...il.com>,
"Michael S. Tsirkin" <mst@...hat.com>, Jason Wang <jasowang@...hat.com>,
Eugenio Pérez <eperezma@...hat.com>, Andrew Morton
<akpm@...ux-foundation.org>, Eric Dumazet <edumazet@...gle.com>, David
Howells <dhowells@...hat.com>, Marc Dionne <marc.dionne@...istor.com>, Trond
Myklebust <trondmy@...nel.org>, Anna Schumaker <anna@...nel.org>, Chuck Lever
<chuck.lever@...cle.com>, Jeff Layton <jlayton@...nel.org>, Neil Brown
<neilb@...e.de>, Olga Kornievskaia <kolga@...app.com>, Dai Ngo
<Dai.Ngo@...cle.com>, Tom Talpey <tom@...pey.com>, <kvm@...r.kernel.org>,
<virtualization@...ts.linux.dev>, <linux-mm@...ck.org>,
<linux-afs@...ts.infradead.org>, <linux-nfs@...r.kernel.org>
Subject: [PATCH net-next v10 05/15] mm: page_frag: avoid caller accessing 'page_frag_cache' directly
Use the appropriate page_frag API helpers instead of having callers
access 'page_frag_cache' internals directly.
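As a minimal usage sketch of the helpers touched by this series (the
'my_cache' variable below is hypothetical, for illustration only; the
alloc/drain calls are the ones already used or introduced in this patch):

	struct page_frag_cache my_cache;
	void *va;

	/* instead of my_cache.va = NULL */
	page_frag_cache_init(&my_cache);

	va = page_frag_alloc_va(&my_cache, 256, GFP_KERNEL);
	if (va && page_frag_cache_is_pfmemalloc(&my_cache))
		/* instead of reading my_cache.pfmemalloc directly */
		pr_debug("allocation came from a pfmemalloc page\n");

	/* instead of checking ->va and open-coding __page_frag_cache_drain() */
	page_frag_cache_drain(&my_cache);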
CC: Alexander Duyck <alexander.duyck@...il.com>
Signed-off-by: Yunsheng Lin <linyunsheng@...wei.com>
---
drivers/vhost/net.c | 2 +-
include/linux/page_frag_cache.h | 10 ++++++++++
mm/page_frag_test.c | 2 +-
net/core/skbuff.c | 6 +++---
net/rxrpc/conn_object.c | 4 +---
net/rxrpc/local_object.c | 4 +---
net/sunrpc/svcsock.c | 6 ++----
7 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 6691fac01e0d..b2737dc0dc50 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1325,7 +1325,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
- n->pf_cache.va = NULL;
+ page_frag_cache_init(&n->pf_cache);
return 0;
}
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 185d875e3e6b..0ba96b7b64ad 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -26,6 +26,16 @@ struct page_frag_cache {
bool pfmemalloc;
};
+static inline void page_frag_cache_init(struct page_frag_cache *nc)
+{
+ nc->va = NULL;
+}
+
+static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
+{
+ return !!nc->pfmemalloc;
+}
+
void page_frag_cache_drain(struct page_frag_cache *nc);
void __page_frag_cache_drain(struct page *page, unsigned int count);
void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
diff --git a/mm/page_frag_test.c b/mm/page_frag_test.c
index 50166a059c7d..0d47235b5cf2 100644
--- a/mm/page_frag_test.c
+++ b/mm/page_frag_test.c
@@ -340,7 +340,7 @@ static int __init page_frag_test_init(void)
u64 duration;
int ret;
- test_frag.va = NULL;
+ page_frag_cache_init(&test_frag);
atomic_set(&nthreads, 2);
init_completion(&wait);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4b8acd967793..76a473b1072d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -749,14 +749,14 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
if (in_hardirq() || irqs_disabled()) {
nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc_va(nc, len, gfp_mask);
- pfmemalloc = nc->pfmemalloc;
+ pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
} else {
local_bh_disable();
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
nc = this_cpu_ptr(&napi_alloc_cache.page);
data = page_frag_alloc_va(nc, len, gfp_mask);
- pfmemalloc = nc->pfmemalloc;
+ pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
local_bh_enable();
@@ -846,7 +846,7 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
len = SKB_HEAD_ALIGN(len);
data = page_frag_alloc_va(&nc->page, len, gfp_mask);
- pfmemalloc = nc->page.pfmemalloc;
+ pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
}
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 1539d315afe7..694c4df7a1a3 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -337,9 +337,7 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
*/
rxrpc_purge_queue(&conn->rx_queue);
- if (conn->tx_data_alloc.va)
- __page_frag_cache_drain(virt_to_page(conn->tx_data_alloc.va),
- conn->tx_data_alloc.pagecnt_bias);
+ page_frag_cache_drain(&conn->tx_data_alloc);
call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 504453c688d7..a8cffe47cf01 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -452,9 +452,7 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
#endif
rxrpc_purge_queue(&local->rx_queue);
rxrpc_purge_client_connections(local);
- if (local->tx_alloc.va)
- __page_frag_cache_drain(virt_to_page(local->tx_alloc.va),
- local->tx_alloc.pagecnt_bias);
+ page_frag_cache_drain(&local->tx_alloc);
}
/*
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 42d20412c1c3..4b1e87187614 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1609,7 +1609,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
static void svc_sock_free(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- struct page_frag_cache *pfc = &svsk->sk_frag_cache;
struct socket *sock = svsk->sk_sock;
trace_svcsock_free(svsk, sock);
@@ -1619,8 +1618,7 @@ static void svc_sock_free(struct svc_xprt *xprt)
sockfd_put(sock);
else
sock_release(sock);
- if (pfc->va)
- __page_frag_cache_drain(virt_to_head_page(pfc->va),
- pfc->pagecnt_bias);
+
+ page_frag_cache_drain(&svsk->sk_frag_cache);
kfree(svsk);
}
--
2.33.0