Message-ID: <20260210031313.29708-1-dqfext@gmail.com>
Date: Tue, 10 Feb 2026 11:13:13 +0800
From: Qingfang Deng <dqfext@...il.com>
To: Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
linux-ppp@...r.kernel.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: Vadim Fedorenko <vadim.fedorenko@...ux.dev>
Subject: [RFC PATCH net-next v2] ppp: don't store tx skb in the fastpath

Currently, ppp->xmit_pending is used in ppp_send_frame() to pass an skb
to ppp_push(), and it holds the skb whenever a PPP channel cannot
transmit it immediately. This state is redundant because the transmit
queue (ppp->file.xq) can already hold the backlog. Furthermore, during
normal operation an skb is queued to file.xq only to be dequeued again
immediately, which is unnecessary overhead.
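
For reference, this is the round-trip in the current code (condensed
from the lines this patch removes below): every skb is appended to
file.xq and immediately dequeued again by the loop that follows:

	if (skb)
		skb_queue_tail(&ppp->file.xq, skb);
	while (!ppp->xmit_pending &&
	       (skb = skb_dequeue(&ppp->file.xq)))
		ppp_send_frame(ppp, skb); /* may stash skb in xmit_pending */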

Refactor the transmit path to avoid stashing the skb when possible:

- Remove ppp->xmit_pending.
- Rename ppp_send_frame() to ppp_prepare_tx_skb() and don't call
  ppp_push() from it. It returns NULL if the skb was consumed
  (dropped or queued), or a new skb to be passed to ppp_push().
- Update ppp_push() to take the skb as a parameter. It returns 1 if
  the skb was consumed, or 0 if the channel is busy.
- Optimize __ppp_xmit_process() (see the sketch below):
  - Fastpath: if the queue is empty, try to send the skb directly via
    ppp_push(). If the channel is busy, queue the skb.
  - Slowpath: if the queue is not empty, process the backlog in
    file.xq. Split the dequeuing loop into a separate function,
    ppp_xmit_flush(), so that ppp_channel_push() can call it directly
    instead of passing a NULL skb to __ppp_xmit_process().

This simplifies the state handling and reduces locking overhead in the
fastpath.
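
With this, the per-packet logic in __ppp_xmit_process() boils down to
the following sketch (condensed from the diff below; the xmit lock,
the ppp->closing check and the likely()/unlikely() hints are omitted,
and the goto out paths are shown as returns):

	skb = ppp_prepare_tx_skb(ppp, skb); /* NULL if dropped or queued */
	if (!skb)
		return;
	if (skb_queue_empty(&ppp->file.xq)) {
		/* fastpath: no backlog, hand the skb straight to the channel */
		if (!ppp_push(ppp, skb)) {
			skb_queue_tail(&ppp->file.xq, skb);
			netif_stop_queue(ppp->dev);
		}
	} else {
		/* slowpath: preserve ordering, append and flush the backlog */
		skb_queue_tail(&ppp->file.xq, skb);
		ppp_xmit_flush(ppp);
	}
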
Signed-off-by: Qingfang Deng <dqfext@...il.com>
---
PPPoE encap forwarding performance on a RISC-V SBC increased from
86 kpps to 91 kpps.

v2: fix a use-after-free in ppp_prepare_tx_skb().
- https://lore.kernel.org/netdev/20260209021134.21194-1-dqfext@gmail.com/

 drivers/net/ppp/ppp_generic.c | 107 +++++++++++++++++++---------------
 1 file changed, 61 insertions(+), 46 deletions(-)

diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f8814d7be6f1..b8a41f571aa3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -134,7 +134,6 @@ struct ppp {
 	int		debug;		/* debug flags 70 */
 	struct slcompress *vj;		/* state for VJ header compression */
 	enum NPmode npmode[NUM_NP];	/* what to do with each net proto 78 */
-	struct sk_buff *xmit_pending;	/* a packet ready to go out 88 */
 	struct compressor *xcomp;	/* transmit packet compressor 8c */
 	void		*xc_state;	/* its internal state 90 */
 	struct compressor *rcomp;	/* receive decompressor 94 */
@@ -264,8 +263,8 @@ struct ppp_net {
 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 			struct file *file, unsigned int cmd, unsigned long arg);
 static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
-static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
-static void ppp_push(struct ppp *ppp);
+static struct sk_buff *ppp_prepare_tx_skb(struct ppp *ppp, struct sk_buff *skb);
+static int ppp_push(struct ppp *ppp, struct sk_buff *skb);
 static void ppp_channel_push(struct channel *pch);
 static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
 			      struct channel *pch);
@@ -1651,26 +1650,45 @@ static void ppp_setup(struct net_device *dev)
  */

 /* Called to do any work queued up on the transmit side that can now be done */
+static void ppp_xmit_flush(struct ppp *ppp)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&ppp->file.xq))) {
+		if (unlikely(!ppp_push(ppp, skb))) {
+			skb_queue_head(&ppp->file.xq, skb);
+			return;
+		}
+	}
+	/* If there's no work left to do, tell the core net code that we can
+	 * accept some more.
+	 */
+	netif_wake_queue(ppp->dev);
+}
+
 static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
 {
 	ppp_xmit_lock(ppp);
-	if (!ppp->closing) {
-		ppp_push(ppp);
-
-		if (skb)
+	if (unlikely(ppp->closing)) {
+		kfree_skb(skb);
+		goto out;
+	}
+	skb = ppp_prepare_tx_skb(ppp, skb);
+	if (unlikely(!skb))
+		goto out;
+	/* Fastpath: No backlog, just send the new skb. */
+	if (likely(skb_queue_empty(&ppp->file.xq))) {
+		if (unlikely(!ppp_push(ppp, skb))) {
 			skb_queue_tail(&ppp->file.xq, skb);
-		while (!ppp->xmit_pending &&
-		       (skb = skb_dequeue(&ppp->file.xq)))
-			ppp_send_frame(ppp, skb);
-		/* If there's no work left to do, tell the core net
-		   code that we can accept some more. */
-		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
-			netif_wake_queue(ppp->dev);
-		else
 			netif_stop_queue(ppp->dev);
-	} else {
-		kfree_skb(skb);
+		}
+		goto out;
 	}
+
+	/* Slowpath: Enqueue the new skb and process backlog */
+	skb_queue_tail(&ppp->file.xq, skb);
+	ppp_xmit_flush(ppp);
+out:
 	ppp_xmit_unlock(ppp);
 }

@@ -1757,12 +1775,11 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
 }

 /*
- * Compress and send a frame.
- * The caller should have locked the xmit path,
- * and xmit_pending should be 0.
+ * Compress and prepare to send a frame.
+ * The caller should have locked the xmit path.
  */
-static void
-ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+static struct sk_buff *
+ppp_prepare_tx_skb(struct ppp *ppp, struct sk_buff *skb)
 {
 	int proto = PPP_PROTO(skb);
 	struct sk_buff *new_skb;
@@ -1784,7 +1801,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
"PPP: outbound frame "
"not passed\n");
kfree_skb(skb);
- return;
+ return NULL;
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
@@ -1869,42 +1886,38 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 			goto drop;
 		skb_queue_tail(&ppp->file.rq, skb);
 		wake_up_interruptible(&ppp->file.rwait);
-		return;
+		return NULL;
 	}

-	ppp->xmit_pending = skb;
-	ppp_push(ppp);
-	return;
+	return skb;

 drop:
 	kfree_skb(skb);
 	++ppp->dev->stats.tx_errors;
+	return NULL;
 }

 /*
- * Try to send the frame in xmit_pending.
+ * Try to send the frame.
  * The caller should have the xmit path locked.
+ * Returns 1 if the skb was consumed, 0 if not.
  */
-static void
-ppp_push(struct ppp *ppp)
+static int
+ppp_push(struct ppp *ppp, struct sk_buff *skb)
 {
 	struct list_head *list;
 	struct channel *pch;
-	struct sk_buff *skb = ppp->xmit_pending;
-
-	if (!skb)
-		return;

 	list = &ppp->channels;
 	if (list_empty(list)) {
 		/* nowhere to send the packet, just drop it */
-		ppp->xmit_pending = NULL;
 		kfree_skb(skb);
-		return;
+		return 1;
 	}

 	if ((ppp->flags & SC_MULTILINK) == 0) {
 		struct ppp_channel *chan;
+		int ret;
 		/* not doing multilink: send it down the first channel */
 		list = list->next;
 		pch = list_entry(list, struct channel, clist);
@@ -1916,27 +1929,26 @@ ppp_push(struct ppp *ppp)
 			 * skb but linearization failed
 			 */
 			kfree_skb(skb);
-			ppp->xmit_pending = NULL;
+			ret = 1;
 			goto out;
 		}

-		if (chan->ops->start_xmit(chan, skb))
-			ppp->xmit_pending = NULL;
+		ret = chan->ops->start_xmit(chan, skb);

 out:
 		spin_unlock(&pch->downl);
-		return;
+		return ret;
 	}

 #ifdef CONFIG_PPP_MULTILINK
 	/* Multilink: fragment the packet over as many links
 	   as can take the packet at the moment. */
 	if (!ppp_mp_explode(ppp, skb))
-		return;
+		return 0;
 #endif /* CONFIG_PPP_MULTILINK */

-	ppp->xmit_pending = NULL;
 	kfree_skb(skb);
+	return 1;
 }

 #ifdef CONFIG_PPP_MULTILINK
@@ -2005,7 +2017,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 	 * performance if we have a lot of channels.
 	 */
 	if (nfree == 0 || nfree < navail / 2)
-		return 0; /* can't take now, leave it in xmit_pending */
+		return 0; /* can't take now, leave it in transmit queue */

 	/* Do protocol field compression */
 	if (skb_linearize(skb))
@@ -2199,8 +2211,12 @@ static void __ppp_channel_push(struct channel *pch, struct ppp *ppp)
 	spin_unlock(&pch->downl);

 	/* see if there is anything from the attached unit to be sent */
 	if (skb_queue_empty(&pch->file.xq)) {
-		if (ppp)
-			__ppp_xmit_process(ppp, NULL);
+		if (ppp) {
+			ppp_xmit_lock(ppp);
+			if (!ppp->closing)
+				ppp_xmit_flush(ppp);
+			ppp_xmit_unlock(ppp);
+		}
 	}
 }
@@ -3460,7 +3476,6 @@ static void ppp_destroy_interface(struct ppp *ppp)
 	}
 #endif /* CONFIG_PPP_FILTER */

-	kfree_skb(ppp->xmit_pending);
 	free_percpu(ppp->xmit_recursion);

 	free_netdev(ppp->dev);
--
2.43.0