[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20260123014214.225278-1-dqfext@gmail.com>
Date: Fri, 23 Jan 2026 09:42:13 +0800
From: Qingfang Deng <dqfext@...il.com>
To: Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
linux-ppp@...r.kernel.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: Toke Høiland-Jørgensen <toke@...hat.com>
Subject: [PATCH net-next v3] ppp: enable TX scatter-gather
PPP channels using chan->direct_xmit prepend the PPP header to a skb and
call dev_queue_xmit() directly. In this mode the skb does not need to be
linear, but the PPP netdevice currently does not advertise
scatter-gather features, causing unnecessary linearization and
preventing GSO.
Enable NETIF_F_SG and NETIF_F_FRAGLIST on PPP devices and add an
.ndo_fix_features() callback to disable them when the underlying PPP
channel is not using direct_xmit (i.e. IFF_NO_QUEUE is not set in
priv_flags). This allows the networking core to pass non-linear skbs
directly only when it is safe to do so.
PPP compressors still require linear skbs, and their state can change
at runtime. To avoid races, call skb_linearize() before passing buffers
to them instead of toggling features. Compressors are uncommon on
high-speed links, so this does not affect the fast path.
Signed-off-by: Qingfang Deng <dqfext@...il.com>
---
v2 -> v3:
Toggle features only when the underlying ppp_channel changes. Call
skb_linearize() when compressors are in use.
- https://lore.kernel.org/linux-ppp/20251103031501.404141-1-dqfext@gmail.com/
drivers/net/ppp/ppp_generic.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f9f0f16c41d1..d706a175cea9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1545,6 +1545,16 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
dev_fetch_sw_netstats(stats64, dev->tstats);
}
+static netdev_features_t
+ppp_fix_features(struct net_device *dev, netdev_features_t features)
+{
+ /* Don't advertise SG/FRAGLIST when IFF_NO_QUEUE is absent */
+ if (!(dev->priv_flags & IFF_NO_QUEUE))
+ features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
+
+ return features;
+}
+
static int ppp_dev_init(struct net_device *dev)
{
struct ppp *ppp;
@@ -1619,6 +1629,7 @@ static const struct net_device_ops ppp_netdev_ops = {
.ndo_start_xmit = ppp_start_xmit,
.ndo_siocdevprivate = ppp_net_siocdevprivate,
.ndo_get_stats64 = ppp_get_stats64,
+ .ndo_fix_features = ppp_fix_features,
.ndo_fill_forward_path = ppp_fill_forward_path,
};
@@ -1641,6 +1652,8 @@ static void ppp_setup(struct net_device *dev)
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->priv_destructor = ppp_dev_priv_destructor;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ dev->hw_features = dev->features;
netif_keep_dst(dev);
}
@@ -1710,6 +1723,10 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
int compressor_skb_size = ppp->dev->mtu +
ppp->xcomp->comp_extra + PPP_HDRLEN;
+
+ if (skb_linearize(skb))
+ return NULL;
+
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
if (!new_skb) {
if (net_ratelimit())
@@ -1797,6 +1814,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
case PPP_IP:
if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
break;
+
+ if (skb_linearize(skb))
+ goto drop;
+
/* try to do VJ TCP header compression */
new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
GFP_ATOMIC);
@@ -3537,6 +3558,12 @@ ppp_connect_channel(struct channel *pch, int unit)
spin_unlock(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
+ if (ret == 0) {
+ rtnl_lock();
+ netdev_update_features(ppp->dev);
+ rtnl_unlock();
+ }
+
return ret;
}
--
2.43.0
Powered by blists - more mailing lists