[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20191230143028.27313-3-alobakin@dlink.ru>
Date: Mon, 30 Dec 2019 17:30:10 +0300
From: Alexander Lobakin <alobakin@...nk.ru>
To: "David S. Miller" <davem@...emloft.net>
Cc: Edward Cree <ecree@...arflare.com>, Andrew Lunn <andrew@...n.ch>,
Vivien Didelot <vivien.didelot@...il.com>,
Florian Fainelli <f.fainelli@...il.com>,
Hauke Mehrtens <hauke@...ke-m.de>,
Sean Wang <sean.wang@...iatek.com>,
Matthias Brugger <matthias.bgg@...il.com>,
Jiri Pirko <jiri@...lanox.com>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
Alexander Lobakin <alobakin@...nk.ru>,
Taehee Yoo <ap420073@...il.com>,
Stephen Hemminger <stephen@...workplumber.org>,
Stanislav Fomichev <sdf@...gle.com>,
Daniel Borkmann <daniel@...earbox.net>,
Song Liu <songliubraving@...com>,
Matteo Croce <mcroce@...hat.com>,
Jakub Sitnicki <jakub@...udflare.com>,
Paul Blakey <paulb@...lanox.com>,
Yoshiki Komachi <komachi.yoshiki@...il.com>,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-mediatek@...ts.infradead.org
Subject: [PATCH RFC net-next 02/19] net: dsa: add GRO support infrastructure
Add .gro_receive() (with a cached shortcut) and .gro_complete() callbacks
to tagger ops, plus a basic ETH_P_XDSA packet_offload with wrappers around
them, so DSA-tagged frames can now be processed within the GRO layer if
the particular tagger implements these callbacks (tagger support will be
added in subsequent patches).
Note: there is no need to take RCU read locks in dsa_gro_receive() and
dsa_gro_complete(), as dev->dsa_ptr (cpu_dp) is not RCU-protected, at
least for now. The corresponding locks must instead be taken in the
actual tagger callbacks.
Signed-off-by: Alexander Lobakin <alobakin@...nk.ru>
---
include/net/dsa.h | 5 +++++
net/dsa/dsa.c | 43 +++++++++++++++++++++++++++++++++++++++++--
net/dsa/dsa2.c | 1 +
3 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 633d9894ab87..8a7f80709d51 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -79,6 +79,9 @@ struct dsa_device_ops {
* as regular on the master net device.
*/
bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*gro_receive)(struct list_head *head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb, int nhoff);
unsigned int overhead;
const char *name;
enum dsa_tag_protocol proto;
@@ -170,6 +173,8 @@ struct dsa_port {
struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt);
bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*gro_receive)(struct list_head *head,
+ struct sk_buff *skb);
enum {
DSA_PORT_TYPE_UNUSED = 0,
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 17281fec710c..9a8d8ce7473c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -243,6 +243,34 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
return 0;
}
+/* GRO receive wrapper for ETH_P_XDSA frames.
+ *
+ * Dispatches to the CPU port's cached tagger ->gro_receive() shortcut
+ * when one is present; otherwise requests an immediate GRO flush so
+ * the skb falls back to the regular receive path.
+ *
+ * NOTE(review): no RCU read lock is taken here -- per the commit
+ * message, cpu_dp (dev->dsa_ptr) is not RCU-protected, and any
+ * locking required is left to the tagger callback itself. Confirm
+ * this still holds if dsa_ptr ever becomes RCU-managed.
+ */
+static struct sk_buff *dsa_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
+{
+ const struct dsa_port *cpu_dp = skb->dev->dsa_ptr;
+ struct sk_buff *pp = NULL;
+ int flush = 1; /* flush by default: no tagger GRO support */
+
+ /* Master may have no CPU port bound (unlikely), or the tagger
+ * may simply not implement GRO.
+ */
+ if (unlikely(!cpu_dp) || !cpu_dp->gro_receive)
+ goto flush;
+
+ pp = cpu_dp->gro_receive(head, skb);
+ flush = 0;
+
+flush:
+ skb_gro_flush_final(skb, pp, flush);
+ return pp;
+}
+
+/* GRO complete wrapper for ETH_P_XDSA frames.
+ *
+ * NOTE(review): unlike dsa_gro_receive(), which uses a shortcut
+ * cached on the dsa_port, this path looks the callback up through
+ * cpu_dp->tag_ops -- intentional per the commit message ("with
+ * shortcut" applies to gro_receive only), but worth confirming.
+ *
+ * Returns the tagger's ->gro_complete() result, or -ENOENT when no
+ * CPU port is bound or the tagger provides no callback.
+ */
+static int dsa_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ const struct dsa_port *cpu_dp = skb->dev->dsa_ptr;
+
+ if (likely(cpu_dp) && cpu_dp->tag_ops->gro_complete)
+ return cpu_dp->tag_ops->gro_complete(skb, nhoff);
+
+ return -ENOENT;
+}
+
#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
@@ -298,8 +326,17 @@ EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif
static struct packet_type dsa_pack_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_XDSA),
- .func = dsa_switch_rcv,
+ .type = htons(ETH_P_XDSA),
+ .func = dsa_switch_rcv,
+};
+
+/* Offload entry so the GRO engine dispatches ETH_P_XDSA frames to
+ * the wrappers above.
+ */
+static struct packet_offload dsa_pack_offload __read_mostly = {
+ .type = htons(ETH_P_XDSA),
+ .priority = 10, /* NOTE(review): presumably orders this entry in the offload list -- confirm against dev_add_offload() */
+ .callbacks = {
+ .gro_receive = dsa_gro_receive,
+ .gro_complete = dsa_gro_complete,
+ },
+};
static struct workqueue_struct *dsa_owq;
@@ -430,6 +467,7 @@ static int __init dsa_init_module(void)
goto register_notifier_fail;
dev_add_pack(&dsa_pack_type);
+ dev_add_offload(&dsa_pack_offload);
dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
THIS_MODULE);
@@ -448,6 +486,7 @@ static void __exit dsa_cleanup_module(void)
dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));
dsa_slave_unregister_notifier();
+ dev_remove_offload(&dsa_pack_offload);
dev_remove_pack(&dsa_pack_type);
destroy_workqueue(dsa_owq);
}
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index c66abbed4daf..5f66e0280e8e 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -631,6 +631,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
}
dp->type = DSA_PORT_TYPE_CPU;
+ dp->gro_receive = tag_ops->gro_receive;
dp->filter = tag_ops->filter;
dp->rcv = tag_ops->rcv;
dp->tag_ops = tag_ops;
--
2.24.1
Powered by blists - more mailing lists