Message-Id: <20191230143028.27313-20-alobakin@dlink.ru>
Date: Mon, 30 Dec 2019 17:30:27 +0300
From: Alexander Lobakin <alobakin@...nk.ru>
To: "David S. Miller" <davem@...emloft.net>
Cc: Edward Cree <ecree@...arflare.com>, Andrew Lunn <andrew@...n.ch>,
Vivien Didelot <vivien.didelot@...il.com>,
Florian Fainelli <f.fainelli@...il.com>,
Hauke Mehrtens <hauke@...ke-m.de>,
Sean Wang <sean.wang@...iatek.com>,
Matthias Brugger <matthias.bgg@...il.com>,
Jiri Pirko <jiri@...lanox.com>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
Alexander Lobakin <alobakin@...nk.ru>,
Taehee Yoo <ap420073@...il.com>,
Stephen Hemminger <stephen@...workplumber.org>,
Stanislav Fomichev <sdf@...gle.com>,
Daniel Borkmann <daniel@...earbox.net>,
Song Liu <songliubraving@...com>,
Matteo Croce <mcroce@...hat.com>,
Jakub Sitnicki <jakub@...udflare.com>,
Paul Blakey <paulb@...lanox.com>,
Yoshiki Komachi <komachi.yoshiki@...il.com>,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-mediatek@...ts.infradead.org
Subject: [PATCH RFC net-next 19/19] net: dsa: tag_qca: add GRO callbacks

...so that frames tagged with this CPU tag type can be correctly
processed by the GRO layer.

Misc: fix qca_netdev_ops structure indentation.
Signed-off-by: Alexander Lobakin <alobakin@...nk.ru>
---
net/dsa/tag_qca.c | 88 +++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 82 insertions(+), 6 deletions(-)
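
For context: the helpers referenced below (qca_tag_source_port(),
qca_tag_encap_proto() and qca_tag_sanity_check()) are introduced earlier in
this series and are not part of this diff. The standalone sketch that follows
only illustrates the 2-byte QCA tag those helpers are assumed to parse: the
tag sits right after the Ethernet header, carries the version in its top two
bits and the source port in its low three bits (per the QCA_HDR_RECV_* masks
in net/dsa/tag_qca.c), and is immediately followed by the encapsulated
EtherType. The helper names and their exact semantics are assumptions made
for illustration, not taken from this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define QCA_HDR_LEN			2
#define QCA_HDR_VERSION			0x2

/* Receive-side fields of the 2-byte tag, host order after ntohs() */
#define QCA_HDR_RECV_VERSION(h)		(((h) >> 14) & 0x3)
#define QCA_HDR_RECV_SOURCE_PORT(h)	((h) & 0x7)

int main(void)
{
	/* Hypothetical tag plus encapsulated EtherType as seen right after
	 * the Ethernet header: version 2, source port 3, then ETH_P_IP.
	 */
	const uint8_t buf[] = { 0x80, 0x03, 0x08, 0x00 };
	uint16_t hdr, proto;

	memcpy(&hdr, buf, sizeof(hdr));
	memcpy(&proto, buf + QCA_HDR_LEN, sizeof(proto));
	hdr = ntohs(hdr);
	proto = ntohs(proto);

	/* Roughly the condition qca_tag_sanity_check() is assumed to reject */
	if (QCA_HDR_RECV_VERSION(hdr) != QCA_HDR_VERSION)
		return 1;

	/* Roughly what qca_tag_source_port()/qca_tag_encap_proto() return */
	printf("source port %u, encap proto 0x%04x\n",
	       (unsigned int)QCA_HDR_RECV_SOURCE_PORT(hdr),
	       (unsigned int)proto);
	return 0;
}

With that layout, the gro_receive callback below keys flows on the source
port (the XOR check in the flow loop), so frames from different switch ports
are never merged; once the two tag bytes are pulled, the encapsulated
protocol's GRO handler takes over.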
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index bee2788e034d..d0cb2299cbe9 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -106,13 +106,89 @@ static void qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
 	*proto = qca_tag_encap_proto(skb->data);
 }
 
+static struct sk_buff *qca_tag_gro_receive(struct list_head *head,
+					   struct sk_buff *skb)
+{
+	const struct packet_offload *ptype;
+	struct sk_buff *p, *pp = NULL;
+	u32 data_off, data_end;
+	const u8 *data;
+	int flush = 1;
+
+	data_off = skb_gro_offset(skb);
+	data_end = data_off + QCA_HDR_LEN;
+
+	data = skb_gro_header_fast(skb, data_off);
+	if (skb_gro_header_hard(skb, data_end)) {
+		data = skb_gro_header_slow(skb, data_end, data_off);
+		if (unlikely(!data))
+			goto out;
+	}
+
+	/* Data to the left of the current position has already been
+	 * pulled to the head
+	 */
+	if (unlikely(!qca_tag_sanity_check(skb->data + data_off)))
+		goto out;
+
+	rcu_read_lock();
+
+	ptype = gro_find_receive_by_type(qca_tag_encap_proto(data));
+	if (!ptype)
+		goto out_unlock;
+
+	flush = 0;
+
+	list_for_each_entry(p, head, list) {
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		if (qca_tag_source_port(skb->data + data_off) ^
+		    qca_tag_source_port(p->data + data_off))
+			NAPI_GRO_CB(p)->same_flow = 0;
+	}
+
+	skb_gro_pull(skb, QCA_HDR_LEN);
+	skb_gro_postpull_rcsum(skb, data, QCA_HDR_LEN);
+
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	skb_gro_flush_final(skb, pp, flush);
+
+	return pp;
+}
+
+static int qca_tag_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	const struct packet_offload *ptype;
+	int err = -ENOENT;
+	__be16 proto;
+
+	proto = qca_tag_encap_proto(skb->data + nhoff);
+
+	rcu_read_lock();
+
+	ptype = gro_find_complete_by_type(proto);
+	if (ptype)
+		err = ptype->callbacks.gro_complete(skb, nhoff + QCA_HDR_LEN);
+
+	rcu_read_unlock();
+
+	return err;
+}
+
 static const struct dsa_device_ops qca_netdev_ops = {
-	.name	= "qca",
-	.proto	= DSA_TAG_PROTO_QCA,
-	.xmit	= qca_tag_xmit,
-	.rcv	= qca_tag_rcv,
-	.flow_dissect = qca_tag_flow_dissect,
-	.overhead = QCA_HDR_LEN,
+	.name		= "qca",
+	.proto		= DSA_TAG_PROTO_QCA,
+	.xmit		= qca_tag_xmit,
+	.rcv		= qca_tag_rcv,
+	.flow_dissect	= qca_tag_flow_dissect,
+	.gro_receive	= qca_tag_gro_receive,
+	.gro_complete	= qca_tag_gro_complete,
+	.overhead	= QCA_HDR_LEN,
 };
 
 MODULE_LICENSE("GPL");
--
2.24.1