Message-Id: <20180914175941.213950-2-willemdebruijn.kernel@gmail.com>
Date:   Fri, 14 Sep 2018 13:59:34 -0400
From:   Willem de Bruijn <willemdebruijn.kernel@gmail.com>
To:     netdev@vger.kernel.org
Cc:     pabeni@redhat.com, steffen.klassert@secunet.com,
        davem@davemloft.net, Willem de Bruijn <willemb@google.com>
Subject: [PATCH net-next RFC 1/8] gro: convert device offloads to net_offload

From: Willem de Bruijn <willemb@google.com>

In preparation for making GRO receive configurable, have all offloads
share the same infrastructure: move struct net_offload into
netdevice.h, make packet_offload an alias for it, and replace the
priority-ordered offload list with a flat RCU-protected array indexed
by the low byte of the (network byte order) type. Registration and
removal become lockless cmpxchg operations, and each lookup becomes a
single array dereference plus type check instead of a list walk.

Signed-off-by: Willem de Bruijn <willemb@google.com>
---
 include/linux/netdevice.h |  17 +++++-
 include/net/protocol.h    |   7 ---
 net/core/dev.c            | 105 +++++++++++++-------------------------
 3 files changed, 51 insertions(+), 78 deletions(-)
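
For illustration, and not part of the change itself: a standalone C
sketch of the table model this converts to. RCU, cmpxchg() and kernel
types are elided, so only the indexing and slot-plus-type-check
semantics are shown; off_entry, off_idx() and off_add()/off_lookup()
are illustrative stand-ins for net_offload, net_offload_from_type()
and the dev.c helpers, not kernel APIs.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct off_entry {
	uint16_t type;			/* stored as htons(ether_type) */
};

static struct off_entry *offloads[256];

/* Mirrors net_offload_from_type(): index by the low byte only. */
static uint8_t off_idx(uint16_t type)
{
	return type & 0xFF;
}

/* Register into an empty slot only, as dev_add_offload() does with
 * cmpxchg(); return 0 on success, -1 if the slot is already taken.
 */
static int off_add(struct off_entry *e)
{
	if (offloads[off_idx(e->type)])
		return -1;
	offloads[off_idx(e->type)] = e;
	return 0;
}

/* A slot hit alone is not proof of identity: confirm the full type,
 * as skb_mac_gso_segment() does after the array dereference.
 */
static struct off_entry *off_lookup(uint16_t type)
{
	struct off_entry *e = offloads[off_idx(type)];

	return (e && e->type == type) ? e : NULL;
}

int main(void)
{
	static struct off_entry ip, ip6;

	ip.type = htons(0x0800);	/* ETH_P_IP */
	ip6.type = htons(0x86DD);	/* ETH_P_IPV6 */

	off_add(&ip);
	off_add(&ip6);

	/* ETH_P_ARP (0x0806) shares IP's slot on little endian but is
	 * not registered; the type check turns it into a miss.
	 */
	printf("IP slot %u, IPv6 slot %u, ARP miss %d\n",
	       off_idx(ip.type), off_idx(ip6.type),
	       off_lookup(htons(0x0806)) == NULL);
	return 0;
}

On little endian this prints slots 8 and 134; the slot values differ
on big endian, because the index is taken from the host representation
of the htons()'d type, but the add/lookup semantics are the same.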

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e2b3bd750c98..7425068fa249 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2366,13 +2366,18 @@ struct offload_callbacks {
 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
-struct packet_offload {
+struct net_offload {
 	__be16			 type;	/* This is really htons(ether_type). */
 	u16			 priority;
 	struct offload_callbacks callbacks;
-	struct list_head	 list;
+	unsigned int		 flags;	/* Flags used by IPv6 for now */
 };
 
+#define packet_offload	net_offload
+
+/* This should be set for any extension header which is compatible with GSO. */
+#define INET6_PROTO_GSO_EXTHDR	0x1
+
 /* often modified stats are per-CPU, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
 	u64     rx_packets;
@@ -3554,6 +3559,14 @@ gro_result_t napi_gro_frags(struct napi_struct *napi);
 struct packet_offload *gro_find_receive_by_type(__be16 type);
 struct packet_offload *gro_find_complete_by_type(__be16 type);
 
+static inline u8 net_offload_from_type(u16 type)
+{
+	/* Do not bother handling collisions. There are none.
+	 * If they do occur with new offloads, add a mapping function here.
+	 */
+	return type & 0xFF;
+}
+
 static inline void napi_free_frags(struct napi_struct *napi)
 {
 	kfree_skb(napi->skb);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 4fc75f7ae23b..53a0322ee545 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -69,13 +69,6 @@ struct inet6_protocol {
 #define INET6_PROTO_FINAL	0x2
 #endif
 
-struct net_offload {
-	struct offload_callbacks callbacks;
-	unsigned int		 flags;	/* Flags used by IPv6 for now */
-};
-/* This should be set for any extension header which is compatible with GSO. */
-#define INET6_PROTO_GSO_EXTHDR	0x1
-
 /* This is used to register socket interfaces for IP protocols.  */
 struct inet_protosw {
 	struct list_head list;
diff --git a/net/core/dev.c b/net/core/dev.c
index 0b2d777e5b9e..55f86b6d3182 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -154,7 +154,6 @@
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
 
 static DEFINE_SPINLOCK(ptype_lock);
-static DEFINE_SPINLOCK(offload_lock);
 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
@@ -467,6 +466,9 @@ void dev_remove_pack(struct packet_type *pt)
 EXPORT_SYMBOL(dev_remove_pack);
 
 
+const struct net_offload __rcu *dev_offloads[256] __read_mostly;
+EXPORT_SYMBOL(dev_offloads);
+
 /**
  *	dev_add_offload - register offload handlers
  *	@po: protocol offload declaration
@@ -481,15 +483,9 @@ EXPORT_SYMBOL(dev_remove_pack);
  */
 void dev_add_offload(struct packet_offload *po)
 {
-	struct packet_offload *elem;
-
-	spin_lock(&offload_lock);
-	list_for_each_entry(elem, &offload_base, list) {
-		if (po->priority < elem->priority)
-			break;
-	}
-	list_add_rcu(&po->list, elem->list.prev);
-	spin_unlock(&offload_lock);
+	cmpxchg((const struct net_offload **)
+		&dev_offloads[net_offload_from_type(po->type)],
+			NULL, po);
 }
 EXPORT_SYMBOL(dev_add_offload);
 
@@ -506,23 +502,11 @@ EXPORT_SYMBOL(dev_add_offload);
  *	and must not be freed until after all the CPU's have gone
  *	through a quiescent state.
  */
-static void __dev_remove_offload(struct packet_offload *po)
+static int __dev_remove_offload(struct packet_offload *po)
 {
-	struct list_head *head = &offload_base;
-	struct packet_offload *po1;
-
-	spin_lock(&offload_lock);
-
-	list_for_each_entry(po1, head, list) {
-		if (po == po1) {
-			list_del_rcu(&po->list);
-			goto out;
-		}
-	}
-
-	pr_warn("dev_remove_offload: %p not found\n", po);
-out:
-	spin_unlock(&offload_lock);
+	return (cmpxchg((const struct net_offload **)
+			&dev_offloads[net_offload_from_type(po->type)],
+		       po, NULL) == po) ? 0 : -1;
 }
 
 /**
@@ -2962,7 +2946,7 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 				    netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-	struct packet_offload *ptype;
+	const struct net_offload *off;
 	int vlan_depth = skb->mac_len;
 	__be16 type = skb_network_protocol(skb, &vlan_depth);
 
@@ -2972,12 +2956,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 	__skb_pull(skb, vlan_depth);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, &offload_base, list) {
-		if (ptype->type == type && ptype->callbacks.gso_segment) {
-			segs = ptype->callbacks.gso_segment(skb, features);
-			break;
-		}
-	}
+	off = rcu_dereference(dev_offloads[net_offload_from_type(type)]);
+	if (off && off->type == type && off->callbacks.gso_segment)
+		segs = off->callbacks.gso_segment(skb, features);
 	rcu_read_unlock();
 
 	__skb_push(skb, skb->data - skb_mac_header(skb));
@@ -5254,9 +5235,8 @@ static void flush_all_backlogs(void)
 
 static int napi_gro_complete(struct sk_buff *skb)
 {
-	struct packet_offload *ptype;
+	const struct packet_offload *ptype;
 	__be16 type = skb->protocol;
-	struct list_head *head = &offload_base;
 	int err = -ENOENT;
 
 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
@@ -5267,17 +5247,12 @@ static int napi_gro_complete(struct sk_buff *skb)
 	}
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, head, list) {
-		if (ptype->type != type || !ptype->callbacks.gro_complete)
-			continue;
-
+	ptype = rcu_dereference(dev_offloads[net_offload_from_type(type)]);
+	if (ptype && ptype->type == type && ptype->callbacks.gro_complete)
 		err = ptype->callbacks.gro_complete(skb, 0);
-		break;
-	}
 	rcu_read_unlock();
 
 	if (err) {
-		WARN_ON(&ptype->list == head);
 		kfree_skb(skb);
 		return NET_RX_SUCCESS;
 	}
@@ -5417,8 +5392,7 @@ static void gro_flush_oldest(struct list_head *head)
 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
-	struct list_head *head = &offload_base;
-	struct packet_offload *ptype;
+	const struct packet_offload *ptype;
 	__be16 type = skb->protocol;
 	struct list_head *gro_head;
 	struct sk_buff *pp = NULL;
@@ -5432,10 +5406,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	gro_head = gro_list_prepare(napi, skb);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, head, list) {
-		if (ptype->type != type || !ptype->callbacks.gro_receive)
-			continue;
-
+	ptype = rcu_dereference(dev_offloads[net_offload_from_type(type)]);
+	if (ptype && ptype->type == type && ptype->callbacks.gro_receive) {
 		skb_set_network_header(skb, skb_gro_offset(skb));
 		skb_reset_mac_len(skb);
 		NAPI_GRO_CB(skb)->same_flow = 0;
@@ -5464,12 +5436,11 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		}
 
 		pp = ptype->callbacks.gro_receive(gro_head, skb);
-		break;
-	}
-	rcu_read_unlock();
-
-	if (&ptype->list == head)
+		rcu_read_unlock();
+	} else {
+		rcu_read_unlock();
 		goto normal;
+	}
 
 	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
 		ret = GRO_CONSUMED;
@@ -5524,29 +5495,25 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
 struct packet_offload *gro_find_receive_by_type(__be16 type)
 {
-	struct list_head *offload_head = &offload_base;
-	struct packet_offload *ptype;
+	struct net_offload *off;
 
-	list_for_each_entry_rcu(ptype, offload_head, list) {
-		if (ptype->type != type || !ptype->callbacks.gro_receive)
-			continue;
-		return ptype;
-	}
-	return NULL;
+	off = (struct net_offload *) rcu_dereference(dev_offloads[type & 0xFF]);
+	if (off && off->type == type && off->callbacks.gro_receive)
+		return off;
+	else
+		return NULL;
 }
 EXPORT_SYMBOL(gro_find_receive_by_type);
 
 struct packet_offload *gro_find_complete_by_type(__be16 type)
 {
-	struct list_head *offload_head = &offload_base;
-	struct packet_offload *ptype;
+	struct net_offload *off;
 
-	list_for_each_entry_rcu(ptype, offload_head, list) {
-		if (ptype->type != type || !ptype->callbacks.gro_complete)
-			continue;
-		return ptype;
-	}
-	return NULL;
+	off = (struct net_offload *) rcu_dereference(dev_offloads[type & 0xFF]);
+	if (off && off->type == type && off->callbacks.gro_complete)
+		return off;
+	else
+		return NULL;
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
-- 
2.19.0.397.gdd90340f6a-goog
