Date:   Fri, 11 Aug 2023 18:32:48 +0300
From:   "Radu Pirea (NXP OSS)" <radu-nicolae.pirea@....nxp.com>
To:     andrew@...n.ch, hkallweit1@...il.com, linux@...linux.org.uk,
        davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
        pabeni@...hat.com, richardcochran@...il.com, sd@...asysnail.net
Cc:     netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
        "Radu Pirea (NXP OSS)" <radu-nicolae.pirea@....nxp.com>
Subject: [RFC net-next v1 4/5] net: macsec: introduce mdo_insert_tx_tag

Offloading MACsec in PHYs requires inserting the SecTAG and the ICV in the
Ethernet frame. This operation increases the frame size by 32 bytes. If the
frames are sent at line rate, the PHY will not have enough room to insert
the SecTAG and the ICV.

To mitigate this scenario, the PHY offers to use a specific ethertype with
some padding bytes present in the Ethernet frame. This ethertype and its
associated bytes will be replaced by the SecTAG and the ICV.

Signed-off-by: Radu Pirea (NXP OSS) <radu-nicolae.pirea@....nxp.com>
---
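Note: below is a minimal, hypothetical sketch of what a PHY driver's
mdo_insert_tx_tag() callback could look like (not part of this patch; the
foo_phy_ prefix, the 4-byte tag length and the 0xDEAD placeholder ethertype
are made up purely for illustration):

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/macsec.h>

static int foo_phy_mdo_insert_tx_tag(struct macsec_context *ctx)
{
	struct sk_buff *skb = ctx->skb;
	__be16 *tag;

	/* insert_tx_tag() in macsec.c has already guaranteed
	 * ops->needed_headroom bytes of headroom, so skb_push() is safe.
	 */
	skb_push(skb, 4);

	/* Shift DA/SA forward so the placeholder lands right after the
	 * source MAC, where the PHY will later write the SecTAG.
	 */
	memmove(skb->data, skb->data + 4, 2 * ETH_ALEN);

	tag = (__be16 *)(skb->data + 2 * ETH_ALEN);
	tag[0] = htons(0xDEAD);	/* placeholder ethertype (illustrative) */
	tag[1] = 0;		/* padding the PHY overwrites on egress */

	return 0;
}

A real driver would also set needed_headroom/needed_tailroom in its
macsec_ops so the core reserves room for the placeholder tag and for the
ICV appended by the hardware.
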
 drivers/net/macsec.c | 79 ++++++++++++++++++++++++++++++++++++++++++++
 include/net/macsec.h |  5 +++
 2 files changed, 84 insertions(+)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 144ec756c796..32ea1fd5f5ab 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2633,6 +2633,18 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
 	if (ret)
 		macsec->offload = prev_offload;
 
+	if (macsec->offload == MACSEC_OFFLOAD_OFF) {
+		dev->needed_headroom -= ops->needed_headroom;
+		dev->needed_headroom += MACSEC_NEEDED_HEADROOM;
+		dev->needed_tailroom -= ops->needed_tailroom;
+		dev->needed_tailroom += MACSEC_NEEDED_TAILROOM;
+	} else {
+		dev->needed_headroom -= MACSEC_NEEDED_HEADROOM;
+		dev->needed_headroom += ops->needed_headroom;
+		dev->needed_tailroom -= MACSEC_NEEDED_TAILROOM;
+		dev->needed_tailroom += ops->needed_tailroom;
+	}
+
 	return ret;
 }
 
@@ -3389,6 +3401,61 @@ static struct genl_family macsec_fam __ro_after_init = {
 	.resv_start_op	= MACSEC_CMD_UPD_OFFLOAD + 1,
 };
 
+static struct sk_buff *insert_tx_tag(struct sk_buff *skb,
+				     struct net_device *dev)
+{
+	struct macsec_dev *macsec = macsec_priv(dev);
+	const struct macsec_ops *ops;
+	struct macsec_context ctx;
+	int err;
+
+	if (!macsec_is_offloaded(macsec))
+		return ERR_PTR(-EINVAL);
+
+	ops = macsec_get_ops(macsec, &ctx);
+	if (!ops)
+		return ERR_PTR(-EINVAL);
+
+	if (!ops->mdo_insert_tx_tag)
+		return skb;
+
+	if (unlikely(skb_headroom(skb) < ops->needed_headroom ||
+		     skb_tailroom(skb) < ops->needed_tailroom)) {
+		struct sk_buff *nskb = skb_copy_expand(skb,
+						       ops->needed_headroom,
+						       ops->needed_tailroom,
+						       GFP_ATOMIC);
+		if (likely(nskb)) {
+			consume_skb(skb);
+			skb = nskb;
+		} else {
+			err = -ENOMEM;
+			goto cleanup;
+		}
+	} else {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
+		err = -EINVAL;
+		goto cleanup;
+	}
+
+	ctx.secy = &macsec->secy;
+	ctx.skb = skb;
+
+	err = ops->mdo_insert_tx_tag(&ctx);
+	if (err)
+		goto cleanup;
+
+	return skb;
+cleanup:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 				     struct net_device *dev)
 {
@@ -3403,6 +3470,13 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 		skb_dst_drop(skb);
 		dst_hold(&md_dst->dst);
 		skb_dst_set(skb, &md_dst->dst);
+
+		skb = insert_tx_tag(skb, dev);
+		if (IS_ERR(skb)) {
+			dev->stats.tx_dropped++;
+			return NETDEV_TX_OK;
+		}
+
 		skb->dev = macsec->real_dev;
 		return dev_queue_xmit(skb);
 	}
@@ -4137,6 +4211,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 			if (err)
 				goto del_dev;
 		}
+
+		dev->needed_headroom -= MACSEC_NEEDED_HEADROOM;
+		dev->needed_headroom += ops->needed_headroom;
+		dev->needed_tailroom -= MACSEC_NEEDED_TAILROOM;
+		dev->needed_tailroom += ops->needed_tailroom;
 	}
 
 	err = register_macsec_dev(real_dev, dev);
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 33dc7f2aa42e..a988249d9608 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -272,6 +272,7 @@ struct macsec_context {
 		struct macsec_rx_sa_stats *rx_sa_stats;
 		struct macsec_dev_stats  *dev_stats;
 	} stats;
+	struct sk_buff *skb;
 };
 
 /**
@@ -302,6 +303,10 @@ struct macsec_ops {
 	int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
 	int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
 	int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+	/* Offload tag */
+	int (*mdo_insert_tx_tag)(struct macsec_context *ctx);
+	int needed_headroom;
+	int needed_tailroom;
 };
 
 #if IS_ENABLED(CONFIG_MACSEC)
-- 
2.34.1
