Date:   Sun,  3 Apr 2022 14:06:19 +0100
From:   Pavel Begunkov <asml.silence@...il.com>
To:     netdev@...r.kernel.org, "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>
Cc:     Eric Dumazet <edumazet@...gle.com>, Wei Liu <wei.liu@...nel.org>,
        Paul Durrant <paul@....org>,
        Pavel Begunkov <asml.silence@...il.com>
Subject: [PATCH net-next 07/27] skbuff: introduce skb_is_zcopy()

Add a new helper, skb_is_zcopy(), for checking an skb's zerocopy
status. Previously skb_zcopy() was used for that, but it is slightly
heavier and generates extra code. Note: since the previous patch, a
ubuf should be set iff an skb is marked SKBFL_ZEROCOPY_ENABLE, apart
from the nouarg cases.

Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
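
For reviewers, a minimal standalone sketch of the idea (not kernel
code: the types, the flag value, and the shinfo layout below are
simplified stand-ins for the real definitions in
include/linux/skbuff.h). It shows why the boolean helper is lighter:
it reduces to a single flag test, whereas the pointer-returning
helper also needs a conditional load of uarg even when the caller
only wants a yes/no answer.

/* Standalone sketch; compile with any C99 compiler. The struct
 * layouts here are illustrative assumptions, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKBFL_ZEROCOPY_ENABLE	(1 << 0)

struct ubuf_info { int dummy; };

struct skb_shared_info {
	unsigned char flags;
	struct ubuf_info *uarg;		/* stand-in for skb_uarg() */
};

struct sk_buff { struct skb_shared_info shinfo; };

static struct skb_shared_info *skb_shinfo(struct sk_buff *skb)
{
	return &skb->shinfo;
}

/* Boolean check: a single flag test, no pointer materialized. */
static bool skb_is_zcopy(struct sk_buff *skb)
{
	return skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
}

/* Pointer-returning helper: the extra conditional load of uarg is
 * wasted work for callers that only branch on the result.
 */
static struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	return skb_is_zcopy(skb) ? skb_shinfo(skb)->uarg : NULL;
}

int main(void)
{
	struct ubuf_info ub = { 0 };
	struct sk_buff skb = {
		.shinfo = { .flags = SKBFL_ZEROCOPY_ENABLE, .uarg = &ub },
	};

	printf("is_zcopy=%d uarg=%p\n",
	       skb_is_zcopy(&skb), (void *)skb_zcopy(&skb));
	return 0;
}

Call sites that only need the yes/no answer (skb_orphan_frags(),
skb_can_coalesce(), and so on in the diff below) can then avoid
materializing the pointer entirely.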
 include/linux/skbuff.h | 25 +++++++++++++++----------
 net/core/skbuff.c      | 15 +++++++--------
 2 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 10f94b1909da..410850832b6a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1647,11 +1647,14 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
 	return &skb_shinfo(skb)->hwtstamps;
 }
 
-static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
+static inline bool skb_is_zcopy(struct sk_buff *skb)
 {
-	bool is_zcopy = skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
+	return skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
+}
 
-	return is_zcopy ? skb_uarg(skb) : NULL;
+static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
+{
+	return skb_is_zcopy(skb) ? skb_uarg(skb) : NULL;
 }
 
 static inline bool skb_zcopy_pure(const struct sk_buff *skb)
@@ -1679,7 +1682,7 @@ static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
 				 bool *have_ref)
 {
-	if (uarg && !skb_zcopy(skb)) {
+	if (uarg && !skb_is_zcopy(skb)) {
 		if (unlikely(have_ref && *have_ref))
 			*have_ref = false;
 		else
@@ -1723,11 +1726,13 @@ static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
 /* Release a reference on a zerocopy structure */
 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
 {
-	struct ubuf_info *uarg = skb_zcopy(skb);
 
-	if (uarg) {
-		if (!skb_zcopy_is_nouarg(skb))
+	if (skb_is_zcopy(skb)) {
+		if (!skb_zcopy_is_nouarg(skb)) {
+			struct ubuf_info *uarg = skb_zcopy(skb);
+
 			uarg->callback(skb, uarg, zerocopy_success);
+		}
 
 		skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
 	}
@@ -3023,7 +3028,7 @@ static inline void skb_orphan(struct sk_buff *skb)
  */
 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
 {
-	if (likely(!skb_zcopy(skb)))
+	if (likely(!skb_is_zcopy(skb)))
 		return 0;
 	if (!skb_zcopy_is_nouarg(skb) &&
 	    skb_uarg(skb)->callback == msg_zerocopy_callback)
@@ -3034,7 +3039,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
 /* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
 {
-	if (likely(!skb_zcopy(skb)))
+	if (likely(!skb_is_zcopy(skb)))
 		return 0;
 	return skb_copy_ubufs(skb, gfp_mask);
 }
@@ -3591,7 +3596,7 @@ static inline int skb_add_data(struct sk_buff *skb,
 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
 				    const struct page *page, int off)
 {
-	if (skb_zcopy(skb))
+	if (skb_is_zcopy(skb))
 		return false;
 	if (i) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7680314038b4..f7842bfdd7ae 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1350,14 +1350,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 			     struct msghdr *msg, int len,
 			     struct ubuf_info *uarg)
 {
-	struct ubuf_info *orig_uarg = skb_zcopy(skb);
 	struct iov_iter orig_iter = msg->msg_iter;
 	int err, orig_len = skb->len;
 
 	/* An skb can only point to one uarg. This edge case happens when
 	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
 	 */
-	if (orig_uarg && uarg != orig_uarg)
+	if (skb_is_zcopy(skb) && uarg != skb_zcopy(skb))
 		return -EEXIST;
 
 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
@@ -1380,9 +1379,9 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
 			      gfp_t gfp_mask)
 {
-	if (skb_zcopy(orig)) {
-		if (skb_zcopy(nskb)) {
-			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
+	if (skb_is_zcopy(orig)) {
+		if (skb_is_zcopy(nskb)) {
+			/* !gfp_mask callers are verified to !skb_is_zcopy(nskb) */
 			if (!gfp_mask) {
 				WARN_ON_ONCE(1);
 				return -ENOMEM;
@@ -1721,8 +1720,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_cloned(skb)) {
 		if (skb_orphan_frags(skb, gfp_mask))
 			goto nofrags;
-		if (skb_zcopy(skb))
-			refcount_inc(&skb_uarg(skb)->refcnt);
+		if (skb_is_zcopy(skb))
+			net_zcopy_get(skb_uarg(skb));
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 			skb_frag_ref(skb, i);
 
@@ -3535,7 +3534,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 
 	if (skb_headlen(skb))
 		return 0;
-	if (skb_zcopy(tgt) || skb_zcopy(skb))
+	if (skb_is_zcopy(tgt) || skb_is_zcopy(skb))
 		return 0;
 
 	todo = shiftlen;
-- 
2.35.1
