[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240607054041.2032352-11-chopps@chopps.org>
Date: Fri, 7 Jun 2024 01:40:34 -0400
From: Christian Hopps <chopps@...pps.org>
To: devel@...ux-ipsec.org
Cc: Steffen Klassert <steffen.klassert@...unet.com>,
netdev@...r.kernel.org,
Christian Hopps <chopps@...pps.org>,
Christian Hopps <chopps@...n.net>
Subject: [PATCH ipsec-next v3 10/17] xfrm: iptfs: share page fragments of inner packets
From: Christian Hopps <chopps@...n.net>
When possible, rather than appending secondary (aggregated) inner packets
to the fragment list, share their page fragments with the outer IPTFS
packet. This allows for more efficient packet transmission.
Signed-off-by: Christian Hopps <chopps@...n.net>
---
net/xfrm/xfrm_iptfs.c | 88 ++++++++++++++++++++++++++++++++++++++-----
1 file changed, 79 insertions(+), 9 deletions(-)
diff --git a/net/xfrm/xfrm_iptfs.c b/net/xfrm/xfrm_iptfs.c
index 8911f9e50a30..200945f7ed40 100644
--- a/net/xfrm/xfrm_iptfs.c
+++ b/net/xfrm/xfrm_iptfs.c
@@ -45,6 +45,24 @@ struct xfrm_iptfs_data {
static u32 iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu);
static enum hrtimer_restart iptfs_delay_timer(struct hrtimer *me);
+/* ================= */
+/* SK_BUFF Functions */
+/* ================= */
+
+/**
+ * skb_head_to_frag() - initialize a skb_frag_t based on skb head data
+ * @skb: skb with the head data
+ * @frag: frag to initialize
+ *
+ * Fill @frag so it describes the skb's linear (head) data as a page
+ * fragment: the compound head page backing skb->data, the offset of
+ * skb->data within that page, and skb_headlen(@skb) bytes of length.
+ *
+ * The head must be page-backed (skb->head_frag set) for the head memory
+ * to be referenced as a page fragment; this is asserted below. Callers
+ * are expected to have checked head_frag before calling (see the
+ * share_ok tests in iptfs_output_queued()).
+ */
+static void skb_head_to_frag(const struct sk_buff *skb, skb_frag_t *frag)
+{
+ struct page *page = virt_to_head_page(skb->data);
+ unsigned char *addr = (unsigned char *)page_address(page);
+
+ BUG_ON(!skb->head_frag);
+ skb_frag_fill_page_desc(frag, page, skb->data - addr, skb_headlen(skb));
+}
+
/* ================================= */
/* IPTFS Sending (ingress) Functions */
/* ================================= */
@@ -262,14 +280,44 @@ static struct sk_buff **iptfs_rehome_fraglist(struct sk_buff **nextp,
return nextp;
}
+/**
+ * iptfs_consume_frags() - move all of @from's data into @to as page frags
+ * @to: outer IPTFS skb that receives the page fragments
+ * @from: aggregated inner-packet skb to be consumed
+ *
+ * If @from has linear head data, publish it into @to's frag array as an
+ * extra page fragment, taking a page reference so the head page outlives
+ * @from. Then transfer ownership of @from's own page fragments to @to
+ * (@from forgets them, so no extra references are needed). Finally adjust
+ * truesize accounting — @to absorbs the memory it now references, @from
+ * keeps only its residual bookkeeping — and free @from with consume_skb().
+ *
+ * Caller must already have verified the two skbs are share-compatible
+ * (not cloned/shared, matching pp_recycle, enough frag slots, etc.).
+ */
+static void iptfs_consume_frags(struct sk_buff *to, struct sk_buff *from)
+{
+ struct skb_shared_info *fromi = skb_shinfo(from);
+ struct skb_shared_info *toi = skb_shinfo(to);
+ unsigned int new_truesize;
+
+ /* If we have data in a head page, grab it as a frag of @to and take
+ * a page ref; @from's truesize then shrinks to just the sk_buff
+ * itself. With no head data, @from still owns its head buffer, so
+ * keep that in its truesize.
+ */
+ if (!skb_headlen(from)) {
+ new_truesize = SKB_TRUESIZE(skb_end_offset(from));
+ } else {
+ skb_head_to_frag(from, &toi->frags[toi->nr_frags]);
+ skb_frag_ref(to, toi->nr_frags++);
+ new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ }
+
+ /* Move any other page fragments rather than copy: the references
+ * transfer to @to because @from's nr_frags is zeroed, so @from will
+ * not release them when freed.
+ */
+ memcpy(&toi->frags[toi->nr_frags], fromi->frags,
+ sizeof(fromi->frags[0]) * fromi->nr_frags);
+ toi->nr_frags += fromi->nr_frags;
+ fromi->nr_frags = 0;
+ from->data_len = 0;
+ from->len = 0;
+ /* Credit @to with the memory now referenced from it. */
+ to->truesize += from->truesize - new_truesize;
+ from->truesize = new_truesize;
+
+ /* We are done with this SKB */
+ consume_skb(from);
+}
+
static void iptfs_output_queued(struct xfrm_state *x, struct sk_buff_head *list)
{
struct xfrm_iptfs_data *xtfs = x->mode_data;
struct sk_buff *skb, *skb2, **nextp;
- struct skb_shared_info *shi;
+ struct skb_shared_info *shi, *shi2;
while ((skb = __skb_dequeue(list))) {
u32 mtu = iptfs_get_cur_pmtu(x, xtfs, skb);
+ bool share_ok = true;
int remaining;
/* protocol comes to us cleared sometimes */
@@ -314,7 +362,7 @@ static void iptfs_output_queued(struct xfrm_state *x, struct sk_buff_head *list)
/* Re-home (un-nest) nested fragment lists. We need to do this
* b/c we will simply be appending any following aggregated
- * inner packets to the frag list.
+ * inner packets using the frag list.
*/
shi = skb_shinfo(skb);
nextp = &shi->frag_list;
@@ -326,6 +374,9 @@ static void iptfs_output_queued(struct xfrm_state *x, struct sk_buff_head *list)
nextp = &(*nextp)->next;
}
+ if (shi->frag_list || skb_cloned(skb) || skb_shared(skb))
+ share_ok = false;
+
/* See if we have enough space to simply append.
*
* NOTE: Maybe do not append if we will be mis-aligned,
@@ -351,18 +402,37 @@ static void iptfs_output_queued(struct xfrm_state *x, struct sk_buff_head *list)
}
}
+ /* skb->pp_recycle is passed to __skb_frag_unref for all
+ * frag pages, so we can only share pages with skbs whose
+ * pp_recycle setting matches our own.
+ *
+ * NOTE(review): the head_frag check below tests
+ * skb2->head_frag but pairs it with skb_headlen(skb) —
+ * presumably skb_headlen(skb2) was intended; verify.
+ */
+ shi2 = skb_shinfo(skb2);
+ if (share_ok &&
+ (shi2->frag_list ||
+ (!skb2->head_frag && skb_headlen(skb)) ||
+ skb->pp_recycle != skb2->pp_recycle ||
+ skb_zcopy(skb2) ||
+ (shi->nr_frags + shi2->nr_frags + 1 >
+ MAX_SKB_FRAGS)))
+ share_ok = false;
+
/* Do accounting */
skb->data_len += skb2->len;
skb->len += skb2->len;
remaining -= skb2->len;
- /* Append to the frag_list */
- *nextp = skb2;
- nextp = &skb2->next;
- BUG_ON(*nextp);
- if (skb_has_frag_list(skb2))
- nextp = iptfs_rehome_fraglist(nextp, skb2);
- skb->truesize += skb2->truesize;
+ if (share_ok) {
+ iptfs_consume_frags(skb, skb2);
+ } else {
+ /* Append to the frag_list */
+ *nextp = skb2;
+ nextp = &skb2->next;
+ BUG_ON(*nextp);
+ if (skb_has_frag_list(skb2))
+ nextp = iptfs_rehome_fraglist(nextp,
+ skb2);
+ skb->truesize += skb2->truesize;
+ }
}
xfrm_output(NULL, skb);
--
2.45.2
Powered by blists - more mailing lists