Message-ID: <20230612172307.3923165-6-sdf@google.com>
Date: Mon, 12 Jun 2023 10:23:05 -0700
From: Stanislav Fomichev <sdf@...gle.com>
To: bpf@...r.kernel.org
Cc: ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
martin.lau@...ux.dev, song@...nel.org, yhs@...com, john.fastabend@...il.com,
kpsingh@...nel.org, sdf@...gle.com, haoluo@...gle.com, jolsa@...nel.org,
netdev@...r.kernel.org
Subject: [RFC bpf-next 5/7] net: veth: implement devtx timestamp kfuncs

Have a software-based example for the kfuncs to showcase how they
can be used in real devices and to have something to test against
in the selftests.

Both paths (skb & xdp) are covered. Only the skb path is really
tested though.
Cc: netdev@...r.kernel.org
Signed-off-by: Stanislav Fomichev <sdf@...gle.com>
---
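For illustration only, not part of the patch: a BPF-side consumer of
these hooks might look roughly like the sketch below. The kfunc names
(bpf_devtx_sb_request_timestamp/bpf_devtx_cp_timestamp) and the fentry
attach points are assumptions made for the sketch; only the driver-side
xmo_sb_request_timestamp/xmo_cp_timestamp ops are actually added here.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct devtx_frame;

/* Assumed kfunc declarations; the real ones come from the devtx
 * core patch earlier in this series.
 */
extern int bpf_devtx_sb_request_timestamp(const struct devtx_frame *ctx) __ksym;
extern int bpf_devtx_cp_timestamp(const struct devtx_frame *ctx,
				  __u64 *timestamp) __ksym;

/* Hypothetical submit-path attach point. */
SEC("fentry/veth_devtx_submit")
int BPF_PROG(tx_submit, const struct devtx_frame *frame)
{
	/* Ask the driver to record a TX timestamp for this frame;
	 * in veth this sets ctx->request_timestamp.
	 */
	bpf_devtx_sb_request_timestamp(frame);
	return 0;
}

/* Hypothetical complete-path attach point. */
SEC("fentry/veth_devtx_complete")
int BPF_PROG(tx_complete, const struct devtx_frame *frame)
{
	__u64 ts = 0;

	/* Read the timestamp back: skb->tstamp on the skb path,
	 * the ktime_get_real() value on the xdp path.
	 */
	bpf_devtx_cp_timestamp(frame, &ts);
	return 0;
}

char _license[] SEC("license") = "GPL";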
drivers/net/veth.c | 94 ++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 90 insertions(+), 4 deletions(-)

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 614f3e3efab0..eb78d51d8352 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,6 +27,7 @@
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#include <net/page_pool.h>
+#include <net/devtx.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
@@ -123,6 +124,13 @@ struct veth_xdp_buff {
struct sk_buff *skb;
};
+struct veth_devtx_frame {
+ struct devtx_frame frame;
+ bool request_timestamp;
+ ktime_t xdp_tx_timestamp;
+ struct sk_buff *skb;
+};
+
static int veth_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -314,9 +322,29 @@ static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
- struct veth_rq *rq, bool xdp)
+ struct veth_rq *rq, bool xdp, bool request_timestamp)
{
- return __dev_forward_skb(dev, skb) ?: xdp ?
+ struct net_device *src_dev = skb->dev;
+ int ret;
+
+ ret = __dev_forward_skb(dev, skb);
+ if (ret)
+ return ret;
+
+ if (devtx_complete_enabled(src_dev)) {
+ struct veth_devtx_frame ctx;
+
+ if (unlikely(request_timestamp))
+ __net_timestamp(skb);
+
+ devtx_frame_from_skb(&ctx.frame, skb);
+ ctx.frame.data -= ETH_HLEN; /* undo eth_type_trans pull */
+ ctx.frame.len += ETH_HLEN;
+ ctx.skb = skb;
+ devtx_complete(src_dev, &ctx.frame);
+ }
+
+ return xdp ?
veth_xdp_rx(rq, skb) :
__netif_rx(skb);
}
@@ -343,6 +371,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ bool request_timestamp = false;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -356,6 +385,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
+ if (devtx_submit_enabled(dev)) {
+ struct veth_devtx_frame ctx;
+
+ devtx_frame_from_skb(&ctx.frame, skb);
+ ctx.request_timestamp = false;
+ devtx_submit(dev, &ctx.frame);
+ request_timestamp = ctx.request_timestamp;
+ }
+
rcv_priv = netdev_priv(rcv);
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
@@ -370,7 +408,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
- if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+ if (likely(veth_forward_skb(rcv, skb, rq, use_napi, request_timestamp) == NET_RX_SUCCESS)) {
if (!use_napi)
dev_lstats_add(dev, length);
} else {
@@ -483,6 +521,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
int i, ret = -ENXIO, nxmit = 0;
+ ktime_t tx_timestamp = 0;
struct net_device *rcv;
unsigned int max_len;
struct veth_rq *rq;
@@ -511,9 +550,32 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
void *ptr = veth_xdp_to_ptr(frame);
if (unlikely(xdp_get_frame_len(frame) > max_len ||
- __ptr_ring_produce(&rq->xdp_ring, ptr)))
+ __ptr_ring_full(&rq->xdp_ring)))
+ break;
+
+ if (devtx_submit_enabled(dev)) {
+ struct veth_devtx_frame ctx;
+
+ devtx_frame_from_xdp(&ctx.frame, frame);
+ ctx.request_timestamp = false;
+ devtx_submit(dev, &ctx.frame);
+
+ if (unlikely(ctx.request_timestamp))
+ tx_timestamp = ktime_get_real();
+ }
+
+ if (unlikely(__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
+
+ if (devtx_complete_enabled(dev)) {
+ struct veth_devtx_frame ctx;
+
+ devtx_frame_from_xdp(&ctx.frame, frame);
+ ctx.xdp_tx_timestamp = tx_timestamp;
+ ctx.skb = NULL;
+ devtx_complete(dev, &ctx.frame);
+ }
}
spin_unlock(&rq->xdp_ring.producer_lock);
@@ -1732,6 +1794,28 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
return 0;
}
+static int veth_devtx_sb_request_timestamp(const struct devtx_frame *_ctx)
+{
+ struct veth_devtx_frame *ctx = (struct veth_devtx_frame *)_ctx;
+
+ ctx->request_timestamp = true;
+
+ return 0;
+}
+
+static int veth_devtx_cp_timestamp(const struct devtx_frame *_ctx, u64 *timestamp)
+{
+ struct veth_devtx_frame *ctx = (struct veth_devtx_frame *)_ctx;
+
+ if (ctx->skb) {
+ *timestamp = ctx->skb->tstamp;
+ return 0;
+ }
+
+ *timestamp = ctx->xdp_tx_timestamp;
+ return 0;
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -1756,6 +1840,8 @@ static const struct net_device_ops veth_netdev_ops = {
static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
.xmo_rx_timestamp = veth_xdp_rx_timestamp,
.xmo_rx_hash = veth_xdp_rx_hash,
+ .xmo_sb_request_timestamp = veth_devtx_sb_request_timestamp,
+ .xmo_cp_timestamp = veth_devtx_cp_timestamp,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
--
2.41.0.162.gfafddb0af9-goog