[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <ae8fce57-ffbd-4a10-b57b-9dd49ae3b091@molgen.mpg.de>
Date: Sat, 20 Dec 2025 06:59:35 +0100
From: Paul Menzel <pmenzel@...gen.mpg.de>
To: Mina Almasry <almasrymina@...gle.com>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org, YiFei Zhu <zhuyifei@...gle.com>,
Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann <daniel@...earbox.net>,
"David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>,
Tony Nguyen <anthony.l.nguyen@...el.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>,
Andrew Lunn <andrew+netdev@...n.ch>, Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Alexander Lobakin <aleksander.lobakin@...el.com>,
Richard Cochran <richardcochran@...il.com>,
intel-wired-lan@...ts.osuosl.org,
Aleksandr Loktionov <aleksandr.loktionov@...el.com>
Subject: Re: [Intel-wired-lan] [PATCH net-next v3] idpf: export RX hardware
timestamping information to XDP
Dear Mina,
Thank you for the patch.
Am 19.12.25 um 21:29 schrieb Mina Almasry via Intel-wired-lan:
> From: YiFei Zhu <zhuyifei@...gle.com>
>
> The logic is similar to idpf_rx_hwtstamp, but the data is exported
> as a BPF kfunc instead of appended to an skb.
Could you add the reason why it’s done this way?
> A idpf_queue_has(PTP, rxq) condition is added to check the queue
> supports PTP similar to idpf_rx_process_skb_fields.
It’d be great if you added test information.
> Cc: intel-wired-lan@...ts.osuosl.org
>
Remove the blank line.
> Signed-off-by: YiFei Zhu <zhuyifei@...gle.com>
> Signed-off-by: Mina Almasry <almasrymina@...gle.com>
> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@...el.com>
>
> ---
>
> v3: https://lore.kernel.org/netdev/20251218022948.3288897-1-almasrymina@google.com/
> - Do the idpf_queue_has(PTP) check before we read qw1 (lobakin)
> - Fix _qw1 not copying over ts_low on !__LIBETH_WORD_ACCESS systems
> (AI)
>
> v2: https://lore.kernel.org/netdev/20251122140839.3922015-1-almasrymina@google.com/
> - Fixed alphabetical ordering
> - Use the xdp desc type instead of virtchnl one (required some added
> helpers)
>
> ---
> drivers/net/ethernet/intel/idpf/xdp.c | 31 +++++++++++++++++++++++++++
> drivers/net/ethernet/intel/idpf/xdp.h | 22 ++++++++++++++++++-
> 2 files changed, 52 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
> index 958d16f87424..0916d201bf98 100644
> --- a/drivers/net/ethernet/intel/idpf/xdp.c
> +++ b/drivers/net/ethernet/intel/idpf/xdp.c
> @@ -2,6 +2,7 @@
> /* Copyright (C) 2025 Intel Corporation */
>
> #include "idpf.h"
> +#include "idpf_ptp.h"
> #include "idpf_virtchnl.h"
> #include "xdp.h"
> #include "xsk.h"
> @@ -391,8 +392,38 @@ static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
> pt);
> }
>
> +static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
> +{
> + const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
> + struct idpf_xdp_rx_desc desc __uninitialized;
> + const struct idpf_rx_queue *rxq;
> + u64 cached_time, ts_ns;
> + u32 ts_high;
> +
> + rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
> +
> + if (!idpf_queue_has(PTP, rxq))
> + return -ENODATA;
> +
> + idpf_xdp_get_qw1(&desc, xdp->desc);
> +
> + if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
> + return -ENODATA;
> +
> + cached_time = READ_ONCE(rxq->cached_phc_time);
> +
> + idpf_xdp_get_qw3(&desc, xdp->desc);
> +
> + ts_high = idpf_xdp_rx_ts_high(&desc);
> + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
> +
> + *timestamp = ts_ns;
> + return 0;
> +}
> +
> static const struct xdp_metadata_ops idpf_xdpmo = {
> .xmo_rx_hash = idpf_xdpmo_rx_hash,
> + .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
> };
>
> void idpf_xdp_set_features(const struct idpf_vport *vport)
> diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
> index 479f5ef3c604..9daae445bde4 100644
> --- a/drivers/net/ethernet/intel/idpf/xdp.h
> +++ b/drivers/net/ethernet/intel/idpf/xdp.h
> @@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
> aligned_u64 qw1;
> #define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
> #define IDPF_XDP_RX_EOP BIT_ULL(1)
> +#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
>
> aligned_u64 qw2;
> #define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
>
> aligned_u64 qw3;
> +#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
> } __aligned(4 * sizeof(u64));
> static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
> @@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> #define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
> #define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
> #define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
> +#define idpf_xdp_rx_ts_low(desc) FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
> +#define idpf_xdp_rx_ts_high(desc) FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
>
> static inline void
> idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
> @@ -149,7 +153,10 @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
> desc->qw1 = ((const typeof(desc))rxd)->qw1;
> #else
> desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
> - rxd->status_err0_qw1;
> + ((u64)rxd->ts_low << 24) |
> + ((u64)rxd->fflags1 << 16) |
> + ((u64)rxd->status_err1 << 8) |
> + rxd->status_err0_qw1;
> #endif
> }
>
> @@ -166,6 +173,19 @@ idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
> #endif
> }
>
> +static inline void
> +idpf_xdp_get_qw3(struct idpf_xdp_rx_desc *desc,
> + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
> +{
> +#ifdef __LIBETH_WORD_ACCESS
> + desc->qw3 = ((const typeof(desc))rxd)->qw3;
> +#else
> + desc->qw3 = ((u64)le32_to_cpu(rxd->ts_high) << 32) |
> + ((u64)le16_to_cpu(rxd->fmd6) << 16) |
> + le16_to_cpu(rxd->l2tag1);
> +#endif
> +}
> +
> void idpf_xdp_set_features(const struct idpf_vport *vport);
>
> int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
The diff looks fine.
Reviewed-by: Paul Menzel <pmenzel@...gen.mpg.de>
Kind regards,
Paul
Powered by blists - more mailing lists