Message-ID: <20260122115857-mutt-send-email-mst@kernel.org>
Date: Thu, 22 Jan 2026 12:21:19 -0500
From: "Michael S. Tsirkin" <mst@...hat.com>
To: Daniel Jurgens <danielj@...dia.com>
Cc: netdev@...r.kernel.org, jasowang@...hat.com, pabeni@...hat.com,
virtualization@...ts.linux.dev, parav@...dia.com,
shshitrit@...dia.com, yohadt@...dia.com, xuanzhuo@...ux.alibaba.com,
eperezma@...hat.com, jgg@...pe.ca, kevin.tian@...el.com,
kuba@...nel.org, andrew+netdev@...n.ch, edumazet@...gle.com
Subject: Re: [PATCH net-next v16 09/12] virtio_net: Implement IPv4 ethtool
flow rules
On Wed, Jan 21, 2026 at 04:06:49PM -0600, Daniel Jurgens wrote:
> Add support for IP_USER type rules from ethtool.
>
> Example:
> $ ethtool -U ens9 flow-type ip4 src-ip 192.168.51.101 action -1
> Added rule with ID 1
>
> The example rule will drop packets with the source IP specified.
>
> Signed-off-by: Daniel Jurgens <danielj@...dia.com>
> Reviewed-by: Parav Pandit <parav@...dia.com>
> Reviewed-by: Shahar Shitrit <shshitrit@...dia.com>
> Reviewed-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> ---
> v4:
> - Fixed bug in protocol check of parse_ip4
> - (u8 *) to (void *) casting.
> - Alignment issues.
>
> v12:
> - refactor calculate_flow_sizes to remove goto. MST
> - refactor build_and_insert to remove goto validate. MST
> - Move parse_ip4 l3_mask check to TCP/UDP patch. MST
> - Check saddr/daddr mask before copying in parse_ip4. MST
> - Remove tos check in setup_ip_key_mask.
> - check l4_4_bytes mask is 0 in setup_ip_key_mask. MST
> - changed return of setup_ip_key_mask to -EINVAL.
> - BUG_ON if key overflows u8 size in calculate_flow_sizes. MST
>
> v13:
> - Set tos field if applicable in parse_ip4. MST
> - Check tos in validate_ip4_mask. MST
> - check l3_mask before setting addr and mask in parse_ip4. MST
> - use has_ipv4 vs numhdrs for branching in build_and_insert. MST
> ---
> drivers/net/virtio_net.c | 129 +++++++++++++++++++++++++++++++++++++--
> 1 file changed, 123 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index ba231f10b803..7ce120baeb41 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -5818,6 +5818,39 @@ static bool validate_eth_mask(const struct virtnet_ff *ff,
> return true;
> }
>
> +static bool validate_ip4_mask(const struct virtnet_ff *ff,
> + const struct virtio_net_ff_selector *sel,
> + const struct virtio_net_ff_selector *sel_cap)
> +{
> + bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
> + struct iphdr *cap, *mask;
> +
> + cap = (struct iphdr *)&sel_cap->mask;
> + mask = (struct iphdr *)&sel->mask;
These casts are only portable if sel and sel_cap are properly aligned.
If they are not, then at least the saddr/daddr accesses below are not
portable and need the handling described in
Documentation/core-api/unaligned-memory-access.rst.
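For illustration, a minimal sketch of what an unaligned-safe variant of
the saddr check could look like, using the kernel's get_unaligned()
helper (linux/unaligned.h in current trees, asm/unaligned.h in older
ones); this is only to illustrate the concern, not necessarily the fix
this driver wants:

	#include <linux/unaligned.h>

	/* Read saddr without assuming the iphdr mask/cap are 4-byte aligned. */
	__be32 m_saddr = get_unaligned(&mask->saddr);
	__be32 c_saddr = get_unaligned(&cap->saddr);

	/* The local copies are naturally aligned, so passing them on is safe. */
	if (m_saddr &&
	    !check_mask_vs_cap(&m_saddr, &c_saddr, sizeof(__be32), partial_mask))
		return false;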
> +
> + if (mask->saddr &&
> + !check_mask_vs_cap(&mask->saddr, &cap->saddr,
> + sizeof(__be32), partial_mask))
> + return false;
> +
> + if (mask->daddr &&
> + !check_mask_vs_cap(&mask->daddr, &cap->daddr,
> + sizeof(__be32), partial_mask))
> + return false;
> +
> + if (mask->protocol &&
> + !check_mask_vs_cap(&mask->protocol, &cap->protocol,
> + sizeof(u8), partial_mask))
> + return false;
> +
> + if (mask->tos &&
> + !check_mask_vs_cap(&mask->tos, &cap->tos,
> + sizeof(u8), partial_mask))
> + return false;
> +
> + return true;
> +}
> +
> static bool validate_mask(const struct virtnet_ff *ff,
> const struct virtio_net_ff_selector *sel)
> {
> @@ -5829,11 +5862,41 @@ static bool validate_mask(const struct virtnet_ff *ff,
> switch (sel->type) {
> case VIRTIO_NET_FF_MASK_TYPE_ETH:
> return validate_eth_mask(ff, sel, sel_cap);
> +
> + case VIRTIO_NET_FF_MASK_TYPE_IPV4:
> + return validate_ip4_mask(ff, sel, sel_cap);
> }
>
> return false;
> }
>
> +static void parse_ip4(struct iphdr *mask, struct iphdr *key,
> + const struct ethtool_rx_flow_spec *fs)
> +{
> + const struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
> + const struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
Same concern here: this is only portable if the mask and key iphdr
pointers passed in are properly aligned. If they are not, then at least
the saddr/daddr accesses below are not portable and need the handling
described in Documentation/core-api/unaligned-memory-access.rst.
> +
> + if (l3_mask->ip4src) {
> + mask->saddr = l3_mask->ip4src;
> + key->saddr = l3_val->ip4src;
> + }
> +
> + if (l3_mask->ip4dst) {
> + mask->daddr = l3_mask->ip4dst;
> + key->daddr = l3_val->ip4dst;
> + }
> +
> + if (l3_mask->tos) {
> + mask->tos = l3_mask->tos;
> + key->tos = l3_val->tos;
> + }
> +}
> +
> +static bool has_ipv4(u32 flow_type)
> +{
> + return flow_type == IP_USER_FLOW;
> +}
> +
> static int setup_classifier(struct virtnet_ff *ff,
> struct virtnet_classifier **c)
> {
> @@ -5969,6 +6032,7 @@ static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
> {
> switch (fs->flow_type) {
> case ETHER_FLOW:
> + case IP_USER_FLOW:
> return true;
> }
>
> @@ -6000,8 +6064,18 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
> u8 *key_size, size_t *classifier_size,
> int *num_hdrs)
> {
> + size_t size = sizeof(struct ethhdr);
> +
> *num_hdrs = 1;
> - *key_size = sizeof(struct ethhdr);
> +
> + if (fs->flow_type != ETHER_FLOW) {
> + ++(*num_hdrs);
> + if (has_ipv4(fs->flow_type))
> + size += sizeof(struct iphdr);
> + }
> +
> + BUG_ON(size > 0xff);
> + *key_size = size;
> /*
> * The classifier size is the size of the classifier header, a selector
> * header for each type of header in the match criteria, and each header
> @@ -6013,8 +6087,9 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
> }
>
> static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
> - u8 *key,
> - const struct ethtool_rx_flow_spec *fs)
> + u8 *key,
> + const struct ethtool_rx_flow_spec *fs,
> + int num_hdrs)
> {
> struct ethhdr *eth_m = (struct ethhdr *)&selector->mask;
> struct ethhdr *eth_k = (struct ethhdr *)key;
> @@ -6022,8 +6097,35 @@ static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
> selector->type = VIRTIO_NET_FF_MASK_TYPE_ETH;
> selector->length = sizeof(struct ethhdr);
>
> - memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
> - memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
> + if (num_hdrs > 1) {
> + eth_m->h_proto = cpu_to_be16(0xffff);
> + eth_k->h_proto = cpu_to_be16(ETH_P_IP);
> + } else {
> + memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
> + memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
> + }
> +}
> +
> +static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
> + u8 *key,
> + const struct ethtool_rx_flow_spec *fs)
> +{
> + struct iphdr *v4_m = (struct iphdr *)&selector->mask;
> + struct iphdr *v4_k = (struct iphdr *)key;
Are mask and key guaranteed to be aligned here? If not, then at least
the saddr/daddr accesses in the parse_ip4 call below are not portable
and need the handling described in
Documentation/core-api/unaligned-memory-access.rst.
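If alignment is not guaranteed, the stores in parse_ip4 would need the
unaligned helpers as well; a minimal sketch of the ip4src case, assuming
put_unaligned() from linux/unaligned.h, again only to illustrate:

	/* Store without assuming mask/key point at 4-byte aligned iphdrs. */
	if (l3_mask->ip4src) {
		put_unaligned(l3_mask->ip4src, &mask->saddr);
		put_unaligned(l3_val->ip4src, &key->saddr);
	}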
> +
> + selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV4;
> + selector->length = sizeof(struct iphdr);
> +
> + if (fs->h_u.usr_ip4_spec.l4_4_bytes ||
> + fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
> + fs->m_u.usr_ip4_spec.l4_4_bytes ||
> + fs->m_u.usr_ip4_spec.ip_ver ||
> + fs->m_u.usr_ip4_spec.proto)
> + return -EINVAL;
> +
> + parse_ip4(v4_m, v4_k, fs);
> +
> + return 0;
> }
>
> static int
> @@ -6045,6 +6147,13 @@ validate_classifier_selectors(struct virtnet_ff *ff,
> return 0;
> }
>
> +static
> +struct virtio_net_ff_selector *next_selector(struct virtio_net_ff_selector *sel)
> +{
> + return (void *)sel + sizeof(struct virtio_net_ff_selector) +
> + sel->length;
> +}
> +
> static int build_and_insert(struct virtnet_ff *ff,
> struct virtnet_ethtool_rule *eth_rule)
> {
> @@ -6082,7 +6191,15 @@ static int build_and_insert(struct virtnet_ff *ff,
> classifier->count = num_hdrs;
> selector = (void *)&classifier->selectors[0];
>
> - setup_eth_hdr_key_mask(selector, key, fs);
> + setup_eth_hdr_key_mask(selector, key, fs, num_hdrs);
So this will set selector->length to 14 (sizeof(struct ethhdr)).
> +
> + if (has_ipv4(fs->flow_type)) {
> + selector = next_selector(selector);
And this will point the next selector at offset 8 + 14 = 22 then?
> +
> + err = setup_ip_key_mask(selector, key + sizeof(struct ethhdr), fs);
And this will pass an unaligned mask and key to setup_ip_key_mask?
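To spell the arithmetic out (assuming an 8-byte struct
virtio_net_ff_selector header, which the "8 + 14" above implies;
offsets are relative to &classifier->selectors[0]):

	eth selector mask    @  8             (8-byte selector header)
	next selector        @  8 + 14 = 22   (header + ETH_HLEN)
	ipv4 selector mask   @ 22 + 8  = 30   (2-byte aligned only)
	v4_m->saddr          @ 30 + 12 = 42   (2-byte aligned only)

so the iphdr casts in setup_ip_key_mask would indeed not be naturally
aligned.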
> + if (err)
> + goto err_classifier;
> + }
>
> err = validate_classifier_selectors(ff, classifier, num_hdrs);
> if (err)
> --
> 2.50.1