lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAHS8izM-2vdudZeRu51TNCRzVPQVBKmrj0YoK80nNgWvR-ft3g@mail.gmail.com>
Date: Fri, 15 Aug 2025 11:05:56 -0700
From: Mina Almasry <almasrymina@...gle.com>
To: Dragos Tatulea <dtatulea@...dia.com>
Cc: asml.silence@...il.com, "David S. Miller" <davem@...emloft.net>, 
	Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, 
	Simon Horman <horms@...nel.org>, cratiu@...dia.com, tariqt@...dia.com, parav@...dia.com, 
	Christoph Hellwig <hch@...radead.org>, netdev@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [RFC net-next v3 6/7] net: devmem: pre-read requested rx queues
 during bind

On Fri, Aug 15, 2025 at 4:07 AM Dragos Tatulea <dtatulea@...dia.com> wrote:
>
> Instead of reading the requested rx queues after binding the buffer,
> read the rx queues in advance in a bitmap and iterate over them when
> needed.
>
> This is a preparation for fetching the DMA device for each queue.
>
> This patch has no functional changes.
>
> Signed-off-by: Dragos Tatulea <dtatulea@...dia.com>
> ---
>  net/core/netdev-genl.c | 76 +++++++++++++++++++++++++++---------------
>  1 file changed, 49 insertions(+), 27 deletions(-)
>
> diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
> index 3e2d6aa6e060..3e990f100bf0 100644
> --- a/net/core/netdev-genl.c
> +++ b/net/core/netdev-genl.c
> @@ -869,17 +869,50 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
>         return err;
>  }
>
> -int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
> +static int netdev_nl_read_rxq_bitmap(struct genl_info *info,
> +                                    unsigned long *rxq_bitmap)
>  {
>         struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
> +       struct nlattr *attr;
> +       int rem, err = 0;
> +       u32 rxq_idx;
> +
> +       nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
> +                              genlmsg_data(info->genlhdr),
> +                              genlmsg_len(info->genlhdr), rem) {
> +               err = nla_parse_nested(
> +                       tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
> +                       netdev_queue_id_nl_policy, info->extack);
> +               if (err < 0)
> +                       return err;
> +
> +               if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
> +                   NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE))
> +                       return -EINVAL;
> +
> +               if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
> +                       NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
> +                       return -EINVAL;
> +               }
> +
> +               rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
> +
> +               bitmap_set(rxq_bitmap, rxq_idx, 1);
> +       }
> +
> +       return 0;
> +}
> +
> +int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
> +{
>         struct net_devmem_dmabuf_binding *binding;
>         u32 ifindex, dmabuf_fd, rxq_idx;
>         struct netdev_nl_sock *priv;
>         struct net_device *netdev;
> +       unsigned long *rxq_bitmap;
>         struct device *dma_dev;
>         struct sk_buff *rsp;
> -       struct nlattr *attr;
> -       int rem, err = 0;
> +       int err = 0;
>         void *hdr;
>
>         if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
> @@ -922,37 +955,22 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
>                 goto err_unlock;
>         }
>
> +       rxq_bitmap = bitmap_alloc(netdev->num_rx_queues, GFP_KERNEL);
> +       if (!rxq_bitmap) {
> +               err = -ENOMEM;
> +               goto err_unlock;
> +       }
> +       netdev_nl_read_rxq_bitmap(info, rxq_bitmap);
> +
>         dma_dev = netdev_queue_get_dma_dev(netdev, 0);
>         binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
>                                          dmabuf_fd, priv, info->extack);
>         if (IS_ERR(binding)) {
>                 err = PTR_ERR(binding);
> -               goto err_unlock;
> +               goto err_rxq_bitmap;
>         }
>
> -       nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
> -                              genlmsg_data(info->genlhdr),
> -                              genlmsg_len(info->genlhdr), rem) {
> -               err = nla_parse_nested(
> -                       tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
> -                       netdev_queue_id_nl_policy, info->extack);
> -               if (err < 0)
> -                       goto err_unbind;
> -
> -               if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
> -                   NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
> -                       err = -EINVAL;
> -                       goto err_unbind;
> -               }
> -
> -               if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
> -                       NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
> -                       err = -EINVAL;
> -                       goto err_unbind;
> -               }
> -
> -               rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
> -
> +       for_each_set_bit(rxq_idx, rxq_bitmap, netdev->num_rx_queues) {

Is this code assuming that netdev->num_rx_queues (or
real_num_rx_queues) <= BITS_PER_ULONG? Aren't there devices out there
that support more than 64 hardware queues? If so, I guess you need a
different data structure than a bitmap (or maybe there is an
arbitrary-sized bitmap library somewhere to use).



-- 
Thanks,
Mina

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ