Message-ID: <CAJaqyWeW87TqKxNWPEayyi81uBd_W+Jekdt4sq_d9adCkbwp2g@mail.gmail.com>
Date:   Thu, 10 Mar 2022 19:22:06 +0100
From:   Eugenio Perez Martin <eperezma@...hat.com>
To:     Gautam Dawar <gautam.dawar@...inx.com>
Cc:     Gautam Dawar <gdawar@...inx.com>,
        Martin Petrus Hubertus Habets <martinh@...inx.com>,
        Harpreet Singh Anand <hanand@...inx.com>,
        Tanuj Murlidhar Kamde <tanujk@...inx.com>,
        Jason Wang <jasowang@...hat.com>,
        "Michael S. Tsirkin" <mst@...hat.com>,
        Zhu Lingshan <lingshan.zhu@...el.com>,
        Stefano Garzarella <sgarzare@...hat.com>,
        Xie Yongji <xieyongji@...edance.com>,
        Eli Cohen <elic@...dia.com>,
        Si-Wei Liu <si-wei.liu@...cle.com>,
        Parav Pandit <parav@...dia.com>,
        Longpeng <longpeng2@...wei.com>,
        virtualization <virtualization@...ts.linux-foundation.org>,
        linux-kernel@...r.kernel.org, kvm list <kvm@...r.kernel.org>,
        netdev@...r.kernel.org
Subject: Re: [RFC PATCH v2 18/19] vdpa_sim: filter destination mac address

On Thu, Feb 24, 2022 at 10:29 PM Gautam Dawar <gautam.dawar@...inx.com> wrote:
>
> This patch implements a simple unicast filter for the vDPA simulator.
>
> Signed-off-by: Jason Wang <jasowang@...hat.com>
> Signed-off-by: Gautam Dawar <gdawar@...inx.com>
> ---
>  drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 49 ++++++++++++++++++----------
>  1 file changed, 31 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> index 05d552cb7f94..ed5ade4ae570 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> @@ -47,13 +47,28 @@ static void vdpasim_net_complete(struct vdpasim_virtqueue *vq, size_t len)
>         local_bh_enable();
>  }
>
> +static bool receive_filter(struct vdpasim *vdpasim, size_t len)
> +{
> +       bool modern = vdpasim->features & (1ULL << VIRTIO_F_VERSION_1);
> +       size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
> +                                 sizeof(struct virtio_net_hdr);
> +
> +       if (len < ETH_ALEN + hdr_len)
> +               return false;
> +
> +       if (!strncmp(vdpasim->buffer + hdr_len,
> +                    vdpasim->config.mac, ETH_ALEN))
> +               return true;
> +
> +       return false;
> +}
> +
>  static void vdpasim_net_work(struct work_struct *work)
>  {
>         struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
>         struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
>         struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
>         ssize_t read, write;
> -       size_t total_write;
>         int pkts = 0;
>         int err;
>
> @@ -66,36 +81,34 @@ static void vdpasim_net_work(struct work_struct *work)
>                 goto out;
>
>         while (true) {
> -               total_write = 0;
>                 err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
>                                            &txq->head, GFP_ATOMIC);
>                 if (err <= 0)
>                         break;
>
> +               read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
> +                                            vdpasim->buffer,
> +                                            PAGE_SIZE);
> +
> +               if (!receive_filter(vdpasim, read)) {
> +                       vdpasim_complete(txq, 0);

Should this be vdpasim_net_complete(), to match the helper defined in the first hunk?
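
A minimal fixup on top of this patch, just to illustrate the rename I mean
(untested, assuming the intent is to reuse the vdpasim_net_complete() helper
from above):

-                       vdpasim_complete(txq, 0);
+                       vdpasim_net_complete(txq, 0);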

Thanks!

> +                       continue;
> +               }
> +
>                 err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
>                                            &rxq->head, GFP_ATOMIC);
>                 if (err <= 0) {
> -                       vringh_complete_iotlb(&txq->vring, txq->head, 0);
> +                       vdpasim_net_complete(txq, 0);
>                         break;
>                 }
>
> -               while (true) {
> -                       read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
> -                                                    vdpasim->buffer,
> -                                                    PAGE_SIZE);
> -                       if (read <= 0)
> -                               break;
> -
> -                       write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
> -                                                     vdpasim->buffer, read);
> -                       if (write <= 0)
> -                               break;
> -
> -                       total_write += write;
> -               }
> +               write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
> +                                             vdpasim->buffer, read);
> +               if (write <= 0)
> +                       break;
>
>                 vdpasim_net_complete(txq, 0);
> -               vdpasim_net_complete(rxq, total_write);
> +               vdpasim_net_complete(rxq, write);
>
>                 if (++pkts > 4) {
>                         schedule_work(&vdpasim->work);
> --
> 2.25.0
>
