Message-ID: <CAADnVQ+jr2WG4FF3GoPt==tOkOb72bd7Zhkk5iy4omCJ3=qLJQ@mail.gmail.com>
Date:   Tue, 30 Mar 2021 08:08:00 -0700
From:   Alexei Starovoitov <alexei.starovoitov@...il.com>
To:     Ciara Loftus <ciara.loftus@...el.com>
Cc:     Network Development <netdev@...r.kernel.org>,
        bpf <bpf@...r.kernel.org>,
        "Karlsson, Magnus" <magnus.karlsson@...el.com>, bjorn@...nel.org
Subject: Re: [PATCH v3 bpf 2/3] libbpf: restore umem state after socket create failure

On Tue, Mar 30, 2021 at 5:06 AM Ciara Loftus <ciara.loftus@...el.com> wrote:
>
> If the call to xsk_socket__create fails, the user may want to retry the
> socket creation using the same umem. Ensure that the umem is in the
> same state on exit if the call fails by:
> 1. ensuring the umem _save pointers are unmodified.
> 2. not unmapping the set of umem rings that were set up with the umem
> during xsk_umem__create, since those maps existed before the call to
> xsk_socket__create and should remain intact even in the event of
> failure.
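
(For illustration only: a caller-side retry, assuming libbpf's xsk.h API,
might look roughly like the sketch below. The helper name and the
ifname/queue_id/cfg parameters are placeholders, not part of this patch.)

#include <bpf/xsk.h>

/* Hypothetical helper: retry xsk_socket__create() once with the same
 * umem. This only works if a failed call leaves the umem's _save
 * pointers and its fill/completion ring mappings untouched, which is
 * what this patch ensures.
 */
static int xsk_create_with_retry(struct xsk_socket **xsk, const char *ifname,
				 __u32 queue_id, struct xsk_umem *umem,
				 struct xsk_ring_cons *rx,
				 struct xsk_ring_prod *tx,
				 const struct xsk_socket_config *cfg)
{
	int err;

	err = xsk_socket__create(xsk, ifname, queue_id, umem, rx, tx, cfg);
	if (!err)
		return 0;

	/* e.g. transient failure: reuse the exact same umem */
	return xsk_socket__create(xsk, ifname, queue_id, umem, rx, tx, cfg);
}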
>
> Fixes: 2f6324a3937f ("libbpf: Support shared umems between queues and devices")
>
> Signed-off-by: Ciara Loftus <ciara.loftus@...el.com>
> ---
>  tools/lib/bpf/xsk.c | 29 ++++++++++++++++-------------
>  1 file changed, 16 insertions(+), 13 deletions(-)
>
> diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
> index 443b0cfb45e8..d4991ddff05a 100644
> --- a/tools/lib/bpf/xsk.c
> +++ b/tools/lib/bpf/xsk.c
> @@ -743,21 +743,23 @@ static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
>         return NULL;
>  }
>
> -static void xsk_put_ctx(struct xsk_ctx *ctx)
> +static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
>  {
>         struct xsk_umem *umem = ctx->umem;
>         struct xdp_mmap_offsets off;
>         int err;
>
>         if (--ctx->refcount == 0) {
> -               err = xsk_get_mmap_offsets(umem->fd, &off);
> -               if (!err) {
> -                       munmap(ctx->fill->ring - off.fr.desc,
> -                              off.fr.desc + umem->config.fill_size *
> -                              sizeof(__u64));
> -                       munmap(ctx->comp->ring - off.cr.desc,
> -                              off.cr.desc + umem->config.comp_size *
> -                              sizeof(__u64));
> +               if (unmap) {
> +                       err = xsk_get_mmap_offsets(umem->fd, &off);
> +                       if (!err) {
> +                               munmap(ctx->fill->ring - off.fr.desc,
> +                                      off.fr.desc + umem->config.fill_size *
> +                               sizeof(__u64));
> +                               munmap(ctx->comp->ring - off.cr.desc,
> +                                      off.cr.desc + umem->config.comp_size *
> +                               sizeof(__u64));
> +                       }

This adds another level of indentation to the whole function.
Since it's changing anyway, could you write it as:
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount)
		return;

	/* skip unmapping rings that were mapped before this call */
	if (!unmap)
		goto out_free;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (err)
		goto out_free;

	munmap(ctx->fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));
	munmap(ctx->comp->ring - off.cr.desc,
	       off.cr.desc + umem->config.comp_size * sizeof(__u64));

out_free:
	list_del(&ctx->list);
	free(ctx);
}

other than this it looks fine to me.
Bjorn, Magnus,
please review.

>                 }
>
>                 list_del(&ctx->list);
> @@ -797,8 +799,6 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
>         memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
>         ctx->ifname[IFNAMSIZ - 1] = '\0';
>
> -       umem->fill_save = NULL;
> -       umem->comp_save = NULL;
>         ctx->fill = fill;
>         ctx->comp = comp;
>         list_add(&ctx->list, &umem->ctx_list);
> @@ -854,6 +854,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
>         struct xsk_socket *xsk;
>         struct xsk_ctx *ctx;
>         int err, ifindex;
> +       bool unmap = umem->fill_save != fill;
>
>         if (!umem || !xsk_ptr || !(rx || tx))
>                 return -EFAULT;
> @@ -994,6 +995,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
>         }
>
>         *xsk_ptr = xsk;
> +       umem->fill_save = NULL;
> +       umem->comp_save = NULL;
>         return 0;
>
>  out_mmap_tx:
> @@ -1005,7 +1008,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
>                 munmap(rx_map, off.rx.desc +
>                        xsk->config.rx_size * sizeof(struct xdp_desc));
>  out_put_ctx:
> -       xsk_put_ctx(ctx);
> +       xsk_put_ctx(ctx, unmap);
>  out_socket:
>         if (--umem->refcount)
>                 close(xsk->fd);
> @@ -1071,7 +1074,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
>                 }
>         }
>
> -       xsk_put_ctx(ctx);
> +       xsk_put_ctx(ctx, true);
>
>         umem->refcount--;
>         /* Do not close an fd that also has an associated umem connected
> --
> 2.17.1
>
