[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220309061018.wn5tddiguywdeyra@kafai-mbp.dhcp.thefacebook.com>
Date: Tue, 8 Mar 2022 22:10:19 -0800
From: Martin KaFai Lau <kafai@...com>
To: Toke Høiland-Jørgensen <toke@...hat.com>
Cc: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Jesper Dangaard Brouer <hawk@...nel.org>,
netdev@...r.kernel.org, bpf@...r.kernel.org
Subject: Re: [PATCH bpf-next v10 1/5] bpf: Add "live packet" mode for XDP in
BPF_PROG_RUN
On Tue, Mar 08, 2022 at 03:57:57PM +0100, Toke Høiland-Jørgensen wrote:
> +static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
> + u32 repeat)
> +{
> + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
> + int err = 0, act, ret, i, nframes = 0, batch_sz;
> + struct xdp_frame **frames = xdp->frames;
> + struct xdp_page_head *head;
> + struct xdp_frame *frm;
> + bool redirect = false;
> + struct xdp_buff *ctx;
> + struct page *page;
> +
> + batch_sz = min_t(u32, repeat, xdp->batch_size);
> +
> + local_bh_disable();
> + xdp_set_return_frame_no_direct();
> +
> + for (i = 0; i < batch_sz; i++) {
> + page = page_pool_dev_alloc_pages(xdp->pp);
> + if (!page) {
> + err = -ENOMEM;
> + goto out;
> + }
> +
> + head = phys_to_virt(page_to_phys(page));
> + reset_ctx(head);
> + ctx = &head->ctx;
> + frm = &head->frm;
> + xdp->frame_cnt++;
> +
> + act = bpf_prog_run_xdp(prog, ctx);
> +
> + /* if program changed pkt bounds we need to update the xdp_frame */
> + if (unlikely(ctx_was_changed(head))) {
> + ret = xdp_update_frame_from_buff(ctx, frm);
> + if (ret) {
> + xdp_return_buff(ctx);
> + continue;
> + }
> + }
> +
> + switch (act) {
> + case XDP_TX:
> + /* we can't do a real XDP_TX since we're not in the
> + * driver, so turn it into a REDIRECT back to the same
> + * index
> + */
> + ri->tgt_index = xdp->dev->ifindex;
> + ri->map_id = INT_MAX;
> + ri->map_type = BPF_MAP_TYPE_UNSPEC;
> + fallthrough;
> + case XDP_REDIRECT:
> + redirect = true;
> + ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
> + if (ret)
> + xdp_return_buff(ctx);
> + break;
> + case XDP_PASS:
> + frames[nframes++] = frm;
> + break;
> + default:
> + bpf_warn_invalid_xdp_action(NULL, prog, act);
> + fallthrough;
> + case XDP_DROP:
> + xdp_return_buff(ctx);
> + break;
> + }
> + }
> +
> +out:
> + if (redirect)
> + xdp_do_flush();
> + if (nframes)
> + err = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
This may overwrite the -ENOMEM with 0: on the "goto out" path after a
page allocation failure, if nframes is non-zero the return value of
xdp_recv_frames() is assigned to err, so the -ENOMEM can be lost.
Others lgtm.
> +
> + xdp_clear_return_frame_no_direct();
> + local_bh_enable();
> + return err;
> +}
> +
> +static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
> + u32 repeat, u32 batch_size, u32 *time)
> +
> +{
> + struct xdp_test_data xdp = { .batch_size = batch_size };
> + struct bpf_test_timer t = { .mode = NO_MIGRATE };
> + int ret;
> +
> + if (!repeat)
> + repeat = 1;
> +
> + ret = xdp_test_run_setup(&xdp, ctx);
> + if (ret)
> + return ret;
> +
> + bpf_test_timer_enter(&t);
> + do {
> + xdp.frame_cnt = 0;
> + ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
> + if (unlikely(ret < 0))
> + break;
> + } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
> + bpf_test_timer_leave(&t);
> +
> + xdp_test_run_teardown(&xdp);
> + return ret;
> +}
> +
Powered by blists - more mailing lists