Message-ID: <dfea62aa-6444-1004-5448-f19fe0798d38@iogearbox.net>
Date: Sat, 6 Jan 2018 01:54:48 +0100
From: Daniel Borkmann <daniel@...earbox.net>
To: Jesper Dangaard Brouer <brouer@...hat.com>,
Daniel Borkmann <borkmann@...earbox.net>,
Alexei Starovoitov <alexei.starovoitov@...il.com>
Cc: netdev@...r.kernel.org, Alexei Starovoitov <ast@...nel.org>,
dsahern@...il.com, gospo@...adcom.com, bjorn.topel@...el.com,
michael.chan@...adcom.com
Subject: Re: [bpf-next V4 PATCH 13/14] bpf: finally expose xdp_rxq_info to XDP
bpf-programs
On 01/03/2018 11:26 AM, Jesper Dangaard Brouer wrote:
> Now all XDP drivers have been updated to set up xdp_rxq_info and assign
> it to xdp_buff->rxq. Thus, it is now safe to enable access to some
> of the xdp_rxq_info struct members.
>
> This patch extends xdp_md and exposes a UAPI to userspace for
> ingress_ifindex and rx_queue_index. Access happens via a bpf
> instruction rewrite that loads data directly from struct xdp_rxq_info.
>
> * ingress_ifindex maps to xdp_rxq_info->dev->ifindex
> * rx_queue_index maps to xdp_rxq_info->queue_index
>
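(Aside, not part of the patch: a minimal sketch of an XDP program consuming
the two new xdp_md members once this lands. The ifindex value and the
queue-0 policy below are made up purely for illustration.)

  #include <linux/bpf.h>

  #ifndef SEC
  # define SEC(name) __attribute__((section(name), used))
  #endif

  SEC("xdp")
  int xdp_rxq_info_demo(struct xdp_md *ctx)
  {
          /* Rewritten by the verifier into rxq->dev->ifindex and
           * rxq->queue_index loads, see the filter.c hunk below.
           * Hypothetical policy: drop what lands on rx queue 0 of
           * the device with ifindex 4, pass everything else.
           */
          if (ctx->ingress_ifindex == 4 && ctx->rx_queue_index == 0)
                  return XDP_DROP;

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";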
> Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
> Acked-by: Alexei Starovoitov <ast@...nel.org>
> ---
> include/uapi/linux/bpf.h | 3 +++
> net/core/filter.c | 19 +++++++++++++++++++
> 2 files changed, 22 insertions(+)
>
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 69eabfcb9bdb..a6000a95d40e 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -899,6 +899,9 @@ struct xdp_md {
> __u32 data;
> __u32 data_end;
> __u32 data_meta;
> + /* Below access goes through struct xdp_rxq_info */
> + __u32 ingress_ifindex; /* rxq->dev->ifindex */
> + __u32 rx_queue_index; /* rxq->queue_index */
> };
>
> enum sk_action {
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 130b842c3a15..acdb94c0e97f 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -4304,6 +4304,25 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
> si->dst_reg, si->src_reg,
> offsetof(struct xdp_buff, data_end));
> break;
> + case offsetof(struct xdp_md, ingress_ifindex):
> + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
> + si->dst_reg, si->src_reg,
> + offsetof(struct xdp_buff, rxq));
> + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
> + si->dst_reg, si->dst_reg,
> + offsetof(struct xdp_rxq_info, dev));
> + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
> + bpf_target_off(struct net_device,
> + ifindex, 4, target_size));
bpf_target_off() is actually only used in the context of narrow ctx access.
This should just be:
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct net_device, ifindex));
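(For context, quoting the helper roughly from memory, so the exact shape may
differ: bpf_target_off() just resolves to offsetof() and additionally reports
the field size back via target_size, which the verifier only needs when it
later patches narrowed ctx loads.)

  #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE_OFF)                \
          ({                                                              \
                  BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));     \
                  *(PTR_SIZE_OFF) = (SIZE);                               \
                  offsetof(TYPE, MEMBER);                                 \
          })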
> + break;
> + case offsetof(struct xdp_md, rx_queue_index):
> + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
> + si->dst_reg, si->src_reg,
> + offsetof(struct xdp_buff, rxq));
> + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
> + bpf_target_off(struct xdp_rxq_info,
> + queue_index, 4, target_size));
And here:
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct xdp_rxq_info, queue_index));
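(In effect, the two rewritten cases then boil down to the following C
dereference chains, sketched here just to make the mapping explicit:)

  /* ingress_ifindex: ctx is really a struct xdp_buff at this point */
  ifindex = ((struct xdp_buff *)ctx)->rxq->dev->ifindex;

  /* rx_queue_index */
  queue   = ((struct xdp_buff *)ctx)->rxq->queue_index;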
> + break;
> }
>
> return insn - insn_buf;
>