Message-ID: <460e032d-2fac-6afb-bc4b-30c97f2e31e2@linux.dev>
Date: Thu, 6 Oct 2022 13:54:48 -0700
From: Martin KaFai Lau <martin.lau@...ux.dev>
To: Daniel Borkmann <daniel@...earbox.net>
Cc: razor@...ckwall.org, ast@...nel.org, andrii@...nel.org,
	john.fastabend@...il.com, joannelkoong@...il.com, memxor@...il.com,
	toke@...hat.com, joe@...ium.io, netdev@...r.kernel.org,
	bpf <bpf@...r.kernel.org>
Subject: Re: [PATCH bpf-next 01/10] bpf: Add initial fd-based API to attach tc BPF programs

On 10/4/22 4:11 PM, Daniel Borkmann wrote:
> diff --git a/kernel/bpf/net.c b/kernel/bpf/net.c
> new file mode 100644
> index 000000000000..ab9a9dee615b
> --- /dev/null
> +++ b/kernel/bpf/net.c
> @@ -0,0 +1,274 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright (c) 2022 Isovalent */
> +
> +#include <linux/bpf.h>
> +#include <linux/filter.h>
> +#include <linux/netdevice.h>
> +
> +#include <net/xtc.h>
> +
> +static int __xtc_prog_attach(struct net_device *dev, bool ingress, u32 limit,
> +                             struct bpf_prog *nprog, u32 prio, u32 flags)
> +{
> +        struct bpf_prog_array_item *item, *tmp;
> +        struct xtc_entry *entry, *peer;
> +        struct bpf_prog *oprog;
> +        bool created;
> +        int i, j;
> +
> +        ASSERT_RTNL();
> +
> +        entry = dev_xtc_entry_fetch(dev, ingress, &created);
> +        if (!entry)
> +                return -ENOMEM;
> +        for (i = 0; i < limit; i++) {
> +                item = &entry->items[i];
> +                oprog = item->prog;
> +                if (!oprog)
> +                        break;
> +                if (item->bpf_priority == prio) {
> +                        if (flags & BPF_F_REPLACE) {
> +                                /* Pairs with READ_ONCE() in xtc_run_progs(). */
> +                                WRITE_ONCE(item->prog, nprog);
> +                                bpf_prog_put(oprog);
> +                                dev_xtc_entry_prio_set(entry, prio, nprog);
> +                                return prio;
> +                        }
> +                        return -EBUSY;
> +                }
> +        }
> +        if (dev_xtc_entry_total(entry) >= limit)
> +                return -ENOSPC;
> +        prio = dev_xtc_entry_prio_new(entry, prio, nprog);
> +        if (prio < 0) {
> +                if (created)
> +                        dev_xtc_entry_free(entry);
> +                return -ENOMEM;
> +        }
> +        peer = dev_xtc_entry_peer(entry);
> +        dev_xtc_entry_clear(peer);
> +        for (i = 0, j = 0; i < limit; i++, j++) {
> +                item = &entry->items[i];
> +                tmp = &peer->items[j];
> +                oprog = item->prog;
> +                if (!oprog) {
> +                        if (i == j) {
> +                                tmp->prog = nprog;
> +                                tmp->bpf_priority = prio;
> +                        }
> +                        break;
> +                } else if (item->bpf_priority < prio) {
> +                        tmp->prog = oprog;
> +                        tmp->bpf_priority = item->bpf_priority;
> +                } else if (item->bpf_priority > prio) {
> +                        if (i == j) {
> +                                tmp->prog = nprog;
> +                                tmp->bpf_priority = prio;
> +                                tmp = &peer->items[++j];
> +                        }
> +                        tmp->prog = oprog;
> +                        tmp->bpf_priority = item->bpf_priority;
> +                }
> +        }
> +        dev_xtc_entry_update(dev, peer, ingress);
> +        if (ingress)
> +                net_inc_ingress_queue();
> +        else
> +                net_inc_egress_queue();
> +        xtc_inc();
> +        return prio;
> +}
> +
> +int xtc_prog_attach(const union bpf_attr *attr, struct bpf_prog *nprog)
> +{
> +        struct net *net = current->nsproxy->net_ns;
> +        bool ingress = attr->attach_type == BPF_NET_INGRESS;
> +        struct net_device *dev;
> +        int ret;
> +
> +        if (attr->attach_flags & ~BPF_F_REPLACE)
> +                return -EINVAL;

After looking at patch 3, I think this also needs to check that attach_priority
is non-zero when BPF_F_REPLACE is set. __xtc_prog_attach() should then return
-ENOENT when BPF_F_REPLACE is set but the prio is not found, instead of
continuing to dev_xtc_entry_prio_new(). That said, all of this could probably
go away if the outcome of the prio discussion is to avoid priorities altogether.
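Something along these lines, as an untested sketch on top of this patch, reusing
only the fields and helpers it already introduces:

        /* in xtc_prog_attach(): a replace request must name the prio to replace */
        if ((attr->attach_flags & BPF_F_REPLACE) && !attr->attach_priority)
                return -EINVAL;

        /* in __xtc_prog_attach(), after the lookup loop falls through:
         * the requested prio does not exist, so there is nothing to replace
         */
        if (flags & BPF_F_REPLACE)
                return -ENOENT;
        if (dev_xtc_entry_total(entry) >= limit)
                return -ENOSPC;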
> +        rtnl_lock();
> +        dev = __dev_get_by_index(net, attr->target_ifindex);
> +        if (!dev) {
> +                rtnl_unlock();
> +                return -EINVAL;
> +        }
> +        ret = __xtc_prog_attach(dev, ingress, XTC_MAX_ENTRIES, nprog,
> +                                attr->attach_priority, attr->attach_flags);
> +        rtnl_unlock();
> +        return ret;
> +}
> +