Message-ID: <20240821045629.2856641-1-almasrymina@google.com>
Date: Wed, 21 Aug 2024 04:56:29 +0000
From: Mina Almasry <almasrymina@...gle.com>
To: netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-hyperv@...r.kernel.org, bpf@...r.kernel.org
Cc: Mina Almasry <almasrymina@...gle.com>, Jay Vosburgh <jv@...sburgh.net>,
Andy Gospodarek <andy@...yhouse.net>, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
"K. Y. Srinivasan" <kys@...rosoft.com>, Haiyang Zhang <haiyangz@...rosoft.com>, Wei Liu <wei.liu@...nel.org>,
Dexuan Cui <decui@...rosoft.com>, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>, Eduard Zingerman <eddyz87@...il.com>, Song Liu <song@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>, John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>, Stanislav Fomichev <sdf@...ichev.me>, Hao Luo <haoluo@...gle.com>,
Jiri Olsa <jolsa@...nel.org>, "Björn Töpel" <bjorn@...nel.org>,
Magnus Karlsson <magnus.karlsson@...el.com>,
Maciej Fijalkowski <maciej.fijalkowski@...el.com>, Jonathan Lemon <jonathan.lemon@...il.com>,
Jesper Dangaard Brouer <hawk@...nel.org>
Subject: [PATCH net-next v21] net: refactor ->ndo_bpf calls into dev_xdp_propagate

When net devices propagate XDP configurations to slave devices, or when
core propagates an XDP configuration to a device, we will need to
perform a memory provider check to ensure we're not binding XDP to a
device using unreadable netmem.

Currently, ->ndo_bpf is called directly from several places (bonding,
hyperv netvsc, the bpf offload code and xsk). Adding the check to each
of these call sites would not be ideal.

Refactor all the ->ndo_bpf calls into a single dev_xdp_propagate()
helper, where the check can be added in the future.

Suggested-by: Jakub Kicinski <kuba@...nel.org>
Signed-off-by: Mina Almasry <almasrymina@...gle.com>
---
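
For context, a rough sketch of how the future memory provider check
could slot into the new helper. The dev_get_min_mp_channel_count()
name and the extack message below are illustrative only and are not
part of this patch; the point is simply that a single guard in
dev_xdp_propagate() covers every ->ndo_bpf caller at once:

int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
{
	if (!dev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;

	/* Hypothetical future check: refuse to bind XDP while the
	 * device has queues backed by unreadable netmem.
	 */
	if (dev_get_min_mp_channel_count(dev)) {
		NL_SET_ERR_MSG(bpf->extack,
			       "unable to propagate XDP to device using unreadable netmem");
		return -EBUSY;
	}

	return dev->netdev_ops->ndo_bpf(dev, bpf);
}

With the check living in one helper, bonding, netvsc, the bpf offload
path and xsk would all get the same behavior without touching each
caller.
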
drivers/net/bonding/bond_main.c | 8 ++++----
drivers/net/hyperv/netvsc_bpf.c | 2 +-
include/linux/netdevice.h | 1 +
kernel/bpf/offload.c | 2 +-
net/core/dev.c | 9 +++++++++
net/xdp/xsk_buff_pool.c | 4 ++--
6 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f9633a6f8571..73f9416c6c1b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2258,7 +2258,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_sysfs_del;
}
- res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ res = dev_xdp_propagate(slave_dev, &xdp);
if (res < 0) {
/* ndo_bpf() sets extack error message */
slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
@@ -2394,7 +2394,7 @@ static int __bond_release_one(struct net_device *bond_dev,
.prog = NULL,
.extack = NULL,
};
- if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+ if (dev_xdp_propagate(slave_dev, &xdp))
slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
}
@@ -5584,7 +5584,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err = dev_xdp_propagate(slave_dev, &xdp);
if (err < 0) {
/* ndo_bpf() sets extack error message */
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
@@ -5616,7 +5616,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (slave == rollback_slave)
break;
- err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err_unwind = dev_xdp_propagate(slave_dev, &xdp);
if (err_unwind < 0)
slave_err(dev, slave_dev,
"Error %d when unwinding XDP program change\n", err_unwind);
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 4a9522689fa4..e01c5997a551 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
+ ret = dev_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0ef3eaa23f4b..a4f876767423 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3918,6 +3918,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 1a4fec330eaa..a5b06bd5fe9b 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -130,7 +130,7 @@ static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
/* Caller must make sure netdev is valid */
netdev = offmap->netdev;
- return netdev->netdev_ops->ndo_bpf(netdev, &data);
+ return dev_xdp_propagate(netdev, &data);
}
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
diff --git a/net/core/dev.c b/net/core/dev.c
index e7260889d4cb..165e9778d422 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9369,6 +9369,15 @@ u8 dev_xdp_prog_count(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ if (!dev->netdev_ops->ndo_bpf)
+ return -EOPNOTSUPP;
+
+ return dev->netdev_ops->ndo_bpf(dev, bpf);
+}
+EXPORT_SYMBOL_GPL(dev_xdp_propagate);
+
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
struct bpf_prog *prog = dev_xdp_prog(dev, mode);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index c0e0204b9630..f44d68c8d75d 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -149,7 +149,7 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
bpf.xsk.pool = NULL;
bpf.xsk.queue_id = pool->queue_id;
- err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
+ err = dev_xdp_propagate(pool->netdev, &bpf);
if (err)
WARN(1, "Failed to disable zero-copy!\n");
@@ -215,7 +215,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
bpf.xsk.pool = pool;
bpf.xsk.queue_id = queue_id;
- err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
+ err = dev_xdp_propagate(netdev, &bpf);
if (err)
goto err_unreg_pool;
--
2.46.0.184.g6999bdac58-goog