Message-ID: <00000000000050517b061dc2f665@google.com>
Date: Sun, 21 Jul 2024 07:44:13 -0700
From: syzbot <syzbot+707d98c8649695eaf329@...kaller.appspotmail.com>
To: linux-kernel@...r.kernel.org
Subject: Re: [syzbot] Re: [syzbot] [bpf?] [net?] KASAN: slab-use-after-free
Read in bq_xmit_all
For archival purposes, forwarding an incoming command email to
linux-kernel@...r.kernel.org.
***
Subject: Re: [syzbot] [bpf?] [net?] KASAN: slab-use-after-free Read in bq_xmit_all
Author: aha310510@...il.com
#syz test git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
---
kernel/bpf/cpumap.c | 6 ++----
kernel/bpf/devmap.c | 3 +--
2 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8fccc311397c..22e1c62fc0f4 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -708,6 +708,7 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 {
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
+	struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
 		bq_flush_to_queue(bq);
@@ -723,11 +724,8 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	 */
 	bq->q[bq->count++] = xdpf;
 
-	if (!bq->flush_node.prev) {
-		struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
-
+	if (!bq->flush_node.prev)
 		list_add(&bq->flush_node, flush_list);
-	}
 }
 
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 4b9203deb711..dfde65014374 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -454,6 +454,7 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
 {
 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
+	struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
 		bq_xmit_all(bq, 0);
@@ -466,8 +467,6 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 	 * are only ever modified together.
 	 */
 	if (!bq->dev_rx) {
-		struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
-
 		bq->dev_rx = dev_rx;
 		bq->xdp_prog = xdp_prog;
 		list_add(&bq->flush_node, flush_list);
--
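As background, here is a minimal userspace sketch of the bulk-queue-plus-flush-list
pattern that bq_enqueue() implements in both files touched above: frames are batched
into a small fixed-size array, the queue is linked onto a flush list the first time
it holds a frame, and it is drained when the array fills or when the flush list is
later walked. Everything prefixed with toy_ is invented for illustration; this only
mirrors the shape of the patched code, not the kernel implementation.

/*
 * Minimal userspace sketch (not kernel code, not part of the patch above)
 * of the bulk-queue + flush-list pattern used by bq_enqueue() in
 * kernel/bpf/cpumap.c and kernel/bpf/devmap.c.  All toy_-prefixed names
 * are invented for illustration.
 */
#include <stdio.h>

#define BULK_SIZE 4

struct list_head {
	struct list_head *next, *prev;
};

/* Insert 'new' right after 'head' in a circular doubly linked list. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

struct toy_bq {
	int q[BULK_SIZE];
	unsigned int count;
	struct list_head flush_node;
};

/* Stand-in for bpf_net_ctx_get_{cpu_map,dev}_flush_list(): returns the
 * list that is later walked to drain every queued bulk queue. */
static struct list_head toy_flush_list = { &toy_flush_list, &toy_flush_list };

static struct list_head *toy_get_flush_list(void)
{
	return &toy_flush_list;
}

/* Stand-in for bq_flush_to_queue()/bq_xmit_all(): drain the array. */
static void toy_flush(struct toy_bq *bq)
{
	for (unsigned int i = 0; i < bq->count; i++)
		printf("flushing frame %d\n", bq->q[i]);
	bq->count = 0;
}

/* Mirrors the shape of the patched bq_enqueue(): the flush list is looked
 * up once at the top, the queue is drained if the array is already full,
 * the new frame is stored, and the queue is linked onto the flush list
 * only the first time (flush_node.prev doubles as the "already linked"
 * marker). */
static void toy_enqueue(struct toy_bq *bq, int frame)
{
	struct list_head *flush_list = toy_get_flush_list();

	if (bq->count == BULK_SIZE)
		toy_flush(bq);

	bq->q[bq->count++] = frame;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int main(void)
{
	struct toy_bq bq = { .count = 0 };

	for (int frame = 0; frame < 6; frame++)
		toy_enqueue(&bq, frame);
	toy_flush(&bq);
	return 0;
}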