[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230728093346.673994-1-adam@wowsignal.io>
Date: Fri, 28 Jul 2023 11:33:47 +0200
From: Adam Sindelar <adam@...signal.io>
To: bpf@...r.kernel.org
Cc: Adam Sindelar <ats@...com>, David Vernet <void@...ifault.com>,
Brendan Jackman <jackmanb@...gle.com>,
KP Singh <kpsingh@...omium.org>, linux-kernel@...r.kernel.org,
Alexei Starovoitov <ast@...nel.org>,
Florent Revest <revest@...omium.org>
Subject: [PATCH bpf-next v5] libbpf: Expose API to consume one ring at a time
We already provide ring_buffer__epoll_fd to enable use of external
polling systems. However, the only API available to consume the ring
buffer is ring_buffer__consume, which always checks all rings. When
polling for many events, this can be wasteful. Add a new function,
ring_buffer__consume_ring, which consumes a single ring identified by
the ID that epoll_wait reports in epoll_data.
Signed-off-by: Adam Sindelar <adam@...signal.io>
---
v1->v2: Added entry to libbpf.map
v2->v3: Correctly set errno and handle overflow
v3->v4: Fixed an embarrassing typo from zealous autocomplete
v4->v5: Added a selftest to show usage
tools/lib/bpf/libbpf.h | 1 +
tools/lib/bpf/libbpf.map | 1 +
tools/lib/bpf/ringbuf.c | 22 ++++++++++++++++
.../selftests/bpf/prog_tests/ringbuf_multi.c | 26 +++++++++++++++++++
4 files changed, 50 insertions(+)
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 55b97b2087540..20ccc65eb3f9d 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1195,6 +1195,7 @@ LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
ring_buffer_sample_fn sample_cb, void *ctx);
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
+LIBBPF_API int ring_buffer__consume_ring(struct ring_buffer *rb, uint32_t ring_id);
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
struct user_ring_buffer_opts {
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 9c7538dd5835e..42dc418b4672f 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -398,4 +398,5 @@ LIBBPF_1.3.0 {
bpf_prog_detach_opts;
bpf_program__attach_netfilter;
bpf_program__attach_tcx;
+ ring_buffer__consume_ring;
} LIBBPF_1.2.0;
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 02199364db136..457469fc7d71e 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -290,6 +290,28 @@ int ring_buffer__consume(struct ring_buffer *rb)
return res;
}
+/* Consume available data from a single RINGBUF map identified by its ID (the
+ * index epoll_wait reports in epoll_data when polling ring_buffer__epoll_fd).
+ * Returns records consumed (capped at INT_MAX) or negative error, errno set.
+ */
+int ring_buffer__consume_ring(struct ring_buffer *rb, uint32_t ring_id)
+{
+ struct ring *ring;
+ int64_t res;
+
+ if (ring_id >= rb->ring_cnt)
+ return libbpf_err(-EINVAL);
+
+ ring = &rb->rings[ring_id];
+ res = ringbuf_process_ring(ring);
+ if (res < 0)
+ return libbpf_err(res);
+
+ if (res > INT_MAX) /* cap to match ring_buffer__consume */
+ return INT_MAX;
+ return res;
+}
+
/* Poll for available data and consume records, if any are available.
* Returns number of records consumed (or INT_MAX, whichever is less), or
* negative number, if any of the registered callbacks returned error.
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
index 1455911d9fcbe..8123efc94d1a8 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
@@ -29,6 +29,11 @@ static int process_sample(void *ctx, void *data, size_t len)
CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
777L, s->value);
break;
+ case 2:
+ CHECK(ring != 2, "sample3_ring", "exp %d, got %d\n", 2, ring);
+ CHECK(s->value != 1337, "sample3_value", "exp %ld, got %ld\n",
+ 1337L, s->value);
+ break;
default:
CHECK(true, "extra_sample", "unexpected sample seq %d, val %ld\n",
s->seq, s->value);
@@ -45,6 +50,8 @@ void test_ringbuf_multi(void)
int err;
int page_size = getpagesize();
int proto_fd = -1;
+ int epoll_fd;
+ struct epoll_event events[2];
skel = test_ringbuf_multi__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
@@ -124,6 +131,25 @@ void test_ringbuf_multi(void)
CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
2L, skel->bss->total);
+ /* validate APIs to support external polling */
+ epoll_fd = ring_buffer__epoll_fd(ringbuf);
+
+ /* expect events on either ring to trigger through the epoll_fd */
+ skel->bss->target_ring = 2;
+ skel->bss->value = 1337;
+ syscall(__NR_getpgid);
+
+ err = epoll_wait(epoll_fd, events, sizeof(events) / sizeof(struct epoll_event), -1);
+ if (CHECK(err != 1, "epoll_wait", "epoll_wait exp %d, got %d\n", 1, err))
+ goto cleanup;
+ if (CHECK(!(events[0].events & EPOLLIN), "epoll_event", "expected EPOLLIN\n"))
+ goto cleanup;
+
+ /* epoll data can be used to consume only the affected ring */
+ err = ring_buffer__consume_ring(ringbuf, events[0].data.u32);
+ CHECK(err != 1, "consume_ring", "consume_ring %u exp %d, got %d\n",
+ events[0].data.u32, 1, err);
+
cleanup:
if (proto_fd >= 0)
close(proto_fd);
--
2.39.2
Powered by blists - more mailing lists