Message-ID: <20260119235456.1722452-5-axboe@kernel.dk>
Date: Mon, 19 Jan 2026 16:54:27 -0700
From: Jens Axboe <axboe@...nel.dk>
To: io-uring@...r.kernel.org
Cc: brauner@...nel.org,
jannh@...gle.com,
kees@...nel.org,
linux-kernel@...r.kernel.org,
Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 4/7] io_uring/bpf_filter: cache lookup table in ctx->bpf_filters

Currently a few pointer dereferences need to be made to both check if
BPF filters are installed, and to then retrieve the actual filter for
the opcode. Cache the filter table in ctx->bpf_filters to avoid that.

Add a WARN_ON_ONCE sanity check on ring exit to catch the cached
pointer ever getting out of sync with the table. The risk of that is
small, as the table is currently only updated in one spot, but once
task forking is enabled, that will add one more.
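
To illustrate (example only, not part of the patch), the per-request
lookup chain shrinks from

    ctx->restrictions.bpf_filters->filters[req->opcode]

to

    ctx->bpf_filters[req->opcode]

where ctx->bpf_filters caches the same filters table that the
restrictions side owns.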
Signed-off-by: Jens Axboe <axboe@...nel.dk>
---
 include/linux/io_uring_types.h |  2 ++
 io_uring/bpf_filter.c          |  7 ++++---
 io_uring/bpf_filter.h          | 10 +++++-----
 io_uring/io_uring.c            | 11 +++++++++--
 io_uring/register.c            |  3 +++
 5 files changed, 23 insertions(+), 10 deletions(-)
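
For readers unfamiliar with the pattern in the diff below: the
per-opcode lookup is the usual RCU check-then-dereference sequence. A
minimal standalone sketch of that pattern, assuming the kernel RCU
primitives from <linux/rcupdate.h> and a hypothetical exec_filter()
helper (illustration only, not code from this patch):

#include <linux/rcupdate.h>
#include <linux/types.h>

/* Run the filter installed for @opcode, if any; 0 means allow. */
static int run_filters(struct io_bpf_filter __rcu **filters, u8 opcode)
{
        struct io_bpf_filter *filter;
        int ret = 0;

        /* Cheap existence check, valid outside an RCU read-side section */
        if (!rcu_access_pointer(filters[opcode]))
                return 0;

        rcu_read_lock();
        /* Re-load under RCU; the filter may be unlinked concurrently */
        filter = rcu_dereference(filters[opcode]);
        if (filter)
                ret = exec_filter(filter);      /* hypothetical stand-in */
        rcu_read_unlock();
        return ret;
}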
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 37f0a5f7b2f4..366927635277 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -287,6 +287,8 @@ struct io_ring_ctx {
 
                struct task_struct      *submitter_task;
                struct io_rings         *rings;
+               /* cache of ->restrictions.bpf_filters->filters */
+               struct io_bpf_filter __rcu      **bpf_filters;
                struct percpu_ref       refs;
 
                clockid_t               clockid;
diff --git a/io_uring/bpf_filter.c b/io_uring/bpf_filter.c
index 3352f53fd2b9..06fad04c4b54 100644
--- a/io_uring/bpf_filter.c
+++ b/io_uring/bpf_filter.c
@@ -55,14 +55,15 @@ static void io_uring_populate_bpf_ctx(struct io_uring_bpf_ctx *bctx,
  * __io_uring_run_bpf_filters() returns 0 on success, allow running the
  * request, and -EACCES when a request is denied.
  */
-int __io_uring_run_bpf_filters(struct io_restriction *res, struct io_kiocb *req)
+int __io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters,
+                               struct io_kiocb *req)
 {
        struct io_bpf_filter *filter;
        struct io_uring_bpf_ctx bpf_ctx;
        int ret;
 
        /* Fast check for existence of filters outside of RCU */
-       if (!rcu_access_pointer(res->bpf_filters->filters[req->opcode]))
+       if (!rcu_access_pointer(filters[req->opcode]))
                return 0;
 
        /*
@@ -70,7 +71,7 @@ int __io_uring_run_bpf_filters(struct io_restriction *res, struct io_kiocb *req)
         * of what we expect, io_init_req() does this.
         */
        rcu_read_lock();
-       filter = rcu_dereference(res->bpf_filters->filters[req->opcode]);
+       filter = rcu_dereference(filters[req->opcode]);
        if (!filter) {
                ret = 1;
                goto out;
diff --git a/io_uring/bpf_filter.h b/io_uring/bpf_filter.h
index 27eae9705473..9f3cdb92eb16 100644
--- a/io_uring/bpf_filter.h
+++ b/io_uring/bpf_filter.h
@@ -6,18 +6,18 @@
 
 #ifdef CONFIG_IO_URING_BPF
 
-int __io_uring_run_bpf_filters(struct io_restriction *res, struct io_kiocb *req);
+int __io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters, struct io_kiocb *req);
 int io_register_bpf_filter(struct io_restriction *res,
                           struct io_uring_bpf __user *arg);
 void io_put_bpf_filters(struct io_restriction *res);
 
-static inline int io_uring_run_bpf_filters(struct io_restriction *res,
+static inline int io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters,
                                           struct io_kiocb *req)
 {
-       if (res->bpf_filters)
-               return __io_uring_run_bpf_filters(res, req);
+       if (filters)
+               return __io_uring_run_bpf_filters(filters, req);
        return 0;
 }
@@ -29,7 +29,7 @@ static inline int io_register_bpf_filter(struct io_restriction *res,
 {
        return -EINVAL;
 }
-static inline int io_uring_run_bpf_filters(struct io_restriction *res,
+static inline int io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters,
                                           struct io_kiocb *req)
 {
        return 0;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 67533e494836..62aeaf0fad74 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2262,8 +2262,8 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (unlikely(ret))
                return io_submit_fail_init(sqe, req, ret);
 
-       if (unlikely(ctx->restrictions.bpf_filters)) {
-               ret = io_uring_run_bpf_filters(&ctx->restrictions, req);
+       if (unlikely(ctx->bpf_filters)) {
+               ret = io_uring_run_bpf_filters(ctx->bpf_filters, req);
                if (ret)
                        return io_submit_fail_init(sqe, req, ret);
        }
@@ -2857,6 +2857,13 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
        percpu_ref_exit(&ctx->refs);
        free_uid(ctx->user);
        io_req_caches_free(ctx);
+
+       if (ctx->restrictions.bpf_filters) {
+               WARN_ON_ONCE(ctx->bpf_filters !=
+                            ctx->restrictions.bpf_filters->filters);
+       } else {
+               WARN_ON_ONCE(ctx->bpf_filters);
+       }
        io_put_bpf_filters(&ctx->restrictions);
 
        WARN_ON_ONCE(ctx->nr_req_allocated);
diff --git a/io_uring/register.c b/io_uring/register.c
index 30957c2cb5eb..40de9b8924b9 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -837,6 +837,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                if (nr_args != 1)
                        break;
                ret = io_register_bpf_filter(&ctx->restrictions, arg);
+               if (!ret)
+                       WRITE_ONCE(ctx->bpf_filters,
+                                  ctx->restrictions.bpf_filters->filters);
                break;
        default:
                ret = -EINVAL;
--
2.51.0