Message-Id: <20230628015634.33193-6-alexei.starovoitov@gmail.com>
Date: Tue, 27 Jun 2023 18:56:26 -0700
From: Alexei Starovoitov <alexei.starovoitov@...il.com>
To: daniel@...earbox.net,
andrii@...nel.org,
void@...ifault.com,
houtao@...weicloud.com,
paulmck@...nel.org
Cc: tj@...nel.org,
rcu@...r.kernel.org,
netdev@...r.kernel.org,
bpf@...r.kernel.org,
kernel-team@...com
Subject: [PATCH v3 bpf-next 05/13] bpf: Factor out inc/dec of active flag into helpers.
From: Alexei Starovoitov <ast@...nel.org>
Factor out the local_inc_return()/local_dec() handling of the c->active counter
(and the PREEMPT_RT irq save/restore around it) into inc_active()/dec_active() helpers.
No functional changes.
Signed-off-by: Alexei Starovoitov <ast@...nel.org>
---
kernel/bpf/memalloc.c | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
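
(Illustration only, not part of the patch: a minimal sketch of how the new
helpers are meant to be paired, mirroring add_obj_to_free_list() and
free_bulk() in the diff below. The function name example_touch_free_list()
is hypothetical.)

static void example_touch_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	/* On PREEMPT_RT this also disables IRQs to reduce the chance of a
	 * bpf prog running on this cpu while 'active' is busy; on !RT it
	 * only bumps the per-cpu 'active' counter.
	 */
	inc_active(c, &flags);
	/* free_llist/free_cnt are only touched with 'active' held */
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, flags);
}
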
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 9693b1f8cbda..052fc801fb9f 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -154,17 +154,15 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
-static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
 {
-	unsigned long flags;
-
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		/* In RT irq_work runs in per-cpu kthread, so disable
 		 * interrupts to avoid preemption and interrupts and
 		 * reduce the chance of bpf prog executing on this cpu
 		 * when active counter is busy.
 		 */
-		local_irq_save(flags);
+		local_irq_save(*flags);
 	/* alloc_bulk runs from irq_work which will not preempt a bpf
 	 * program that does unit_alloc/unit_free since IRQs are
 	 * disabled there. There is no race to increment 'active'
@@ -172,13 +170,25 @@ static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
 	 * bpf prog preempted this loop.
 	 */
 	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-	__llist_add(obj, &c->free_llist);
-	c->free_cnt++;
+}
+
+static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
+{
 	local_dec(&c->active);
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_restore(flags);
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	inc_active(c, &flags);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	dec_active(c, flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
@@ -300,17 +310,13 @@ static void free_bulk(struct bpf_mem_cache *c)
 	int cnt;
 
 	do {
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_save(flags);
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+		inc_active(c, &flags);
 		llnode = __llist_del_first(&c->free_llist);
 		if (llnode)
 			cnt = --c->free_cnt;
 		else
 			cnt = 0;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		dec_active(c, flags);
 		if (llnode)
 			enque_to_free(c, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
--
2.34.1