Message-Id: <20220629154832.56986-4-laoar.shao@gmail.com>
Date: Wed, 29 Jun 2022 15:48:31 +0000
From: Yafang Shao <laoar.shao@...il.com>
To: ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
kafai@...com, songliubraving@...com, yhs@...com,
john.fastabend@...il.com, kpsingh@...nel.org, quentin@...valent.com
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
Yafang Shao <laoar.shao@...il.com>
Subject: [PATCH bpf-next 3/4] bpf: Don't do preempt check when migration is disabled
After commit
74d862b682f5 ("sched: Make migrate_disable/enable() independent of RT"),
these per-CPU accesses all run with only migration disabled, so the
preempt check performed by the __this_cpu_*() operations is not needed
here. Use the this_cpu_*() variants, which skip that check, instead.
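Not part of the patch, but for readers unfamiliar with the pattern: a
minimal sketch of the trylock scheme these call sites implement, with a
hypothetical per-CPU counter. this_cpu_inc_return() is safe while only
migration is disabled, whereas __this_cpu_inc_return() would trip the
CONFIG_DEBUG_PREEMPT check because preemption stays enabled:

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(int, busy_ctr); /* hypothetical counter */

    static bool trylock_cpu_local(void)
    {
            /* Pin the task to this CPU; preemption stays enabled. */
            migrate_disable();
            /*
             * this_cpu_inc_return() may be used with preemption
             * enabled, so no preempt check is done. The task cannot
             * migrate, so the per-CPU access is still consistent.
             */
            if (unlikely(this_cpu_inc_return(busy_ctr) != 1)) {
                    /* Already held on this CPU: back out. */
                    this_cpu_dec(busy_ctr);
                    migrate_enable();
                    return false;
            }
            return true;
    }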
Signed-off-by: Yafang Shao <laoar.shao@...il.com>
---
kernel/bpf/bpf_task_storage.c | 8 ++++----
kernel/bpf/hashtab.c | 6 +++---
kernel/bpf/trampoline.c | 4 ++--
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index e9014dc62682..6f290623347e 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -26,20 +26,20 @@ static DEFINE_PER_CPU(int, bpf_task_storage_busy);
static void bpf_task_storage_lock(void)
{
migrate_disable();
- __this_cpu_inc(bpf_task_storage_busy);
+ this_cpu_inc(bpf_task_storage_busy);
}
static void bpf_task_storage_unlock(void)
{
- __this_cpu_dec(bpf_task_storage_busy);
+ this_cpu_dec(bpf_task_storage_busy);
migrate_enable();
}
static bool bpf_task_storage_trylock(void)
{
migrate_disable();
- if (unlikely(__this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
- __this_cpu_dec(bpf_task_storage_busy);
+ if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
+ this_cpu_dec(bpf_task_storage_busy);
migrate_enable();
return false;
}
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 9d4559a1c032..6a3a95037aac 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -166,8 +166,8 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
hash = hash & HASHTAB_MAP_LOCK_MASK;
migrate_disable();
- if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
- __this_cpu_dec(*(htab->map_locked[hash]));
+ if (unlikely(this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+ this_cpu_dec(*(htab->map_locked[hash]));
migrate_enable();
return -EBUSY;
}
@@ -190,7 +190,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
raw_spin_unlock_irqrestore(&b->raw_lock, flags);
else
spin_unlock_irqrestore(&b->lock, flags);
- __this_cpu_dec(*(htab->map_locked[hash]));
+ this_cpu_dec(*(htab->map_locked[hash]));
migrate_enable();
}
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 93c7675f0c9e..f4486e54fdb3 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -585,7 +585,7 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *ru
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
- if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+ if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
inc_misses_counter(prog);
return 0;
}
@@ -631,7 +631,7 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
migrate_disable();
might_fault();
- if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+ if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
inc_misses_counter(prog);
return 0;
}
--
2.17.1