From 6a1d2a4beb180241b63f9bf57454bbe031915dd1 Mon Sep 17 00:00:00 2001
From: Manfred Spraul
Date: Sun, 18 Sep 2022 12:17:27 +0200
Subject: [PATCH] lib/percpu_counter: [RFC] potential overflow/underflow

If an interrupt happens between __this_cpu_read(*fbc->counters) and
this_cpu_add(*fbc->counters, amount), and that interrupt modifies the
per_cpu_counter, then the this_cpu_add() performed after the interrupt
returns may under/overflow.

Thus: Disable interrupts.

Note: The patch is incomplete: if the race is real, then more functions
than just percpu_counter_add_batch() need to be updated.

In particular, the !CONFIG_SMP code looks wrong to me as well:

> static inline void
> percpu_counter_add(struct percpu_counter *fbc, s64 amount)
> {
>	preempt_disable();
>	fbc->count += amount;
>	preempt_enable();
> }

The update of fbc->count is not IRQ safe.

Signed-off-by: Manfred Spraul
---
 lib/percpu_counter.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ed610b75dc32..39de94d59b4f 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -82,18 +82,20 @@ EXPORT_SYMBOL(percpu_counter_set);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
+	unsigned long flags;
 
 	preempt_disable();
+	local_irq_save(flags);
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (abs(count) >= batch) {
-		unsigned long flags;
-		raw_spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
 		__this_cpu_sub(*fbc->counters, count - amount);
-		raw_spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock(&fbc->lock);
 	} else {
 		this_cpu_add(*fbc->counters, amount);
 	}
+	local_irq_restore(flags);
 	preempt_enable();
 }
 EXPORT_SYMBOL(percpu_counter_add_batch);
-- 
2.37.2
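
As an illustration of the !CONFIG_SMP concern raised in the commit message, a
minimal sketch of an IRQ-safe percpu_counter_add() could disable interrupts
around the update. This is only a sketch, assuming the include/linux/
percpu_counter.h definition quoted above; it is not part of the patch and has
not been tested:

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	/*
	 * local_irq_save() disables interrupts on the local CPU, which
	 * also prevents preemption, so the explicit preempt_disable()/
	 * preempt_enable() pair is no longer needed.
	 */
	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}

This mirrors what the SMP patch does for percpu_counter_add_batch(): make the
read-modify-write of the counter atomic with respect to interrupts.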