Message-Id: <1212363385.4368.69.camel@localhost.localdomain>
Date: Sun, 01 Jun 2008 16:36:25 -0700
From: Mingming Cao <cmm@...ibm.com>
To: linux-ext4@...r.kernel.org
Subject: [RFC][PATCH 6/6] delalloc ENOSPC: improve percpu counter accounting accuracy
percpu counter: update the center counter when summing per-cpu counters
From: Mingming Cao <cmm@...ibm.com>
Delayed allocation needs to check the number of free blocks on every
write. percpu_counter_read_positive() is not quite accurate, but calling
percpu_counter_sum_positive() on every write is quite expensive.

This patch adds a new function that updates the center counter when
summing up the per-cpu counters. This improves the accuracy of
subsequent percpu_counter_read() calls (which read only the center
counter) and reduces how often the expensive percpu_counter_sum() must
be called in ext4_has_free_blocks().
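
(For illustration only: a minimal user-space sketch of the same
split-counter idea. This is a toy model, not the kernel's
percpu_counter; all names below, such as toy_counter, are
hypothetical.)

#include <pthread.h>

#define NCPUS 4

/* Toy split counter: cheap approximate reads come from ->count;
 * exact sums walk ->local[] under the lock. */
struct toy_counter {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	long long count;	/* central (possibly stale) value */
	long local[NCPUS];	/* per-"cpu" deltas not yet folded in */
};

/* Cheap but stale: ignores the unfolded per-cpu deltas. */
long long toy_read(struct toy_counter *tc)
{
	return tc->count;
}

/* Expensive but exact. If 'set' is nonzero, also flush the deltas
 * into the central counter so the next toy_read() returns this
 * exact value -- the behaviour percpu_counter_sum_and_set() adds. */
long long toy_sum(struct toy_counter *tc, int set)
{
	long long ret;
	int cpu;

	pthread_mutex_lock(&tc->lock);
	ret = tc->count;
	for (cpu = 0; cpu < NCPUS; cpu++) {
		ret += tc->local[cpu];
		if (set)
			tc->local[cpu] = 0;
	}
	if (set)
		tc->count = ret;
	pthread_mutex_unlock(&tc->lock);
	return ret;
}

With set nonzero the per-cpu deltas are flushed, so later cheap reads
start from the freshly computed total instead of a value that may be
stale by up to the number of CPUs times the batch size.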
Signed-off-by: Mingming Cao <cmm@...ibm.com>
---
fs/ext4/balloc.c | 2 +-
include/linux/percpu_counter.h | 12 +++++++++---
lib/percpu_counter.c | 7 ++++++-
3 files changed, 16 insertions(+), 5 deletions(-)
Index: linux-2.6.26-rc4/include/linux/percpu_counter.h
===================================================================
--- linux-2.6.26-rc4.orig/include/linux/percpu_counter.h 2008-06-01 15:33:09.000000000 -0700
+++ linux-2.6.26-rc4/include/linux/percpu_counter.h 2008-06-01 15:33:14.000000000 -0700
@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percp
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(st
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
- s64 ret = __percpu_counter_sum(fbc);
+ s64 ret = __percpu_counter_sum(fbc, 0);
return ret < 0 ? 0 : ret;
}
+static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc, 1);
+}
+
+
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
- return __percpu_counter_sum(fbc);
+ return __percpu_counter_sum(fbc, 0);
}
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
Index: linux-2.6.26-rc4/lib/percpu_counter.c
===================================================================
--- linux-2.6.26-rc4.orig/lib/percpu_counter.c 2008-06-01 15:33:09.000000000 -0700
+++ linux-2.6.26-rc4/lib/percpu_counter.c 2008-06-01 15:33:14.000000000 -0700
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
{
s64 ret;
int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_c
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
+ if (set)
+ *pcount = 0;
}
+ if (set)
+ fbc->count = ret;
+
spin_unlock(&fbc->lock);
return ret;
}
Index: linux-2.6.26-rc4/fs/ext4/balloc.c
===================================================================
--- linux-2.6.26-rc4.orig/fs/ext4/balloc.c 2008-06-01 15:33:36.000000000 -0700
+++ linux-2.6.26-rc4/fs/ext4/balloc.c 2008-06-01 15:34:28.000000000 -0700
@@ -1626,7 +1626,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct
if (free_blocks - root_blocks < FBC_BATCH)
free_blocks =
- percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
+ percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
if (free_blocks - root_blocks < nblocks )
return free_blocks -root_blocks;
return nblocks;
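
(Again for illustration: the caller-side pattern from the balloc.c
hunk above, expressed with the hypothetical toy counter sketched
earlier. BATCH is a stand-in for FBC_BATCH; this is not the kernel
code.)

/* Trust the cheap read unless it lands within the per-cpu error
 * margin of the root-reserved threshold; only then pay for the
 * exact sum -- and flush it, so the next cheap read is accurate. */
#define BATCH 32	/* stand-in for FBC_BATCH */

long long toy_free_blocks(struct toy_counter *tc, long long root_blocks)
{
	long long free_blocks = toy_read(tc);

	if (free_blocks < 0)
		free_blocks = 0;
	if (free_blocks - root_blocks < BATCH)
		free_blocks = toy_sum(tc, 1);	/* sum and set */
	return free_blocks;
}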