Message-ID: <12716980254063@kroah.org>
Date: Mon, 19 Apr 2010 10:27:05 -0700
From: <gregkh@...e.de>
To: cmm@...ibm.com, akpm@...ux-foundation.org, a.p.zijlstra@...llo.nl,
dev@...sonking.com, linux-ext4@...r.kernel.org, tytso@....edu
Cc: <stable@...nel.org>, <stable-commits@...r.kernel.org>
Subject: patch percpu-counter-clean-up-percpu_counter_sum_and_set.patch added to 2.6.27-stable tree
This is a note to let you know that we have just queued up the patch titled

    Subject: percpu counter: clean up percpu_counter_sum_and_set()

to the 2.6.27-stable tree. Its filename is

    percpu-counter-clean-up-percpu_counter_sum_and_set.patch

A git repo of this tree can be found at
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary
From tytso@....edu Mon Apr 19 10:20:04 2010
From: Mingming Cao <cmm@...ibm.com>
Date: Mon, 15 Mar 2010 20:25:56 -0400
Subject: percpu counter: clean up percpu_counter_sum_and_set()
To: stable@...nel.org
Cc: "Theodore Ts'o" <tytso@....edu>, Andrew Morton <akpm@...ux-foundation.org>, Ext4 Developers List <linux-ext4@...r.kernel.org>, Mingming Cao <cmm@...ibm.com>, "Jayson R. King" <dev@...sonking.com>
Message-ID: <1268699165-17461-3-git-send-email-tytso@....edu>
From: Mingming Cao <cmm@...ibm.com>
commit 1f7c14c62ce63805f9574664a6c6de3633d4a354 upstream.
percpu_counter_sum_and_set() and percpu_counter_sum() are the same, except
that the former updates the global counter after accounting. Since we are
already taking fbc->lock to calculate the precise value of the counter in
percpu_counter_sum(), it should simply set fbc->count too, as
percpu_counter_sum_and_set() does.
This patch merges these two interfaces into one.
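
[Editor's illustration, not part of the patch: the sketch below models the merged behaviour in plain userspace C. Asking for the exact sum always folds the per-CPU deltas back into the global count, which is what percpu_counter_sum_and_set() used to do only on request. The NCPUS constant, the delta array and the pthread mutex are stand-ins for the kernel's per-cpu counters and fbc->lock, not real kernel APIs.]

/* Simplified userspace model of the merged __percpu_counter_sum(). */
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4   /* stand-in for the number of online CPUs */

struct percpu_counter_model {
	pthread_mutex_t lock;      /* stands in for fbc->lock */
	long long count;           /* stands in for fbc->count */
	int counters[NCPUS];       /* stands in for the per-cpu s32 counters */
};

/* Exact sum: add up every per-cpu delta, zero it, and publish the
 * result as the new global count -- the single behaviour left after
 * the two interfaces were merged. */
static long long model_counter_sum(struct percpu_counter_model *fbc)
{
	long long ret;
	int cpu;

	pthread_mutex_lock(&fbc->lock);
	ret = fbc->count;
	for (cpu = 0; cpu < NCPUS; cpu++) {
		ret += fbc->counters[cpu];
		fbc->counters[cpu] = 0;
	}
	fbc->count = ret;
	pthread_mutex_unlock(&fbc->lock);
	return ret;
}

int main(void)
{
	struct percpu_counter_model c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.count = 100,
		.counters = { 3, -1, 7, 0 },
	};

	printf("exact sum:    %lld\n", model_counter_sum(&c)); /* prints 109 */
	printf("global count: %lld\n", c.count);                /* also 109 now */
	return 0;
}

[The exact sum remains the slow path; callers that can tolerate an approximate value keep using percpu_counter_read(), and ext4 only falls back to the precise sum when free_blocks is within FBC_BATCH of the root reservation, as the fs/ext4/balloc.c hunk below shows.]
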
Signed-off-by: Mingming Cao <cmm@...ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc: <linux-ext4@...r.kernel.org>
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@....edu>
Signed-off-by: Jayson R. King <dev@...sonking.com>
Signed-off-by: Theodore Ts'o <tytso@....edu>
---
 fs/ext4/balloc.c               |    2 +-
 include/linux/percpu_counter.h |   12 +++---------
 lib/percpu_counter.c           |    8 +++-----
 3 files changed, 7 insertions(+), 15 deletions(-)
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -1778,7 +1778,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct
 #ifdef CONFIG_SMP
 	if (free_blocks - root_blocks < FBC_BATCH)
 		free_blocks =
-			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
+			percpu_counter_sum(&sbi->s_freeblocks_counter);
 #endif
 	if (free_blocks <= root_blocks)
 		/* we don't have free space */
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percp
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
{
@@ -44,19 +44,13 @@ static inline void percpu_counter_add(st
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc, 0);
+	s64 ret = __percpu_counter_sum(fbc);
 	return ret < 0 ? 0 : ret;
 }
 
-static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum(fbc, 1);
-}
-
-
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc, 0);
+	return __percpu_counter_sum(fbc);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_c
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
+		*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
+	fbc->count = ret;
 	spin_unlock(&fbc->lock);
 	return ret;
 }
Patches currently in stable-queue which might be from cmm@...ibm.com are
queue-2.6.27/ext4-retry-block-allocation-if-we-have-free-blocks-left.patch
queue-2.6.27/percpu-counter-clean-up-percpu_counter_sum_and_set.patch
queue-2.6.27/ext4-retry-block-reservation.patch
queue-2.6.27/ext4-add-percpu-dirty-block-accounting.patch