revert "percpu_counter: new function percpu_counter_sum_and_set"
Revert

    commit e8ced39
    Author: Mingming Cao <cmm@us.ibm.com>
    Date:   Fri Jul 11 19:27:31 2008 -0400

        percpu_counter: new function percpu_counter_sum_and_set

As described in

	revert "percpu counter: clean up percpu_counter_sum_and_set()"

the new percpu_counter_sum_and_set() is racy against updates to the
cpu-local accumulators on other CPUs.  Revert that change.

This means that ext4 will be slow again.  But correct.
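
For reference, one losing interleaving looks roughly like this (an
illustrative sketch, assuming the lockless fast path that
__percpu_counter_add() takes while the per-cpu delta stays under the
batch threshold; variable names follow lib/percpu_counter.c):

	/* CPU 0: __percpu_counter_add() fast path, fbc->lock not taken */
	s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
	s64 count = *pcount + amount;	/* reads the old per-cpu value */

	/* CPU 1: percpu_counter_sum_and_set(), holding fbc->lock */
	ret += *pcount;			/* folds the old value into ret... */
	*pcount = 0;			/* ...and zeroes the accumulator */
	fbc->count = ret;

	/* CPU 0: resumes; its store overwrites CPU 1's zero */
	*pcount = count;	/* the folded-in value is counted twice */

Taking fbc->lock in the summing path does not close the window,
because the fast path above never takes it.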

Reported-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mingming Cao <cmm@us.ibm.com>
Cc: <linux-ext4@vger.kernel.org>
Cc: <stable@kernel.org>		[2.6.27.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
akpm00 authored and torvalds committed Dec 10, 2008
1 parent 71c5576 commit 02d2116
Showing 3 changed files with 6 additions and 17 deletions.
4 changes: 2 additions & 2 deletions fs/ext4/balloc.c
@@ -609,8 +609,8 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
 
 	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
 						EXT4_FREEBLOCKS_WATERMARK) {
-		free_blocks = percpu_counter_sum_and_set(fbc);
-		dirty_blocks = percpu_counter_sum_and_set(dbc);
+		free_blocks = percpu_counter_sum_positive(fbc);
+		dirty_blocks = percpu_counter_sum_positive(dbc);
 		if (dirty_blocks < 0) {
 			printk(KERN_CRIT "Dirty block accounting "
 					"went wrong %lld\n",
12 changes: 3 additions & 9 deletions include/linux/percpu_counter.h
@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,19 +44,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc, 0);
+	s64 ret = __percpu_counter_sum(fbc);
 	return ret < 0 ? 0 : ret;
 }
 
-static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum(fbc, 1);
-}
-
-
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc, 0);
+	return __percpu_counter_sum(fbc);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
7 changes: 1 addition & 6 deletions lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -62,12 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }
