#ifdef CONFIG_SMP
        if (free_blocks - root_blocks < FBC_BATCH)
                free_blocks =
-                       percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
+                       percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
 #endif
        if (free_blocks - root_blocks < nblocks)
                return free_blocks - root_blocks;
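
The hunk above is the whole point of the new helper: the cheap percpu_counter read that produced free_blocks earlier in this function can be stale by up to the batch slop per CPU, so ext4 only pays for an exact sum when the approximate answer lands within FBC_BATCH of the root reservation. Below is a minimal userspace sketch of that two-tier pattern; the struct, the NR_CPUS and FBC_BATCH values, and every toy_* name are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Illustrative stand-ins; NR_CPUS, FBC_BATCH and all names are assumptions. */
#define NR_CPUS   4
#define FBC_BATCH 32

/* Toy model: one approximate central count plus bounded per-cpu deltas. */
struct toy_counter {
	long long count;
	int deltas[NR_CPUS];	/* each stays within (-FBC_BATCH, FBC_BATCH) */
};

/* Cheap read: no locking, may be stale by up to FBC_BATCH per CPU. */
static long long toy_read(const struct toy_counter *c)
{
	return c->count;
}

/* Slow exact read: fold in every per-cpu delta. */
static long long toy_sum(const struct toy_counter *c)
{
	long long ret = c->count;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		ret += c->deltas[cpu];
	return ret;
}

/* Mirror of the check above: only pay for the exact sum when the cheap
 * answer is too close to the reserve to be trusted. */
static long long grantable(const struct toy_counter *free, long long root)
{
	long long fb = toy_read(free);
	if (fb - root < FBC_BATCH)
		fb = toy_sum(free);
	return fb - root;
}

int main(void)
{
	struct toy_counter free = { .count = 10, .deltas = { 31, 30, -5, 12 } };
	printf("approx=%lld exact=%lld grantable=%lld\n",
	       toy_read(&free), toy_sum(&free), grantable(&free, 0));
	return 0;
}

The cheap read reports 10 here while the exact total is 78; the threshold test is what catches that gap before an allocation is wrongly refused.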
 
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
        __percpu_counter_add(fbc, amount, FBC_BATCH);
 }

 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-       s64 ret = __percpu_counter_sum(fbc);
+       s64 ret = __percpu_counter_sum(fbc, 0);
        return ret < 0 ? 0 : ret;
 }
 
+static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
+{
+       return __percpu_counter_sum(fbc, 1);
+}
+
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-       return __percpu_counter_sum(fbc);
+       return __percpu_counter_sum(fbc, 0);
 }
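
One behavioral difference is visible right in these wrappers: percpu_counter_sum_positive() clamps a negative total to zero, while the new percpu_counter_sum_and_set() returns the raw sum. A counter can go transiently negative when decrements race ahead of their matching increments, so a caller that switches variants, as the ext4 hunk above does, inherits the possibility of a negative return. A small standalone re-enactment, with illustrative toy_* names in place of the real API:

#include <assert.h>

#define NR_CPUS 2

struct toy_counter {
	long long count;
	int deltas[NR_CPUS];
};

static long long toy_sum(const struct toy_counter *c)
{
	long long ret = c->count;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		ret += c->deltas[cpu];
	return ret;
}

/* Mirrors percpu_counter_sum_positive(): negative totals are clamped. */
static long long toy_sum_positive(const struct toy_counter *c)
{
	long long ret = toy_sum(c);
	return ret < 0 ? 0 : ret;
}

int main(void)
{
	/* Decrements raced ahead of the matching increments: total is -3. */
	struct toy_counter c = { .count = 1, .deltas = { -5, 1 } };

	assert(toy_sum_positive(&c) == 0);	/* clamped */
	assert(toy_sum(&c) == -3);		/* raw, as sum_and_set would return */
	return 0;
}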
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
 {
        return fbc->count;
 }

 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
- * but much slower version of percpu_counter_read_positive()
+ * but much slower version of percpu_counter_read_positive().
+ * If @set is non-zero, also fold each per-cpu delta into fbc->count and
+ * zero it, so that subsequent fast reads return this exact sum.
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 {
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
+               if (set)
+                       *pcount = 0;
        }
+       if (set)
+               fbc->count = ret;
+
        spin_unlock(&fbc->lock);
        return ret;
 }
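
To make the set semantics concrete, here is a standalone re-enactment of the function above with toy types and no locking (the real version holds fbc->lock across the walk because it writes both the per-cpu slots and fbc->count). A plain sum leaves all state untouched, while sum-and-set zeroes every per-cpu delta and publishes the exact total, so subsequent cheap reads of the central count stay exact until new per-cpu traffic accumulates. Names are illustrative, not the kernel API.

#include <assert.h>

#define NR_CPUS 4

struct toy_counter {
	long long count;
	int deltas[NR_CPUS];
};

/* Mirrors __percpu_counter_sum(fbc, set): start from the central count,
 * fold in each per-cpu delta, and, when set is non-zero, zero the deltas
 * and publish the exact total as the new central count. */
static long long toy_sum(struct toy_counter *c, int set)
{
	long long ret = c->count;
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		ret += c->deltas[cpu];
		if (set)
			c->deltas[cpu] = 0;
	}
	if (set)
		c->count = ret;
	return ret;
}

int main(void)
{
	struct toy_counter c = { .count = 100, .deltas = { 7, -3, 0, 9 } };

	assert(toy_sum(&c, 0) == 113);	/* exact, read-only */
	assert(c.count == 100);		/* central count untouched */

	assert(toy_sum(&c, 1) == 113);	/* exact, and folds */
	assert(c.count == 113);		/* cheap reads are now exact... */
	assert(toy_sum(&c, 0) == 113);	/* ...and the per-cpu slop is gone */
	return 0;
}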