Add a WARN() macro; this is WARN_ON() + printk arguments
index 91e194445a5eae53ee3ae97e3f616cdcb7d442d0..dd89234ee51fa230f893f6f55d9039dd01693955 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,9 +34,9 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
-static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
-static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
+static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -493,7 +493,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
  */
 #define ACCT_THRESHOLD max(16, NR_CPUS * 2)
 
-static DEFINE_PER_CPU(long, committed_space) = 0;
+static DEFINE_PER_CPU(long, committed_space);
 
 void vm_acct_memory(long pages)
 {
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages)
        local = &__get_cpu_var(committed_space);
        *local += pages;
        if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
-               atomic_add(*local, &vm_committed_space);
+               atomic_long_add(*local, &vm_committed_space);
                *local = 0;
        }
        preempt_enable();
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 
        committed = &per_cpu(committed_space, (long)hcpu);
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               atomic_add(*committed, &vm_committed_space);
+               atomic_long_add(*committed, &vm_committed_space);
                *committed = 0;
                drain_cpu_pagevecs((long)hcpu);
        }
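
The two kinds of change in this diff are independent. The first drops the explicit zero initializers from the DEFINE_PER_CPU and static variables; data with static storage duration is zero-initialized anyway, so "= { 0, }" and "= 0" are redundant. The second switches the flush of the per-CPU committed_space delta into vm_committed_space from atomic_add() to atomic_long_add(), which follows from vm_committed_space becoming a long-based atomic. As a rough userspace analogue of the batching pattern in vm_acct_memory() (a thread-local delta folded into a shared atomic only once it passes a threshold), a minimal C11 sketch might look like the following; the names total_committed, local_committed and FLUSH_THRESHOLD are illustrative stand-ins, not kernel identifiers:

#include <stdatomic.h>
#include <stdio.h>

/* Shared counter, standing in for vm_committed_space. */
static atomic_long total_committed;

/* Per-thread batch, standing in for the per-CPU committed_space.
 * Like static and per-CPU data in the kernel, it starts at zero
 * without an explicit "= 0" initializer. */
static _Thread_local long local_committed;

/* Stand-in for ACCT_THRESHOLD. */
#define FLUSH_THRESHOLD 32

/* Mirrors the vm_acct_memory() idea: accumulate locally and touch
 * the shared atomic only when the local delta grows large in either
 * direction, so most calls stay thread-local. */
static void acct_memory(long pages)
{
	local_committed += pages;
	if (local_committed > FLUSH_THRESHOLD ||
	    local_committed < -FLUSH_THRESHOLD) {
		atomic_fetch_add(&total_committed, local_committed);
		local_committed = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		acct_memory(1);

	/* Flush the remainder, much as the CPU-hotplug callback folds a
	 * dead CPU's leftover delta back into the global counter. */
	atomic_fetch_add(&total_committed, local_committed);
	local_committed = 0;

	printf("committed: %ld\n", atomic_load(&total_committed));
	return 0;
}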