        struct kmem_list3 *l3 = NULL;
        int node = cpu_to_node(cpu);
        int memsize = sizeof(struct kmem_list3);
-       struct array_cache *nc = NULL;
 
        switch (action) {
        case CPU_UP_PREPARE:
                /* Now we can go ahead with allocating the shared array's
                   & array cache's */
                list_for_each_entry(cachep, &cache_chain, next) {
+                       struct array_cache *nc;
+
                        nc = alloc_arraycache(node, cachep->limit,
                                              cachep->batchcount);
                        if (!nc)
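The hunk above, from the CPU hotplug callback in mm/slab.c, moves the
declaration of nc from function scope into the list_for_each_entry()
loop, so every cache on the chain gets its own freshly allocated
array_cache and the pointer can no longer carry a stale value across
iterations. The excerpt breaks off at the allocation-failure check; as
a rough sketch, the loop body reads along these lines once the patch is
applied (only the identifiers visible in the hunk are taken from the
source; the cleanup label is assumed):

        list_for_each_entry(cachep, &cache_chain, next) {
                /* a fresh array_cache per cache, scoped to one pass */
                struct array_cache *nc;

                nc = alloc_arraycache(node, cachep->limit,
                                      cachep->batchcount);
                if (!nc)
                        goto bad;       /* assumed cleanup label */
                /* ... install nc as this CPU's array cache ... */
        }

The next hunk is a separate example, evidently from sys_swapon() in
mm/swapfile.c; it picks up mid-function with the tail of an earlier
error check: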
 
                        goto bad_swap;
                if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
                        goto bad_swap;
-               
+
                /* OK, set up the swap map and apply the bad block list */
                if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
                        error = -ENOMEM;
                        goto bad_swap;
                }

                error = 0;
                memset(p->swap_map, 0, maxpages * sizeof(short));
-               for (i=0; i<swap_header->info.nr_badpages; i++) {
-                       int page = swap_header->info.badpages[i];
-                       if (page <= 0 || page >= swap_header->info.last_page)
+               for (i = 0; i < swap_header->info.nr_badpages; i++) {
+                       int page_nr = swap_header->info.badpages[i];
+                       if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
                                error = -EINVAL;
                        else
-                               p->swap_map[page] = SWAP_MAP_BAD;
+                               p->swap_map[page_nr] = SWAP_MAP_BAD;
                }
                nr_good_pages = swap_header->info.last_page -
                                swap_header->info.nr_badpages -
                                1 /* header page */;
-               if (error) 
+               if (error)
                        goto bad_swap;
        }
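This hunk makes two small cleanups. First, it renames the loop-local
index from page to page_nr: in mm/ code a variable named page
conventionally holds a struct page *, while this one is a plain integer
offset into the swap map, so the new name removes a standing source of
confusion. Second, it fixes whitespace, adding the conventional spaces
around the for-loop operators and stripping trailing blanks from two
lines (the '-'/'+' pairs at the blank line above the /* OK, ... */
comment and at the if (error) test differ only in invisible trailing
whitespace). For readability, here is the bad-block loop as it reads
after the patch; this is simply the hunk's post-image reassembled, with
the surrounding declarations (i, p, swap_header, error) assumed from
context:

        /* Mark every page listed in the swap header's bad-block table. */
        for (i = 0; i < swap_header->info.nr_badpages; i++) {
                int page_nr = swap_header->info.badpages[i];

                if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
                        error = -EINVAL;        /* index outside the area */
                else
                        p->swap_map[page_nr] = SWAP_MAP_BAD;
        }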