static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
 static void enable_cpucache (kmem_cache_t *cachep);
 static void cache_reap (void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
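
The node argument threads through every caller below: free_block() now works
on the lists of the node the caller names instead of deriving a node per
object. The caller contract is the same at every call site (a minimal sketch
assembled from the hunks that follow, with names as they appear there;
illustrative only, not part of the patch):

        int node = numa_node_id();
        struct kmem_list3 *l3 = cachep->nodelists[node];

        spin_lock(&l3->list_lock);
        free_block(cachep, ac->entry, ac->avail, node); /* lock for 'node' held */
        ac->avail = 0;
        spin_unlock(&l3->list_lock);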
 
        if (ac->avail) {
                spin_lock(&rl3->list_lock);
-               free_block(cachep, ac->entry, ac->avail);
+               free_block(cachep, ac->entry, ac->avail, node);
                ac->avail = 0;
                spin_unlock(&rl3->list_lock);
        }
                        /* Free limit for this kmem_list3 */
                        l3->free_limit -= cachep->batchcount;
                        if (nc)
-                               free_block(cachep, nc->entry, nc->avail);
+                               free_block(cachep, nc->entry, nc->avail, node);
 
                        if (!cpus_empty(mask)) {
                                 spin_unlock(&l3->list_lock);
 
                        if (l3->shared) {
                                free_block(cachep, l3->shared->entry,
-                                               l3->shared->avail);
+                                               l3->shared->avail, node);
                                kfree(l3->shared);
                                l3->shared = NULL;
                        }
 {
        kmem_cache_t *cachep = (kmem_cache_t*)arg;
        struct array_cache *ac;
+       int node = numa_node_id();
 
        check_irq_off();
        ac = ac_data(cachep);
-       spin_lock(&cachep->nodelists[numa_node_id()]->list_lock);
-       free_block(cachep, ac->entry, ac->avail);
-       spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock);
+       spin_lock(&cachep->nodelists[node]->list_lock);
+       free_block(cachep, ac->entry, ac->avail, node);
+       spin_unlock(&cachep->nodelists[node]->list_lock);
        ac->avail = 0;
 }
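
Note the shape of do_drain() above: numa_node_id() is read once into a local,
so the nodelist that is locked, the node passed to free_block(), and the
nodelist that is unlocked are guaranteed to be the same one. check_irq_off()
documents that interrupts are off here, so the executing CPU, and with it the
node, cannot change between those steps.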
 
 /*
  * Caller needs to acquire the correct kmem_list3's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
 {
        int i;
        struct kmem_list3 *l3;
                void *objp = objpp[i];
                struct slab *slabp;
                unsigned int objnr;
-               int nodeid = 0;
 
                slabp = GET_PAGE_SLAB(virt_to_page(objp));
-               nodeid = slabp->nodeid;
-               l3 = cachep->nodelists[nodeid];
+               l3 = cachep->nodelists[node];
                list_del(&slabp->list);
                objnr = (objp - slabp->s_mem) / cachep->objsize;
-               check_spinlock_acquired_node(cachep, nodeid);
+               check_spinlock_acquired_node(cachep, node);
                check_slabp(cachep, slabp);
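
This is the other half of the contract: free_block() no longer reads
slabp->nodeid per object, so all nr_objects entries in a single call are taken
to live on the node argument, and check_spinlock_acquired_node() asserts in
DEBUG builds that the matching list_lock is held. A caller whose objects may
span several nodes has to split them by node first; the hunk below that frees
a single off-node object shows that case.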
 
 
 {
        int batchcount;
        struct kmem_list3 *l3;
+       int node = numa_node_id();
 
        batchcount = ac->batchcount;
 #if DEBUG
        BUG_ON(!batchcount || batchcount > ac->avail);
 #endif
        check_irq_off();
-       l3 = cachep->nodelists[numa_node_id()];
+       l3 = cachep->nodelists[node];
        spin_lock(&l3->list_lock);
        if (l3->shared) {
                struct array_cache *shared_array = l3->shared;
                }
        }
 
-       free_block(cachep, ac->entry, batchcount);
+       free_block(cachep, ac->entry, batchcount, node);
 free_done:
 #if STATS
        {
                        } else {
                                spin_lock(&(cachep->nodelists[nodeid])->
                                                list_lock);
-                               free_block(cachep, &objp, 1);
+                               free_block(cachep, &objp, 1, nodeid);
                                spin_unlock(&(cachep->nodelists[nodeid])->
                                                list_lock);
                        }
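
Read on its own, the off-node branch above follows this pattern (a sketch,
not the patch itself; objp as in the hunk, with the owning node taken from
the slab descriptor the way the removed free_block() lines used to):

        struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp));
        int nodeid = slabp->nodeid;     /* node the slab page lives on */

        spin_lock(&cachep->nodelists[nodeid]->list_lock);
        free_block(cachep, &objp, 1, nodeid);   /* free to the owning node */
        spin_unlock(&cachep->nodelists[nodeid]->list_lock);
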
        unsigned long save_flags;
        void *ptr;
 
-       if (nodeid == numa_node_id() || nodeid == -1)
+       if (nodeid == -1)
                return __cache_alloc(cachep, flags);
 
        if (unlikely(!cachep->nodelists[nodeid])) {
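
The fast-path test is narrowed to nodeid == -1. An illustrative consequence
(hypothetical calls, not in the patch):

        objp = kmem_cache_alloc_node(cachep, GFP_KERNEL, -1);
                        /* "any node": per-CPU fast path, __cache_alloc() */
        objp = kmem_cache_alloc_node(cachep, GFP_KERNEL, numa_node_id());
                        /* explicit node, even the local one: per-node lists */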
 
                        if ((nc = cachep->nodelists[node]->shared))
                                free_block(cachep, nc->entry,
-                                                       nc->avail);
+                                                       nc->avail, node);
 
                        l3->shared = new;
                        if (!cachep->nodelists[node]->alien) {
                if (!ccold)
                        continue;
                spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-               free_block(cachep, ccold->entry, ccold->avail);
+               free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
                spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
                kfree(ccold);
        }
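
Each retired per-CPU array is drained back to the node its CPU sits on:
cpu_to_node(i) names both the nodelist whose lock is taken and the node
handed to free_block(), so the freed objects land on the lists of their
home node.
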
                if (tofree > ac->avail) {
                        tofree = (ac->avail+1)/2;
                }
-               free_block(cachep, ac->entry, tofree);
+               free_block(cachep, ac->entry, tofree, node);
                ac->avail -= tofree;
                memmove(ac->entry, &(ac->entry[tofree]),
                                        sizeof(void*)*ac->avail);
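
A worked example of the partial drain above: with ac->avail == 7 and a larger
initial tofree, tofree is clamped to (7+1)/2 == 4; the first four pointers are
freed to node's lists, avail drops to 3, and the memmove() slides the three
survivors to the front of the entry array.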