#ifndef __NETNS_CONNTRACK_H
 #define __NETNS_CONNTRACK_H
 
+#include <linux/list.h>
 #include <asm/atomic.h>
 
struct netns_ct {
        unsigned int            expect_count;
        struct hlist_head       *hash;
        struct hlist_head       *expect_hash;
+       /* Conntracks allocated but not yet confirmed (not yet in the
+        * main hash).  Entries are linked through the ORIGINAL-direction
+        * tuplehash node, which is unused until confirmation — see the
+        * "Overload tuple linked list" comment at the insertion site. */
+       struct hlist_head       unconfirmed;
        int                     hash_vmalloc;
        int                     expect_vmalloc;
 };
 
 EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
 
 unsigned int nf_ct_log_invalid __read_mostly;
-HLIST_HEAD(unconfirmed);
 static struct kmem_cache *nf_conntrack_cachep __read_mostly;
 
 DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
        }
 
        /* Overload tuple linked list to put us in unconfirmed list. */
-       hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &unconfirmed);
+       hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+                      &net->ct.unconfirmed);
 
        spin_unlock_bh(&nf_conntrack_lock);
 
                                goto found;
                }
        }
-       hlist_for_each_entry(h, n, &unconfirmed, hnode) {
+       hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (iter(ct, data))
                        set_bit(IPS_DYING_BIT, &ct->status);
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_out;
        }
+       INIT_HLIST_HEAD(&net->ct.unconfirmed);
 
        nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
 
        }
 
        /* Get rid of expecteds, set helpers to NULL. */
-       hlist_for_each_entry(h, n, &unconfirmed, hnode)
+       hlist_for_each_entry(h, n, &init_net.ct.unconfirmed, hnode)
                unhelp(h, me);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                hlist_for_each_entry(h, n, &init_net.ct.hash[i], hnode)