struct mfc_cache 
 {
        struct mfc_cache *next;                 /* Next entry on cache line     */
+#ifdef CONFIG_NET_NS
+       struct net *mfc_net;
+#endif
        __be32 mfc_mcastgrp;                    /* Group the entry belongs to   */
        __be32 mfc_origin;                      /* Source of packet             */
        vifi_t mfc_parent;                      /* Source interface             */
        } mfc_un;
 };
 
+/*
+ * mfc_net - return the network namespace owning this multicast cache entry.
+ *
+ * NOTE(review): the mfc_net field is only declared under CONFIG_NET_NS;
+ * this presumably compiles for !CONFIG_NET_NS because read_pnet() is a
+ * macro there that never evaluates its argument -- confirm against
+ * include/net/net_namespace.h.
+ */
+static inline
+struct net *mfc_net(const struct mfc_cache *mfc)
+{
+       return read_pnet(&mfc->mfc_net);
+}
+
+/*
+ * mfc_net_set - record @net as the owning namespace of @mfc.
+ *
+ * Takes a reference on the namespace via hold_net(); the matching
+ * release_net() must be issued when the entry is freed.
+ */
+static inline
+void mfc_net_set(struct mfc_cache *mfc, struct net *net)
+{
+       write_pnet(&mfc->mfc_net, hold_net(net));
+}
+
 #define MFC_STATIC             1
 #define MFC_NOTIFY             2
 
 
        return 0;
 }
 
+/*
+ * ipmr_cache_free - free a multicast cache entry.
+ *
+ * Drops the namespace reference taken by mfc_net_set() and returns the
+ * entry to the mrt_cachep slab cache. All free paths must go through
+ * this helper (not bare kmem_cache_free) or the net refcount leaks.
+ */
+static inline void ipmr_cache_free(struct mfc_cache *c)
+{
+       release_net(mfc_net(c));
+       kmem_cache_free(mrt_cachep, c);
+}
+
 /* Destroy an unresolved cache entry, killing queued skbs
    and reporting error to netlink readers.
  */
                        kfree_skb(skb);
        }
 
-       kmem_cache_free(mrt_cachep, c);
+       ipmr_cache_free(c);
 }
 
 
 /*
  *     Allocate a multicast cache entry
  */
-static struct mfc_cache *ipmr_cache_alloc(void)
+static struct mfc_cache *ipmr_cache_alloc(struct net *net)
 {
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if (c == NULL)
                return NULL;
        c->mfc_un.res.minvif = MAXVIFS;
+       /* Bind the new entry to @net (takes a namespace reference;
+        * dropped in ipmr_cache_free()). */
+       mfc_net_set(c, net);
        return c;
 }
 
-static struct mfc_cache *ipmr_cache_alloc_unres(void)
+static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
 {
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
        if (c == NULL)
                return NULL;
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;
+       /* Bind the new entry to @net (takes a namespace reference;
+        * dropped in ipmr_cache_free()). */
+       mfc_net_set(c, net);
        return c;
 }
 
                 */
 
                if (atomic_read(&cache_resolve_queue_len) >= 10 ||
-                   (c=ipmr_cache_alloc_unres())==NULL) {
+                   (c = ipmr_cache_alloc_unres(&init_net)) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);
 
                        kfree_skb(skb);
                         */
                        spin_unlock_bh(&mfc_unres_lock);
 
-                       kmem_cache_free(mrt_cachep, c);
+                       ipmr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }
                        *cp = c->next;
                        write_unlock_bh(&mrt_lock);
 
-                       kmem_cache_free(mrt_cachep, c);
+                       ipmr_cache_free(c);
                        return 0;
                }
        }
        if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;
 
-       c = ipmr_cache_alloc();
+       c = ipmr_cache_alloc(&init_net);
        if (c == NULL)
                return -ENOMEM;
 
 
        if (uc) {
                ipmr_cache_resolve(uc, c);
-               kmem_cache_free(mrt_cachep, uc);
+               ipmr_cache_free(uc);
        }
        return 0;
 }
                        *cp = c->next;
                        write_unlock_bh(&mrt_lock);
 
-                       kmem_cache_free(mrt_cachep, c);
+                       ipmr_cache_free(c);
                }
        }