diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 6bd421df2dbc7a4c6ade02875d88a97597583606..a5f5e2e65d13770bf62a419e94962986f8c21a56 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
 ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
 EXPORT_SYMBOL_GPL(nf_conntrack_chain);
 
-ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
-EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain);
-
-DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
-EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
+ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain);
+EXPORT_SYMBOL_GPL(nf_ct_expect_chain);
 
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
@@ -51,10 +48,11 @@ __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
  * by code prior to async packet handling for freeing the skb */
 void nf_ct_deliver_cached_events(const struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *ecache;
 
        local_bh_disable();
-       ecache = &__get_cpu_var(nf_conntrack_ecache);
+       ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
        if (ecache->ct == ct)
                __nf_ct_deliver_cached_events(ecache);
        local_bh_enable();
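
[Note, not part of the patch: a minimal sketch of the caller-side pattern this helper supports, i.e. flushing the per-CPU cache before an skb is handed off asynchronously. The wrapper function and the nf_conntrack_event_cache() call are illustrative assumptions based on the comment above, not code from this file.]

	/* Illustrative sketch only -- not part of this patch. */
	static void example_pre_handoff(struct nf_conn *ct, const struct sk_buff *skb)
	{
		/* Queue an event in this CPU's cache (helper assumed from
		 * include/net/netfilter/nf_conntrack_ecache.h). */
		nf_conntrack_event_cache(IPCT_PROTOINFO, skb);

		/* Deliver anything cached for this conntrack before the skb
		 * is passed to asynchronous processing, as required above. */
		nf_ct_deliver_cached_events(ct);
	}
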
@@ -64,10 +62,11 @@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 /* Deliver cached events for old pending events, if current conntrack != old */
 void __nf_ct_event_cache_init(struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *ecache;
 
        /* take care of delivering potentially old events */
-       ecache = &__get_cpu_var(nf_conntrack_ecache);
+       ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
        BUG_ON(ecache->ct == ct);
        if (ecache->ct)
                __nf_ct_deliver_cached_events(ecache);
@@ -79,18 +78,31 @@ EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
 
 /* flush the event cache - touches other CPU's data and must not be called
  * while packets are still passing through the code */
-void nf_ct_event_cache_flush(void)
+void nf_ct_event_cache_flush(struct net *net)
 {
        struct nf_conntrack_ecache *ecache;
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               ecache = &per_cpu(nf_conntrack_ecache, cpu);
+               ecache = per_cpu_ptr(net->ct.ecache, cpu);
                if (ecache->ct)
                        nf_ct_put(ecache->ct);
        }
 }
 
+int nf_conntrack_ecache_init(struct net *net)
+{
+       net->ct.ecache = alloc_percpu(struct nf_conntrack_ecache);
+       if (!net->ct.ecache)
+               return -ENOMEM;
+       return 0;
+}
+
+void nf_conntrack_ecache_fini(struct net *net)
+{
+       free_percpu(net->ct.ecache);
+}
+
 int nf_conntrack_register_notifier(struct notifier_block *nb)
 {
        return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
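
[Note, not part of the patch: a sketch of how the new per-netns init/fini hooks would typically be wired into namespace setup and teardown. The example_* function names and call sites are assumptions; only nf_conntrack_ecache_init/fini and nf_ct_event_cache_flush come from this file.]

	/* Illustrative sketch only -- hypothetical per-netns call sites. */
	static int example_conntrack_net_init(struct net *net)
	{
		int ret;

		ret = nf_conntrack_ecache_init(net);	/* allocate per-CPU event cache */
		if (ret < 0)
			return ret;
		/* ... other per-netns conntrack setup ... */
		return 0;
	}

	static void example_conntrack_net_exit(struct net *net)
	{
		nf_ct_event_cache_flush(net);		/* drop conntrack references held in the cache */
		nf_conntrack_ecache_fini(net);		/* free the per-CPU area */
	}
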
@@ -103,14 +115,14 @@ int nf_conntrack_unregister_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
-int nf_conntrack_expect_register_notifier(struct notifier_block *nb)
+int nf_ct_expect_register_notifier(struct notifier_block *nb)
 {
-       return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
+       return atomic_notifier_chain_register(&nf_ct_expect_chain, nb);
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_expect_register_notifier);
+EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
 
-int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
+int nf_ct_expect_unregister_notifier(struct notifier_block *nb)
 {
-       return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, nb);
+       return atomic_notifier_chain_unregister(&nf_ct_expect_chain, nb);
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_expect_unregister_notifier);
+EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
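
[Note, not part of the patch: a sketch of a consumer registering for expectation events through the renamed notifier API. The callback and notifier_block names are hypothetical; the atomic-notifier interface itself is the standard kernel one.]

	/* Illustrative sketch only -- hypothetical consumer of the renamed API. */
	static int example_exp_event(struct notifier_block *nb,
				     unsigned long events, void *ptr)
	{
		/* 'ptr' carries the expectation that triggered the event. */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_exp_nb = {
		.notifier_call	= example_exp_event,
	};

	/* Register with:   nf_ct_expect_register_notifier(&example_exp_nb);
	 * Unregister with: nf_ct_expect_unregister_notifier(&example_exp_nb); */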