}
 
 int nf_nat_module_is_loaded = 0;
+EXPORT_SYMBOL_GPL(nf_nat_module_is_loaded);
+
 static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple)
 {
        if (nf_nat_module_is_loaded)
 
 module_init(nf_conntrack_l3proto_ipv4_init);
 module_exit(nf_conntrack_l3proto_ipv4_fini);
-
-EXPORT_SYMBOL(nf_ct_ipv4_gather_frags);
-#ifdef CONFIG_NF_NAT_NEEDED
-EXPORT_SYMBOL(nf_nat_module_is_loaded);
-#endif
 
 #endif
 #endif
 };
-
-EXPORT_SYMBOL(nf_conntrack_l4proto_icmp);
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_icmp);
 
 #endif
 
 DEFINE_RWLOCK(nf_conntrack_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
 /* nf_conntrack_standalone needs this */
 atomic_t nf_conntrack_count = ATOMIC_INIT(0);
 EXPORT_SYMBOL_GPL(nf_conntrack_count);
 
-void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
+void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);
+EXPORT_SYMBOL_GPL(nf_conntrack_destroyed);
+
 unsigned int nf_conntrack_htable_size __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
+
 int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);
+
 struct list_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
 struct nf_conn nf_conntrack_untracked __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+
 unsigned int nf_ct_log_invalid __read_mostly;
 LIST_HEAD(unconfirmed);
 static int nf_conntrack_vmalloc __read_mostly;
        mutex_unlock(&nf_ct_cache_mutex);
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
 
 /* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
 void nf_conntrack_unregister_cache(u_int32_t features)
 
        mutex_unlock(&nf_ct_cache_mutex);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache);
 
 int
 nf_ct_get_tuple(const struct sk_buff *skb,
 
        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
 }
+EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
 
 int
 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
 }
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 
 static void
 clean_from_lists(struct nf_conn *ct)
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_find);
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
 
        return h;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        write_unlock_bh(&nf_conntrack_lock);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
 
 /* Confirm a connection given skb; places it in hash table */
 int
        write_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
 
 /* Returns true if a connection corresponds to the tuple (required
    for NAT). */
 
        return h != NULL;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
        l3proto = __nf_ct_l3proto_find(orig->src.l3num);
        return __nf_conntrack_alloc(orig, repl, l3proto, 0);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
 void nf_conntrack_free(struct nf_conn *conntrack)
 {
        kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
        atomic_dec(&nf_conntrack_count);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
 /* Allocate a new conntrack: we return -ENOMEM if classification
    failed due to stress.  Otherwise it really is unclassifiable. */
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_in);
 
 int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                         const struct nf_conntrack_tuple *orig)
                                  __nf_ct_l4proto_find(orig->src.l3num,
                                                     orig->dst.protonum));
 }
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
 
 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
    implicitly racy: see __nf_conntrack_confirm */
                help->helper = __nf_ct_helper_find(newreply);
        write_unlock_bh(&nf_conntrack_lock);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
 
 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
 void __nf_ct_refresh_acct(struct nf_conn *ct,
        if (event)
                nf_conntrack_event_cache(event, skb);
 }
+EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
 #if defined(CONFIG_NF_CT_NETLINK) || \
     defined(CONFIG_NF_CT_NETLINK_MODULE)
 nfattr_failure:
        return -1;
 }
+EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nfattr);
 
 static const size_t cta_min_proto[CTA_PROTO_MAX] = {
        [CTA_PROTO_SRC_PORT-1]  = sizeof(u_int16_t),
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nf_ct_port_nfattr_to_tuple);
 #endif
 
 /* Used by ipt_REJECT and ip6t_REJECT. */
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_attach);
 
 static inline int
 do_iter(const struct nf_conntrack_tuple_hash *i,
                nf_ct_put(ct);
        }
 }
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
 
 static int kill_all(struct nf_conn *i, void *data)
 {
 {
        nf_ct_iterate_cleanup(kill_all, NULL);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_flush);
 
 /* Mishearing the voices in his head, our hero wonders how he's
    supposed to kill the mall. */
 
 #include <net/netfilter/nf_conntrack_core.h>
 
 ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_chain);
+
 ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain);
 
 DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
+EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
 
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
                __nf_ct_deliver_cached_events(ecache);
        local_bh_enable();
 }
+EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
 /* Deliver cached events for old pending events, if current conntrack != old */
 void __nf_ct_event_cache_init(struct nf_conn *ct)
        ecache->ct = ct;
        nf_conntrack_get(&ct->ct_general);
 }
+EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
 
 /* flush the event cache - touches other CPU's data and must not be called
  * while packets are still passing through the code */
 
 #include <net/netfilter/nf_conntrack_tuple.h>
 
 LIST_HEAD(nf_conntrack_expect_list);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_list);
+
 kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
 static unsigned int nf_conntrack_expect_next_id;
 
        master_help->expecting--;
        nf_conntrack_expect_put(exp);
 }
+EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);
 
 static void expectation_timed_out(unsigned long ul_expect)
 {
        }
        return NULL;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_expect_find);
 
 /* Just find an expectation corresponding to a tuple. */
 struct nf_conntrack_expect *
 
        return i;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get);
 
 /* If an expectation for this connection is found, it gets deleted from
  * the global list and returned. */
                }
        }
 }
+EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
 
 /* Would two expected things clash? */
 static inline int expect_clash(const struct nf_conntrack_expect *a,
        }
        write_unlock_bh(&nf_conntrack_lock);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_unexpect_related);
 
 /* We don't increase the master conntrack refcount for non-fulfilled
  * conntracks. During the conntrack destruction, the expectations are
        atomic_set(&new->use, 1);
        return new;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_alloc);
 
 void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family,
                              union nf_conntrack_address *saddr,
        if (atomic_dec_and_test(&exp->use))
                kmem_cache_free(nf_conntrack_expect_cachep, exp);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_put);
 
 static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
 {
        write_unlock_bh(&nf_conntrack_lock);
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_related);
 
 #ifdef CONFIG_PROC_FS
 static void *exp_seq_start(struct seq_file *s, loff_t *pos)
 
 
        return helper;
 }
+EXPORT_SYMBOL_GPL(nf_ct_helper_find_get);
 
 void nf_ct_helper_put(struct nf_conntrack_helper *helper)
 {
        module_put(helper->me);
 }
+EXPORT_SYMBOL_GPL(nf_ct_helper_put);
 
 struct nf_conntrack_helper *
 __nf_conntrack_helper_find_byname(const char *name)
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname);
 
 static inline int unhelp(struct nf_conntrack_tuple_hash *i,
                         const struct nf_conntrack_helper *me)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
 
 void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 {
        /* Someone could be still looking at the helper in a bh. */
        synchronize_net();
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
 
 
 struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly;
 struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_l3protos);
 
 #ifdef CONFIG_SYSCTL
 static DEFINE_MUTEX(nf_ct_proto_sysctl_mutex);
 
        return nf_ct_protos[l3proto][l4proto];
 }
+EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
 
 /* this is guaranteed to always return a valid protocol helper, since
  * it falls back to generic_protocol */
 
        return p;
 }
+EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
 
 void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p)
 {
        module_put(p->me);
 }
+EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
 
 struct nf_conntrack_l3proto *
 nf_ct_l3proto_find_get(u_int16_t l3proto)
 
        return p;
 }
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get);
 
 void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
 {
        module_put(p->me);
 }
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_put);
 
 int
 nf_ct_l3proto_try_module_get(unsigned short l3proto)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_try_module_get);
 
 void nf_ct_l3proto_module_put(unsigned short l3proto)
 {
 
        module_put(p->me);
 }
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
 
 static int kill_l3proto(struct nf_conn *i, void *data)
 {
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
 
 int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
 {
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
 
 static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
 {
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
 
 int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
 {
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
 
                receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
                receiver->td_scale);
 }
- 
+EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
 #endif
 
 #define        TH_FIN  0x01
 #endif
 #endif
 };
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
 
 struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
 {
        .ctl_table              = tcp_sysctl_table,
 #endif
 };
-
-EXPORT_SYMBOL(nf_conntrack_l4proto_tcp4);
-EXPORT_SYMBOL(nf_conntrack_l4proto_tcp6);
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
 
 #endif
 #endif
 };
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
 struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
 {
        .ctl_table              = udp_sysctl_table,
 #endif
 };
-
-EXPORT_SYMBOL(nf_conntrack_l4proto_udp4);
-EXPORT_SYMBOL(nf_conntrack_l4proto_udp6);
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
 
 /* Sysctl support */
 
 int nf_conntrack_checksum __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_conntrack_checksum);
 
 #ifdef CONFIG_SYSCTL
 /* Log invalid packets of a given protocol */
        },
        { .ctl_name = 0 }
 };
-EXPORT_SYMBOL(nf_ct_log_invalid);
+EXPORT_SYMBOL_GPL(nf_ct_log_invalid);
 #endif /* CONFIG_SYSCTL */
 
 static int __init nf_conntrack_standalone_init(void)
 void need_conntrack(void)
 {
 }
-
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-EXPORT_SYMBOL_GPL(nf_conntrack_chain);
-EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain);
-EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
-EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
-EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
-EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
-EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
-#endif
-EXPORT_SYMBOL(nf_ct_l3proto_try_module_get);
-EXPORT_SYMBOL(nf_ct_l3proto_module_put);
-EXPORT_SYMBOL(nf_conntrack_l3proto_register);
-EXPORT_SYMBOL(nf_conntrack_l3proto_unregister);
-EXPORT_SYMBOL(nf_conntrack_l4proto_register);
-EXPORT_SYMBOL(nf_conntrack_l4proto_unregister);
-EXPORT_SYMBOL(nf_ct_invert_tuplepr);
-EXPORT_SYMBOL(nf_conntrack_destroyed);
-EXPORT_SYMBOL(need_conntrack);
-EXPORT_SYMBOL(nf_conntrack_helper_register);
-EXPORT_SYMBOL(nf_conntrack_helper_unregister);
-EXPORT_SYMBOL(nf_ct_iterate_cleanup);
-EXPORT_SYMBOL(__nf_ct_refresh_acct);
-EXPORT_SYMBOL(nf_ct_protos);
-EXPORT_SYMBOL(__nf_ct_l4proto_find);
-EXPORT_SYMBOL(nf_ct_l4proto_find_get);
-EXPORT_SYMBOL(nf_ct_l4proto_put);
-EXPORT_SYMBOL(nf_ct_l3proto_find_get);
-EXPORT_SYMBOL(nf_ct_l3proto_put);
-EXPORT_SYMBOL(nf_ct_l3protos);
-EXPORT_SYMBOL_GPL(nf_conntrack_checksum);
-EXPORT_SYMBOL(nf_conntrack_expect_alloc);
-EXPORT_SYMBOL(nf_conntrack_expect_put);
-EXPORT_SYMBOL(nf_conntrack_expect_related);
-EXPORT_SYMBOL(nf_conntrack_unexpect_related);
-EXPORT_SYMBOL(nf_conntrack_tuple_taken);
-EXPORT_SYMBOL(nf_conntrack_htable_size);
-EXPORT_SYMBOL(nf_conntrack_lock);
-EXPORT_SYMBOL(nf_conntrack_hash);
-EXPORT_SYMBOL(nf_conntrack_untracked);
-EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
-#ifdef CONFIG_NF_NAT_NEEDED
-EXPORT_SYMBOL(nf_conntrack_tcp_update);
-EXPORT_SYMBOL(nf_conntrack_register_cache);
-EXPORT_SYMBOL(nf_conntrack_unregister_cache);
-EXPORT_SYMBOL(nf_conntrack_alter_reply);
-#endif
-EXPORT_SYMBOL(__nf_conntrack_confirm);
-EXPORT_SYMBOL(nf_ct_get_tuple);
-EXPORT_SYMBOL(nf_ct_invert_tuple);
-EXPORT_SYMBOL(nf_conntrack_in);
-EXPORT_SYMBOL(__nf_conntrack_attach);
-EXPORT_SYMBOL(nf_conntrack_alloc);
-EXPORT_SYMBOL(nf_conntrack_free);
-EXPORT_SYMBOL(nf_conntrack_flush);
-EXPORT_SYMBOL(nf_ct_remove_expectations);
-EXPORT_SYMBOL(nf_ct_helper_find_get);
-EXPORT_SYMBOL(nf_ct_helper_put);
-EXPORT_SYMBOL(__nf_conntrack_helper_find_byname);
-EXPORT_SYMBOL(__nf_conntrack_find);
-EXPORT_SYMBOL(nf_ct_unlink_expect);
-EXPORT_SYMBOL(nf_conntrack_hash_insert);
-EXPORT_SYMBOL(__nf_conntrack_expect_find);
-EXPORT_SYMBOL(nf_conntrack_expect_find_get);
-EXPORT_SYMBOL(nf_conntrack_expect_list);
-#if defined(CONFIG_NF_CT_NETLINK) || \
-    defined(CONFIG_NF_CT_NETLINK_MODULE)
-EXPORT_SYMBOL(nf_ct_port_tuple_to_nfattr);
-EXPORT_SYMBOL(nf_ct_port_nfattr_to_tuple);
-#endif
+EXPORT_SYMBOL_GPL(need_conntrack);
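
For reference, a symbol exported with EXPORT_SYMBOL_GPL (as done throughout this patch) can only be resolved by modules that declare a GPL-compatible MODULE_LICENSE. The sketch below is a hypothetical out-of-tree module, not part of this patch; the module and callback names are made up, and it assumes the 2.6-era nf_ct_iterate_cleanup() interface shown in the hunks above.

#include <linux/module.h>
#include <net/netfilter/nf_conntrack.h>

/* Iterator callback: returning non-zero asks conntrack to delete the entry. */
static int ct_flush_all(struct nf_conn *ct, void *data)
{
	return 1;
}

static int __init ct_example_init(void)
{
	/* This call resolves only because the module below is GPL-licensed. */
	nf_ct_iterate_cleanup(ct_flush_all, NULL);
	return 0;
}

static void __exit ct_example_exit(void)
{
}

module_init(ct_example_init);
module_exit(ct_example_exit);
MODULE_LICENSE("GPL");	/* required to link against EXPORT_SYMBOL_GPL symbols */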