return (struct inet_request_sock *)sk;
 }
 
-struct inet_bind_bucket;
 struct ipv6_pinfo;
 
 struct inet_sock {
        int                     mc_index;       /* Multicast device index */
        __u32                   mc_addr;
        struct ip_mc_socklist   *mc_list;       /* Group array */
-       struct inet_bind_bucket *bind_hash;
        /*
         * Following members are used to retain the information to build
         * an ip header on each ip fragmentation while the socket is corked.
 
        return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
 }
 
-static inline int tcp_twsk_ipv6only(const struct sock *sk)
+static inline int inet_twsk_ipv6only(const struct sock *sk)
 {
        return inet_twsk(sk)->tw_ipv6only;
 }
 
-static inline int tcp_v6_ipv6only(const struct sock *sk)
+static inline int inet_v6_ipv6only(const struct sock *sk)
 {
        return likely(sk->sk_state != TCP_TIME_WAIT) ?
-               ipv6_only_sock(sk) : tcp_twsk_ipv6only(sk);
+               ipv6_only_sock(sk) : inet_twsk_ipv6only(sk);
 }
 #else
 #define __ipv6_only_sock(sk)   0
 #define __tcp_v6_rcv_saddr(__sk)       NULL
 #define tcp_v6_rcv_saddr(__sk)         NULL
 #define tcp_twsk_ipv6only(__sk)                0
-#define tcp_v6_ipv6only(__sk)          0
+#define inet_v6_ipv6only(__sk)         0
 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
 
 #define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif)       \
 
 
 #include <linux/config.h>
 #include <linux/skbuff.h>
-#include <linux/ip.h>
 #include <net/sock.h>
+#include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
 
 /* This defines a selective acknowledgement block. */
 }
 
 struct tcp_sock {
-       /* inet_sock has to be the first member of tcp_sock */
-       struct inet_sock        inet;
+       /* inet_connection_sock has to be the first member of tcp_sock */
+       struct inet_connection_sock     inet_conn;
        int     tcp_header_len; /* Bytes of tcp header to send          */
 
 /*
        __u32   snd_sml;        /* Last byte of the most recently transmitted small packet */
        __u32   rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
        __u32   lsndtime;       /* timestamp of last sent data packet (for restart window) */
-       /* Delayed ACK control data */
-       struct {
-               __u8    pending;        /* ACK is pending */
-               __u8    quick;          /* Scheduled number of quick acks       */
-               __u8    pingpong;       /* The session is interactive           */
-               __u8    blocked;        /* Delayed ACK was blocked by socket lock*/
-               __u32   ato;            /* Predicted tick of soft clock         */
-               unsigned long timeout;  /* Currently scheduled timeout          */
-               __u32   lrcvtime;       /* timestamp of last received data packet*/
-               __u16   last_seg_size;  /* Size of last incoming segment        */
-               __u16   rcv_mss;        /* MSS used for delayed ACK decisions   */ 
-       } ack;
 
        /* Data for direct copy to user */
        struct {
        __u16   xmit_size_goal; /* Goal for segmenting output packets   */
        __u16   ext_header_len; /* Network protocol overhead (IP/IPv6 options) */
        __u8    ca_state;       /* State of fast-retransmit machine     */
-       __u8    retransmits;    /* Number of unrecovered RTO timeouts.  */
 
+       __u8    keepalive_probes; /* num of allowed keep alive probes   */
        __u16   advmss;         /* Advertised MSS                       */
        __u32   window_clamp;   /* Maximal window to advertise          */
        __u32   rcv_ssthresh;   /* Current window clamp                 */
        __u8    reordering;     /* Packet reordering metric.            */
        __u8    frto_counter;   /* Number of new acks after RTO */
 
-       __u8    unused;
+       __u8    nonagle;        /* Disable Nagle algorithm?             */
        __u8    defer_accept;   /* User waits for some data after accept() */
 
 /* RTT measurement */
        __u32   mdev_max;       /* maximal mdev for the last rtt period */
        __u32   rttvar;         /* smoothed mdev_max                    */
        __u32   rtt_seq;        /* sequence number to update rttvar     */
-       __u32   rto;            /* retransmit timeout                   */
 
        __u32   packets_out;    /* Packets which are "in flight"        */
        __u32   left_out;       /* Packets which leaved network */
        __u32   retrans_out;    /* Retransmitted packets out            */
-       __u8    backoff;        /* backoff                              */
 /*
  *      Options received (usually on last packet, some only on SYN packets).
  */
-       __u8    nonagle;        /* Disable Nagle algorithm?             */
-       __u8    keepalive_probes; /* num of allowed keep alive probes   */
-
-       __u8    probes_out;     /* unanswered 0 window probes           */
        struct tcp_options_received rx_opt;
 
 /*
        __u32   snd_cwnd_used;
        __u32   snd_cwnd_stamp;
 
-       /* Two commonly used timers in both sender and receiver paths. */
-       unsigned long           timeout;
-       struct timer_list       retransmit_timer;       /* Resend (no ack)      */
-       struct timer_list       delack_timer;           /* Ack delay            */
-
        struct sk_buff_head     out_of_order_queue; /* Out of order segments go here */
 
        struct tcp_func         *af_specific;   /* Operations which are AF_INET{4,6} specific   */
        struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
        struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
 
-       __u8    syn_retries;    /* num of allowed syn retries */
+       __u8    probes_out;     /* unanswered 0 window probes           */
        __u8    ecn_flags;      /* ECN status bits.                     */
        __u16   prior_ssthresh; /* ssthresh saved at recovery start     */
        __u32   lost_out;       /* Lost packets                 */
        int     undo_retrans;   /* number of undoable retransmissions. */
        __u32   urg_seq;        /* Seq of received urgent pointer */
        __u16   urg_data;       /* Saved octet of OOB data and control flags */
-       __u8    pending;        /* Scheduled timer event        */
        __u8    urg_mode;       /* In urgent mode               */
+       /* ONE BYTE HOLE, TRY TO PACK! */
        __u32   snd_up;         /* Urgent pointer               */
 
        __u32   total_retrans;  /* Total retransmits for entire connection */
 
-       struct request_sock_queue accept_queue; /* FIFO of established children */
-
        unsigned int            keepalive_time;   /* time before keep alive takes place */
        unsigned int            keepalive_intvl;  /* time interval between keep alive probes */
        int                     linger2;
 
--- /dev/null
+++ b/include/net/inet_connection_sock.h
+/*
+ * NET         Generic infrastructure for INET connection oriented protocols.
+ *
+ *             Definitions for inet_connection_sock 
+ *
+ * Authors:    Many people, see the TCP sources
+ *
+ *             From code originally in TCP
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ */
+#ifndef _INET_CONNECTION_SOCK_H
+#define _INET_CONNECTION_SOCK_H
+
+#include <linux/ip.h>
+#include <linux/timer.h>
+#include <net/request_sock.h>
+
+struct inet_bind_bucket;
+struct inet_hashinfo;
+
+/** inet_connection_sock - INET connection oriented sock
+ *
+ * @icsk_accept_queue:    FIFO of established children 
+ * @icsk_bind_hash:       Bind node
+ * @icsk_timeout:         Timeout
+ * @icsk_retransmit_timer: Resend (no ack)
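+ * @icsk_delack_timer:    Delayed ACK timer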
+ * @icsk_rto:             Retransmit timeout
+ * @icsk_retransmits:     Number of unrecovered [RTO] timeouts
+ * @icsk_pending:         Scheduled timer event
+ * @icsk_backoff:         Backoff
+ * @icsk_syn_retries:      Number of allowed SYN (or equivalent) retries
+ * @icsk_ack:             Delayed ACK control data
+ */
+struct inet_connection_sock {
+       /* inet_sock has to be the first member! */
+       struct inet_sock          icsk_inet;
+       struct request_sock_queue icsk_accept_queue;
+       struct inet_bind_bucket   *icsk_bind_hash;
+       unsigned long             icsk_timeout;
+       struct timer_list         icsk_retransmit_timer;
+       struct timer_list         icsk_delack_timer;
+       __u32                     icsk_rto;
+       __u8                      icsk_retransmits;
+       __u8                      icsk_pending;
+       __u8                      icsk_backoff;
+       __u8                      icsk_syn_retries;
+       struct {
+               __u8              pending;       /* ACK is pending                         */
+               __u8              quick;         /* Scheduled number of quick acks         */
+               __u8              pingpong;      /* The session is interactive             */
+               __u8              blocked;       /* Delayed ACK was blocked by socket lock */
+               __u32             ato;           /* Predicted tick of soft clock           */
+               unsigned long     timeout;       /* Currently scheduled timeout            */
+               __u32             lrcvtime;      /* timestamp of last received data packet */
+               __u16             last_seg_size; /* Size of last incoming segment          */
+               __u16             rcv_mss;       /* MSS used for delayed ACK decisions     */ 
+       } icsk_ack;
+};
+
+static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
+{
+       return (struct inet_connection_sock *)sk;
+}
+
+extern void inet_csk_init_xmit_timers(struct sock *sk,
+                                     void (*retransmit_handler)(unsigned long),
+                                     void (*delack_handler)(unsigned long),
+                                     void (*keepalive_handler)(unsigned long));
+extern void inet_csk_clear_xmit_timers(struct sock *sk);
+
+extern struct request_sock *inet_csk_search_req(const struct sock *sk,
+                                               struct request_sock ***prevp,
+                                               const __u16 rport,
+                                               const __u32 raddr,
+                                               const __u32 laddr);
+extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
+                            struct sock *sk, unsigned short snum);
+
+extern struct dst_entry* inet_csk_route_req(struct sock *sk,
+                                           const struct request_sock *req);
+
+#endif /* _INET_CONNECTION_SOCK_H */
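
A minimal usage sketch (illustrative only, not part of the patch): any protocol whose private sock begins with struct inet_connection_sock can reach the generic state through inet_csk(). sk_reset_timer() is the existing helper from <net/sock.h>; the function name here is hypothetical.

	/* Hypothetical caller: count a timeout and re-arm the retransmit
	 * timer with the current RTO. */
	static void example_rearm_rto(struct sock *sk)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);

		icsk->icsk_retransmits++;
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + icsk->icsk_rto);
	}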
 
 #include <linux/config.h>
 
 #include <linux/interrupt.h>
-#include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/wait.h>
 
+#include <net/inet_connection_sock.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 
        struct inet_bind_bucket *tb;
 
        spin_lock(&head->lock);
-       tb = inet_sk(sk)->bind_hash;
+       tb = inet_csk(sk)->icsk_bind_hash;
        sk_add_bind_node(child, &tb->owners);
-       inet_sk(child)->bind_hash = tb;
+       inet_csk(child)->icsk_bind_hash = tb;
        spin_unlock(&head->lock);
 }
 
 
        return prev_qlen;
 }
 
-static inline int reqsk_queue_len(struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
        return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
 }
 
-static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
        return queue->listen_opt->qlen_young;
 }
 
-static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
 {
        return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
 }
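
The shift in reqsk_queue_is_full() is a power-of-two limit test: max_qlen_log holds log2 of the maximum backlog, so qlen >> max_qlen_log becomes non-zero exactly when qlen reaches 2^max_qlen_log. An equivalent standalone check (illustrative only):

	/* With max_qlen_log == 8 the limit is 256 requests:
	 * 255 >> 8 == 0 (room left), 256 >> 8 == 1 (full). */
	static inline int backlog_full(unsigned int qlen, unsigned int max_qlen_log)
	{
		return qlen >> max_qlen_log;
	}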
 
 
 struct request_sock_ops;
 
-/* Here is the right place to enable sock refcounting debugging */
-//#define SOCK_REFCNT_DEBUG
-
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
 
 #define _TCP_H
 
 #define TCP_DEBUG 1
+#define INET_CSK_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
 /* Cancel timers when they are not required. */
-#undef TCP_CLEAR_TIMERS
+#undef INET_CSK_CLEAR_TIMERS
 
 #include <linux/config.h>
 #include <linux/list.h>
 #define TCPOLEN_SACK_BASE_ALIGNED      4
 #define TCPOLEN_SACK_PERBLOCK          8
 
-#define TCP_TIME_RETRANS       1       /* Retransmit timer */
-#define TCP_TIME_DACK          2       /* Delayed ack timer */
-#define TCP_TIME_PROBE0                3       /* Zero window probe timer */
-#define TCP_TIME_KEEPOPEN      4       /* Keepalive timer */
+#define ICSK_TIME_RETRANS      1       /* Retransmit timer */
+#define ICSK_TIME_DACK         2       /* Delayed ack timer */
+#define ICSK_TIME_PROBE0       3       /* Zero window probe timer */
+#define ICSK_TIME_KEEPOPEN     4       /* Keepalive timer */
 
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF          1       /* Nagle's algo is disabled */
 extern int tcp_memory_pressure;
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
+#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
-#define TCP_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) 1
 #endif
 
 /*
 
 extern void                    tcp_rcv_space_adjust(struct sock *sk);
 
-enum tcp_ack_state_t
-{
-       TCP_ACK_SCHED = 1,
-       TCP_ACK_TIMER = 2,
-       TCP_ACK_PUSHED= 4
+enum inet_csk_ack_state_t {
+       ICSK_ACK_SCHED  = 1,
+       ICSK_ACK_TIMER  = 2,
+       ICSK_ACK_PUSHED = 4
 };
 
-static inline void tcp_schedule_ack(struct tcp_sock *tp)
+static inline void inet_csk_schedule_ack(struct sock *sk)
 {
-       tp->ack.pending |= TCP_ACK_SCHED;
+       inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
 }
 
-static inline int tcp_ack_scheduled(struct tcp_sock *tp)
+static inline int inet_csk_ack_scheduled(const struct sock *sk)
 {
-       return tp->ack.pending&TCP_ACK_SCHED;
+       return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk,
+                                        const unsigned int pkts)
 {
-       if (tp->ack.quick) {
-               if (pkts >= tp->ack.quick) {
-                       tp->ack.quick = 0;
+       struct inet_connection_sock *icsk = inet_csk(sk);
 
+       if (icsk->icsk_ack.quick) {
+               if (pkts >= icsk->icsk_ack.quick) {
+                       icsk->icsk_ack.quick = 0;
                        /* Leaving quickack mode we deflate ATO. */
-                       tp->ack.ato = TCP_ATO_MIN;
+                       icsk->icsk_ack.ato   = TCP_ATO_MIN;
                } else
-                       tp->ack.quick -= pkts;
+                       icsk->icsk_ack.quick -= pkts;
        }
 }
 
-extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
+extern void tcp_enter_quickack_mode(struct sock *sk);
 
-static __inline__ void tcp_delack_init(struct tcp_sock *tp)
+static inline void inet_csk_delack_init(struct sock *sk)
 {
-       memset(&tp->ack, 0, sizeof(tp->ack));
+       memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 
 extern void                    tcp_close(struct sock *sk, 
                                          long timeout);
-extern struct sock *           tcp_accept(struct sock *sk, int flags, int *err);
+extern struct sock *           inet_csk_accept(struct sock *sk, int flags, int *err);
 extern unsigned int            tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
 
 extern int                     tcp_getsockopt(struct sock *sk, int level, 
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
-extern void tcp_clear_xmit_timers(struct sock *);
+static inline void tcp_clear_xmit_timers(struct sock *sk)
+{
+       inet_csk_clear_xmit_timers(sk);
+}
 
-extern void tcp_delete_keepalive_timer(struct sock *);
-extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
+extern void inet_csk_delete_keepalive_timer(struct sock *sk);
+extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 extern unsigned int tcp_current_mss(struct sock *sk, int large);
 
-#ifdef TCP_DEBUG
-extern const char tcp_timer_bug_msg[];
+#ifdef INET_CSK_DEBUG
+extern const char inet_csk_timer_bug_msg[];
 #endif
 
 /* tcp_diag.c */
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                         sk_read_actor_t recv_actor);
 
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        
-       switch (what) {
-       case TCP_TIME_RETRANS:
-       case TCP_TIME_PROBE0:
-               tp->pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
-               sk_stop_timer(sk, &tp->retransmit_timer);
+       if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+               icsk->icsk_pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+               sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 #endif
-               break;
-       case TCP_TIME_DACK:
-               tp->ack.blocked = 0;
-               tp->ack.pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
-               sk_stop_timer(sk, &tp->delack_timer);
+       } else if (what == ICSK_TIME_DACK) {
+               icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+               sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
-               break;
-       default:
-#ifdef TCP_DEBUG
-               printk(tcp_timer_bug_msg);
+       }
+#ifdef INET_CSK_DEBUG
+       else {
+               pr_debug(inet_csk_timer_bug_msg);
+       }
 #endif
-               return;
-       };
-
 }
 
 /*
  *     Reset the retransmission timer
  */
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+                                            unsigned long when)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (when > TCP_RTO_MAX) {
-#ifdef TCP_DEBUG
-               printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
+#ifdef INET_CSK_DEBUG
+               pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
+                        sk, what, when, current_text_addr());
 #endif
                when = TCP_RTO_MAX;
        }
 
-       switch (what) {
-       case TCP_TIME_RETRANS:
-       case TCP_TIME_PROBE0:
-               tp->pending = what;
-               tp->timeout = jiffies+when;
-               sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
-               break;
-
-       case TCP_TIME_DACK:
-               tp->ack.pending |= TCP_ACK_TIMER;
-               tp->ack.timeout = jiffies+when;
-               sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
-               break;
-
-       default:
-#ifdef TCP_DEBUG
-               printk(tcp_timer_bug_msg);
+       if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+               icsk->icsk_pending = what;
+               icsk->icsk_timeout = jiffies + when;
+               sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+       } else if (what == ICSK_TIME_DACK) {
+               icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+               icsk->icsk_ack.timeout = jiffies + when;
+               sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+       }
+#ifdef INET_CSK_DEBUG
+       else {
+               pr_debug(inet_csk_timer_bug_msg);
+       }
 #endif
-               return;
-       };
 }
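
Typical call sites, as in the TCP hunks further down: the retransmit timer is armed with the current RTO when a flight starts, and the delayed-ACK timer with a fraction of the minimum RTO (values taken from those hunks, shown here only to illustrate the API):

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, (3 * TCP_RTO_MIN) / 4);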
 
 /* Initialize RCV_MSS value.
        hint = min(hint, TCP_MIN_RCVMSS);
        hint = max(hint, TCP_MIN_MSS);
 
-       tp->ack.rcv_mss = hint;
+       inet_csk(sk)->icsk_ack.rcv_mss = hint;
 }
 
 static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 
        tp->packets_out += tcp_skb_pcount(skb);
        if (!orig)
-               tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 }
 
 static inline void tcp_packets_out_dec(struct tcp_sock *tp, 
 
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
-       if (!tp->packets_out && !tp->pending)
-               tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       if (!tp->packets_out && !icsk->icsk_pending)
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto);
 }
 
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
                        tp->ucopy.memory = 0;
                } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
                        wake_up_interruptible(sk->sk_sleep);
-                       if (!tcp_ack_scheduled(tp))
-                               tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
+                       if (!inet_csk_ack_scheduled(sk))
+                               inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+                                                         (3 * TCP_RTO_MIN) / 4);
                }
                return 1;
        }
                        TCP_INC_STATS(TCP_MIB_ESTABRESETS);
 
                sk->sk_prot->unhash(sk);
-               if (inet_sk(sk)->bind_hash &&
+               if (inet_csk(sk)->icsk_bind_hash &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(&tcp_hashinfo, sk);
                /* fall through */
        return tcp_win_from_space(sk->sk_rcvbuf); 
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
-                                        struct sock *child)
+static inline void inet_csk_reqsk_queue_add(struct sock *sk,
+                                           struct request_sock *req,
+                                           struct sock *child)
 {
-       reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
+       reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-static inline void
-tcp_synq_removed(struct sock *sk, struct request_sock *req)
+static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
+                                               struct request_sock *req)
 {
-       if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
-               tcp_delete_keepalive_timer(sk);
+       if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
+               inet_csk_delete_keepalive_timer(sk);
 }
 
-static inline void tcp_synq_added(struct sock *sk)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk,
+                                             const unsigned long timeout)
 {
-       if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
-               tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+       if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
+               inet_csk_reset_keepalive_timer(sk, timeout);
 }
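
Note that the timeout, previously hard-coded to TCP_TIMEOUT_INIT in tcp_synq_added(), is now a parameter; a TCP caller preserving the old behaviour would presumably pass:

	inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);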
 
-static inline int tcp_synq_len(struct sock *sk)
+static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 {
-       return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
+       return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_young(struct sock *sk)
+static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
 {
-       return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
+       return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_is_full(struct sock *sk)
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-       return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
+       return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
-                                  struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
+                                              struct request_sock *req,
+                                              struct request_sock **prev)
 {
-       reqsk_queue_unlink(&tp->accept_queue, req, prev);
+       reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
-                                    struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
+                                            struct request_sock *req,
+                                            struct request_sock **prev)
 {
-       tcp_synq_unlink(tcp_sk(sk), req, prev);
-       tcp_synq_removed(sk, req);
+       inet_csk_reqsk_queue_unlink(sk, req, prev);
+       inet_csk_reqsk_queue_removed(sk, req);
        reqsk_free(req);
 }
 
        return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
 }
 
-static inline int tcp_fin_time(const struct tcp_sock *tp)
+static inline int tcp_fin_time(const struct sock *sk)
 {
-       int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+       int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+       const int rto = inet_csk(sk)->icsk_rto;
 
-       if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
-               fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+       if (fin_timeout < (rto << 2) - (rto >> 1))
+               fin_timeout = (rto << 2) - (rto >> 1);
 
        return fin_timeout;
 }
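
The shift expression above clamps the FIN timeout to 3.5 retransmission timeouts:

	/* (rto << 2) - (rto >> 1) == 4*rto - rto/2 == 3.5 * rto,
	 * so e.g. rto == 200 jiffies gives a floor of 700 jiffies. */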
 
                 * it is surely retransmit. It is not in ECN RFC,
                 * but Linux follows this rule. */
                else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
-                       tcp_enter_quickack_mode(tp);
+                       tcp_enter_quickack_mode((struct sock *)tp);
        }
 }
 
 
 #include <linux/slab.h>
 #include <linux/wait.h>
 
+#include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
 
 /*
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum)
 {
-       struct inet_sock *inet = inet_sk(sk);
-       inet->num       = snum;
+       inet_sk(sk)->num = snum;
        sk_add_bind_node(sk, &tb->owners);
-       inet->bind_hash = tb;
+       inet_csk(sk)->icsk_bind_hash = tb;
 }
 
 EXPORT_SYMBOL(inet_bind_hash);
  */
 static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
 {
-       struct inet_sock *inet = inet_sk(sk);
-       const int bhash = inet_bhashfn(inet->num, hashinfo->bhash_size);
+       const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
        struct inet_bind_bucket *tb;
 
        spin_lock(&head->lock);
-       tb = inet->bind_hash;
+       tb = inet_csk(sk)->icsk_bind_hash;
        __sk_del_bind_node(sk);
-       inet->bind_hash = NULL;
-       inet->num = 0;
+       inet_csk(sk)->icsk_bind_hash = NULL;
+       inet_sk(sk)->num = 0;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        spin_unlock(&head->lock);
 }
 
                           struct inet_hashinfo *hashinfo)
 {
        const struct inet_sock *inet = inet_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent];
        struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
-       tw->tw_tb = inet->bind_hash;
-       BUG_TRAP(inet->bind_hash);
+       tw->tw_tb = icsk->icsk_bind_hash;
+       BUG_TRAP(icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);
 
 
 
        child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
        if (child)
-               tcp_acceptq_queue(sk, req, child);
+               inet_csk_reqsk_queue_add(sk, req, child);
        else
                reqsk_free(req);
 
 
 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
                                               poll_table *wait)
 {
-       return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0;
+       return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0;
 }
 
 /*
 int tcp_listen_start(struct sock *sk)
 {
        struct inet_sock *inet = inet_sk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-       int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE);
 
        if (rc != 0)
                return rc;
 
        sk->sk_max_ack_backlog = 0;
        sk->sk_ack_backlog = 0;
-       tcp_delack_init(tp);
+       inet_csk_delack_init(sk);
 
       /* There is a race window here: we announce ourselves listening,
         * but this transition is still not validated by get_port().
        }
 
        sk->sk_state = TCP_CLOSE;
-       __reqsk_queue_destroy(&tp->accept_queue);
+       __reqsk_queue_destroy(&icsk->icsk_accept_queue);
        return -EADDRINUSE;
 }
 
 
 static void tcp_listen_stop (struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock *acc_req;
        struct request_sock *req;
 
-       tcp_delete_keepalive_timer(sk);
+       inet_csk_delete_keepalive_timer(sk);
 
        /* make all the listen_opt local to us */
-       acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
+       acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
 
        /* Following specs, it would be better either to send FIN
         * (and enter FIN-WAIT-1, it is normal close)
         * To be honest, we are not able to make either
         * of the variants now.                 --ANK
         */
-       reqsk_queue_destroy(&tp->accept_queue);
+       reqsk_queue_destroy(&icsk->icsk_accept_queue);
 
        while ((req = acc_req) != NULL) {
                struct sock *child = req->sk;
        BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
 #endif
 
-       if (tcp_ack_scheduled(tp)) {
+       if (inet_csk_ack_scheduled(sk)) {
+               const struct inet_connection_sock *icsk = inet_csk(sk);
                   /* Delayed ACKs frequently hit locked sockets during bulk
                    * receive. */
-               if (tp->ack.blocked ||
+               if (icsk->icsk_ack.blocked ||
                    /* Once-per-two-segments ACK was not sent by tcp_input.c */
-                   tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
+                   tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
                    /*
                     * If this read emptied read buffer, we send ACK, if
                     * connection is not bidirectional, user drained
                     * receive buffer and there was a small segment
                     * in queue.
                     */
-                   (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
-                    !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
+                   (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
+                    !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
                        time_to_ack = 1;
        }
 
        BUG_TRAP(sk_unhashed(sk));
 
       /* If inet_sk(sk)->num is non-zero, the socket must be bound */
-       BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash);
+       BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
 
        sk->sk_prot->destroy(sk);
 
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
                } else {
-                       int tmo = tcp_fin_time(tp);
+                       const int tmo = tcp_fin_time(sk);
 
                        if (tmo > TCP_TIMEWAIT_LEN) {
-                               tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
+                               inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
                        } else {
                                atomic_inc(&tcp_orphan_count);
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
 int tcp_disconnect(struct sock *sk, int flags)
 {
        struct inet_sock *inet = inet_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int err = 0;
        int old_state = sk->sk_state;
        tp->srtt = 0;
        if ((tp->write_seq += tp->max_window + 2) == 0)
                tp->write_seq = 1;
-       tp->backoff = 0;
+       icsk->icsk_backoff = 0;
        tp->snd_cwnd = 2;
        tp->probes_out = 0;
        tp->packets_out = 0;
        tp->snd_cwnd_cnt = 0;
        tcp_set_ca_state(tp, TCP_CA_Open);
        tcp_clear_retrans(tp);
-       tcp_delack_init(tp);
+       inet_csk_delack_init(sk);
        sk->sk_send_head = NULL;
        tp->rx_opt.saw_tstamp = 0;
        tcp_sack_reset(&tp->rx_opt);
        __sk_dst_reset(sk);
 
-       BUG_TRAP(!inet->num || inet->bind_hash);
+       BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
 
        sk->sk_error_report(sk);
        return err;
  */
 static int wait_for_connect(struct sock *sk, long timeo)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        DEFINE_WAIT(wait);
        int err;
 
                prepare_to_wait_exclusive(sk->sk_sleep, &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
-               if (reqsk_queue_empty(&tp->accept_queue))
+               if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
                lock_sock(sk);
                err = 0;
-               if (!reqsk_queue_empty(&tp->accept_queue))
+               if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
  *     This will accept the next outstanding connection.
  */
 
-struct sock *tcp_accept(struct sock *sk, int flags, int *err)
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *newsk;
        int error;
 
                goto out_err;
 
        /* Find already established connection */
-       if (reqsk_queue_empty(&tp->accept_queue)) {
+       if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
                /* If this is a non blocking socket don't sleep */
                        goto out_err;
        }
 
-       newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
+       newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
        BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
 out:
        release_sock(sk);
                   int optlen)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        int val;
        int err = 0;
 
                                        elapsed = tp->keepalive_time - elapsed;
                                else
                                        elapsed = 0;
-                               tcp_reset_keepalive_timer(sk, elapsed);
+                               inet_csk_reset_keepalive_timer(sk, elapsed);
                        }
                }
                break;
                if (val < 1 || val > MAX_TCP_SYNCNT)
                        err = -EINVAL;
                else
-                       tp->syn_retries = val;
+                       icsk->icsk_syn_retries = val;
                break;
 
        case TCP_LINGER2:
 
        case TCP_QUICKACK:
                if (!val) {
-                       tp->ack.pingpong = 1;
+                       icsk->icsk_ack.pingpong = 1;
                } else {
-                       tp->ack.pingpong = 0;
+                       icsk->icsk_ack.pingpong = 0;
                        if ((1 << sk->sk_state) &
                            (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
-                           tcp_ack_scheduled(tp)) {
-                               tp->ack.pending |= TCP_ACK_PUSHED;
+                           inet_csk_ack_scheduled(sk)) {
+                               icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
                                cleanup_rbuf(sk, 1);
                                if (!(val & 1))
-                                       tp->ack.pingpong = 1;
+                                       icsk->icsk_ack.pingpong = 1;
                        }
                }
                break;
 void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
 
        memset(info, 0, sizeof(*info));
 
        info->tcpi_state = sk->sk_state;
        info->tcpi_ca_state = tp->ca_state;
-       info->tcpi_retransmits = tp->retransmits;
+       info->tcpi_retransmits = icsk->icsk_retransmits;
        info->tcpi_probes = tp->probes_out;
-       info->tcpi_backoff = tp->backoff;
+       info->tcpi_backoff = icsk->icsk_backoff;
 
        if (tp->rx_opt.tstamp_ok)
                info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
        if (tp->ecn_flags&TCP_ECN_OK)
                info->tcpi_options |= TCPI_OPT_ECN;
 
-       info->tcpi_rto = jiffies_to_usecs(tp->rto);
-       info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
+       info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
+       info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
        info->tcpi_snd_mss = tp->mss_cache;
-       info->tcpi_rcv_mss = tp->ack.rcv_mss;
+       info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
 
        info->tcpi_unacked = tp->packets_out;
        info->tcpi_sacked = tp->sacked_out;
        info->tcpi_fackets = tp->fackets_out;
 
        info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
-       info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
+       info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
        info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
 
        info->tcpi_pmtu = tp->pmtu_cookie;
                val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
                break;
        case TCP_SYNCNT:
-               val = tp->syn_retries ? : sysctl_tcp_syn_retries;
+               val = inet_csk(sk)->icsk_syn_retries ? : sysctl_tcp_syn_retries;
                break;
        case TCP_LINGER2:
                val = tp->linger2;
                return 0;
        }
        case TCP_QUICKACK:
-               val = !tp->ack.pingpong;
+               val = !inet_csk(sk)->icsk_ack.pingpong;
                break;
 
        case TCP_CONGESTION:
        tcp_register_congestion_control(&tcp_reno);
 }
 
-EXPORT_SYMBOL(tcp_accept);
+EXPORT_SYMBOL(inet_csk_accept);
 EXPORT_SYMBOL(tcp_close);
 EXPORT_SYMBOL(tcp_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
 
 static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
                        int ext, u32 pid, u32 seq, u16 nlmsg_flags)
 {
-       struct inet_sock *inet = inet_sk(sk);
+       const struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcpdiagmsg *r;
        struct nlmsghdr  *nlh;
        struct tcp_info  *info = NULL;
 
 #define EXPIRES_IN_MS(tmo)  ((tmo-jiffies)*1000+HZ-1)/HZ
 
-       if (tp->pending == TCP_TIME_RETRANS) {
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                r->tcpdiag_timer = 1;
-               r->tcpdiag_retrans = tp->retransmits;
-               r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
-       } else if (tp->pending == TCP_TIME_PROBE0) {
+               r->tcpdiag_retrans = icsk->icsk_retransmits;
+               r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
+       } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                r->tcpdiag_timer = 4;
                r->tcpdiag_retrans = tp->probes_out;
-               r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
+               r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
        } else if (timer_pending(&sk->sk_timer)) {
                r->tcpdiag_timer = 2;
                r->tcpdiag_retrans = tp->probes_out;
 {
        struct tcpdiag_entry entry;
        struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt;
        struct rtattr *bc = NULL;
        struct inet_sock *inet = inet_sk(sk);
 
        entry.family = sk->sk_family;
 
-       read_lock_bh(&tp->accept_queue.syn_wait_lock);
+       read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
-       lopt = tp->accept_queue.listen_opt;
+       lopt = icsk->icsk_accept_queue.listen_opt;
        if (!lopt || !lopt->qlen)
                goto out;
 
        }
 
 out:
-       read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return err;
 }
 
 /* Adapt the MSS value used to make delayed ACK decisions to the
  * real world.
  */
-static inline void tcp_measure_rcv_mss(struct tcp_sock *tp,
-                                      struct sk_buff *skb)
+static inline void tcp_measure_rcv_mss(struct sock *sk,
+                                      const struct sk_buff *skb)
 {
-       unsigned int len, lss;
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       const unsigned int lss = icsk->icsk_ack.last_seg_size; 
+       unsigned int len;
 
-       lss = tp->ack.last_seg_size; 
-       tp->ack.last_seg_size = 0; 
+       icsk->icsk_ack.last_seg_size = 0; 
 
        /* skb->len may jitter because of SACKs, even if peer
         * sends good full-sized frames.
         */
        len = skb->len;
-       if (len >= tp->ack.rcv_mss) {
-               tp->ack.rcv_mss = len;
+       if (len >= icsk->icsk_ack.rcv_mss) {
+               icsk->icsk_ack.rcv_mss = len;
        } else {
                /* Otherwise, we make a more careful check, taking into
                 * account that the SACK block is variable.
                         * tcp header plus fixed timestamp option length.
                         * Resulting "len" is MSS free of SACK jitter.
                         */
-                       len -= tp->tcp_header_len;
-                       tp->ack.last_seg_size = len;
+                       len -= tcp_sk(sk)->tcp_header_len;
+                       icsk->icsk_ack.last_seg_size = len;
                        if (len == lss) {
-                               tp->ack.rcv_mss = len;
+                               icsk->icsk_ack.rcv_mss = len;
                                return;
                        }
                }
-               tp->ack.pending |= TCP_ACK_PUSHED;
+               icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
        }
 }
 
-static void tcp_incr_quickack(struct tcp_sock *tp)
+static void tcp_incr_quickack(struct sock *sk)
 {
-       unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss);
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
        if (quickacks==0)
                quickacks=2;
-       if (quickacks > tp->ack.quick)
-               tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+       if (quickacks > icsk->icsk_ack.quick)
+               icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
 
-void tcp_enter_quickack_mode(struct tcp_sock *tp)
+void tcp_enter_quickack_mode(struct sock *sk)
 {
-       tcp_incr_quickack(tp);
-       tp->ack.pingpong = 0;
-       tp->ack.ato = TCP_ATO_MIN;
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       tcp_incr_quickack(sk);
+       icsk->icsk_ack.pingpong = 0;
+       icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
  */
 
-static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp)
+static inline int tcp_in_quickack_mode(const struct sock *sk)
 {
-       return (tp->ack.quick && !tp->ack.pingpong);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 }
 
 /* Buffer size and advertised window tuning.
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
-                            struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
+                            const struct sk_buff *skb)
 {
        /* Optimize this! */
        int truesize = tcp_win_from_space(skb->truesize)/2;
 
        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
-                       return 2*tp->ack.rcv_mss;
+                       return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
 
                truesize >>= 1;
                window >>= 1;
 
                if (incr) {
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
-                       tp->ack.quick |= 1;
+                       inet_csk(sk)->icsk_ack.quick |= 1;
                }
        }
 }
        unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
        int ofo_win = 0;
 
-       tp->ack.quick = 0;
+       inet_csk(sk)->icsk_ack.quick = 0;
 
        skb_queue_walk(&tp->out_of_order_queue, skb) {
                ofo_win += skb->len;
                app_win += ofo_win;
                if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
                        app_win >>= 1;
-               if (app_win > tp->ack.rcv_mss)
-                       app_win -= tp->ack.rcv_mss;
+               if (app_win > inet_csk(sk)->icsk_ack.rcv_mss)
+                       app_win -= inet_csk(sk)->icsk_ack.rcv_mss;
                app_win = max(app_win, 2U*tp->advmss);
 
                if (!ofo_win)
        tp->rcv_rtt_est.time = tcp_time_stamp;
 }
 
-static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        if (tp->rx_opt.rcv_tsecr &&
            (TCP_SKB_CB(skb)->end_seq -
-            TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss))
+            TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
                tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
 }
 
  */
 static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now;
 
-       tcp_schedule_ack(tp);
+       inet_csk_schedule_ack(sk);
 
-       tcp_measure_rcv_mss(tp, skb);
+       tcp_measure_rcv_mss(sk, skb);
 
        tcp_rcv_rtt_measure(tp);
        
        now = tcp_time_stamp;
 
-       if (!tp->ack.ato) {
+       if (!icsk->icsk_ack.ato) {
                /* The _first_ data packet received, initialize
                 * delayed ACK engine.
                 */
-               tcp_incr_quickack(tp);
-               tp->ack.ato = TCP_ATO_MIN;
+               tcp_incr_quickack(sk);
+               icsk->icsk_ack.ato = TCP_ATO_MIN;
        } else {
-               int m = now - tp->ack.lrcvtime;
+               int m = now - icsk->icsk_ack.lrcvtime;
 
                if (m <= TCP_ATO_MIN/2) {
                        /* The fastest case is the first. */
-                       tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2;
-               } else if (m < tp->ack.ato) {
-                       tp->ack.ato = (tp->ack.ato>>1) + m;
-                       if (tp->ack.ato > tp->rto)
-                               tp->ack.ato = tp->rto;
-               } else if (m > tp->rto) {
+                       icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
+               } else if (m < icsk->icsk_ack.ato) {
+                       icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
+                       if (icsk->icsk_ack.ato > icsk->icsk_rto)
+                               icsk->icsk_ack.ato = icsk->icsk_rto;
+               } else if (m > icsk->icsk_rto) {
                        /* Too long gap. Apparently sender failed to
                         * restart window, so that we send ACKs quickly.
                         */
-                       tcp_incr_quickack(tp);
+                       tcp_incr_quickack(sk);
                        sk_stream_mem_reclaim(sk);
                }
        }
-       tp->ack.lrcvtime = now;
+       icsk->icsk_ack.lrcvtime = now;
 
        TCP_ECN_check_ce(tp, skb);
 
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct tcp_sock *tp)
+static inline void tcp_set_rto(struct sock *sk)
 {
+       const struct tcp_sock *tp = tcp_sk(sk);
        /* Old crap is replaced with new one. 8)
         *
         * More seriously:
         *    is invisible. Actually, Linux-2.4 also generates erratic
         *    ACKs in some circumstances.
         */
-       tp->rto = (tp->srtt >> 3) + tp->rttvar;
+       inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
 
        /* 2. Fixups made earlier cannot be right.
         *    If we do not estimate RTO correctly without them,
 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
  * guarantees that rto is higher.
  */
-static inline void tcp_bound_rto(struct tcp_sock *tp)
+static inline void tcp_bound_rto(struct sock *sk)
 {
-       if (tp->rto > TCP_RTO_MAX)
-               tp->rto = TCP_RTO_MAX;
+       if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
+               inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 }
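
For reference, tp->srtt is kept scaled by 8, so srtt >> 3 is the smoothed RTT proper and the assignment in tcp_set_rto() is the classic RTO = SRTT + RTTVAR (here rttvar is the smoothed mdev_max), capped at TCP_RTO_MAX by tcp_bound_rto(). A worked example with assumed figures:

	/* Assume srtt == 800 (100 jiffies of RTT, scaled by 8) and
	 * rttvar == 25 jiffies: icsk_rto = (800 >> 3) + 25 = 125 jiffies. */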
 
 /* Save metrics learned by this TCP session.
        if (dst && (dst->flags&DST_HOST)) {
                int m;
 
-               if (tp->backoff || !tp->srtt) {
+               if (inet_csk(sk)->icsk_backoff || !tp->srtt) {
                        /* This session failed to estimate rtt. Why?
                         * Probably, no packets returned in time.
                         * Reset our results.
                tp->mdev = dst_metric(dst, RTAX_RTTVAR);
                tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
        }
-       tcp_set_rto(tp);
-       tcp_bound_rto(tp);
-       if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
+       tcp_set_rto(sk);
+       tcp_bound_rto(sk);
+       if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
                goto reset;
        tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        if (!tp->rx_opt.saw_tstamp && tp->srtt) {
                tp->srtt = 0;
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
-               tp->rto = TCP_TIMEOUT_INIT;
+               inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        }
 }
 
 
        if (tp->ca_state <= TCP_CA_Disorder ||
             tp->snd_una == tp->high_seq ||
-            (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
+            (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
                tp->prior_ssthresh = tcp_current_ssthresh(tp);
                tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
                tcp_ca_event(tp, CA_EVENT_FRTO);
 
        /* Reduce ssthresh if it has not yet been made inside this window. */
        if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
-           (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
+           (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
                tp->prior_ssthresh = tcp_current_ssthresh(tp);
                tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
                tcp_ca_event(tp, CA_EVENT_LOSS);
        TCP_ECN_queue_cwr(tp);
 }
 
-static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp)
+static int tcp_check_sack_reneging(struct sock *sk)
 {
        struct sk_buff *skb;
 
                NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
-               tp->retransmits++;
+               inet_csk(sk)->icsk_retransmits++;
                tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
-               tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                         inet_csk(sk)->icsk_rto);
                return 1;
        }
        return 0;
        return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
 }
 
-static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb)
+static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 {
-       return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto);
+       return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
 static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
 {
        return tp->packets_out &&
-              tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue));
+              tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
 }
 
 /* Linux NewReno/SACK/FACK/ECN state machine.
                struct sk_buff *skb;
 
                sk_stream_for_retrans_queue(skb, sk) {
-                       if (tcp_skb_timedout(tp, skb) &&
+                       if (tcp_skb_timedout(sk, skb) &&
                            !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                                tp->lost_out += tcp_skb_pcount(skb);
                tp->left_out = tp->sacked_out;
                tcp_undo_cwr(tp, 1);
                NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
-               tp->retransmits = 0;
+               inet_csk(sk)->icsk_retransmits = 0;
                tp->undo_marker = 0;
                if (!IsReno(tp))
                        tcp_set_ca_state(tp, TCP_CA_Open);
                tp->prior_ssthresh = 0;
 
        /* B. In all the states check for reneging SACKs. */
-       if (tp->sacked_out && tcp_check_sack_reneging(sk, tp))
+       if (tp->sacked_out && tcp_check_sack_reneging(sk))
                return;
 
        /* C. Process data loss notification, provided it is valid. */
        } else if (!before(tp->snd_una, tp->high_seq)) {
                switch (tp->ca_state) {
                case TCP_CA_Loss:
-                       tp->retransmits = 0;
+                       inet_csk(sk)->icsk_retransmits = 0;
                        if (tcp_try_undo_recovery(sk, tp))
                                return;
                        break;
                break;
        case TCP_CA_Loss:
                if (flag&FLAG_DATA_ACKED)
-                       tp->retransmits = 0;
+                       inet_csk(sk)->icsk_retransmits = 0;
                if (!tcp_try_undo_loss(sk, tp)) {
                        tcp_moderate_cwnd(tp);
                        tcp_xmit_retransmit_queue(sk);
 /* Read draft-ietf-tcplw-high-performance before mucking
 * with this code. (Supersedes RFC1323)
  */
-static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag)
+static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
 {
-       __u32 seq_rtt;
-
        /* RTTM Rule: A TSecr value received in a segment is used to
         * update the averaged RTT measurement only if the segment
         * acknowledges some new data, i.e., only if it advances the
         * answer arrives rto becomes 120 seconds! If at least one of segments
         * in window is lost... Voila.                          --ANK (010210)
         */
-       seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+       struct tcp_sock *tp = tcp_sk(sk);
+       const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
        tcp_rtt_estimator(tp, seq_rtt, usrtt);
-       tcp_set_rto(tp);
-       tp->backoff = 0;
-       tcp_bound_rto(tp);
+       tcp_set_rto(sk);
+       inet_csk(sk)->icsk_backoff = 0;
+       tcp_bound_rto(sk);
 }
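
tcp_set_rto() and tcp_bound_rto() now store their result in
inet_csk(sk)->icsk_rto rather than tp->rto. As a rough, self-contained
illustration of what that pipeline computes, here is a classic
Jacobson/Karels-style estimator in the spirit of tcp_rtt_estimator();
the smoothing shifts are the textbook ones, not the kernel's exact
fixed-point bookkeeping, and the clamps are illustrative:

#include <stdint.h>

#define RTO_MIN 200      /* ms, illustrative floor */
#define RTO_MAX 120000   /* ms, illustrative ceiling */

struct rtt_state { uint32_t srtt, rttvar, rto; };

/* srtt <- 7/8 srtt + 1/8 m;  rttvar <- 3/4 rttvar + 1/4 |m - srtt|;
 * rto <- srtt + 4 * rttvar, bounded (cf. RFC 6298). */
void rtt_update(struct rtt_state *s, uint32_t m)
{
        if (s->srtt == 0) {                     /* first measurement */
                s->srtt   = m;
                s->rttvar = m / 2;
        } else {
                int32_t err = (int32_t)m - (int32_t)s->srtt;

                s->srtt += err / 8;
                if (err < 0)
                        err = -err;
                s->rttvar += (err - (int32_t)s->rttvar) / 4;
        }
        s->rto = s->srtt + 4 * s->rttvar;
        if (s->rto < RTO_MIN) s->rto = RTO_MIN;
        if (s->rto > RTO_MAX) s->rto = RTO_MAX;
}
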
 
-static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag)
+static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag)
 {
        /* We don't have a timestamp. Can only use
         * packets that are not retransmitted to determine
        if (flag & FLAG_RETRANS_DATA_ACKED)
                return;
 
-       tcp_rtt_estimator(tp, seq_rtt, usrtt);
-       tcp_set_rto(tp);
-       tp->backoff = 0;
-       tcp_bound_rto(tp);
+       tcp_rtt_estimator(tcp_sk(sk), seq_rtt, usrtt);
+       tcp_set_rto(sk);
+       inet_csk(sk)->icsk_backoff = 0;
+       tcp_bound_rto(sk);
 }
 
-static inline void tcp_ack_update_rtt(struct tcp_sock *tp,
-                                     int flag, s32 seq_rtt, u32 *usrtt)
+static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
+                                     const s32 seq_rtt, u32 *usrtt)
 {
+       const struct tcp_sock *tp = tcp_sk(sk);
        /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
        if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
-               tcp_ack_saw_tstamp(tp, usrtt, flag);
+               tcp_ack_saw_tstamp(sk, usrtt, flag);
        else if (seq_rtt >= 0)
-               tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag);
+               tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
 }
 
 static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
 static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
 {
        if (!tp->packets_out) {
-               tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS);
+               inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
-               tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
        }
 }
 
        }
 
        if (acked&FLAG_ACKED) {
-               tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt);
+               tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
                tcp_ack_packets_out(sk, tp);
 
                if (tp->ca_ops->pkts_acked)
 
 static void tcp_ack_probe(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
 
        /* Was a usable window opened? */
 
        if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
                   tp->snd_una + tp->snd_wnd)) {
-               tp->backoff = 0;
-               tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0);
+               icsk->icsk_backoff = 0;
+               inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
                /* Socket must be woken up by a subsequent tcp_data_snd_check().
                 * This function is not for random use!
                 */
        } else {
-               tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
-                                    min(tp->rto << tp->backoff, TCP_RTO_MAX));
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+                                         min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
        }
 }
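
The probe timer rearm is unchanged in substance: double the interval for
every unanswered probe, clamped at the RTO ceiling, only now the state
lives in the icsk. A sketch of that computation (constants illustrative;
the kernel bounds the backoff through sysctl_tcp_retries2):

#include <stdint.h>

#define RTO_MAX_MS 120000       /* illustrative clamp */

/* Exponential zero-window-probe backoff, as used by tcp_ack_probe()
 * and tcp_send_probe0(). */
uint32_t probe0_timeout(uint32_t rto, unsigned int backoff)
{
        uint64_t t;

        if (backoff > 16)       /* keep the shift well-defined */
                backoff = 16;
        t = (uint64_t)rto << backoff;
        return t > RTO_MAX_MS ? RTO_MAX_MS : (uint32_t)t;
}
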
 
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
-static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack,
-                                       u32 ack_seq, u32 nwin)
+static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
+                                       const u32 ack_seq, const u32 nwin)
 {
        return (after(ack, tp->snd_una) ||
                after(ack_seq, tp->snd_wl1) ||
  * up to bandwidth of 18Gigabit/sec. 8) ]
  */
 
-static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct tcphdr *th = skb->h.th;
        u32 seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
                !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
 
                /* 4. ... and sits in replay window. */
-               (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ);
+               (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
 }
 
-static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb)
+static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
 {
+       const struct tcp_sock *tp = tcp_sk(sk);
        return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
                xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
-               !tcp_disordered_ack(tp, skb));
+               !tcp_disordered_ack(sk, skb));
 }
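
Both tcp_disordered_ack() and tcp_paws_discard() lean on signed 32-bit
subtraction so the timestamp comparison survives wrap-around. The idiom,
isolated:

#include <stdint.h>

/* PAWS-style wrap-safe comparison: 'a' is newer than 'b' when the
 * signed difference is positive, which remains true across 32-bit
 * wrap as long as the two clocks are less than 2^31 apart. */
int ts_after(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}
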
 
 /* Check segment sequence number for validity.
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       tcp_schedule_ack(tp);
+       inet_csk_schedule_ack(sk);
 
        sk->sk_shutdown |= RCV_SHUTDOWN;
        sock_set_flag(sk, SOCK_DONE);
                case TCP_ESTABLISHED:
                        /* Move to CLOSE_WAIT */
                        tcp_set_state(sk, TCP_CLOSE_WAIT);
-                       tp->ack.pingpong = 1;
+                       inet_csk(sk)->icsk_ack.pingpong = 1;
                        break;
 
                case TCP_CLOSE_WAIT:
        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
            before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
                NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
-               tcp_enter_quickack_mode(tp);
+               tcp_enter_quickack_mode(sk);
 
                if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
                        u32 end_seq = TCP_SKB_CB(skb)->end_seq;
                         * gap in queue is filled.
                         */
                        if (skb_queue_empty(&tp->out_of_order_queue))
-                               tp->ack.pingpong = 0;
+                               inet_csk(sk)->icsk_ack.pingpong = 0;
                }
 
                if (tp->rx_opt.num_sacks)
                tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
-               tcp_enter_quickack_mode(tp);
-               tcp_schedule_ack(tp);
+               tcp_enter_quickack_mode(sk);
+               inet_csk_schedule_ack(sk);
 drop:
                __kfree_skb(skb);
                return;
        if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
                goto out_of_window;
 
-       tcp_enter_quickack_mode(tp);
+       tcp_enter_quickack_mode(sk);
 
        if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
                /* Partial packet, seq < rcv_next < end_seq */
 
        /* Disable header prediction. */
        tp->pred_flags = 0;
-       tcp_schedule_ack(tp);
+       inet_csk_schedule_ack(sk);
 
        SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
        struct tcp_sock *tp = tcp_sk(sk);
 
            /* More than one full frame received... */
-       if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss
+       if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
             /* ... and right edge of window advances far enough.
              * (tcp_recvmsg() will send ACK otherwise). Or...
              */
             && __tcp_select_window(sk) >= tp->rcv_wnd) ||
            /* We ACK each frame or... */
-           tcp_in_quickack_mode(tp) ||
+           tcp_in_quickack_mode(sk) ||
            /* We have out of order data. */
            (ofo_possible &&
             skb_peek(&tp->out_of_order_queue))) {
 
 static __inline__ void tcp_ack_snd_check(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-       if (!tcp_ack_scheduled(tp)) {
+       if (!inet_csk_ack_scheduled(sk)) {
                /* We sent a data segment already. */
                return;
        }
                                    tp->rcv_nxt == tp->rcv_wup)
                                        tcp_store_ts_recent(tp);
 
-                               tcp_rcv_rtt_measure_ts(tp, skb);
+                               tcp_rcv_rtt_measure_ts(sk, skb);
 
                                /* We know that such packets are checksummed
                                 * on entry.
                                            tp->rcv_nxt == tp->rcv_wup)
                                                tcp_store_ts_recent(tp);
 
-                                       tcp_rcv_rtt_measure_ts(tp, skb);
+                                       tcp_rcv_rtt_measure_ts(sk, skb);
 
                                        __skb_pull(skb, tcp_header_len);
                                        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                                    tp->rcv_nxt == tp->rcv_wup)
                                        tcp_store_ts_recent(tp);
 
-                               tcp_rcv_rtt_measure_ts(tp, skb);
+                               tcp_rcv_rtt_measure_ts(sk, skb);
 
                                if ((int)skb->truesize > sk->sk_forward_alloc)
                                        goto step5;
                                /* Well, only one small jumplet in fast path... */
                                tcp_ack(sk, skb, FLAG_DATA);
                                tcp_data_snd_check(sk, tp);
-                               if (!tcp_ack_scheduled(tp))
+                               if (!inet_csk_ack_scheduled(sk))
                                        goto no_ack;
                        }
 
         * RFC1323: H1. Apply PAWS check first.
         */
        if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-           tcp_paws_discard(tp, skb)) {
+           tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
                        NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
                        tcp_send_dupack(sk, skb);
        if(th->ack)
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
-       tcp_rcv_rtt_measure_ts(tp, skb);
+       tcp_rcv_rtt_measure_ts(sk, skb);
 
        /* Process urgent data. */
        tcp_urg(sk, skb, th);
                tcp_init_buffer_space(sk);
 
                if (sock_flag(sk, SOCK_KEEPOPEN))
-                       tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));
+                       inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
 
                if (!tp->rx_opt.snd_wscale)
                        __tcp_fast_path_on(tp, tp->snd_wnd);
                        sk_wake_async(sk, 0, POLL_OUT);
                }
 
-               if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) {
+               if (sk->sk_write_pending || tp->defer_accept || inet_csk(sk)->icsk_ack.pingpong) {
                        /* Save one ACK. Data will be ready after
                         * several ticks, if write_pending is set.
                         *
                         * look so _wonderfully_ clever that I was not able
                         * to stand against the temptation 8)     --ANK
                         */
-                       tcp_schedule_ack(tp);
-                       tp->ack.lrcvtime = tcp_time_stamp;
-                       tp->ack.ato      = TCP_ATO_MIN;
-                       tcp_incr_quickack(tp);
-                       tcp_enter_quickack_mode(tp);
-                       tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
+                       inet_csk_schedule_ack(sk);
+                       inet_csk(sk)->icsk_ack.lrcvtime = tcp_time_stamp;
+                       inet_csk(sk)->icsk_ack.ato       = TCP_ATO_MIN;
+                       tcp_incr_quickack(sk);
+                       tcp_enter_quickack_mode(sk);
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
 
 discard:
                        __kfree_skb(skb);
        }
 
        if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-           tcp_paws_discard(tp, skb)) {
+           tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
                        NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
                        tcp_send_dupack(sk, skb);
                                 */
                                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                                    !tp->srtt)
-                                       tcp_ack_saw_tstamp(tp, 0, 0);
+                                       tcp_ack_saw_tstamp(sk, 0, 0);
 
                                if (tp->rx_opt.tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
                                                return 1;
                                        }
 
-                                       tmo = tcp_fin_time(tp);
+                                       tmo = tcp_fin_time(sk);
                                        if (tmo > TCP_TIMEWAIT_LEN) {
-                                               tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+                                               inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
                                        } else if (th->fin || sock_owned_by_user(sk)) {
                                                /* Bad case. We could lose such a FIN otherwise.
                                                 * It is not a big problem, but it looks confusing
                                                 * if it spins in bh_lock_sock(), but it is really
                                                 * marginal case.
                                                 */
-                                               tcp_reset_keepalive_timer(sk, tmo);
+                                               inet_csk_reset_keepalive_timer(sk, tmo);
                                        } else {
                                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                                goto discard;
 
  */
 int sysctl_local_port_range[2] = { 1024, 4999 };
 
-static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
+static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
 {
        const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
        struct sock *sk2;
 
        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
-                   !tcp_v6_ipv6only(sk2) &&
+                   !inet_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 /* Obtain a reference to a local port for the given sock,
  * if snum is zero it means select any available local port.
  */
-static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
+int inet_csk_get_port(struct inet_hashinfo *hashinfo,
+                     struct sock *sk, unsigned short snum)
 {
        struct inet_bind_hashbucket *head;
        struct hlist_node *node;
                int remaining = (high - low) + 1;
                int rover;
 
-               spin_lock(&tcp_hashinfo.portalloc_lock);
-               if (tcp_hashinfo.port_rover < low)
+               spin_lock(&hashinfo->portalloc_lock);
+               if (hashinfo->port_rover < low)
                        rover = low;
                else
-                       rover = tcp_hashinfo.port_rover;
+                       rover = hashinfo->port_rover;
                do {
                        rover++;
                        if (rover > high)
                                rover = low;
-                       head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
+                       head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
                        spin_lock(&head->lock);
                        inet_bind_bucket_for_each(tb, node, &head->chain)
                                if (tb->port == rover)
                next:
                        spin_unlock(&head->lock);
                } while (--remaining > 0);
-               tcp_hashinfo.port_rover = rover;
-               spin_unlock(&tcp_hashinfo.portalloc_lock);
+               hashinfo->port_rover = rover;
+               spin_unlock(&hashinfo->portalloc_lock);
 
                /* Exhausted local port range during search?  It is not
                 * possible for us to be holding one of the bind hash
                 */
                snum = rover;
        } else {
-               head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
+               head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
                spin_lock(&head->lock);
                inet_bind_bucket_for_each(tb, node, &head->chain)
                        if (tb->port == snum)
                        goto success;
                } else {
                        ret = 1;
-                       if (tcp_bind_conflict(sk, tb))
+                       if (inet_csk_bind_conflict(sk, tb))
                                goto fail_unlock;
                }
        }
 tb_not_found:
        ret = 1;
-       if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
+       if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;
 success:
-       if (!inet_sk(sk)->bind_hash)
+       if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, snum);
-       BUG_TRAP(inet_sk(sk)->bind_hash == tb);
+       BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
        ret = 0;
 
 fail_unlock:
        return ret;
 }
 
+static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
+{
+       return inet_csk_get_port(&tcp_hashinfo, sk, snum);
+}
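
inet_csk_get_port() keeps the old rover logic but runs it against a
caller-supplied hash table, so any connection-oriented protocol can
reuse it. A simplified userspace model of the rover search, with the
bind-hash buckets collapsed into a plain "port in use" array (all names
hypothetical):

/* Round-robin local port selection in the spirit of
 * inet_csk_get_port(): resume from the previous rover position,
 * wrap at 'high', and give up after one full cycle. */
int pick_port(const unsigned char *in_use, int low, int high, int *rover)
{
        int remaining = high - low + 1;
        int port = *rover;

        do {
                if (++port > high)
                        port = low;
                if (!in_use[port]) {
                        *rover = port;
                        return port;
                }
        } while (--remaining > 0);
        return -1;              /* local port range exhausted */
}
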
+
 static void tcp_v4_hash(struct sock *sk)
 {
        inet_hash(&tcp_hashinfo, sk);
        }
 
        head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
-       tb  = inet_sk(sk)->bind_hash;
+       tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                __inet_hash(&tcp_hashinfo, sk, 0);
        return err;
 }
 
-static __inline__ int tcp_v4_iif(struct sk_buff *skb)
+static inline int inet_iif(const struct sk_buff *skb)
 {
        return ((struct rtable *)skb->dst)->rt_iif;
 }
 
-static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
+static inline u32 inet_synq_hash(const u32 raddr, const u16 rport,
+                                const u32 rnd, const u16 synq_hsize)
 {
-       return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
+       return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1);
 }
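
The listen-queue hash is now parameterised on the table size; masking
with (synq_hsize - 1) assumes nr_table_entries is a power of two, as the
fixed TCP_SYNQ_HSIZE it replaces was. A sketch of the bucket selection
with a stand-in mixer (jhash_2words() is the kernel's real one; mix32()
below is purely illustrative):

#include <stdint.h>

uint32_t mix32(uint32_t a, uint32_t b, uint32_t seed)
{
        uint32_t h = a ^ b ^ seed;

        h ^= h >> 16;
        h *= 0x45d9f3bU;
        h ^= h >> 16;
        return h;
}

/* Bucket selection as in inet_synq_hash(): mix peer address, peer
 * port and a per-listener random seed, then mask into a power-of-two
 * table. */
uint32_t synq_bucket(uint32_t raddr, uint16_t rport, uint32_t rnd,
                     uint16_t table_size)
{
        return mix32(raddr, rport, rnd) & (table_size - 1);
}
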
 
-static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
-                                             struct request_sock ***prevp,
-                                             __u16 rport,
-                                             __u32 raddr, __u32 laddr)
+struct request_sock *inet_csk_search_req(const struct sock *sk,
+                                        struct request_sock ***prevp,
+                                        const __u16 rport, const __u32 raddr,
+                                        const __u32 laddr)
 {
-       struct listen_sock *lopt = tp->accept_queue.listen_opt;
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        struct request_sock *req, **prev;
 
-       for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
+       for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
+                                                   lopt->nr_table_entries)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                if (ireq->rmt_port == rport &&
                    ireq->rmt_addr == raddr &&
                    ireq->loc_addr == laddr &&
-                   TCP_INET_FAMILY(req->rsk_ops->family)) {
+                   AF_INET_FAMILY(req->rsk_ops->family)) {
                        BUG_TRAP(!req->sk);
                        *prevp = prev;
                        break;
 
 static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct listen_sock *lopt = tp->accept_queue.listen_opt;
-       u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+       const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
+                                    lopt->hash_rnd, lopt->nr_table_entries);
 
-       reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
-       tcp_synq_added(sk);
+       reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
+       inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
 }
 
 
        }
 
        sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
-                        th->source, tcp_v4_iif(skb));
+                        th->source, inet_iif(skb));
        if (!sk) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
                if (sock_owned_by_user(sk))
                        goto out;
 
-               req = tcp_v4_search_req(tp, &prev, th->dest,
-                                       iph->daddr, iph->saddr);
+               req = inet_csk_search_req(sk, &prev, th->dest,
+                                         iph->daddr, iph->saddr);
                if (!req)
                        goto out;
 
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
-               tcp_synq_drop(sk, req, prev);
+               inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;
 
        case TCP_SYN_SENT:
                        req->ts_recent);
 }
 
-static struct dst_entry* tcp_v4_route_req(struct sock *sk,
-                                         struct request_sock *req)
+struct dst_entry* inet_csk_route_req(struct sock *sk,
+                                    const struct request_sock *req)
 {
        struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
                                                  ireq->rmt_addr),
                                        .saddr = ireq->loc_addr,
                                        .tos = RT_CONN_FLAGS(sk) } },
-                           .proto = IPPROTO_TCP,
+                           .proto = sk->sk_protocol,
                            .uli_u = { .ports =
                                       { .sport = inet_sk(sk)->sport,
                                         .dport = ireq->rmt_port } } };
        struct sk_buff * skb;
 
        /* First, grab a route. */
-       if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+       if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto out;
 
        skb = tcp_make_synack(sk, dst, req);
         * limitations, they conserve resources and the peer is
         * evidently a real one.
         */
-       if (tcp_synq_is_full(sk) && !isn) {
+       if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
 #ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies) {
                        want_cookie = 1;
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
        req = reqsk_alloc(&tcp_request_sock_ops);
                 */
                if (tmp_opt.saw_tstamp &&
                    sysctl_tcp_tw_recycle &&
-                   (dst = tcp_v4_route_req(sk, req)) != NULL &&
+                   (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
                    peer->v4daddr == saddr) {
                        if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
                }
                /* Kill the following clause if you dislike this approach. */
                else if (!sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - tcp_synq_len(sk) <
+                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         (!peer || !peer->tcp_ts_stamp) &&
                         (!dst || !dst_metric(dst, RTAX_RTT))) {
        if (sk_acceptq_is_full(sk))
                goto exit_overflow;
 
-       if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+       if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto exit;
 
        newsk = tcp_create_openreq_child(sk, req, skb);
        newinet->saddr        = ireq->loc_addr;
        newinet->opt          = ireq->opt;
        ireq->opt             = NULL;
-       newinet->mc_index     = tcp_v4_iif(skb);
+       newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = skb->nh.iph->ttl;
        newtp->ext_header_len = 0;
        if (newinet->opt)
 {
        struct tcphdr *th = skb->h.th;
        struct iphdr *iph = skb->nh.iph;
-       struct tcp_sock *tp = tcp_sk(sk);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
-       struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source,
-                                                    iph->saddr, iph->daddr);
+       struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
+                                                      iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev);
 
        nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
                                        th->source, skb->nh.iph->daddr,
-                                       ntohs(th->dest), tcp_v4_iif(skb));
+                                       ntohs(th->dest), inet_iif(skb));
 
        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
 
        sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
                           skb->nh.iph->daddr, ntohs(th->dest),
-                          tcp_v4_iif(skb));
+                          inet_iif(skb));
 
        if (!sk)
                goto no_tcp_socket;
                struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
                                                        skb->nh.iph->daddr,
                                                        ntohs(th->dest),
-                                                       tcp_v4_iif(skb));
+                                                       inet_iif(skb));
                if (sk2) {
                        tcp_tw_deschedule((struct inet_timewait_sock *)sk);
                        inet_twsk_put((struct inet_timewait_sock *)sk);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
 
-       tp->rto  = TCP_TIMEOUT_INIT;
+       inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;
 
        /* So many TCP implementations out there (incorrectly) count the
        __skb_queue_purge(&tp->ucopy.prequeue);
 
        /* Clean up a referenced TCP bind bucket. */
-       if (inet_sk(sk)->bind_hash)
+       if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(&tcp_hashinfo, sk);
 
        /*
 
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
-       struct tcp_sock *tp;
+       struct inet_connection_sock *icsk;
        struct hlist_node *node;
        struct sock *sk = cur;
        struct tcp_iter_state* st = seq->private;
        if (st->state == TCP_SEQ_STATE_OPENREQ) {
                struct request_sock *req = cur;
 
-               tp = tcp_sk(st->syn_wait_sk);
+               icsk = inet_csk(st->syn_wait_sk);
                req = req->dl_next;
                while (1) {
                        while (req) {
                        if (++st->sbucket >= TCP_SYNQ_HSIZE)
                                break;
 get_req:
-                       req = tp->accept_queue.listen_opt->syn_table[st->sbucket];
+                       req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
                }
                sk        = sk_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
-               tp = tcp_sk(sk);
-               read_lock_bh(&tp->accept_queue.syn_wait_lock);
-               if (reqsk_queue_len(&tp->accept_queue))
+               icsk = inet_csk(sk);
+               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
-               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_next(sk);
        }
 get_sk:
                        cur = sk;
                        goto out;
                }
-               tp = tcp_sk(sk);
-               read_lock_bh(&tp->accept_queue.syn_wait_lock);
-               if (reqsk_queue_len(&tp->accept_queue)) {
+               icsk = inet_csk(sk);
+               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
                        st->syn_wait_sk = sk;
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        if (++st->bucket < INET_LHTABLE_SIZE) {
                sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
-                       struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
-                       read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+                       struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
+                       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
        int timer_active;
        unsigned long timer_expires;
        struct tcp_sock *tp = tcp_sk(sp);
+       const struct inet_connection_sock *icsk = inet_csk(sp);
        struct inet_sock *inet = inet_sk(sp);
        unsigned int dest = inet->daddr;
        unsigned int src = inet->rcv_saddr;
        __u16 destp = ntohs(inet->dport);
        __u16 srcp = ntohs(inet->sport);
 
-       if (tp->pending == TCP_TIME_RETRANS) {
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
-               timer_expires   = tp->timeout;
-       } else if (tp->pending == TCP_TIME_PROBE0) {
+               timer_expires   = icsk->icsk_timeout;
+       } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
-               timer_expires   = tp->timeout;
+               timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
                tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
-               tp->retransmits,
+               icsk->icsk_retransmits,
                sock_i_uid(sp),
                tp->probes_out,
                sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
-               tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
+               icsk->icsk_rto,
+               icsk->icsk_ack.ato,
+               (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
                tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
 }
        .close                  = tcp_close,
        .connect                = tcp_v4_connect,
        .disconnect             = tcp_disconnect,
-       .accept                 = tcp_accept,
+       .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v4_init_sock,
        .destroy                = tcp_v4_destroy_sock,
 
 
        if (tw != NULL) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-               const int rto = (tp->rto << 2) - (tp->rto >> 1);
+               const struct inet_connection_sock *icsk = inet_csk(sk);
+               const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt       = tp->rcv_nxt;
                struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_sock *newinet = inet_sk(newsk);
+               struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct tcp_sock *newtp;
 
                newsk->sk_state = TCP_SYN_RECV;
-               newinet->bind_hash = NULL;
+               newicsk->icsk_bind_hash = NULL;
 
                /* Clone the TCP header template */
                newinet->dport = ireq->rmt_port;
 
                tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
 
-               newtp->retransmits = 0;
-               newtp->backoff = 0;
+               newicsk->icsk_retransmits = 0;
+               newicsk->icsk_backoff = 0;
                newtp->srtt = 0;
                newtp->mdev = TCP_TIMEOUT_INIT;
-               newtp->rto = TCP_TIMEOUT_INIT;
+               newicsk->icsk_rto = TCP_TIMEOUT_INIT;
 
                newtp->packets_out = 0;
                newtp->left_out = 0;
                newtp->rx_opt.num_sacks = 0;
                newtp->urg_data = 0;
                /* Deinitialize accept_queue to trap illegal accesses. */
-               memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
+               memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
 
                if (sock_flag(newsk, SOCK_KEEPOPEN))
-                       tcp_reset_keepalive_timer(newsk,
-                                                 keepalive_time_when(newtp));
+                       inet_csk_reset_keepalive_timer(newsk,
+                                                      keepalive_time_when(newtp));
 
                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
                if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
                if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
-                       newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
+                       newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
                if (newtp->ecn_flags&TCP_ECN_OK)
                if (child == NULL)
                        goto listen_overflow;
 
-               tcp_synq_unlink(tp, req, prev);
-               tcp_synq_removed(sk, req);
+               inet_csk_reqsk_queue_unlink(sk, req, prev);
+               inet_csk_reqsk_queue_removed(sk, req);
 
-               tcp_acceptq_queue(sk, req, child);
+               inet_csk_reqsk_queue_add(sk, req, child);
                return child;
 
        listen_overflow:
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_reset(skb);
 
-               tcp_synq_drop(sk, req, prev);
+               inet_csk_reqsk_queue_drop(sk, req, prev);
                return NULL;
 }
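
Both inet_csk_search_req() and the unlink above work with a
pointer-to-pointer into the singly linked syn table, so a hit can be
removed without rescanning the chain. The idiom, reduced to a toy list
(types hypothetical):

struct req { struct req *next; int key; };

/* Search returning the 'prev' slot -- a pointer to the pointer that
 * points at the match -- as inet_csk_search_req() does. */
struct req *req_search(struct req **head, int key, struct req ***prevp)
{
        struct req **prev, *r;

        for (prev = head; (r = *prev) != NULL; prev = &r->next)
                if (r->key == key) {
                        *prevp = prev;
                        return r;
                }
        return NULL;
}

/* Unlinking the hit is then a single store through the saved slot. */
void req_unlink(struct req **prev, struct req *r)
{
        *prev = r->next;
}
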
 
 
 
 /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
  * This is the first part of the cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        s32 delta = tcp_time_stamp - tp->lsndtime;
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;
        tp->snd_ssthresh = tcp_current_ssthresh(tp);
        restart_cwnd = min(restart_cwnd, cwnd);
 
-       while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd)
+       while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        tp->snd_cwnd = max(cwnd, restart_cwnd);
        tp->snd_cwnd_stamp = tcp_time_stamp;
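
tcp_cwnd_restart() halves the congestion window once per RTO of idle
time and never drops below the restart window. The decay loop in
isolation (the caller guarantees restart_cwnd <= cwnd via min(), as the
hunk above shows; the signed/unsigned mix mirrors the kernel's s32
delta):

#include <stdint.h>

/* RFC 2861-style window decay: one halving per full RTO spent idle,
 * floored at the restart window. */
uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
                         int32_t idle, int32_t rto)
{
        while ((idle -= rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}
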
 static inline void tcp_event_data_sent(struct tcp_sock *tp,
                                       struct sk_buff *skb, struct sock *sk)
 {
-       u32 now = tcp_time_stamp;
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       const u32 now = tcp_time_stamp;
 
-       if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto)
-               tcp_cwnd_restart(tp, __sk_dst_get(sk));
+       if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
+               tcp_cwnd_restart(sk, __sk_dst_get(sk));
 
        tp->lsndtime = now;
 
        /* If it is a reply for ato after last received
         * packet, enter pingpong mode.
         */
-       if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato)
-               tp->ack.pingpong = 1;
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               icsk->icsk_ack.pingpong = 1;
 }
 
 static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       tcp_dec_quickack_mode(tp, pkts);
-       tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
+       tcp_dec_quickack_mode(sk, pkts);
+       inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
 /* Determine a window scaling and initial window to offer.
                if (tp->packets_out > tp->snd_cwnd_used)
                        tp->snd_cwnd_used = tp->packets_out;
 
-               if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
+               if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
                        tcp_cwnd_application_limited(sk);
        }
 }
  */
 u32 __tcp_select_window(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        /* MSS for the peer's data.  Previous versions used mss_clamp
         * here.  I don't know if the value based on our guesses
         * but may be worse for the performance because of rcv_mss
         * fluctuations.  --SAW  1998/11/1
         */
-       int mss = tp->ack.rcv_mss;
+       int mss = icsk->icsk_ack.rcv_mss;
        int free_space = tcp_space(sk);
        int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
        int window;
                mss = full_space; 
 
        if (free_space < full_space/2) {
-               tp->ack.quick = 0;
+               icsk->icsk_ack.quick = 0;
 
                if (tcp_memory_pressure)
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
 
                                        if (skb ==
                                            skb_peek(&sk->sk_write_queue))
-                                               tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+                                               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                                                         inet_csk(sk)->icsk_rto);
                                }
 
                                packet_cnt -= tcp_skb_pcount(skb);
                        break;
 
                if (skb == skb_peek(&sk->sk_write_queue))
-                       tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 
                NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
        }
        tp->rcv_wup = 0;
        tp->copied_seq = 0;
 
-       tp->rto = TCP_TIMEOUT_INIT;
-       tp->retransmits = 0;
+       inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+       inet_csk(sk)->icsk_retransmits = 0;
        tcp_clear_retrans(tp);
 }
 
        TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
        /* Timer for repeating the SYN until an answer. */
-       tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
        return 0;
 }
 
  */
 void tcp_send_delayed_ack(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-       int ato = tp->ack.ato;
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       int ato = icsk->icsk_ack.ato;
        unsigned long timeout;
 
        if (ato > TCP_DELACK_MIN) {
+               const struct tcp_sock *tp = tcp_sk(sk);
                int max_ato = HZ/2;
 
-               if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED))
+               if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
                        max_ato = TCP_DELACK_MAX;
 
                /* Slow path, intersegment interval is "high". */
 
                /* If some rtt estimate is known, use it to bound delayed ack.
-                * Do not use tp->rto here, use results of rtt measurements
+                * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
                 * directly.
                 */
                if (tp->srtt) {
        timeout = jiffies + ato;
 
        /* Use the new timeout only if there wasn't an older one earlier. */
-       if (tp->ack.pending&TCP_ACK_TIMER) {
+       if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If delack timer was blocked or is about to expire,
                 * send ACK now.
                 */
-               if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) {
+               if (icsk->icsk_ack.blocked ||
+                   time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
                        tcp_send_ack(sk);
                        return;
                }
 
-               if (!time_before(timeout, tp->ack.timeout))
-                       timeout = tp->ack.timeout;
+               if (!time_before(timeout, icsk->icsk_ack.timeout))
+                       timeout = icsk->icsk_ack.timeout;
        }
-       tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER;
-       tp->ack.timeout = timeout;
-       sk_reset_timer(sk, &tp->delack_timer, timeout);
+       icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+       icsk->icsk_ack.timeout = timeout;
+       sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
 }
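
The rearm logic keeps the earlier of the old and new deadlines, and
sends immediately when the old timer was blocked or sits within ato/4 of
expiry. A sketch of that decision, with the jiffies comparisons spelled
out as signed deltas (time_before(a, b) is (int32_t)(a - b) < 0):

#include <stdint.h>

/* Deadline coalescing as in tcp_send_delayed_ack(). Returns 1 when
 * the ACK should go out right away; otherwise stores the timeout to
 * (re)arm the delack timer with. */
int delack_rearm(uint32_t now, uint32_t ato, int blocked,
                 int timer_armed, uint32_t old_deadline,
                 uint32_t *deadline)
{
        uint32_t t = now + ato;

        if (timer_armed) {
                /* blocked or about to expire: ACK now */
                if (blocked ||
                    (int32_t)(old_deadline - (now + (ato >> 2))) <= 0)
                        return 1;
                /* never push an armed deadline later */
                if ((int32_t)(t - old_deadline) >= 0)
                        t = old_deadline;
        }
        *deadline = t;
        return 0;
}
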
 
 /* This routine sends an ack and also updates the window. */
                 */
                buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
                if (buff == NULL) {
-                       tcp_schedule_ack(tp);
-                       tp->ack.ato = TCP_ATO_MIN;
-                       tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
+                       inet_csk_schedule_ack(sk);
+                       inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
                        return;
                }
 
  */
 void tcp_send_probe0(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int err;
 
        if (tp->packets_out || !sk->sk_send_head) {
                /* Cancel probe timer, if it is not required. */
                tp->probes_out = 0;
-               tp->backoff = 0;
+               icsk->icsk_backoff = 0;
                return;
        }
 
        if (err <= 0) {
-               if (tp->backoff < sysctl_tcp_retries2)
-                       tp->backoff++;
+               if (icsk->icsk_backoff < sysctl_tcp_retries2)
+                       icsk->icsk_backoff++;
                tp->probes_out++;
-               tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 
-                                     min(tp->rto << tp->backoff, TCP_RTO_MAX));
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 
+                                         min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
        } else {
                /* If packet was not sent due to local congestion,
                 * do not backoff and do not remember probes_out.
                 */
                if (!tp->probes_out)
                        tp->probes_out=1;
-               tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 
-                                     min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL));
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 
+                                         min(icsk->icsk_rto << icsk->icsk_backoff,
+                                             TCP_RESOURCE_PROBE_INTERVAL));
        }
 }
 
 
 static void tcp_delack_timer(unsigned long);
 static void tcp_keepalive_timer (unsigned long data);
 
-#ifdef TCP_DEBUG
-const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
-EXPORT_SYMBOL(tcp_timer_bug_msg);
+#ifdef INET_CSK_DEBUG
+const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
+EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
 /*
  * We may wish to use just one timer, maintaining a list of expiry jiffies,
  * to optimize.
  */
-
-void tcp_init_xmit_timers(struct sock *sk)
+void inet_csk_init_xmit_timers(struct sock *sk,
+                              void (*retransmit_handler)(unsigned long),
+                              void (*delack_handler)(unsigned long),
+                              void (*keepalive_handler)(unsigned long))
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
 
-       init_timer(&tp->retransmit_timer);
-       tp->retransmit_timer.function=&tcp_write_timer;
-       tp->retransmit_timer.data = (unsigned long) sk;
-       tp->pending = 0;
+       init_timer(&icsk->icsk_retransmit_timer);
+       init_timer(&icsk->icsk_delack_timer);
+       init_timer(&sk->sk_timer);
 
-       init_timer(&tp->delack_timer);
-       tp->delack_timer.function=&tcp_delack_timer;
-       tp->delack_timer.data = (unsigned long) sk;
-       tp->ack.pending = 0;
+       icsk->icsk_retransmit_timer.function = retransmit_handler;
+       icsk->icsk_delack_timer.function     = delack_handler;
+       sk->sk_timer.function                = keepalive_handler;
 
-       init_timer(&sk->sk_timer);
-       sk->sk_timer.function   = &tcp_keepalive_timer;
-       sk->sk_timer.data       = (unsigned long)sk;
+       icsk->icsk_retransmit_timer.data = 
+               icsk->icsk_delack_timer.data =
+                       sk->sk_timer.data  = (unsigned long)sk;
+
+       icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 }
 
-void tcp_clear_xmit_timers(struct sock *sk)
+void inet_csk_clear_xmit_timers(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
 
-       tp->pending = 0;
-       sk_stop_timer(sk, &tp->retransmit_timer);
-
-       tp->ack.pending = 0;
-       tp->ack.blocked = 0;
-       sk_stop_timer(sk, &tp->delack_timer);
+       icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
 
+       sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+       sk_stop_timer(sk, &icsk->icsk_delack_timer);
        sk_stop_timer(sk, &sk->sk_timer);
 }
 
+void tcp_init_xmit_timers(struct sock *sk)
+{
+       inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
+                                 &tcp_keepalive_timer);
+}
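
The timer setup is now generic: the icsk layer owns the three timers and
each protocol injects its handlers, as the tcp_init_xmit_timers()
wrapper above does. A compact userspace model of the same dependency
injection (struct names hypothetical):

struct timer { void (*fn)(unsigned long); unsigned long data; };

struct conn_timers { struct timer retransmit, delack, keepalive; };

/* Handler injection in the style of inet_csk_init_xmit_timers():
 * the generic layer wires the callbacks and points every timer's
 * cookie at the owning socket. */
void conn_init_timers(struct conn_timers *t, unsigned long owner,
                      void (*retransmit)(unsigned long),
                      void (*delack)(unsigned long),
                      void (*keepalive)(unsigned long))
{
        t->retransmit.fn = retransmit;
        t->delack.fn     = delack;
        t->keepalive.fn  = keepalive;
        t->retransmit.data = t->delack.data = t->keepalive.data = owner;
}
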
+
 static void tcp_write_err(struct sock *sk)
 {
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
 /* A write timeout has occurred. Process the after effects. */
 static int tcp_write_timeout(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-               if (tp->retransmits)
+               if (icsk->icsk_retransmits)
                        dst_negative_advice(&sk->sk_dst_cache);
-               retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries;
+               retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
        } else {
-               if (tp->retransmits >= sysctl_tcp_retries1) {
+               if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
                        /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black
                           hole detection. :-(
 
 
                retry_until = sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
-                       int alive = (tp->rto < TCP_RTO_MAX);
+                       const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
  
                        retry_until = tcp_orphan_retries(sk, alive);
 
-                       if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until))
+                       if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
                                return 1;
                }
        }
 
-       if (tp->retransmits >= retry_until) {
+       if (icsk->icsk_retransmits >= retry_until) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
 {
        struct sock *sk = (struct sock*)data;
        struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
-               tp->ack.blocked = 1;
+               icsk->icsk_ack.blocked = 1;
                NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
-               sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
+               sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
                goto out_unlock;
        }
 
        sk_stream_mem_reclaim(sk);
 
-       if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
+       if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;
 
-       if (time_after(tp->ack.timeout, jiffies)) {
-               sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
+       if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+               sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
                goto out;
        }
-       tp->ack.pending &= ~TCP_ACK_TIMER;
+       icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
 
        if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                struct sk_buff *skb;
                tp->ucopy.memory = 0;
        }
 
-       if (tcp_ack_scheduled(tp)) {
-               if (!tp->ack.pingpong) {
+       if (inet_csk_ack_scheduled(sk)) {
+               if (!icsk->icsk_ack.pingpong) {
                        /* Delayed ACK missed: inflate ATO. */
-                       tp->ack.ato = min(tp->ack.ato << 1, tp->rto);
+                       icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
-                       tp->ack.pingpong = 0;
-                       tp->ack.ato = TCP_ATO_MIN;
+                       icsk->icsk_ack.pingpong = 0;
+                       icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
                tcp_send_ack(sk);
                NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
        max_probes = sysctl_tcp_retries2;
 
        if (sock_flag(sk, SOCK_DEAD)) {
-               int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX);
+               const struct inet_connection_sock *icsk = inet_csk(sk);
+               const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
  
                max_probes = tcp_orphan_retries(sk, alive);
 
 static void tcp_retransmit_timer(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (!tp->packets_out)
                goto out;
        if (tcp_write_timeout(sk))
                goto out;
 
-       if (tp->retransmits == 0) {
+       if (icsk->icsk_retransmits == 0) {
                if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
                        if (tp->rx_opt.sack_ok) {
                                if (tp->ca_state == TCP_CA_Recovery)
                /* Retransmission failed because of local congestion,
                 * do not backoff.
                 */
-               if (!tp->retransmits)
-                       tp->retransmits=1;
-               tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS,
-                                    min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
+               if (!icsk->icsk_retransmits)
+                       icsk->icsk_retransmits = 1;
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                         min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL));
                goto out;
        }
 
         * implemented ftp to mars will work nicely. We will have to fix
         * the 120 second clamps though!
         */
-       tp->backoff++;
-       tp->retransmits++;
+       icsk->icsk_backoff++;
+       icsk->icsk_retransmits++;
 
 out_reset_timer:
-       tp->rto = min(tp->rto << 1, TCP_RTO_MAX);
-       tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
-       if (tp->retransmits > sysctl_tcp_retries1)
+       icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto);
+       if (icsk->icsk_retransmits > sysctl_tcp_retries1)
                __sk_dst_reset(sk);
 
 out:;
 static void tcp_write_timer(unsigned long data)
 {
        struct sock *sk = (struct sock*)data;
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        int event;
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later */
-               sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20));
+               sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
                goto out_unlock;
        }
 
-       if (sk->sk_state == TCP_CLOSE || !tp->pending)
+       if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
                goto out;
 
-       if (time_after(tp->timeout, jiffies)) {
-               sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
+       if (time_after(icsk->icsk_timeout, jiffies)) {
+               sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                goto out;
        }
 
-       event = tp->pending;
-       tp->pending = 0;
+       event = icsk->icsk_pending;
+       icsk->icsk_pending = 0;
 
        switch (event) {
-       case TCP_TIME_RETRANS:
+       case ICSK_TIME_RETRANS:
                tcp_retransmit_timer(sk);
                break;
-       case TCP_TIME_PROBE0:
+       case ICSK_TIME_PROBE0:
                tcp_probe_timer(sk);
                break;
        }
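The event codes demultiplexed here are straight renames of the old TCP_TIME_* values. A sketch of the expected definitions, assuming the numeric values carry over unchanged:

	/* Assumed ICSK_TIME_* codes (include/net/inet_connection_sock.h),
	 * matching the old TCP_TIME_* values one for one. Only RETRANS and
	 * PROBE0 share this timer; delayed ACKs keep their own. */
	#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
	#define ICSK_TIME_DACK		2	/* Delayed ack timer */
	#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */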
 static void tcp_synack_timer(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct listen_sock *lopt = tp->accept_queue.listen_opt;
-       int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+       int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
        int thresh = max_retries;
        unsigned long now = jiffies;
        struct request_sock **reqp, *req;
                                }
 
                                /* Drop this request */
-                               tcp_synq_unlink(tp, req, reqp);
-                               reqsk_queue_removed(&tp->accept_queue, req);
+                               inet_csk_reqsk_queue_unlink(sk, req, reqp);
+                               reqsk_queue_removed(&icsk->icsk_accept_queue, req);
                                reqsk_free(req);
                                continue;
                        }
        lopt->clock_hand = i;
 
        if (lopt->qlen)
-               tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
+               inet_csk_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
 }
 
-void tcp_delete_keepalive_timer (struct sock *sk)
+void inet_csk_delete_keepalive_timer(struct sock *sk)
 {
        sk_stop_timer(sk, &sk->sk_timer);
 }
 
-void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 {
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
 }
                return;
 
        if (val && !sock_flag(sk, SOCK_KEEPOPEN))
-               tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
+               inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
        else if (!val)
-               tcp_delete_keepalive_timer(sk);
+               inet_csk_delete_keepalive_timer(sk);
 }
 
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */ 
-               tcp_reset_keepalive_timer (sk, HZ/20);
+               inet_csk_reset_keepalive_timer(sk, HZ/20);
                goto out;
        }
 
 
        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
-                       int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;
+                       const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
 
                        if (tmo > 0) {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
        sk_stream_mem_reclaim(sk);
 
 resched:
-       tcp_reset_keepalive_timer (sk, elapsed);
+       inet_csk_reset_keepalive_timer(sk, elapsed);
        goto out;
 
 death: 
        sock_put(sk);
 }
 
-EXPORT_SYMBOL(tcp_clear_xmit_timers);
-EXPORT_SYMBOL(tcp_delete_keepalive_timer);
+EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
+EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
 EXPORT_SYMBOL(tcp_init_xmit_timers);
-EXPORT_SYMBOL(tcp_reset_keepalive_timer);
+EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 
        u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
        u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
        int sk_ipv6only = ipv6_only_sock(sk);
-       int sk2_ipv6only = tcp_v6_ipv6only(sk2);
+       int sk2_ipv6only = inet_v6_ipv6only(sk2);
        int addr_type = ipv6_addr_type(sk_rcv_saddr6);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 
 
                tb->fastreuse = 0;
 
 success:
-       if (!inet_sk(sk)->bind_hash)
+       if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, snum);
-       BUG_TRAP(inet_sk(sk)->bind_hash == tb);
+       BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
        ret = 0;
 
 fail_unlock:
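The bind-bucket back-pointer makes the same move as the timer state: it leaves inet_sock and becomes icsk_bind_hash in inet_connection_sock, so inet_bind_hash() presumably stores the bucket there now. A sketch under that assumption:

	/* Sketch of inet_bind_hash() after the field move: record the port,
	 * link the socket onto the bucket's owner list, and remember the
	 * bucket in the connection sock. */
	void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			    const unsigned short snum)
	{
		inet_sk(sk)->num = snum;
		sk_add_bind_node(sk, &tb->owners);
		inet_csk(sk)->icsk_bind_hash = tb;
	}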
  * Open request hash tables.
  */
 
-static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
+static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
 {
        u32 a, b, c;
 
        return c & (TCP_SYNQ_HSIZE - 1);
 }
 
-static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp,
+static struct request_sock *tcp_v6_search_req(const struct sock *sk,
                                              struct request_sock ***prevp,
                                              __u16 rport,
                                              struct in6_addr *raddr,
                                              struct in6_addr *laddr,
                                              int iif)
 {
-       struct listen_sock *lopt = tp->accept_queue.listen_opt;
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        struct request_sock *req, **prev;  
 
        for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
        }
 
        head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
-       tb   = inet_sk(sk)->bind_hash;
+       tb   = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
 
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                if (sock_owned_by_user(sk))
                        goto out;
 
-               req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
+               req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
                                        &hdr->saddr, tcp_v6_iif(skb));
                if (!req)
                        goto out;
                        goto out;
                }
 
-               tcp_synq_drop(sk, req, prev);
+               inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;
 
        case TCP_SYN_SENT:
 {
        struct request_sock *req, **prev;
        struct tcphdr *th = skb->h.th;
-       struct tcp_sock *tp = tcp_sk(sk);
        struct sock *nsk;
 
        /* Find possible connection requests. */
-       req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
+       req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
                                &skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
        if (req)
                return tcp_check_req(sk, skb, req, prev);
 
 static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct listen_sock *lopt = tp->accept_queue.listen_opt;
-       u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+       const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
 
-       reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
-       tcp_synq_added(sk);
+       reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
+       inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
 }
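tcp_synq_added() gives way to inet_csk_reqsk_queue_added(), which now takes the timeout so the generic layer can arm the SYN-ACK timer itself when the queue goes non-empty. Presumably along these lines (sketch, not quoted from the patch):

	/* Sketch: bump the listen-queue counters; if the queue was empty,
	 * start the SYN-ACK timer (run off sk_timer in this patch). */
	static inline void inet_csk_reqsk_queue_added(struct sock *sk,
						      const unsigned long timeout)
	{
		if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
			inet_csk_reset_keepalive_timer(sk, timeout);
	}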
 
 
        /*
         *      There are no SYN attacks on IPv6, yet...        
         */
-       if (tcp_synq_is_full(sk) && !isn) {
+       if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                if (net_ratelimit())
                        printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
                goto drop;              
        }
 
-       if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
        req = reqsk_alloc(&tcp6_request_sock_ops);
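Both admission checks above are expected to be thin wrappers over the request_sock queue, mirroring the old tcp_synq_is_full()/tcp_synq_young() one-liners. Sketches under that assumption:

	/* Sketch: is the SYN backlog full? (qlen crossed the max_qlen_log bound) */
	static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
	{
		return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
	}

	/* Sketch: count of "young" requests, i.e. not yet retransmitted. */
	static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
	{
		return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
	}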
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
 
-       tp->rto  = TCP_TIMEOUT_INIT;
+       inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;
 
        /* So many TCP implementations out there (incorrectly) count the
        unsigned long timer_expires;
        struct inet_sock *inet = inet_sk(sp);
        struct tcp_sock *tp = tcp_sk(sp);
+       const struct inet_connection_sock *icsk = inet_csk(sp);
        struct ipv6_pinfo *np = inet6_sk(sp);
 
        dest  = &np->daddr;
        src   = &np->rcv_saddr;
        destp = ntohs(inet->dport);
        srcp  = ntohs(inet->sport);
-       if (tp->pending == TCP_TIME_RETRANS) {
+
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
-               timer_expires   = tp->timeout;
-       } else if (tp->pending == TCP_TIME_PROBE0) {
+               timer_expires   = icsk->icsk_timeout;
+       } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
-               timer_expires   = tp->timeout;
+               timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
                   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
                   timer_active,
                   jiffies_to_clock_t(timer_expires - jiffies),
-                  tp->retransmits,
+                  icsk->icsk_retransmits,
                   sock_i_uid(sp),
                   tp->probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
-                  tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
+                  icsk->icsk_rto,
+                  icsk->icsk_ack.ato,
+                  (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
                   );
 }
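For readers decoding the /proc/net/tcp6 lines this function emits, the timer_active codes assigned above translate as follows (the encoding predates this patch; only the field names changed):

	/* timer column in /proc/net/tcp6:
	 *   1 - retransmit timer pending  (icsk_pending == ICSK_TIME_RETRANS)
	 *   4 - zero window probe pending (icsk_pending == ICSK_TIME_PROBE0)
	 *   2 - keepalive/sk_timer pending
	 *   0 - no timer armed (assumed default in the elided else branch)
	 */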
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
-       .accept                 = tcp_accept,
+       .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
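The accept hook follows the same pattern: the TCP-only tcp_accept() is replaced by a protocol-agnostic helper that operates purely on the connection sock. Its assumed prototype (body not shown in this patch):

	/* Assumed prototype of the generic accept replacing tcp_accept(). */
	struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);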