diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 56a133c614520107e197028e5b68d7a2f3ad1a87..0b491bf03db48e275e0b4ad2978d80e9ffd92d57 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
-DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
-
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -316,10 +314,10 @@ int tcp_memory_pressure __read_mostly;
 
 EXPORT_SYMBOL(tcp_memory_pressure);
 
-void tcp_enter_memory_pressure(void)
+void tcp_enter_memory_pressure(struct sock *sk)
 {
        if (!tcp_memory_pressure) {
-               NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
                tcp_memory_pressure = 1;
        }
 }
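
Every hunk in this patch follows the same pattern: the SNMP bump macros become namespace-aware, so each call site now passes the owning namespace via sock_net(sk), and the file-scope global tcp_statistics (its definition is removed above, and its export at the end of the file) gives way to per-namespace counters. A rough sketch of what the converted macro is assumed to expand to, given that the mib now hangs off struct net; the literal definition lives in include/net/tcp.h, not in this file:

	/* Assumed post-conversion shape (sketch, not the literal header): */
	#define TCP_INC_STATS(net, field) \
		SNMP_INC_STATS((net)->mib.tcp_statistics, field)

	/* ...so callers name the namespace explicitly: */
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);

The NET_INC_STATS / NET_ADD_STATS variants used in the hunks below gain the same leading net argument.
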
@@ -344,8 +342,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                return inet_csk_listen_poll(sk);
 
        /* Socket is not locked. We are protected from async events
-          by poll logic and correct handling of state changes
-          made by another threads is impossible in any case.
+        * by poll logic and correct handling of state changes
+        * made by other threads is impossible in any case.
         */
 
        mask = 0;
@@ -371,10 +369,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
         * if and only if shutdown has been made in both directions.
         * Actually, it is interesting to look how Solaris and DUX
-        * solve this dilemma. I would prefer, if PULLHUP were maskable,
+        * solve this dilemma. I would prefer, if POLLHUP were maskable,
         * then we could set it on SND_SHUTDOWN. BTW examples given
         * in Stevens' books assume exactly this behaviour, it explains
-        * why PULLHUP is incompatible with POLLOUT.    --ANK
+        * why POLLHUP is incompatible with POLLOUT.    --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on fresh not-connected or disconnected socket. --ANK
@@ -649,7 +647,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
                }
                __kfree_skb(skb);
        } else {
-               sk->sk_prot->enter_memory_pressure();
+               sk->sk_prot->enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return NULL;
@@ -1153,7 +1151,7 @@ static void tcp_prequeue_process(struct sock *sk)
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);
 
-       NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
+       NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
        /* RX process wants to run with disabled BHs, though it is not
         * necessary */
@@ -1475,7 +1473,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        /* __ Restore normal policy in scheduler __ */
 
                        if ((chunk = len - tp->ucopy.len) != 0) {
-                               NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+                               NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
                                len -= chunk;
                                copied += chunk;
                        }
@@ -1486,7 +1484,7 @@ do_prequeue:
                                tcp_prequeue_process(sk);
 
                                if ((chunk = len - tp->ucopy.len) != 0) {
-                                       NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+                                       NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                        len -= chunk;
                                        copied += chunk;
                                }
@@ -1601,7 +1599,7 @@ skip_copy:
                        tcp_prequeue_process(sk);
 
                        if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-                               NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+                               NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                len -= chunk;
                                copied += chunk;
                        }
@@ -1668,12 +1666,12 @@ void tcp_set_state(struct sock *sk, int state)
        switch (state) {
        case TCP_ESTABLISHED:
                if (oldstate != TCP_ESTABLISHED)
-                       TCP_INC_STATS(TCP_MIB_CURRESTAB);
+                       TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
                break;
 
        case TCP_CLOSE:
                if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
-                       TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+                       TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
 
                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash &&
@@ -1682,7 +1680,7 @@ void tcp_set_state(struct sock *sk, int state)
                /* fall through */
        default:
                if (oldstate==TCP_ESTABLISHED)
-                       TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+                       TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
        }
 
        /* Change state AFTER socket is unhashed to avoid closed
@@ -1793,13 +1791,13 @@ void tcp_close(struct sock *sk, long timeout)
         */
        if (data_was_unread) {
                /* Unread data was tossed, zap the connection. */
-               NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+               NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
                tcp_send_active_reset(sk, GFP_KERNEL);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
-               NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+               NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
        } else if (tcp_close_state(sk)) {
                /* We FIN if the application ate all the data before
                 * zapping the connection.
@@ -1871,7 +1869,8 @@ adjudge_to_death:
                if (tp->linger2 < 0) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPABORTONLINGER);
                } else {
                        const int tmo = tcp_fin_time(sk);
 
@@ -1893,7 +1892,8 @@ adjudge_to_death:
                                       "sockets\n");
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPABORTONMEMORY);
                }
        }
 
@@ -2465,76 +2465,6 @@ static unsigned long tcp_md5sig_users;
 static struct tcp_md5sig_pool **tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
-int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
-                     int bplen,
-                     struct tcphdr *th, unsigned int tcplen,
-                     struct tcp_md5sig_pool *hp)
-{
-       struct scatterlist sg[4];
-       __u16 data_len;
-       int block = 0;
-       __sum16 cksum;
-       struct hash_desc *desc = &hp->md5_desc;
-       int err;
-       unsigned int nbytes = 0;
-
-       sg_init_table(sg, 4);
-
-       /* 1. The TCP pseudo-header */
-       sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
-       nbytes += bplen;
-
-       /* 2. The TCP header, excluding options, and assuming a
-        * checksum of zero
-        */
-       cksum = th->check;
-       th->check = 0;
-       sg_set_buf(&sg[block++], th, sizeof(*th));
-       nbytes += sizeof(*th);
-
-       /* 3. The TCP segment data (if any) */
-       data_len = tcplen - (th->doff << 2);
-       if (data_len > 0) {
-               u8 *data = (u8 *)th + (th->doff << 2);
-               sg_set_buf(&sg[block++], data, data_len);
-               nbytes += data_len;
-       }
-
-       /* 4. an independently-specified key or password, known to both
-        * TCPs and presumably connection-specific
-        */
-       sg_set_buf(&sg[block++], key->key, key->keylen);
-       nbytes += key->keylen;
-
-       sg_mark_end(&sg[block - 1]);
-
-       /* Now store the hash into the packet */
-       err = crypto_hash_init(desc);
-       if (err) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
-               return -1;
-       }
-       err = crypto_hash_update(desc, sg, nbytes);
-       if (err) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
-               return -1;
-       }
-       err = crypto_hash_final(desc, md5_hash);
-       if (err) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
-               return -1;
-       }
-
-       /* Reset header */
-       th->check = cksum;
-
-       return 0;
-}
-EXPORT_SYMBOL(tcp_calc_md5_hash);
-
 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
 {
        int cpu;
@@ -2658,12 +2588,69 @@ void __tcp_put_md5sig_pool(void)
 }
 
 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+
+int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
+                       struct tcphdr *th)
+{
+       struct scatterlist sg;
+       int err;
+
+       __sum16 old_checksum = th->check;
+       th->check = 0;
+       /* options aren't included in the hash */
+       sg_init_one(&sg, th, sizeof(struct tcphdr));
+       err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
+       th->check = old_checksum;
+       return err;
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_header);
+
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
+                         struct sk_buff *skb, unsigned header_len)
+{
+       struct scatterlist sg;
+       const struct tcphdr *tp = tcp_hdr(skb);
+       struct hash_desc *desc = &hp->md5_desc;
+       unsigned i;
+       const unsigned head_data_len = skb_headlen(skb) > header_len ?
+                                      skb_headlen(skb) - header_len : 0;
+       const struct skb_shared_info *shi = skb_shinfo(skb);
+
+       sg_init_table(&sg, 1);
+
+       sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
+       if (crypto_hash_update(desc, &sg, head_data_len))
+               return 1;
+
+       for (i = 0; i < shi->nr_frags; ++i) {
+               const struct skb_frag_struct *f = &shi->frags[i];
+               sg_set_page(&sg, f->page, f->size, f->page_offset);
+               if (crypto_hash_update(desc, &sg, f->size))
+                       return 1;
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_skb_data);
+
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+{
+       struct scatterlist sg;
+
+       sg_init_one(&sg, key->key, key->keylen);
+       return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_key);
+
 #endif
 
 void tcp_done(struct sock *sk)
 {
        if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-               TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
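
The removed tcp_calc_md5_hash() built a single four-entry scatterlist over pseudo-header, TCP header, payload and key; the three helpers added above expose those pieces separately so the address-family code can drive the hash itself. A hypothetical caller, sketched along the lines of the IPv4 user of these helpers; the function name example_md5_hash_skb and the pseudo-header step are illustrative only, not part of this patch:

	static int example_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
					struct sk_buff *skb)
	{
		struct tcp_md5sig_pool *hp;
		struct hash_desc *desc;
		struct tcphdr *th = tcp_hdr(skb);

		hp = tcp_get_md5sig_pool();
		if (!hp)
			goto clear_hash_noput;
		desc = &hp->md5_desc;

		if (crypto_hash_init(desc))
			goto clear_hash;
		/* the family-specific pseudo-header would be hashed here,
		 * before the common pieces below */
		if (tcp_md5_hash_header(hp, th))
			goto clear_hash;
		if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
			goto clear_hash;
		if (tcp_md5_hash_key(hp, key))
			goto clear_hash;
		if (crypto_hash_final(desc, md5_hash))
			goto clear_hash;

		tcp_put_md5sig_pool();
		return 0;

	clear_hash:
		tcp_put_md5sig_pool();
	clear_hash_noput:
		memset(md5_hash, 0, 16);
		return 1;
	}
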
@@ -2800,4 +2787,3 @@ EXPORT_SYMBOL(tcp_splice_read);
 EXPORT_SYMBOL(tcp_sendpage);
 EXPORT_SYMBOL(tcp_setsockopt);
 EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_statistics);