[NETNS][ICMP]: Make ctl tables for ICMP sysctls per-net.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 19c449f62672d7cf41f03a65f6d6dbc0cc3b3c04..6e46b4c0f28cd4e3a22e5267e4fd2076240e4820 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1367,7 +1367,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
  * a normal way
  */
 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
-                                       u32 skip_to_seq)
+                                       u32 skip_to_seq, int *fack_count)
 {
        tcp_for_write_queue_from(skb, sk) {
                if (skb == tcp_send_head(sk))
@@ -1375,6 +1375,8 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
 
                if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
                        break;
+
+               *fack_count += tcp_skb_pcount(skb);
        }
        return skb;
 }
@@ -1390,7 +1392,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
                return skb;
 
        if (before(next_dup->start_seq, skip_to_seq)) {
-               skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
+               skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
                tcp_sacktag_walk(skb, sk, NULL,
                                 next_dup->start_seq, next_dup->end_seq,
                                 1, fack_count, reord, flag);
@@ -1537,7 +1539,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 
                        /* Head todo? */
                        if (before(start_seq, cache->start_seq)) {
-                               skb = tcp_sacktag_skip(skb, sk, start_seq);
+                               skb = tcp_sacktag_skip(skb, sk, start_seq,
+                                                      &fack_count);
                                skb = tcp_sacktag_walk(skb, sk, next_dup,
                                                       start_seq,
                                                       cache->start_seq,
@@ -1565,7 +1568,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                goto walk;
                        }
 
-                       skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
+                       skb = tcp_sacktag_skip(skb, sk, cache->end_seq,
+                                              &fack_count);
                        /* Check overlap against next cached too (past this one already) */
                        cache++;
                        continue;
@@ -1577,7 +1581,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                break;
                        fack_count = tp->fackets_out;
                }
-               skb = tcp_sacktag_skip(skb, sk, start_seq);
+               skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
 
 walk:
                skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
@@ -3557,7 +3561,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
                 * cases we should never reach this piece of code.
                 */
                printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
-                      __FUNCTION__, sk->sk_state);
+                      __func__, sk->sk_state);
                break;
        }
 
@@ -4447,6 +4451,49 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
        }
 }
 
+static int tcp_defer_accept_check(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (tp->defer_tcp_accept.request) {
+               int queued_data =  tp->rcv_nxt - tp->copied_seq;
+               int hasfin =  !skb_queue_empty(&sk->sk_receive_queue) ?
+                       tcp_hdr((struct sk_buff *)
+                               sk->sk_receive_queue.prev)->fin : 0;
+
+               if (queued_data && hasfin)
+                       queued_data--;
+
+               if (queued_data &&
+                   tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) {
+                       if (sock_flag(sk, SOCK_KEEPOPEN)) {
+                               inet_csk_reset_keepalive_timer(sk,
+                                                              keepalive_time_when(tp));
+                       } else {
+                               inet_csk_delete_keepalive_timer(sk);
+                       }
+
+                       inet_csk_reqsk_queue_add(
+                               tp->defer_tcp_accept.listen_sk,
+                               tp->defer_tcp_accept.request,
+                               sk);
+
+                       tp->defer_tcp_accept.listen_sk->sk_data_ready(
+                               tp->defer_tcp_accept.listen_sk, 0);
+
+                       sock_put(tp->defer_tcp_accept.listen_sk);
+                       sock_put(sk);
+                       tp->defer_tcp_accept.listen_sk = NULL;
+                       tp->defer_tcp_accept.request = NULL;
+               } else if (hasfin ||
+                          tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) {
+                       tcp_reset(sk);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -4807,6 +4854,9 @@ step5:
 
        tcp_data_snd_check(sk);
        tcp_ack_snd_check(sk);
+
+       if (tcp_defer_accept_check(sk))
+               return -1;
        return 0;
 
 csum_error:
@@ -5326,6 +5376,7 @@ discard:
 
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 EXPORT_SYMBOL(sysctl_tcp_reordering);
+EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 EXPORT_SYMBOL(tcp_parse_options);
 EXPORT_SYMBOL(tcp_rcv_established);
 EXPORT_SYMBOL(tcp_rcv_state_process);
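
Note: the before() checks in the tcp_sacktag_skip() and tcp_maybe_skipping_dsack() hunks above use the kernel's wrap-safe 32-bit sequence-number comparison from include/net/tcp.h. A minimal standalone sketch, with userspace fixed-width types standing in for __u32/__s32:

#include <stdint.h>

/* seq1 is "before" seq2 iff the signed 32-bit difference is negative;
 * this remains correct when the TCP sequence space wraps around 2^32.
 */
static inline int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

#define after(seq2, seq1)	before(seq1, seq2)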
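
The tcp_defer_accept_check() helper added above appears to belong to the kernel-side handling of the TCP_DEFER_ACCEPT socket option, under which a connection is only handed to the listening socket once data has arrived. As an illustrative sketch of how that option is typically enabled from userspace (the function name and the 5-second timeout below are hypothetical and not part of this patch):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Create a listening socket whose accept() is deferred until the peer
 * has actually sent data, or the timeout has expired.
 */
static int make_deferred_listener(uint16_t port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	int secs = 5;	/* hold the connection up to ~5s waiting for data */
	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));

	struct sockaddr_in addr;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}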