[SK_BUFF] ipmr: Missed one conversion to skb_network_header()
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3834b10b51152aa24d3b69e08392d23274bca5f8..689f9330f1b945279dc0a9a4ce333d7b58f0b253 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -470,10 +470,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
        tcb->flags   = TCPCB_FLAG_ACK;
        tcb->sacked  = 0;
        skb_header_release(skb);
-       __skb_queue_tail(&sk->sk_write_queue, skb);
+       tcp_add_write_queue_tail(sk, skb);
        sk_charge_skb(sk, skb);
-       if (!sk->sk_send_head)
-               sk->sk_send_head = skb;
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
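
The new call folds the two removed steps into one helper. A minimal sketch of that shape, assuming the helper sits with the other write-queue accessors in include/net/tcp.h (inferred from the open-coded lines this hunk removes, not quoted from the header):

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
        /* Append to the write queue; if nothing was pending, this skb
         * becomes the point where sending must start.
         */
        __skb_queue_tail(&sk->sk_write_queue, skb);
        if (sk->sk_send_head == NULL)
                sk->sk_send_head = skb;
}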
@@ -491,8 +489,8 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
 static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
                            int mss_now, int nonagle)
 {
-       if (sk->sk_send_head) {
-               struct sk_buff *skb = sk->sk_write_queue.prev;
+       if (tcp_send_head(sk)) {
+               struct sk_buff *skb = tcp_write_queue_tail(sk);
                if (!(flags & MSG_MORE) || forced_push(tp))
                        tcp_mark_push(tp, skb);
                tcp_mark_urg(tp, flags, skb);
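
The two accessors used here look like thin wrappers over the fields the old code read directly. A plausible sketch, assuming they hide nothing beyond sk_send_head and the tail of sk_write_queue:

/* Sketches inferred from the expressions they replace. */
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
        return sk->sk_send_head;
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
        /* Callers check tcp_send_head() first, as in the hunk above,
         * so the queue is known to be non-empty here.
         */
        return sk->sk_write_queue.prev;
}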
@@ -526,13 +524,13 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
                goto do_error;
 
        while (psize > 0) {
-               struct sk_buff *skb = sk->sk_write_queue.prev;
+               struct sk_buff *skb = tcp_write_queue_tail(sk);
                struct page *page = pages[poffset / PAGE_SIZE];
                int copy, i, can_coalesce;
                int offset = poffset % PAGE_SIZE;
                int size = min_t(size_t, psize, PAGE_SIZE - offset);
 
-               if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
+               if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;
@@ -589,7 +587,7 @@ new_segment:
                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
                        __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-               } else if (skb == sk->sk_send_head)
+               } else if (skb == tcp_send_head(sk))
                        tcp_push_one(sk, mss_now);
                continue;
 
@@ -704,9 +702,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                while (seglen > 0) {
                        int copy;
 
-                       skb = sk->sk_write_queue.prev;
+                       skb = tcp_write_queue_tail(sk);
 
-                       if (!sk->sk_send_head ||
+                       if (!tcp_send_head(sk) ||
                            (copy = size_goal - skb->len) <= 0) {
 
 new_segment:
@@ -833,7 +831,7 @@ new_segment:
                        if (forced_push(tp)) {
                                tcp_mark_push(tp, skb);
                                __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-                       } else if (skb == sk->sk_send_head)
+                       } else if (skb == tcp_send_head(sk))
                                tcp_push_one(sk, mss_now);
                        continue;
 
@@ -860,9 +858,11 @@ out:
 
 do_fault:
        if (!skb->len) {
-               if (sk->sk_send_head == skb)
-                       sk->sk_send_head = NULL;
-               __skb_unlink(skb, &sk->sk_write_queue);
+               tcp_unlink_write_queue(skb, sk);
+               /* It is the one place in all of TCP, except connection
+                * reset, where we can be unlinking the send_head.
+                */
+               tcp_check_send_head(sk, skb);
                sk_stream_free_skb(sk, skb);
        }
 
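The comment kept in this hunk explains the split: unlinking an skb is now one helper, and clearing sk_send_head when the unlinked skb happened to be the send head is a separate check. A plausible sketch of the pair, inferred from the three removed lines (exact definitions assumed, not quoted):

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
        __skb_unlink(skb, &sk->sk_write_queue);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
        /* Forget the send head if it was the skb just unlinked. */
        if (sk->sk_send_head == skb_unlinked)
                sk->sk_send_head = NULL;
}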
@@ -1732,7 +1732,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
-       sk_stream_writequeue_purge(sk);
+       tcp_write_queue_purge(sk);
        __skb_queue_purge(&tp->out_of_order_queue);
 #ifdef CONFIG_NET_DMA
        __skb_queue_purge(&sk->sk_async_wait_queue);
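
tcp_write_queue_purge() replaces the generic sk_stream_writequeue_purge() with a TCP-aware purge. A hypothetical sketch of what it has to cover, assuming it frees every queued skb and forgets the send head; the kernel's exact definition may do more (e.g. drop retransmit hints):

static inline void tcp_write_queue_purge(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
                sk_stream_free_skb(sk, skb);
        sk->sk_send_head = NULL;        /* nothing left to send */
}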
@@ -1758,7 +1758,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
-       sk->sk_send_head = NULL;
+       tcp_init_send_head(sk);
        tp->rx_opt.saw_tstamp = 0;
        tcp_sack_reset(&tp->rx_opt);
        __sk_dst_reset(sk);
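
tcp_init_send_head() presumably amounts to no more than the assignment it replaces; a one-line sketch:

static inline void tcp_init_send_head(struct sock *sk)
{
        sk->sk_send_head = NULL;
}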