pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - net/ipv4/tcp.c
Merge branch 'fixes-davem' of master.kernel.org:/pub/scm/linux/kernel/git/linville...
[linux-2.6-omap-h63xx.git] / net / ipv4 / tcp.c
index cd3c7e95de9e07296de8583fdc5b3df73aa45cdf..7e740112b2383c93b535a0657baf3f5b6fe75d48 100644 (file)
@@ -658,9 +658,10 @@ static inline int select_size(struct sock *sk)
        return tmp;
 }
 
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t size)
 {
+       struct sock *sk = sock->sk;
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -1064,7 +1065,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                                        break;
                        }
                        used = recv_actor(desc, skb, offset, len);
-                       if (used <= len) {
+                       if (used < 0) {
+                               if (!copied)
+                                       copied = used;
+                               break;
+                       } else if (used <= len) {
                                seq += used;
                                copied += used;
                                offset += used;
@@ -1086,7 +1091,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
        tcp_rcv_space_adjust(sk);
 
        /* Clean up data we have read: This will do ACK frames. */
-       if (copied)
+       if (copied > 0)
                tcp_cleanup_rbuf(sk, copied);
        return copied;
 }
@@ -1112,6 +1117,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        long timeo;
        struct task_struct *user_recv = NULL;
        int copied_early = 0;
+       struct sk_buff *skb;
 
        lock_sock(sk);
 
@@ -1138,16 +1144,26 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #ifdef CONFIG_NET_DMA
        tp->ucopy.dma_chan = NULL;
        preempt_disable();
-       if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-           !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
-               preempt_enable_no_resched();
-               tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
-       } else
-               preempt_enable_no_resched();
+       skb = skb_peek_tail(&sk->sk_receive_queue);
+       {
+               int available = 0;
+
+               if (skb)
+                       available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+               if ((available < target) &&
+                   (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+                   !sysctl_tcp_low_latency &&
+                   __get_cpu_var(softnet_data).net_dma) {
+                       preempt_enable_no_resched();
+                       tp->ucopy.pinned_list =
+                                       dma_pin_iovec_pages(msg->msg_iov, len);
+               } else {
+                       preempt_enable_no_resched();
+               }
+       }
 #endif
 
        do {
-               struct sk_buff *skb;
                u32 offset;
 
                /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
@@ -1435,7 +1451,6 @@ skip_copy:
 
 #ifdef CONFIG_NET_DMA
        if (tp->ucopy.dma_chan) {
-               struct sk_buff *skb;
                dma_cookie_t done, used;
 
                dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
@@ -2416,7 +2431,7 @@ void __init tcp_init(void)
        tcp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("tcp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
-                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        /* Size and allocate the main established and bind bucket
         * hash tables.