2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed were wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
57 * Alan Cox : Tidied tcp_data to avoid a potential
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request, waiting for ack
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
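 *
 * For orientation (RFC 793): an active close walks ESTABLISHED ->
 * FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE, a passive close walks
 * ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE, and a simultaneous
 * close passes through CLOSING instead of FIN_WAIT2.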
250 #include <linux/config.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/smp_lock.h>
257 #include <linux/fs.h>
258 #include <linux/random.h>
259 #include <linux/bootmem.h>
261 #include <net/icmp.h>
263 #include <net/xfrm.h>
267 #include <asm/uaccess.h>
268 #include <asm/ioctls.h>
270 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
272 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
274 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
276 int sysctl_tcp_mem[3];
277 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
278 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
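/* These triplets follow the usual convention: tcp_wmem and tcp_rmem are
 * per-socket send/receive buffer sizes in bytes as { min, default, max },
 * while tcp_mem (filled in by tcp_init() below) holds global memory
 * thresholds measured in pages.
 */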
280 EXPORT_SYMBOL(sysctl_tcp_mem);
281 EXPORT_SYMBOL(sysctl_tcp_rmem);
282 EXPORT_SYMBOL(sysctl_tcp_wmem);
284 atomic_t tcp_memory_allocated; /* Current allocated memory. */
285 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
287 EXPORT_SYMBOL(tcp_memory_allocated);
288 EXPORT_SYMBOL(tcp_sockets_allocated);
291 * Pressure flag: try to collapse.
292 * Technical note: it is used by multiple contexts non-atomically.
293 * All of sk_stream_mem_schedule() is of this nature: accounting
294 * is strict, actions are advisory and have some latency.
296 int tcp_memory_pressure;
298 EXPORT_SYMBOL(tcp_memory_pressure);
300 void tcp_enter_memory_pressure(void)
302 if (!tcp_memory_pressure) {
303 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
304 tcp_memory_pressure = 1;
308 EXPORT_SYMBOL(tcp_enter_memory_pressure);
311 * LISTEN is a special case for poll..
313 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
316 return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0;
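/* A listening socket is "readable" exactly when at least one established
 * child sits in the accept queue, so select()/poll() wake up a pending
 * accept(); it never reports writability from here.
 */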
320 * Wait for a TCP event.
322 * Note that we don't need to lock the socket, as the upper poll layers
323 * take care of normal races (between the test and the event) and we don't
324 * go look at any of the socket buffers directly.
326 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
329 struct sock *sk = sock->sk;
330 struct tcp_sock *tp = tcp_sk(sk);
332 poll_wait(file, sk->sk_sleep, wait);
333 if (sk->sk_state == TCP_LISTEN)
334 return tcp_listen_poll(sk, wait);
336 /* Socket is not locked. We are protected from async events
337 by poll logic and correct handling of state changes
338 made by other threads is impossible in any case.
346 * POLLHUP is certainly not done right. But poll() doesn't
347 * have a notion of HUP in just one direction, and for a
348 * socket the read side is more interesting.
350 * Some poll() documentation says that POLLHUP is incompatible
351 * with the POLLOUT/POLLWR flags, so somebody should check this
352 * all. But careful, it tends to be safer to return too many
353 * bits than too few, and you can easily break real applications
354 * if you don't tell them that something has hung up!
358 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
359 * our fs/select.c). It means that after we received EOF,
360 * poll always returns immediately, making it impossible to poll() for write()
361 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
362 * if and only if shutdown has been made in both directions.
363 * Actually, it is interesting to look at how Solaris and DUX
364 * solve this dilemma. I would prefer, if POLLHUP were maskable,
365 * then we could set it on SND_SHUTDOWN. BTW examples given
366 * in Stevens' books assume exactly this behaviour, it explains
367 * why POLLHUP is incompatible with POLLOUT. --ANK
369 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
370 * blocking on fresh not-connected or disconnected socket. --ANK
372 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
374 if (sk->sk_shutdown & RCV_SHUTDOWN)
375 mask |= POLLIN | POLLRDNORM;
378 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
379 /* Potential race condition. If read of tp below will
380 * escape above sk->sk_state, we can be illegally awakened
381 * in SYN_* states. */
382 if ((tp->rcv_nxt != tp->copied_seq) &&
383 (tp->urg_seq != tp->copied_seq ||
384 tp->rcv_nxt != tp->copied_seq + 1 ||
385 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
386 mask |= POLLIN | POLLRDNORM;
388 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
389 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
390 mask |= POLLOUT | POLLWRNORM;
391 } else { /* send SIGIO later */
392 set_bit(SOCK_ASYNC_NOSPACE,
393 &sk->sk_socket->flags);
394 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
396 /* Race breaker. If space is freed after
397 * wspace test but before the flags are set,
398 * IO signal will be lost.
400 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
401 mask |= POLLOUT | POLLWRNORM;
405 if (tp->urg_data & TCP_URG_VALID)
411 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
413 struct tcp_sock *tp = tcp_sk(sk);
418 if (sk->sk_state == TCP_LISTEN)
422 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
424 else if (sock_flag(sk, SOCK_URGINLINE) ||
426 before(tp->urg_seq, tp->copied_seq) ||
427 !before(tp->urg_seq, tp->rcv_nxt)) {
428 answ = tp->rcv_nxt - tp->copied_seq;
430 /* Subtract 1, if FIN is in queue. */
431 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
433 answ -= ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
435 answ = tp->urg_seq - tp->copied_seq;
439 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
442 if (sk->sk_state == TCP_LISTEN)
445 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
448 answ = tp->write_seq - tp->snd_una;
454 return put_user(answ, (int __user *)arg);
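/* The values computed above are what userspace sees through the usual
 * byte-count ioctls (SIOCINQ, a.k.a. FIONREAD, for unread bytes, and
 * SIOCOUTQ for bytes written but not yet acknowledged); a sketch of
 * typical use:
 *
 *	int unread, unsent;
 *	ioctl(fd, SIOCINQ,  &unread);
 *	ioctl(fd, SIOCOUTQ, &unsent);
 */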
458 int tcp_listen_start(struct sock *sk)
460 struct inet_sock *inet = inet_sk(sk);
461 struct tcp_sock *tp = tcp_sk(sk);
462 int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);
467 sk->sk_max_ack_backlog = 0;
468 sk->sk_ack_backlog = 0;
471 /* There is a race window here: we announce ourselves listening,
472 * but this transition is still not validated by get_port().
473 * It is OK, because this socket enters the hash table only
474 * after validation is complete.
476 sk->sk_state = TCP_LISTEN;
477 if (!sk->sk_prot->get_port(sk, inet->num)) {
478 inet->sport = htons(inet->num);
481 sk->sk_prot->hash(sk);
486 sk->sk_state = TCP_CLOSE;
487 __reqsk_queue_destroy(&tp->accept_queue);
492 * This routine closes sockets which have been at least partially
493 * opened, but not yet accepted.
496 static void tcp_listen_stop (struct sock *sk)
498 struct tcp_sock *tp = tcp_sk(sk);
499 struct request_sock *acc_req;
500 struct request_sock *req;
502 tcp_delete_keepalive_timer(sk);
504 /* make all the listen_opt local to us */
505 acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
507 /* Following specs, it would be better either to send FIN
508 * (and enter FIN-WAIT-1, it is normal close)
509 * or to send active reset (abort).
510 * Certainly, it is pretty dangerous during a synflood, but that is a
511 * bad justification for our negligence 8)
512 * To be honest, we are not able to implement either
513 * of the variants now. --ANK
515 reqsk_queue_destroy(&tp->accept_queue);
517 while ((req = acc_req) != NULL) {
518 struct sock *child = req->sk;
520 acc_req = req->dl_next;
524 BUG_TRAP(!sock_owned_by_user(child));
527 tcp_disconnect(child, O_NONBLOCK);
531 atomic_inc(&tcp_orphan_count);
533 tcp_destroy_sock(child);
535 bh_unlock_sock(child);
539 sk_acceptq_removed(sk);
542 BUG_TRAP(!sk->sk_ack_backlog);
545 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
547 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
548 tp->pushed_seq = tp->write_seq;
551 static inline int forced_push(struct tcp_sock *tp)
553 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
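/* i.e. push once more than half of the largest window the peer has ever
 * advertised has been queued since the last PSH; this bounds how much
 * data can sit unpushed in the write queue even without an explicit
 * flush from the user.
 */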
556 static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
560 TCP_SKB_CB(skb)->seq = tp->write_seq;
561 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
562 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
563 TCP_SKB_CB(skb)->sacked = 0;
564 skb_header_release(skb);
565 __skb_queue_tail(&sk->sk_write_queue, skb);
566 sk_charge_skb(sk, skb);
567 if (!sk->sk_send_head)
568 sk->sk_send_head = skb;
569 if (tp->nonagle & TCP_NAGLE_PUSH)
570 tp->nonagle &= ~TCP_NAGLE_PUSH;
573 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
576 if (flags & MSG_OOB) {
578 tp->snd_up = tp->write_seq;
579 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
583 static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
584 int mss_now, int nonagle)
586 if (sk->sk_send_head) {
587 struct sk_buff *skb = sk->sk_write_queue.prev;
588 if (!(flags & MSG_MORE) || forced_push(tp))
589 tcp_mark_push(tp, skb);
590 tcp_mark_urg(tp, flags, skb);
591 __tcp_push_pending_frames(sk, tp, mss_now,
592 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
596 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
597 size_t psize, int flags)
599 struct tcp_sock *tp = tcp_sk(sk);
600 int mss_now, size_goal;
603 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
605 /* Wait for a connection to finish. */
606 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
607 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
610 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
612 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
613 size_goal = tp->xmit_size_goal;
617 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
621 struct sk_buff *skb = sk->sk_write_queue.prev;
622 struct page *page = pages[poffset / PAGE_SIZE];
623 int copy, i, can_coalesce;
624 int offset = poffset % PAGE_SIZE;
625 int size = min_t(size_t, psize, PAGE_SIZE - offset);
627 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
629 if (!sk_stream_memory_free(sk))
630 goto wait_for_sndbuf;
632 skb = sk_stream_alloc_pskb(sk, 0, 0,
635 goto wait_for_memory;
637 skb_entail(sk, tp, skb);
644 i = skb_shinfo(skb)->nr_frags;
645 can_coalesce = skb_can_coalesce(skb, i, page, offset);
646 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
647 tcp_mark_push(tp, skb);
650 if (sk->sk_forward_alloc < copy &&
651 !sk_stream_mem_schedule(sk, copy, 0))
652 goto wait_for_memory;
655 skb_shinfo(skb)->frags[i - 1].size += copy;
658 skb_fill_page_desc(skb, i, page, offset, copy);
662 skb->data_len += copy;
663 skb->truesize += copy;
664 sk->sk_wmem_queued += copy;
665 sk->sk_forward_alloc -= copy;
666 skb->ip_summed = CHECKSUM_HW;
667 tp->write_seq += copy;
668 TCP_SKB_CB(skb)->end_seq += copy;
669 skb_shinfo(skb)->tso_segs = 0;
672 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
676 if (!(psize -= copy))
679 if (skb->len < mss_now || (flags & MSG_OOB))
682 if (forced_push(tp)) {
683 tcp_mark_push(tp, skb);
684 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
685 } else if (skb == sk->sk_send_head)
686 tcp_push_one(sk, mss_now);
690 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
693 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
695 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
698 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
699 size_goal = tp->xmit_size_goal;
704 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
711 return sk_stream_error(sk, flags, err);
714 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
715 size_t size, int flags)
718 struct sock *sk = sock->sk;
720 #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
722 if (!(sk->sk_route_caps & NETIF_F_SG) ||
723 !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
724 return sock_no_sendpage(sock, page, offset, size, flags);
726 #undef TCP_ZC_CSUM_FLAGS
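/* Rationale for the check above: do_tcp_sendpages() leaves the payload in
 * page fragments, so the outgoing route must support scatter-gather and
 * some form of hardware checksumming; otherwise we fall back to
 * sock_no_sendpage(), which copies through the ordinary sendmsg() path.
 */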
730 res = do_tcp_sendpages(sk, &page, offset, size, flags);
736 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
737 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
739 static inline int select_size(struct sock *sk, struct tcp_sock *tp)
741 int tmp = tp->mss_cache;
743 if (sk->sk_route_caps & NETIF_F_SG) {
744 if (sk->sk_route_caps & NETIF_F_TSO)
747 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
749 if (tmp >= pgbreak &&
750 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
758 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
762 struct tcp_sock *tp = tcp_sk(sk);
765 int mss_now, size_goal;
772 flags = msg->msg_flags;
773 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
775 /* Wait for a connection to finish. */
776 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
777 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
780 /* This should be in poll */
781 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
783 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
784 size_goal = tp->xmit_size_goal;
786 /* Ok commence sending. */
787 iovlen = msg->msg_iovlen;
792 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
795 while (--iovlen >= 0) {
796 int seglen = iov->iov_len;
797 unsigned char __user *from = iov->iov_base;
804 skb = sk->sk_write_queue.prev;
806 if (!sk->sk_send_head ||
807 (copy = size_goal - skb->len) <= 0) {
810 /* Allocate new segment. If the interface is SG,
811 * allocate skb fitting to single page.
813 if (!sk_stream_memory_free(sk))
814 goto wait_for_sndbuf;
816 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
817 0, sk->sk_allocation);
819 goto wait_for_memory;
822 * Check whether we can use HW checksum.
824 if (sk->sk_route_caps &
825 (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
827 skb->ip_summed = CHECKSUM_HW;
829 skb_entail(sk, tp, skb);
833 /* Try to append data to the end of skb. */
837 /* Where to copy to? */
838 if (skb_tailroom(skb) > 0) {
839 /* We have some space in skb head. Superb! */
840 if (copy > skb_tailroom(skb))
841 copy = skb_tailroom(skb);
842 if ((err = skb_add_data(skb, from, copy)) != 0)
846 int i = skb_shinfo(skb)->nr_frags;
847 struct page *page = TCP_PAGE(sk);
848 int off = TCP_OFF(sk);
850 if (skb_can_coalesce(skb, i, page, off) &&
852 /* We can extend the last page
855 } else if (i == MAX_SKB_FRAGS ||
857 !(sk->sk_route_caps & NETIF_F_SG))) {
858 /* Need to add new fragment and cannot
859 * do this because interface is non-SG,
860 * or because all the page slots are
862 tcp_mark_push(tp, skb);
865 if (off == PAGE_SIZE) {
867 TCP_PAGE(sk) = page = NULL;
872 /* Allocate new cache page. */
873 if (!(page = sk_stream_alloc_page(sk)))
874 goto wait_for_memory;
878 if (copy > PAGE_SIZE - off)
879 copy = PAGE_SIZE - off;
881 /* Time to copy data. We are close to
883 err = skb_copy_to_page(sk, from, skb, page,
886 /* If this page was new, give it to the
887 * socket so it does not get leaked.
896 /* Update the skb. */
898 skb_shinfo(skb)->frags[i - 1].size +=
901 skb_fill_page_desc(skb, i, page, off, copy);
904 } else if (off + copy < PAGE_SIZE) {
910 TCP_OFF(sk) = off + copy;
914 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
916 tp->write_seq += copy;
917 TCP_SKB_CB(skb)->end_seq += copy;
918 skb_shinfo(skb)->tso_segs = 0;
922 if ((seglen -= copy) == 0 && iovlen == 0)
925 if (skb->len < mss_now || (flags & MSG_OOB))
928 if (forced_push(tp)) {
929 tcp_mark_push(tp, skb);
930 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
931 } else if (skb == sk->sk_send_head)
932 tcp_push_one(sk, mss_now);
936 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
939 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
941 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
944 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
945 size_goal = tp->xmit_size_goal;
951 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
958 if (sk->sk_send_head == skb)
959 sk->sk_send_head = NULL;
960 __skb_unlink(skb, &sk->sk_write_queue);
961 sk_stream_free_skb(sk, skb);
968 err = sk_stream_error(sk, flags, err);
975 * Handle reading urgent data. BSD has very simple semantics for
976 * this, no blocking and very strange errors 8)
979 static int tcp_recv_urg(struct sock *sk, long timeo,
980 struct msghdr *msg, int len, int flags,
983 struct tcp_sock *tp = tcp_sk(sk);
985 /* No URG data to read. */
986 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
987 tp->urg_data == TCP_URG_READ)
988 return -EINVAL; /* Yes this is right ! */
990 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
993 if (tp->urg_data & TCP_URG_VALID) {
995 char c = tp->urg_data;
997 if (!(flags & MSG_PEEK))
998 tp->urg_data = TCP_URG_READ;
1000 /* Read urgent data. */
1001 msg->msg_flags |= MSG_OOB;
1004 if (!(flags & MSG_TRUNC))
1005 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1008 msg->msg_flags |= MSG_TRUNC;
1010 return err ? -EFAULT : len;
1013 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1016 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1017 * the available implementations agree in this case:
1018 * this call should never block, independent of the
1019 * blocking state of the socket.
1020 * Mike <pall@rz.uni-karlsruhe.de>
1025 /* Clean up the receive buffer for full frames taken by the user,
1026 * then send an ACK if necessary. COPIED is the number of bytes
1027 * tcp_recvmsg has given to the user so far, it speeds up the
1028 * calculation of whether or not we must ACK for the sake of
1031 static void cleanup_rbuf(struct sock *sk, int copied)
1033 struct tcp_sock *tp = tcp_sk(sk);
1034 int time_to_ack = 0;
1037 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1039 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1042 if (tcp_ack_scheduled(tp)) {
1043 /* Delayed ACKs frequently hit locked sockets during bulk
1045 if (tp->ack.blocked ||
1046 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1047 tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
1049 * If this read emptied read buffer, we send ACK, if
1050 * connection is not bidirectional, user drained
1051 * receive buffer and there was a small segment
1054 (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
1055 !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
1059 /* We send an ACK if we can now advertise a non-zero window
1060 * which has been raised "significantly".
1062 * Even if the window was raised up to infinity, do not send a window open ACK
1063 * in states where we will not receive more. It is useless.
1065 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1066 __u32 rcv_window_now = tcp_receive_window(tp);
1068 /* Optimize, __tcp_select_window() is not cheap. */
1069 if (2*rcv_window_now <= tp->window_clamp) {
1070 __u32 new_window = __tcp_select_window(sk);
1072 /* Send ACK now, if this read freed lots of space
1073 * in our buffer. Certainly, new_window is new window.
1074 * We can advertise it now, if it is not less than current one.
1075 * "Lots" means "at least twice" here.
1077 if (new_window && new_window >= 2 * rcv_window_now)
1085 static void tcp_prequeue_process(struct sock *sk)
1087 struct sk_buff *skb;
1088 struct tcp_sock *tp = tcp_sk(sk);
1090 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1092 /* RX process wants to run with disabled BHs, though it is not
1095 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1096 sk->sk_backlog_rcv(sk, skb);
1099 /* Clear memory counter. */
1100 tp->ucopy.memory = 0;
1103 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1105 struct sk_buff *skb;
1108 skb_queue_walk(&sk->sk_receive_queue, skb) {
1109 offset = seq - TCP_SKB_CB(skb)->seq;
1112 if (offset < skb->len || skb->h.th->fin) {
1121 * This routine provides an alternative to tcp_recvmsg() for routines
1122 * that would like to handle copying from skbuffs directly in 'sendfile'
1125 * - It is assumed that the socket was locked by the caller.
1126 * - The routine does not block.
1127 * - At present, there is no support for reading OOB data
1128 * or for 'peeking' the socket using this routine
1129 * (although both would be easy to implement).
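 *
 * As implemented below: walk the receive queue starting at copied_seq,
 * hand each contiguous chunk to recv_actor(), and stop early when the
 * actor consumes less than it was offered, when urgent data is reached,
 * or after a FIN; the return value is the total number of bytes consumed.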
1131 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1132 sk_read_actor_t recv_actor)
1134 struct sk_buff *skb;
1135 struct tcp_sock *tp = tcp_sk(sk);
1136 u32 seq = tp->copied_seq;
1140 if (sk->sk_state == TCP_LISTEN)
1142 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1143 if (offset < skb->len) {
1146 len = skb->len - offset;
1147 /* Stop reading if we hit a patch of urgent data */
1149 u32 urg_offset = tp->urg_seq - seq;
1150 if (urg_offset < len)
1155 used = recv_actor(desc, skb, offset, len);
1161 if (offset != skb->len)
1164 if (skb->h.th->fin) {
1165 sk_eat_skb(sk, skb);
1169 sk_eat_skb(sk, skb);
1173 tp->copied_seq = seq;
1175 tcp_rcv_space_adjust(sk);
1177 /* Clean up data we have read: This will do ACK frames. */
1179 cleanup_rbuf(sk, copied);
1184 * This routine copies from a sock struct into the user buffer.
1186 * Technical note: in 2.3 we work on _locked_ socket, so that
1187 * tricks with *seq access order and skb->users are not required.
1188 * Probably, code can be easily improved even more.
1191 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1192 size_t len, int nonblock, int flags, int *addr_len)
1194 struct tcp_sock *tp = tcp_sk(sk);
1200 int target; /* Read at least this many bytes */
1202 struct task_struct *user_recv = NULL;
1206 TCP_CHECK_TIMER(sk);
1209 if (sk->sk_state == TCP_LISTEN)
1212 timeo = sock_rcvtimeo(sk, nonblock);
1214 /* Urgent data needs to be handled specially. */
1215 if (flags & MSG_OOB)
1218 seq = &tp->copied_seq;
1219 if (flags & MSG_PEEK) {
1220 peek_seq = tp->copied_seq;
1224 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1227 struct sk_buff *skb;
1230 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1231 if (tp->urg_data && tp->urg_seq == *seq) {
1234 if (signal_pending(current)) {
1235 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1240 /* Next get a buffer. */
1242 skb = skb_peek(&sk->sk_receive_queue);
1247 /* Now that we have two receive queues this
1250 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1251 printk(KERN_INFO "recvmsg bug: copied %X "
1252 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1255 offset = *seq - TCP_SKB_CB(skb)->seq;
1258 if (offset < skb->len)
1262 BUG_TRAP(flags & MSG_PEEK);
1264 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1266 /* Well, if we have backlog, try to process it now. */
1268 if (copied >= target && !sk->sk_backlog.tail)
1273 sk->sk_state == TCP_CLOSE ||
1274 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1276 signal_pending(current) ||
1280 if (sock_flag(sk, SOCK_DONE))
1284 copied = sock_error(sk);
1288 if (sk->sk_shutdown & RCV_SHUTDOWN)
1291 if (sk->sk_state == TCP_CLOSE) {
1292 if (!sock_flag(sk, SOCK_DONE)) {
1293 /* This occurs when user tries to read
1294 * from a never-connected socket.
1307 if (signal_pending(current)) {
1308 copied = sock_intr_errno(timeo);
1313 cleanup_rbuf(sk, copied);
1315 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1316 /* Install new reader */
1317 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1318 user_recv = current;
1319 tp->ucopy.task = user_recv;
1320 tp->ucopy.iov = msg->msg_iov;
1323 tp->ucopy.len = len;
1325 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1326 (flags & (MSG_PEEK | MSG_TRUNC)));
1328 /* Ugly... If prequeue is not empty, we have to
1329 * process it before releasing socket, otherwise
1330 * order will be broken at second iteration.
1331 * More elegant solution is required!!!
1333 * Look: we have the following (pseudo)queues:
1335 * 1. packets in flight
1340 * Each queue can be processed only if the next ones
1341 * are empty. At this point we have empty receive_queue.
1342 * But prequeue _can_ be not empty after 2nd iteration,
1343 * when we jumped to start of loop because backlog
1344 * processing added something to receive_queue.
1345 * We cannot release_sock(), because backlog contains
1346 * packets arrived _after_ prequeued ones.
1348 * In short, the algorithm is clear --- process all
1349 * the queues in order. We could do it more directly, by
1350 * requeueing packets from backlog to prequeue whenever it
1351 * is not empty. It is more elegant, but eats cycles,
1354 if (!skb_queue_empty(&tp->ucopy.prequeue))
1357 /* __ Set realtime policy in scheduler __ */
1360 if (copied >= target) {
1361 /* Do not sleep, just process backlog. */
1365 sk_wait_data(sk, &timeo);
1370 /* __ Restore normal policy in scheduler __ */
1372 if ((chunk = len - tp->ucopy.len) != 0) {
1373 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1378 if (tp->rcv_nxt == tp->copied_seq &&
1379 !skb_queue_empty(&tp->ucopy.prequeue)) {
1381 tcp_prequeue_process(sk);
1383 if ((chunk = len - tp->ucopy.len) != 0) {
1384 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1390 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1391 if (net_ratelimit())
1392 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1393 current->comm, current->pid);
1394 peek_seq = tp->copied_seq;
1399 /* Ok so how much can we use? */
1400 used = skb->len - offset;
1404 /* Do we have urgent data here? */
1406 u32 urg_offset = tp->urg_seq - *seq;
1407 if (urg_offset < used) {
1409 if (!sock_flag(sk, SOCK_URGINLINE)) {
1421 if (!(flags & MSG_TRUNC)) {
1422 err = skb_copy_datagram_iovec(skb, offset,
1423 msg->msg_iov, used);
1425 /* Exception. Bailout! */
1436 tcp_rcv_space_adjust(sk);
1439 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1441 tcp_fast_path_check(sk, tp);
1443 if (used + offset < skb->len)
1448 if (!(flags & MSG_PEEK))
1449 sk_eat_skb(sk, skb);
1453 /* Process the FIN. */
1455 if (!(flags & MSG_PEEK))
1456 sk_eat_skb(sk, skb);
1461 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1464 tp->ucopy.len = copied > 0 ? len : 0;
1466 tcp_prequeue_process(sk);
1468 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1469 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1475 tp->ucopy.task = NULL;
1479 /* According to UNIX98, msg_name/msg_namelen are ignored
1480 * on connected socket. I was just happy when found this 8) --ANK
1483 /* Clean up data we have read: This will do ACK frames. */
1484 cleanup_rbuf(sk, copied);
1486 TCP_CHECK_TIMER(sk);
1491 TCP_CHECK_TIMER(sk);
1496 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1501 * State processing on a close. This implements the state shift for
1502 * sending our FIN frame. Note that we only send a FIN for some
1503 * states. A shutdown() may have already sent the FIN, or we may be
1507 static unsigned char new_state[16] = {
1508 /* current state: new state: action: */
1509 /* (Invalid) */ TCP_CLOSE,
1510 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1511 /* TCP_SYN_SENT */ TCP_CLOSE,
1512 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1513 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1514 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1515 /* TCP_TIME_WAIT */ TCP_CLOSE,
1516 /* TCP_CLOSE */ TCP_CLOSE,
1517 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1518 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1519 /* TCP_LISTEN */ TCP_CLOSE,
1520 /* TCP_CLOSING */ TCP_CLOSING,
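/* Each entry encodes the next state in its low bits, optionally OR'ed with
 * TCP_ACTION_FIN when a FIN must be transmitted; e.g. closing an
 * ESTABLISHED socket moves it to FIN_WAIT1 and queues a FIN, while a
 * SYN_SENT socket simply drops to CLOSE. tcp_close_state() applies this
 * table and reports whether a FIN is due.
 */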
1523 static int tcp_close_state(struct sock *sk)
1525 int next = (int)new_state[sk->sk_state];
1526 int ns = next & TCP_STATE_MASK;
1528 tcp_set_state(sk, ns);
1530 return next & TCP_ACTION_FIN;
1534 * Shutdown the sending side of a connection. Much like close except
1535 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1538 void tcp_shutdown(struct sock *sk, int how)
1540 /* We need to grab some memory, and put together a FIN,
1541 * and then put it into the queue to be sent.
1542 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1544 if (!(how & SEND_SHUTDOWN))
1547 /* If we've already sent a FIN, or it's a closed state, skip this. */
1548 if ((1 << sk->sk_state) &
1549 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1550 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1551 /* Clear out any half completed packets. FIN if needed. */
1552 if (tcp_close_state(sk))
1558 * At this point, there should be no process reference to this
1559 * socket, and thus no user references at all. Therefore we
1560 * can assume the socket waitqueue is inactive and nobody will
1561 * try to jump onto it.
1563 void tcp_destroy_sock(struct sock *sk)
1565 BUG_TRAP(sk->sk_state == TCP_CLOSE);
1566 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
1568 /* It cannot be in hash table! */
1569 BUG_TRAP(sk_unhashed(sk));
1571 /* If it has a non-zero inet_sk(sk)->num, it must be bound */
1572 BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash);
1574 sk->sk_prot->destroy(sk);
1576 sk_stream_kill_queues(sk);
1578 xfrm_sk_free_policy(sk);
1580 sk_refcnt_debug_release(sk);
1582 atomic_dec(&tcp_orphan_count);
1586 void tcp_close(struct sock *sk, long timeout)
1588 struct sk_buff *skb;
1589 int data_was_unread = 0;
1592 sk->sk_shutdown = SHUTDOWN_MASK;
1594 if (sk->sk_state == TCP_LISTEN) {
1595 tcp_set_state(sk, TCP_CLOSE);
1598 tcp_listen_stop(sk);
1600 goto adjudge_to_death;
1603 /* We need to flush the recv. buffs. We do this only on the
1604 * descriptor close, not protocol-sourced closes, because the
1605 * reader process may not have drained the data yet!
1607 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1608 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1610 data_was_unread += len;
1614 sk_stream_mem_reclaim(sk);
1616 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1617 * 3.10, we send a RST here because data was lost. To
1618 * witness the awful effects of the old behavior of always
1619 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1620 * a bulk GET in an FTP client, suspend the process, wait
1621 * for the client to advertise a zero window, then kill -9
1622 * the FTP client, wheee... Note: timeout is always zero
1625 if (data_was_unread) {
1626 /* Unread data was tossed, zap the connection. */
1627 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1628 tcp_set_state(sk, TCP_CLOSE);
1629 tcp_send_active_reset(sk, GFP_KERNEL);
1630 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1631 /* Check zero linger _after_ checking for unread data. */
1632 sk->sk_prot->disconnect(sk, 0);
1633 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1634 } else if (tcp_close_state(sk)) {
1635 /* We FIN if the application ate all the data before
1636 * zapping the connection.
1639 /* RED-PEN. Formally speaking, we have broken TCP state
1640 * machine. State transitions:
1642 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1643 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1644 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1646 * are legal only when FIN has been sent (i.e. in window),
1647 * rather than queued out of window. Purists blame.
1649 * F.e. "RFC state" is ESTABLISHED,
1650 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1652 * The visible deviations are that sometimes
1653 * we enter time-wait state, when it is not really required
1654 * (harmless), and do not send active resets, when they are
1655 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1656 * they look like CLOSING or LAST_ACK for Linux)
1657 * Probably, I missed some more holelets.
1663 sk_stream_wait_close(sk, timeout);
1666 /* It is the last release_sock in its life. It will remove backlog. */
1670 /* Now socket is owned by kernel and we acquire BH lock
1671 to finish close. No need to check for user refs.
1675 BUG_TRAP(!sock_owned_by_user(sk));
1680 /* This is a (useful) BSD violation of the RFC. There is a
1681 * problem with TCP as specified in that the other end could
1682 * keep a socket open forever with no application left at this end.
1683 * We use a 3 minute timeout (about the same as BSD) then kill
1684 * our end. If they send after that then tough - BUT: long enough
1685 * that we won't make the old 4*rto = almost no time - whoops
1688 * Nope, it was not mistake. It is really desired behaviour
1689 * f.e. on http servers, when such sockets are useless, but
1690 * consume significant resources. Let's do it with special
1691 * linger2 option. --ANK
1694 if (sk->sk_state == TCP_FIN_WAIT2) {
1695 struct tcp_sock *tp = tcp_sk(sk);
1696 if (tp->linger2 < 0) {
1697 tcp_set_state(sk, TCP_CLOSE);
1698 tcp_send_active_reset(sk, GFP_ATOMIC);
1699 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1701 int tmo = tcp_fin_time(tp);
1703 if (tmo > TCP_TIMEWAIT_LEN) {
1704 tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
1706 atomic_inc(&tcp_orphan_count);
1707 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1712 if (sk->sk_state != TCP_CLOSE) {
1713 sk_stream_mem_reclaim(sk);
1714 if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
1715 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1716 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1717 if (net_ratelimit())
1718 printk(KERN_INFO "TCP: too many orphaned "
1720 tcp_set_state(sk, TCP_CLOSE);
1721 tcp_send_active_reset(sk, GFP_ATOMIC);
1722 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1725 atomic_inc(&tcp_orphan_count);
1727 if (sk->sk_state == TCP_CLOSE)
1728 tcp_destroy_sock(sk);
1729 /* Otherwise, socket is reprieved until protocol close. */
1737 /* These states need RST on ABORT according to RFC793 */
1739 static inline int tcp_need_reset(int state)
1741 return (1 << state) &
1742 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1743 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1746 int tcp_disconnect(struct sock *sk, int flags)
1748 struct inet_sock *inet = inet_sk(sk);
1749 struct tcp_sock *tp = tcp_sk(sk);
1751 int old_state = sk->sk_state;
1753 if (old_state != TCP_CLOSE)
1754 tcp_set_state(sk, TCP_CLOSE);
1756 /* ABORT function of RFC793 */
1757 if (old_state == TCP_LISTEN) {
1758 tcp_listen_stop(sk);
1759 } else if (tcp_need_reset(old_state) ||
1760 (tp->snd_nxt != tp->write_seq &&
1761 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1762 /* The last check adjusts for discrepancy of Linux wrt. RFC
1765 tcp_send_active_reset(sk, gfp_any());
1766 sk->sk_err = ECONNRESET;
1767 } else if (old_state == TCP_SYN_SENT)
1768 sk->sk_err = ECONNRESET;
1770 tcp_clear_xmit_timers(sk);
1771 __skb_queue_purge(&sk->sk_receive_queue);
1772 sk_stream_writequeue_purge(sk);
1773 __skb_queue_purge(&tp->out_of_order_queue);
1777 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1778 inet_reset_saddr(sk);
1780 sk->sk_shutdown = 0;
1781 sock_reset_flag(sk, SOCK_DONE);
1783 if ((tp->write_seq += tp->max_window + 2) == 0)
1788 tp->packets_out = 0;
1789 tp->snd_ssthresh = 0x7fffffff;
1790 tp->snd_cwnd_cnt = 0;
1791 tcp_set_ca_state(tp, TCP_CA_Open);
1792 tcp_clear_retrans(tp);
1793 tcp_delack_init(tp);
1794 sk->sk_send_head = NULL;
1795 tp->rx_opt.saw_tstamp = 0;
1796 tcp_sack_reset(&tp->rx_opt);
1799 BUG_TRAP(!inet->num || inet->bind_hash);
1801 sk->sk_error_report(sk);
1806 * Wait for an incoming connection, avoid race
1807 * conditions. This must be called with the socket locked.
1809 static int wait_for_connect(struct sock *sk, long timeo)
1811 struct tcp_sock *tp = tcp_sk(sk);
1816 * True wake-one mechanism for incoming connections: only
1817 * one process gets woken up, not the 'whole herd'.
1818 * Since we do not 'race & poll' for established sockets
1819 * anymore, the common case will execute the loop only once.
1821 * Subtle issue: "add_wait_queue_exclusive()" will be added
1822 * after any current non-exclusive waiters, and we know that
1823 * it will always _stay_ after any new non-exclusive waiters
1824 * because all non-exclusive waiters are added at the
1825 * beginning of the wait-queue. As such, it's ok to "drop"
1826 * our exclusiveness temporarily when we get woken up without
1827 * having to remove and re-insert us on the wait queue.
1830 prepare_to_wait_exclusive(sk->sk_sleep, &wait,
1831 TASK_INTERRUPTIBLE);
1833 if (reqsk_queue_empty(&tp->accept_queue))
1834 timeo = schedule_timeout(timeo);
1837 if (!reqsk_queue_empty(&tp->accept_queue))
1840 if (sk->sk_state != TCP_LISTEN)
1842 err = sock_intr_errno(timeo);
1843 if (signal_pending(current))
1849 finish_wait(sk->sk_sleep, &wait);
1854 * This will accept the next outstanding connection.
1857 struct sock *tcp_accept(struct sock *sk, int flags, int *err)
1859 struct tcp_sock *tp = tcp_sk(sk);
1865 /* We need to make sure that this socket is listening,
1866 * and that it has something pending.
1869 if (sk->sk_state != TCP_LISTEN)
1872 /* Find already established connection */
1873 if (reqsk_queue_empty(&tp->accept_queue)) {
1874 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1876 /* If this is a non blocking socket don't sleep */
1881 error = wait_for_connect(sk, timeo);
1886 newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
1887 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
1898 * Socket option code for TCP.
1900 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1903 struct tcp_sock *tp = tcp_sk(sk);
1907 if (level != SOL_TCP)
1908 return tp->af_specific->setsockopt(sk, level, optname,
1911 /* This is a string value; all the others are ints */
1912 if (optname == TCP_CONGESTION) {
1913 char name[TCP_CA_NAME_MAX];
1918 val = strncpy_from_user(name, optval,
1919 min(TCP_CA_NAME_MAX-1, optlen));
1925 err = tcp_set_congestion_control(tp, name);
1930 if (optlen < sizeof(int))
1933 if (get_user(val, (int __user *)optval))
1940 /* Values greater than interface MTU won't take effect. However
1941 * at the point when this call is done we typically don't yet
1942 * know which interface is going to be used */
1943 if (val < 8 || val > MAX_TCP_WINDOW) {
1947 tp->rx_opt.user_mss = val;
1952 /* TCP_NODELAY is weaker than TCP_CORK, so that
1953 * this option on corked socket is remembered, but
1954 * it is not activated until cork is cleared.
1956 * However, when TCP_NODELAY is set we make
1957 * an explicit push, which overrides even TCP_CORK
1958 * for currently queued segments.
1960 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1961 tcp_push_pending_frames(sk, tp);
1963 tp->nonagle &= ~TCP_NAGLE_OFF;
1968 /* When set, indicates that non-full frames should always be queued.
1969 * Later the user clears this option and we transmit
1970 * any pending partial frames in the queue. This is
1971 * meant to be used alongside sendfile() to get properly
1972 * filled frames when the user (for example) must write
1973 * out headers with a write() call first and then use
1974 * sendfile to send out the data parts.
1976 * TCP_CORK can be set together with TCP_NODELAY and it is
1977 * stronger than TCP_NODELAY.
1980 tp->nonagle |= TCP_NAGLE_CORK;
1982 tp->nonagle &= ~TCP_NAGLE_CORK;
1983 if (tp->nonagle&TCP_NAGLE_OFF)
1984 tp->nonagle |= TCP_NAGLE_PUSH;
1985 tcp_push_pending_frames(sk, tp);
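/* The pattern TCP_CORK is meant for, roughly:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, header, header_len);
 *	sendfile(fd, file_fd, NULL, file_len);
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &off, sizeof(off));
 *
 * Clearing the option pushes out any partial frame that was held back
 * while corked (the tcp_push_pending_frames() call above).
 */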
1990 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1993 tp->keepalive_time = val * HZ;
1994 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1995 !((1 << sk->sk_state) &
1996 (TCPF_CLOSE | TCPF_LISTEN))) {
1997 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1998 if (tp->keepalive_time > elapsed)
1999 elapsed = tp->keepalive_time - elapsed;
2002 tcp_reset_keepalive_timer(sk, elapsed);
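/* Worked example: setting TCP_KEEPIDLE to 600 on a connection that last
 * received data 100 seconds ago re-arms the keepalive timer for the
 * remaining 500 seconds; keepalive_time and rcv_tstamp are both kept in
 * jiffies, hence the val * HZ conversion above.
 */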
2007 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2010 tp->keepalive_intvl = val * HZ;
2013 if (val < 1 || val > MAX_TCP_KEEPCNT)
2016 tp->keepalive_probes = val;
2019 if (val < 1 || val > MAX_TCP_SYNCNT)
2022 tp->syn_retries = val;
2028 else if (val > sysctl_tcp_fin_timeout / HZ)
2031 tp->linger2 = val * HZ;
2034 case TCP_DEFER_ACCEPT:
2035 tp->defer_accept = 0;
2037 /* Translate value in seconds to number of
2039 while (tp->defer_accept < 32 &&
2040 val > ((TCP_TIMEOUT_INIT / HZ) <<
2047 case TCP_WINDOW_CLAMP:
2049 if (sk->sk_state != TCP_CLOSE) {
2053 tp->window_clamp = 0;
2055 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2056 SOCK_MIN_RCVBUF / 2 : val;
2061 tp->ack.pingpong = 1;
2063 tp->ack.pingpong = 0;
2064 if ((1 << sk->sk_state) &
2065 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2066 tcp_ack_scheduled(tp)) {
2067 tp->ack.pending |= TCP_ACK_PUSHED;
2068 cleanup_rbuf(sk, 1);
2070 tp->ack.pingpong = 1;
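/* Note that TCP_QUICKACK is not a sticky mode: clearing pingpong only
 * suppresses delayed ACKs for the time being (and cleanup_rbuf() above
 * flushes one if an ACK is already scheduled); the usual heuristics may
 * re-enable pingpong mode later.
 */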
2083 /* Return information about state of tcp endpoint in API format. */
2084 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2086 struct tcp_sock *tp = tcp_sk(sk);
2087 u32 now = tcp_time_stamp;
2089 memset(info, 0, sizeof(*info));
2091 info->tcpi_state = sk->sk_state;
2092 info->tcpi_ca_state = tp->ca_state;
2093 info->tcpi_retransmits = tp->retransmits;
2094 info->tcpi_probes = tp->probes_out;
2095 info->tcpi_backoff = tp->backoff;
2097 if (tp->rx_opt.tstamp_ok)
2098 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2099 if (tp->rx_opt.sack_ok)
2100 info->tcpi_options |= TCPI_OPT_SACK;
2101 if (tp->rx_opt.wscale_ok) {
2102 info->tcpi_options |= TCPI_OPT_WSCALE;
2103 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2104 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2107 if (tp->ecn_flags&TCP_ECN_OK)
2108 info->tcpi_options |= TCPI_OPT_ECN;
2110 info->tcpi_rto = jiffies_to_usecs(tp->rto);
2111 info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
2112 info->tcpi_snd_mss = tp->mss_cache;
2113 info->tcpi_rcv_mss = tp->ack.rcv_mss;
2115 info->tcpi_unacked = tp->packets_out;
2116 info->tcpi_sacked = tp->sacked_out;
2117 info->tcpi_lost = tp->lost_out;
2118 info->tcpi_retrans = tp->retrans_out;
2119 info->tcpi_fackets = tp->fackets_out;
2121 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2122 info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
2123 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2125 info->tcpi_pmtu = tp->pmtu_cookie;
2126 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2127 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2128 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2129 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2130 info->tcpi_snd_cwnd = tp->snd_cwnd;
2131 info->tcpi_advmss = tp->advmss;
2132 info->tcpi_reordering = tp->reordering;
2134 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2135 info->tcpi_rcv_space = tp->rcvq_space.space;
2137 info->tcpi_total_retrans = tp->total_retrans;
2140 EXPORT_SYMBOL_GPL(tcp_get_info);
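/* Typical consumer of the snapshot above, sketched from userspace:
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *	getsockopt(fd, SOL_TCP, TCP_INFO, &info, &len);
 *
 * after which fields such as info.tcpi_rtt or info.tcpi_snd_cwnd can be
 * inspected; the TCP_INFO case of tcp_getsockopt() below services it.
 */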
2142 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2145 struct tcp_sock *tp = tcp_sk(sk);
2148 if (level != SOL_TCP)
2149 return tp->af_specific->getsockopt(sk, level, optname,
2152 if (get_user(len, optlen))
2155 len = min_t(unsigned int, len, sizeof(int));
2162 val = tp->mss_cache;
2163 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2164 val = tp->rx_opt.user_mss;
2167 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2170 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2173 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2176 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2179 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2182 val = tp->syn_retries ? : sysctl_tcp_syn_retries;
2187 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2189 case TCP_DEFER_ACCEPT:
2190 val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
2191 (tp->defer_accept - 1));
2193 case TCP_WINDOW_CLAMP:
2194 val = tp->window_clamp;
2197 struct tcp_info info;
2199 if (get_user(len, optlen))
2202 tcp_get_info(sk, &info);
2204 len = min_t(unsigned int, len, sizeof(info));
2205 if (put_user(len, optlen))
2207 if (copy_to_user(optval, &info, len))
2212 val = !tp->ack.pingpong;
2215 case TCP_CONGESTION:
2216 if (get_user(len, optlen))
2218 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2219 if (put_user(len, optlen))
2221 if (copy_to_user(optval, tp->ca_ops->name, len))
2225 return -ENOPROTOOPT;
2228 if (put_user(len, optlen))
2230 if (copy_to_user(optval, &val, len))
2236 extern void __skb_cb_too_small_for_tcp(int, int);
2237 extern struct tcp_congestion_ops tcp_reno;
2239 static __initdata unsigned long thash_entries;
2240 static int __init set_thash_entries(char *str)
2244 thash_entries = simple_strtoul(str, &str, 0);
2247 __setup("thash_entries=", set_thash_entries);
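/* Booting with "thash_entries=N" on the kernel command line overrides the
 * automatic, memory-based sizing of the TCP hash tables performed in
 * tcp_init() below; leaving it at 0 keeps the computed default.
 */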
2249 void __init tcp_init(void)
2251 struct sk_buff *skb = NULL;
2254 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2255 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2258 tcp_hashinfo.bind_bucket_cachep =
2259 kmem_cache_create("tcp_bind_bucket",
2260 sizeof(struct inet_bind_bucket), 0,
2261 SLAB_HWCACHE_ALIGN, NULL, NULL);
2262 if (!tcp_hashinfo.bind_bucket_cachep)
2263 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2265 /* Size and allocate the main established and bind bucket
2268 * The methodology is similar to that of the buffer cache.
2270 tcp_hashinfo.ehash =
2271 alloc_large_system_hash("TCP established",
2272 sizeof(struct inet_ehash_bucket),
2274 (num_physpages >= 128 * 1024) ?
2278 &tcp_hashinfo.ehash_size,
2281 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2282 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2283 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2284 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2287 tcp_hashinfo.bhash =
2288 alloc_large_system_hash("TCP bind",
2289 sizeof(struct inet_bind_hashbucket),
2290 tcp_hashinfo.ehash_size,
2291 (num_physpages >= 128 * 1024) ?
2295 &tcp_hashinfo.bhash_size,
2298 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2299 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2300 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2301 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2304 /* Try to be a bit smarter and adjust defaults depending
2305 * on available memory.
2307 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2308 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2312 sysctl_local_port_range[0] = 32768;
2313 sysctl_local_port_range[1] = 61000;
2314 sysctl_tcp_max_tw_buckets = 180000;
2315 sysctl_tcp_max_orphans = 4096 << (order - 4);
2316 sysctl_max_syn_backlog = 1024;
2317 } else if (order < 3) {
2318 sysctl_local_port_range[0] = 1024 * (3 - order);
2319 sysctl_tcp_max_tw_buckets >>= (3 - order);
2320 sysctl_tcp_max_orphans >>= (3 - order);
2321 sysctl_max_syn_backlog = 128;
2323 tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
2325 sysctl_tcp_mem[0] = 768 << order;
2326 sysctl_tcp_mem[1] = 1024 << order;
2327 sysctl_tcp_mem[2] = 1536 << order;
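/* tcp_mem is in pages: below sysctl_tcp_mem[0] the stack does not worry
 * about memory, between [1] and [2] it enters memory pressure and starts
 * collapsing and pruning queues, and [2] is the hard upper bound for new
 * allocations. Scaling by `order' ties these defaults to the amount of
 * memory found at boot.
 */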
2330 sysctl_tcp_wmem[2] = 64 * 1024;
2331 sysctl_tcp_rmem[0] = PAGE_SIZE;
2332 sysctl_tcp_rmem[1] = 43689;
2333 sysctl_tcp_rmem[2] = 2 * 43689;
2336 printk(KERN_INFO "TCP: Hash tables configured "
2337 "(established %d bind %d)\n",
2338 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2340 tcp_register_congestion_control(&tcp_reno);
2343 EXPORT_SYMBOL(tcp_accept);
2344 EXPORT_SYMBOL(tcp_close);
2345 EXPORT_SYMBOL(tcp_destroy_sock);
2346 EXPORT_SYMBOL(tcp_disconnect);
2347 EXPORT_SYMBOL(tcp_getsockopt);
2348 EXPORT_SYMBOL(tcp_ioctl);
2349 EXPORT_SYMBOL(tcp_poll);
2350 EXPORT_SYMBOL(tcp_read_sock);
2351 EXPORT_SYMBOL(tcp_recvmsg);
2352 EXPORT_SYMBOL(tcp_sendmsg);
2353 EXPORT_SYMBOL(tcp_sendpage);
2354 EXPORT_SYMBOL(tcp_setsockopt);
2355 EXPORT_SYMBOL(tcp_shutdown);
2356 EXPORT_SYMBOL(tcp_statistics);