]> pilppa.org Git - linux-2.6-omap-h63xx.git/blob - net/dccp/input.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6
[linux-2.6-omap-h63xx.git] / net / dccp / input.c
1 /*
2  *  net/dccp/input.c
3  *
4  *  An implementation of the DCCP protocol
5  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6  *
7  *      This program is free software; you can redistribute it and/or
8  *      modify it under the terms of the GNU General Public License
9  *      as published by the Free Software Foundation; either version
10  *      2 of the License, or (at your option) any later version.
11  */
12
13 #include <linux/dccp.h>
14 #include <linux/skbuff.h>
15
16 #include <net/sock.h>
17
18 #include "ackvec.h"
19 #include "ccid.h"
20 #include "dccp.h"
21
22 /* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
23 int sysctl_dccp_sync_ratelimit  __read_mostly = HZ / 8;
24
25 static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
26 {
27         __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
28         __skb_queue_tail(&sk->sk_receive_queue, skb);
29         skb_set_owner_r(skb, sk);
30         sk->sk_data_ready(sk, 0);
31 }
32
/*
 * dccp_fin  -  react to a connection-terminating packet (Close/CloseReq/Reset)
 *
 * Shuts down both directions, marks the socket done, and queues the closing
 * skb so that a blocked reader (dccp_recvmsg) wakes up and sees EOF.
 */
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * On receiving Close/CloseReq, both RD/WR shutdown are performed.
	 * RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
	 * receiving the closing segment, but there is no guarantee that such
	 * data will be processed at all.
	 */
	sk->sk_shutdown = SHUTDOWN_MASK;
	sock_set_flag(sk, SOCK_DONE);
	dccp_enqueue_skb(sk, skb);
}
45
/*
 * dccp_rcv_close  -  process a received Close packet (RFC 4340, 8.3)
 *
 * Returns 1 if @skb was queued onto the receive queue (the caller must then
 * not free it), 0 if the caller should discard the packet.
 */
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	switch (sk->sk_state) {
	/*
	 * We ignore Close when received in one of the following states:
	 *  - CLOSED		(may be a late or duplicate packet)
	 *  - PASSIVE_CLOSEREQ	(the peer has sent a CloseReq earlier)
	 *  - RESPOND		(already handled by dccp_check_req)
	 */
	case DCCP_CLOSING:
		/*
		 * Simultaneous-close: receiving a Close after sending one. This
		 * can happen if both client and server perform active-close and
		 * will result in an endless ping-pong of crossing and retrans-
		 * mitted Close packets, which only terminates when one of the
		 * nodes times out (min. 64 seconds). Quicker convergence can be
		 * achieved when one of the nodes acts as tie-breaker.
		 * This is ok as both ends are done with data transfer and each
		 * end is just waiting for the other to acknowledge termination.
		 */
		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
			break;	/* server stays put; client acts as tie-breaker */
		/* fall through */
	case DCCP_REQUESTING:
	case DCCP_ACTIVE_CLOSEREQ:
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_done(sk);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
		/* fall through */
	case DCCP_PASSIVE_CLOSE:
		/*
		 * Retransmitted Close: we have already enqueued the first one.
		 */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
91
/*
 * dccp_rcv_closereq  -  process a received CloseReq packet (RFC 4340, 8.3)
 *
 * Only a client may act on a CloseReq; a server answers it with a Sync as
 * per Step 7.  Returns 1 if @skb was queued onto the receive queue, 0 if
 * the caller should discard the packet.
 */
static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	/*
	 *   Step 7: Check for unexpected packet types
	 *	If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return queued;
	}

	/* Step 13: process relevant Client states < CLOSEREQ */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		dccp_send_close(sk, 0);
		dccp_set_state(sk, DCCP_CLOSING);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
		/* fall through */
	case DCCP_PASSIVE_CLOSEREQ:
		/* Retransmitted CloseReq: first one was already enqueued */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
125
/*
 * dccp_reset_code_convert  -  map a DCCP Reset Code onto an errno value
 * @code: dccph_reset_code taken from a received Reset packet
 *
 * Returns 0 for codes that denote normal termination or carry no error
 * information, a positive errno value otherwise.  Codes outside the known
 * range (>= DCCP_MAX_RESET_CODES) also map to 0.
 */
static u8 dccp_reset_code_convert(const u8 code)
{
	/*
	 * static const: the table is identical on every call, so place it in
	 * read-only data once instead of rebuilding it on the stack each time.
	 */
	static const u8 error_code[] = {
	[DCCP_RESET_CODE_CLOSED]	     = 0,	/* normal termination */
	[DCCP_RESET_CODE_UNSPECIFIED]	     = 0,	/* nothing known */
	[DCCP_RESET_CODE_ABORTED]	     = ECONNRESET,

	[DCCP_RESET_CODE_NO_CONNECTION]      = ECONNREFUSED,
	[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
	[DCCP_RESET_CODE_TOO_BUSY]	     = EUSERS,
	[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,

	[DCCP_RESET_CODE_PACKET_ERROR]	     = ENOMSG,
	[DCCP_RESET_CODE_BAD_INIT_COOKIE]    = EBADR,
	[DCCP_RESET_CODE_BAD_SERVICE_CODE]   = EBADRQC,
	[DCCP_RESET_CODE_OPTION_ERROR]	     = EILSEQ,
	[DCCP_RESET_CODE_MANDATORY_ERROR]    = EOPNOTSUPP,
	};

	return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
}
147
/*
 * dccp_rcv_reset  -  process a received Reset (Step 9 of RFC 4340, 8.5)
 *
 * Records the peer's error code on the socket, queues the skb (via
 * dccp_fin) so a blocked dccp_recvmsg returns, and moves the socket into
 * TIMEWAIT.  The error must be set before dccp_fin so the reader sees it.
 */
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
	u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);

	sk->sk_err = err;

	/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
	dccp_fin(sk, skb);

	/* Only notify a non-orphaned socket of the error */
	if (err && !sock_flag(sk, SOCK_DEAD))
		sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}
161
162 static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
163 {
164         struct dccp_sock *dp = dccp_sk(sk);
165
166         if (dccp_msk(sk)->dccpms_send_ack_vector)
167                 dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
168                                             DCCP_SKB_CB(skb)->dccpd_ack_seq);
169 }
170
171 static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
172 {
173         const struct dccp_sock *dp = dccp_sk(sk);
174
175         /* Don't deliver to RX CCID when node has shut down read end. */
176         if (!(sk->sk_shutdown & RCV_SHUTDOWN))
177                 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
178         /*
179          * Until the TX queue has been drained, we can not honour SHUT_WR, since
180          * we need received feedback as input to adjust congestion control.
181          */
182         if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
183                 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
184 }
185
/*
 * dccp_check_seqno  -  sequence-number validation, Steps 5 and 6 of
 *			RFC 4340, 8.5
 *
 * Returns 0 if @skb passes the checks (and sequence-number state has been
 * updated), -1 if the packet must be dropped.  Sequence-invalid packets
 * trigger a Sync, rate-limited per RFC 4340, 7.5.4, before being dropped.
 */
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
		    dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
			dccp_update_gsr(sk, seqno);
		else
			return -1;
	}

	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	/* Closing-type packets use tighter windows anchored at GSR/GAR */
	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = ADD48(dp->dccps_gsr, 1);
		lawl = dp->dccps_gar;
	}

	if (between48(seqno, lswl, dp->dccps_swh) &&
	    (ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(ackno, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, seqno);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
			dp->dccps_gar = ackno;
	} else {
		unsigned long now = jiffies;
		/*
		 *   Step 6: Check sequence numbers
		 *      Otherwise,
		 *         If P.type == Reset,
		 *            Send Sync packet acknowledging S.GSR
		 *         Otherwise,
		 *            Send Sync packet acknowledging P.seqno
		 *      Drop packet and return
		 *
		 *   These Syncs are rate-limited as per RFC 4340, 7.5.4:
		 *   at most 1 / (dccp_sync_rate_limit * HZ) Syncs per second.
		 */
		if (time_before(now, (dp->dccps_rate_last +
				      sysctl_dccp_sync_ratelimit)))
			return 0;

		DCCP_WARN("DCCP: Step 6 failed for %s packet, "
			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			  "sending SYNC...\n",  dccp_packet_name(dh->dccph_type),
			  (unsigned long long) lswl, (unsigned long long) seqno,
			  (unsigned long long) dp->dccps_swh,
			  (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
							      : "exists",
			  (unsigned long long) lawl, (unsigned long long) ackno,
			  (unsigned long long) dp->dccps_awh);

		dp->dccps_rate_last = now;

		/* An invalid Reset is answered by a Sync acknowledging GSR */
		if (dh->dccph_type == DCCP_PKT_RESET)
			seqno = dp->dccps_gsr;
		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}
282
/*
 * __dccp_rcv_established  -  packet-type dispatch on an established socket
 *
 * @skb has already passed sequence-number and option checks.  Data packets
 * are queued for the application; control packets are acted upon and freed.
 * Always returns 0; the skb is consumed on every path (queued or freed).
 */
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
				  const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
		 * - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
		 * - sk_receive_queue is full, use Code 2, "Receive Buffer"
		 */
		dccp_enqueue_skb(sk, skb);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_rcv_reset(sk, skb);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_CLOSE:
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *    or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *    or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (dccp_delta_seqno(dp->dccps_osr,
				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * From RFC 4340, sec. 5.7
		 *
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		goto discard;
	}

	/*
	 * Packets that fall out of the switch (types not handled above, and
	 * the Request/Response cases after breaking) count as input errors.
	 */
	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}
363
/*
 * dccp_rcv_established  -  receive path for sockets in established states
 *
 * Performs sequence validation (Step 6), option parsing (Step 8), ack and
 * ack-vector bookkeeping, and CCID delivery, then dispatches on packet
 * type via __dccp_rcv_established.  Always returns 0; @skb is consumed on
 * every path.
 */
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, NULL, skb))
		goto discard;

	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_event_ack_recv(sk, skb);

	/* Drop the packet if recording it in the ack vector fails */
	if (dccp_msk(sk)->dccpms_send_ack_vector &&
	    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
			    DCCP_SKB_CB(skb)->dccpd_seq,
			    DCCP_ACKVEC_STATE_RECEIVED))
		goto discard;
	dccp_deliver_input_to_ccids(sk, skb);

	return __dccp_rcv_established(sk, skb, dh, len);
discard:
	__kfree_skb(skb);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_established);
392
/*
 * dccp_rcv_request_sent_state_process  -  client handling of a packet in
 *					   REQUEST state (RFC 4340, 8.5,
 *					   Steps 4 and 10)
 *
 * Return value:
 *    0  -  @skb was consumed (freed) here
 *   -1  -  @skb was processed but not freed; the caller frees it
 *    1  -  invalid packet; the caller sends Reset(Packet Error)
 *
 * Reset packets never reach this function (they are handled in Step 9 of
 * dccp_rcv_state_process), so every non-Response packet is invalid here.
 */
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		long tstamp = dccp_timestamp();

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu \n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		if (dccp_parse_options(sk, NULL, skb))
			goto out_invalid_packet;

		/* Obtain usec RTT sample from SYN exchange (used by CCID 3) */
		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
			    dp->dccps_options_received.dccpor_timestamp_echo));

		if (dccp_msk(sk)->dccpms_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto out_invalid_packet; /* FIXME: change error code */

		/* Stop the REQUEST timer and drop the retransmit template */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		WARN_ON(sk->sk_send_head == NULL);
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_update_gsr(sk, dp->dccps_isr);
		/*
		 * SWL and AWL are initially adjusted so that they are not less than
		 * the initial Sequence Numbers received and sent, respectively:
		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
		 *	AWL := max(GSS - W' + 1, ISS).
		 * These adjustments MUST be applied only at the beginning of the
		 * connection.
		 *
		 * AWL was adjusted in dccp_v4_connect -acme
		 */
		dccp_set_seqno(&dp->dccps_swl,
			       max48(dp->dccps_swl, dp->dccps_isr));

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Make sure socket is routed, for correct metrics. */
		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick, its
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* dccp_v4_do_rcv will send a reset */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;
}
513
/*
 * dccp_rcv_respond_partopen_state_process  -  handle a packet while the
 *					       socket is in RESPOND/PARTOPEN
 *
 * Returns 1 if @skb was queued (by __dccp_rcv_established), 0 otherwise
 * (the caller then discards the packet).
 */
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		if (sk->sk_state == DCCP_RESPOND)
			break;	/* Data is ignored while still in RESPOND */
		/* fall through: in PARTOPEN, Data completes the handshake */
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be reseting the PARTOPEN (DELACK) timer
		 * here but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc, for now were not clearing it, sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}
557
/*
 * dccp_rcv_state_process  -  per-state packet processing (RFC 4340, 8.5)
 *
 * Receive path for sockets that are not (or not yet) fully established:
 * LISTEN, REQUEST, RESPOND, PARTOPEN and the closing states.
 *
 * Return value: 0 if @skb was consumed here; 1 if the caller should send a
 * Reset (with the code left in dcb->dccpd_reset_code) and drop the packet.
 */
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const int old_state = sk->sk_state;
	int queued = 0;

	/*
	 *  Step 3: Process LISTEN state
	 *
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
	 *	      Cookies Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
								    skb) < 0)
				return 1;

			/* FIXME: do congestion control initialization */
			goto discard;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		/* Caller (dccp_v4_do_rcv) will send Reset */
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	}

	/* In REQUEST state, sequence/option checks happen in Step 4 instead */
	if (sk->sk_state != DCCP_REQUESTING) {
		if (dccp_check_seqno(sk, skb))
			goto discard;

		/*
		 * Step 8: Process options and mark acknowledgeable
		 */
		if (dccp_parse_options(sk, NULL, skb))
			goto discard;

		if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
			dccp_event_ack_recv(sk, skb);

		if (dccp_msk(sk)->dccpms_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto discard;

		dccp_deliver_input_to_ccids(sk, skb);
	}

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	*/
	if (dh->dccph_type == DCCP_PKT_RESET) {
		dccp_rcv_reset(sk, skb);
		return 0;
		/*
		 *   Step 7: Check for unexpected packet types
		 *	If (S.is_server and P.type == Response)
		 *	    or (S.is_client and P.type == Request)
		 *	    or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
		    dh->dccph_type == DCCP_PKT_RESPONSE) ||
		    (dp->dccps_role == DCCP_ROLE_CLIENT &&
		     dh->dccph_type == DCCP_PKT_REQUEST) ||
		    (sk->sk_state == DCCP_RESPOND &&
		     dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_CLOSED:
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;

	case DCCP_REQUESTING:
		/* FIXME: do congestion control initialization */

		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		/* -1: packet was processed but not queued, free it here */
		__kfree_skb(skb);
		return 0;

	case DCCP_RESPOND:
	case DCCP_PARTOPEN:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			/* Handshake completed: wake up writers */
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
			break;
		}
	} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
		goto discard;
	}

	/* Note: gotos to 'discard' intentionally bypass the !queued test */
	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
707
708 /**
709  *  dccp_sample_rtt  -  Validate and finalise computation of RTT sample
710  *  @delta:     number of microseconds between packet and acknowledgment
711  *  The routine is kept generic to work in different contexts. It should be
712  *  called immediately when the ACK used for the RTT sample arrives.
713  */
714 u32 dccp_sample_rtt(struct sock *sk, long delta)
715 {
716         /* dccpor_elapsed_time is either zeroed out or set and > 0 */
717         delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;
718
719         if (unlikely(delta <= 0)) {
720                 DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
721                 return DCCP_SANE_RTT_MIN;
722         }
723         if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
724                 DCCP_WARN("RTT sample %ld too large, using max\n", delta);
725                 return DCCP_SANE_RTT_MAX;
726         }
727
728         return delta;
729 }
730
731 EXPORT_SYMBOL_GPL(dccp_sample_rtt);