X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=net%2Fipv4%2Ftcp_input.c;h=a62e0f90f566eda8df48bee60ca9b4c7d8501bfc;hb=9e10c47cb9fe3154416787523f7a0df02133063f;hp=b59da5308ac0ed35c135162fbfcc54cac32c8bd6;hpb=746aa32d280084dbd520249170852e4616799928;p=linux-2.6-omap-h63xx.git diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b59da5308ac..a62e0f90f56 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -863,6 +863,9 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) */ static void tcp_disable_fack(struct tcp_sock *tp) { + /* RFC3517 uses different metric in lost marker => reset on change */ + if (tcp_is_fack(tp)) + tp->lost_skb_hint = NULL; tp->rx_opt.sack_ok &= ~2; } @@ -923,7 +926,7 @@ static void tcp_init_metrics(struct sock *sk) } if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) { tp->mdev = dst_metric(dst, RTAX_RTTVAR); - tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); + tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); } tcp_set_rto(sk); tcp_bound_rto(sk); @@ -1112,16 +1115,23 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, * * Search retransmitted skbs from write_queue that were sent when snd_nxt was * less than what is now known to be received by the other end (derived from - * SACK blocks by the caller). Also calculate the lowest snd_nxt among the - * remaining retransmitted skbs to avoid some costly processing per ACKs. + * highest SACK block). Also calculate the lowest snd_nxt among the remaining + * retransmitted skbs to avoid some costly processing per ACKs. */ -static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto) +static int tcp_mark_lost_retrans(struct sock *sk) { + const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int flag = 0; int cnt = 0; u32 new_low_seq = tp->snd_nxt; + u32 received_upto = TCP_SKB_CB(tp->highest_sack)->end_seq; + + if (!tcp_is_fack(tp) || !tp->retrans_out || + !after(received_upto, tp->lost_retrans_low) || + icsk->icsk_ca_state != TCP_CA_Recovery) + return flag; tcp_for_write_queue(skb, sk) { u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; @@ -1230,6 +1240,99 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, return in_sack; } +static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp, + int *reord, int dup_sack, int fack_count) +{ + u8 sacked = TCP_SKB_CB(skb)->sacked; + int flag = 0; + + /* Account D-SACK for retransmitted packet. */ + if (dup_sack && (sacked & TCPCB_RETRANS)) { + if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) + tp->undo_retrans--; + if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una) && + (sacked & TCPCB_SACKED_ACKED)) + *reord = min(fack_count, *reord); + } + + /* Nothing to do; acked frame is about to be dropped (was ACKed). */ + if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) + return flag; + + if (!(sacked & TCPCB_SACKED_ACKED)) { + if (sacked & TCPCB_SACKED_RETRANS) { + /* If the segment is not tagged as lost, + * we do not clear RETRANS, believing + * that retransmission is still in flight. + */ + if (sacked & TCPCB_LOST) { + TCP_SKB_CB(skb)->sacked &= + ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); + tp->lost_out -= tcp_skb_pcount(skb); + tp->retrans_out -= tcp_skb_pcount(skb); + + /* clear lost hint */ + tp->retransmit_skb_hint = NULL; + } + } else { + if (!(sacked & TCPCB_RETRANS)) { + /* New sack for not retransmitted frame, + * which was in hole. It is reordering. 
+ */ + if (before(TCP_SKB_CB(skb)->seq, + tcp_highest_sack_seq(tp))) + *reord = min(fack_count, *reord); + + /* SACK enhanced F-RTO (RFC4138; Appendix B) */ + if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) + flag |= FLAG_ONLY_ORIG_SACKED; + } + + if (sacked & TCPCB_LOST) { + TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; + tp->lost_out -= tcp_skb_pcount(skb); + + /* clear lost hint */ + tp->retransmit_skb_hint = NULL; + } + } + + TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED; + flag |= FLAG_DATA_SACKED; + tp->sacked_out += tcp_skb_pcount(skb); + + fack_count += tcp_skb_pcount(skb); + + /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ + if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && + before(TCP_SKB_CB(skb)->seq, + TCP_SKB_CB(tp->lost_skb_hint)->seq)) + tp->lost_cnt_hint += tcp_skb_pcount(skb); + + if (fack_count > tp->fackets_out) + tp->fackets_out = fack_count; + + if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) + tp->highest_sack = skb; + + } else { + if (dup_sack && (sacked & TCPCB_RETRANS)) + *reord = min(fack_count, *reord); + } + + /* D-SACK. We can detect redundant retransmission in S|R and plain R + * frames and clear it. undo_retrans is decreased above, L|R frames + * are accounted above as well. + */ + if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) { + TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; + tp->retrans_out -= tcp_skb_pcount(skb); + tp->retransmit_skb_hint = NULL; + } + + return flag; +} + static int tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una) { @@ -1241,8 +1344,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ struct sk_buff *cached_skb; int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3; int reord = tp->packets_out; - int prior_fackets; - u32 highest_sack_end_seq = tp->lost_retrans_low; int flag = 0; int found_dup_sack = 0; int cached_fack_count; @@ -1253,9 +1354,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ if (!tp->sacked_out) { if (WARN_ON(tp->fackets_out)) tp->fackets_out = 0; - tp->highest_sack = tp->snd_una; + tp->highest_sack = tcp_write_queue_head(sk); } - prior_fackets = tp->fackets_out; found_dup_sack = tcp_check_dsack(tp, ack_skb, sp, num_sacks, prior_snd_una); @@ -1269,6 +1369,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) return 0; + if (!tp->packets_out) + goto out; + /* SACK fastpath: * if the only SACK change is the increase of the end_seq of * the first block then only apply that SACK block @@ -1365,7 +1468,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ tcp_for_write_queue_from(skb, sk) { int in_sack = 0; - u8 sacked; if (skb == tcp_send_head(sk)) break; @@ -1403,98 +1505,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ if (unlikely(in_sack < 0)) break; - sacked = TCP_SKB_CB(skb)->sacked; - - /* Account D-SACK for retransmitted packet. */ - if ((dup_sack && in_sack) && - (sacked & TCPCB_RETRANS) && - after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) - tp->undo_retrans--; + if (in_sack) + flag |= tcp_sacktag_one(skb, tp, &reord, dup_sack, fack_count); - /* The frame is ACKed. 
*/ - if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) { - if (sacked&TCPCB_RETRANS) { - if ((dup_sack && in_sack) && - (sacked&TCPCB_SACKED_ACKED)) - reord = min(fack_count, reord); - } - - /* Nothing to do; acked frame is about to be dropped. */ - fack_count += tcp_skb_pcount(skb); - continue; - } - - if (!in_sack) { - fack_count += tcp_skb_pcount(skb); - continue; - } - - if (!(sacked&TCPCB_SACKED_ACKED)) { - if (sacked & TCPCB_SACKED_RETRANS) { - /* If the segment is not tagged as lost, - * we do not clear RETRANS, believing - * that retransmission is still in flight. - */ - if (sacked & TCPCB_LOST) { - TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); - tp->lost_out -= tcp_skb_pcount(skb); - tp->retrans_out -= tcp_skb_pcount(skb); - - /* clear lost hint */ - tp->retransmit_skb_hint = NULL; - } - } else { - if (!(sacked & TCPCB_RETRANS)) { - /* New sack for not retransmitted frame, - * which was in hole. It is reordering. - */ - if (fack_count < prior_fackets) - reord = min(fack_count, reord); - - /* SACK enhanced F-RTO (RFC4138; Appendix B) */ - if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) - flag |= FLAG_ONLY_ORIG_SACKED; - } - - if (sacked & TCPCB_LOST) { - TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; - tp->lost_out -= tcp_skb_pcount(skb); - - /* clear lost hint */ - tp->retransmit_skb_hint = NULL; - } - } - - TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED; - flag |= FLAG_DATA_SACKED; - tp->sacked_out += tcp_skb_pcount(skb); - - fack_count += tcp_skb_pcount(skb); - if (fack_count > tp->fackets_out) - tp->fackets_out = fack_count; - - if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack)) { - tp->highest_sack = TCP_SKB_CB(skb)->seq; - highest_sack_end_seq = TCP_SKB_CB(skb)->end_seq; - } - } else { - if (dup_sack && (sacked&TCPCB_RETRANS)) - reord = min(fack_count, reord); - - fack_count += tcp_skb_pcount(skb); - } - - /* D-SACK. We can detect redundant retransmission - * in S|R and plain R frames and clear it. - * undo_retrans is decreased above, L|R frames - * are accounted above as well. - */ - if (dup_sack && - (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) { - TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; - tp->retrans_out -= tcp_skb_pcount(skb); - tp->retransmit_skb_hint = NULL; - } + fack_count += tcp_skb_pcount(skb); } /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct @@ -1504,17 +1518,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ flag &= ~FLAG_ONLY_ORIG_SACKED; } - if (tp->retrans_out && - after(highest_sack_end_seq, tp->lost_retrans_low) && - icsk->icsk_ca_state == TCP_CA_Recovery) - flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq); + flag |= tcp_mark_lost_retrans(sk); tcp_verify_left_out(tp); - if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss && + if ((reord < tp->fackets_out) && + ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) tcp_update_reordering(sk, tp->fackets_out - reord, 0); +out: + #if FASTRETRANS_DEBUG > 0 BUG_TRAP((int)tp->sacked_out >= 0); BUG_TRAP((int)tp->lost_out >= 0); @@ -1704,6 +1718,8 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; + + TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; /* * Count the retransmission made on RTO correctly (only when * waiting for the first ACK and did not get it)... 
@@ -1717,7 +1733,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 		} else {
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
 				tp->undo_marker = 0;
-			TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		}
 
 		/* Don't lost mark skbs that were fwd transmitted after RTO */
@@ -1853,6 +1869,26 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
 	return tcp_is_reno(tp) ? tp->sacked_out+1 : tp->fackets_out;
 }
 
+/* Heuristics to calculate the number of duplicate ACKs. There's no
+ * dupACK counter when SACK is enabled (without SACK, sacked_out is
+ * used for that purpose).
+ *
+ * Instead, with FACK, TCP uses fackets_out, which counts both the
+ * SACKed segments up to the highest received SACK block so far and
+ * the holes in between them.
+ *
+ * With reordering, holes may still be in flight, so RFC3517 recovery
+ * uses pure sacked_out (total number of SACKed segments) even though
+ * the RFC itself counts duplicate ACKs. The two are often equal, but
+ * they differ when e.g. out-of-window ACKs or packet duplication
+ * occur. Since neither happens due to loss, TCP should really
+ * ignore them.
+ */
+static inline int tcp_dupack_heurestics(struct tcp_sock *tp)
+{
+	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
+}
+
 static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 {
 	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
@@ -1973,13 +2009,13 @@ static int tcp_time_to_recover(struct sock *sk)
 		return 1;
 
 	/* Not-A-Trick#2 : Classic rule... */
-	if (tcp_fackets_out(tp) > tp->reordering)
+	if (tcp_dupack_heurestics(tp) > tp->reordering)
 		return 1;
 
 	/* Trick#3 : when we use RFC2988 timer restart, fast
 	 * retransmit can be triggered by timeout of queue head.
 	 */
-	if (tcp_head_timedout(sk))
+	if (tcp_is_fack(tp) && tcp_head_timedout(sk))
 		return 1;
 
 	/* Trick#4: It is still not OK... But will it be useful to delay
@@ -2012,8 +2048,10 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp,
 		tp->retransmit_skb_hint = NULL;
 }
 
-/* Mark head of queue up as lost. */
-static void tcp_mark_head_lost(struct sock *sk, int packets)
+/* Mark head of queue up as lost. With RFC3517 SACK, "packets" is checked
+ * against a sacked "cnt"; otherwise it's checked against a facked "cnt".
+ */
+static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -2035,8 +2073,13 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
 		/* this is not the most efficient way to do this...
*/ tp->lost_skb_hint = skb; tp->lost_cnt_hint = cnt; - cnt += tcp_skb_pcount(skb); - if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) + + if (tcp_is_fack(tp) || + (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) + cnt += tcp_skb_pcount(skb); + + if (((!fast_rexmit || (tp->lost_out > 0)) && (cnt > packets)) || + after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) break; if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; @@ -2049,17 +2092,22 @@ static void tcp_mark_head_lost(struct sock *sk, int packets) /* Account newly detected lost packet(s) */ -static void tcp_update_scoreboard(struct sock *sk) +static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) { struct tcp_sock *tp = tcp_sk(sk); - if (tcp_is_fack(tp)) { + if (tcp_is_reno(tp)) { + tcp_mark_head_lost(sk, 1, fast_rexmit); + } else if (tcp_is_fack(tp)) { int lost = tp->fackets_out - tp->reordering; if (lost <= 0) lost = 1; - tcp_mark_head_lost(sk, lost); + tcp_mark_head_lost(sk, lost, fast_rexmit); } else { - tcp_mark_head_lost(sk, 1); + int sacked_upto = tp->sacked_out - tp->reordering; + if (sacked_upto < 0) + sacked_upto = 0; + tcp_mark_head_lost(sk, sacked_upto, fast_rexmit); } /* New heuristics: it is possible only after we switched @@ -2067,7 +2115,7 @@ static void tcp_update_scoreboard(struct sock *sk) * Hence, we can detect timed out packets during fast * retransmit without falling to slow start. */ - if (!tcp_is_reno(tp) && tcp_head_timedout(sk)) { + if (tcp_is_fack(tp) && tcp_head_timedout(sk)) { struct sk_buff *skb; skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint @@ -2240,7 +2288,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked) { struct tcp_sock *tp = tcp_sk(sk); /* Partial ACK arrived. Force Hoe's retransmit. */ - int failed = tcp_is_reno(tp) || tp->fackets_out>tp->reordering; + int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering); if (tcp_may_undo(tp)) { /* Plain luck! Hole if filled with delayed @@ -2374,7 +2422,8 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) struct tcp_sock *tp = tcp_sk(sk); int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP)); int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) && - (tp->fackets_out > tp->reordering)); + (tcp_fackets_out(tp) > tp->reordering)); + int fast_rexmit = 0; /* Some technical things: * 1. Reno does not count dupacks (sacked_out) automatically. */ @@ -2394,11 +2443,11 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) return; /* C. Process data loss notification, provided it is valid. 
*/ - if ((flag&FLAG_DATA_LOST) && + if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) && before(tp->snd_una, tp->high_seq) && icsk->icsk_ca_state != TCP_CA_Open && tp->fackets_out > tp->reordering) { - tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering); + tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, 0); NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); } @@ -2517,10 +2566,11 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) tp->bytes_acked = 0; tp->snd_cwnd_cnt = 0; tcp_set_ca_state(sk, TCP_CA_Recovery); + fast_rexmit = 1; } - if (do_lost || tcp_head_timedout(sk)) - tcp_update_scoreboard(sk); + if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) + tcp_update_scoreboard(sk, fast_rexmit); tcp_cwnd_down(sk, flag); tcp_xmit_retransmit_queue(sk); } @@ -2644,6 +2694,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p, u32 cnt = 0; u32 reord = tp->packets_out; s32 seq_rtt = -1; + s32 ca_seq_rtt = -1; ktime_t last_ackt = net_invalid_timestamp(); while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { @@ -2652,6 +2703,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p, u32 packets_acked; u8 sacked = scb->sacked; + /* Determine how many packets and what bytes were acked, tso and else */ if (after(scb->end_seq, tp->snd_una)) { if (tcp_skb_pcount(skb) == 1 || !after(tp->snd_una, scb->seq)) @@ -2679,15 +2731,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p, if (sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= packets_acked; flag |= FLAG_RETRANS_DATA_ACKED; + ca_seq_rtt = -1; seq_rtt = -1; if ((flag & FLAG_DATA_ACKED) || (packets_acked > 1)) flag |= FLAG_NONHEAD_RETRANS_ACKED; } else { + ca_seq_rtt = now - scb->when; + last_ackt = skb->tstamp; if (seq_rtt < 0) { - seq_rtt = now - scb->when; - if (fully_acked) - last_ackt = skb->tstamp; + seq_rtt = ca_seq_rtt; } if (!(sacked & TCPCB_SACKED_ACKED)) reord = min(cnt, reord); @@ -2702,10 +2755,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p, !before(end_seq, tp->snd_up)) tp->urg_mode = 0; } else { + ca_seq_rtt = now - scb->when; + last_ackt = skb->tstamp; if (seq_rtt < 0) { - seq_rtt = now - scb->when; - if (fully_acked) - last_ackt = skb->tstamp; + seq_rtt = ca_seq_rtt; } reord = min(cnt, reord); } @@ -2765,8 +2818,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p, net_invalid_timestamp())) rtt_us = ktime_us_delta(ktime_get_real(), last_ackt); - else if (seq_rtt > 0) - rtt_us = jiffies_to_usecs(seq_rtt); + else if (ca_seq_rtt > 0) + rtt_us = jiffies_to_usecs(ca_seq_rtt); } ca_ops->pkts_acked(sk, pkts_acked, rtt_us); @@ -2996,17 +3049,13 @@ static int tcp_process_frto(struct sock *sk, int flag) } if (tp->frto_counter == 1) { - /* Sending of the next skb must be allowed or no F-RTO */ - if (!tcp_send_head(sk) || - after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, - tp->snd_una + tp->snd_wnd)) { - tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), - flag); - return 1; - } - + /* tcp_may_send_now needs to see updated state */ tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; tp->frto_counter = 2; + + if (!tcp_may_send_now(sk)) + tcp_enter_frto_loss(sk, 2, flag); + return 1; } else { switch (sysctl_tcp_frto_response) { @@ -3062,6 +3111,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) } prior_fackets = tp->fackets_out; + prior_in_flight = tcp_packets_in_flight(tp); if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. 
@@ -3101,16 +3151,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) if (!prior_packets) goto no_queue; - prior_in_flight = tcp_packets_in_flight(tp); - /* See if we can take anything off of the retransmit queue. */ flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets); + if (tp->frto_counter) + frto_cwnd = tcp_process_frto(sk, flag); /* Guarantee sacktag reordering detection against wrap-arounds */ if (before(tp->frto_highmark, tp->snd_una)) tp->frto_highmark = 0; - if (tp->frto_counter) - frto_cwnd = tcp_process_frto(sk, flag); if (tcp_ack_is_dubious(sk, flag)) { /* Advance CWND, if state allows this. */