/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.,
 *   "TCP Westwood: bandwidth estimation for TCP", Proc. ACM Mobicom 2001
 *
 * - A. Grieco, S. Mascolo,
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP", ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo,
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *   A Performance Evaluation Over the Internet", Proc. IEEE ICC 2004, Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is as in the original Reno.
 */
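/*
 * Illustrative summary (not part of the original header): after a loss
 * event, Westwood+ sets
 *
 *      ssthresh = (bw_est * rtt_min) / mss
 *
 * i.e. the estimated bandwidth-delay product expressed in packets,
 * instead of blindly halving cwnd as Reno does.  See
 * tcp_westwood_bw_rttmin() below for the actual computation.
 */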
#include <linux/config.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>
/* TCP Westwood structure */
struct westwood {
        u32    bw_ns_est;   /* first bandwidth estimation (not too smoothed) */
        u32    bw_est;      /* smoothed bandwidth estimate */
        u32    rtt_win_sx;  /* start of the current RTT evaluation window */
        u32    bk;          /* bytes acked during the current window */
        u32    snd_una;     /* used for evaluating the number of acked bytes */
        u32    cumul_ack;   /* bytes acked by the last ACK */
        u32    accounted;   /* bytes already credited by dupacks */
        u32    rtt;         /* last RTT sample */
        u32    rtt_min;     /* minimum observed RTT */
        u8     first_ack;   /* flag indicating that this is the first ACK */
};
/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN    (HZ/20)  /* 50ms */
#define TCP_WESTWOOD_INIT_RTT   (20*HZ)  /* deliberately conservative; see tcp_westwood_init() */
/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for
 * new passive connections we have no information about the RTTmin at
 * this time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value
 * was deliberately chosen to be conservative, since this way we are
 * sure it will be updated in a consistent way as soon as possible. That
 * will reasonably happen within the first RTT period of the connection
 * lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);

        w->bk = w->bw_ns_est = w->bw_est = 0;
        w->accounted = w->cumul_ack = 0;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
        w->rtt_win_sx = tcp_time_stamp;
        w->snd_una = tcp_sk(sk)->snd_una;
        w->first_ack = 1;
}
/* Low-pass filter. Implemented using constant coefficients. */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
        return ((7 * a) + b) >> 3;
}
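/*
 * Worked example (hypothetical numbers): westwood_do_filter() is a
 * fixed-point EWMA with gain 1/8, i.e. new = (7*old + sample) / 8.
 * With old = 900 and sample = 1700 (bytes per jiffy):
 *
 *      (7 * 900 + 1700) >> 3 = (6300 + 1700) / 8 = 1000
 *
 * so one sample moves the estimate only 1/8 of the way towards it.
 */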
static void westwood_filter(struct westwood *w, u32 delta)
{
        /* If the filter is empty, fill it with the first bandwidth sample */
        if (w->bw_ns_est == 0 && w->bw_est == 0) {
                w->bw_ns_est = w->bk / delta;
                w->bw_est = w->bw_ns_est;
        } else {
                w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
                w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
        }
}
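/*
 * Illustrative trace (hypothetical numbers): with an empty filter and
 * bk/delta = 1200, both bw_ns_est and bw_est are seeded to 1200.  If
 * the next window yields bk/delta = 2000:
 *
 *      bw_ns_est = (7*1200 + 2000) >> 3 = 1300
 *      bw_est    = (7*1200 + 1300) >> 3 = 1212
 *
 * The cascade of two filters makes bw_est react more slowly than the
 * "not too smoothed" first stage bw_ns_est.
 */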
/*
 * @westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
        struct westwood *w = inet_csk_ca(sk);

        if (cnt > 0)
                w->rtt = tcp_sk(sk)->srtt >> 3;
}
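/*
 * Note (illustrative): tp->srtt is kept by the stack as 8 times the
 * smoothed RTT in jiffies, so the >> 3 above recovers the RTT itself.
 * E.g. with HZ=1000 and srtt = 400, w->rtt becomes 50 jiffies (50ms).
 */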
/*
 * @westwood_update_window
 * It updates the RTT evaluation window if it is the right moment to
 * do so; if it is, it calls the filter to evaluate the bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);
        s32 delta = tcp_time_stamp - w->rtt_win_sx;

        /* Initialize w->snd_una with the first acked sequence number in order
         * to fix the mismatch between tp->snd_una and w->snd_una for the first
         * bandwidth sample.
         */
        if (w->first_ack) {
                w->snd_una = tcp_sk(sk)->snd_una;
                w->first_ack = 0;
        }

        /*
         * See if an RTT window has passed.
         * Be careful: if the RTT is less than 50ms we don't filter but
         * keep building the sample, since estimates over such short
         * intervals are too noisy to be useful.
         * On a LAN we will therefore reasonably always have
         * right_bound = left_bound + TCP_WESTWOOD_RTT_MIN.
         */
        if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
                westwood_filter(w, delta);

                w->bk = 0;
                w->rtt_win_sx = tcp_time_stamp;
        }
}
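/*
 * Example (hypothetical numbers, HZ=1000): with w->rtt = 80 jiffies the
 * window closes once delta exceeds max(80, 50) = 80 jiffies; with
 * w->rtt = 20 jiffies (e.g. on a LAN) it closes only after the 50-jiffy
 * TCP_WESTWOOD_RTT_MIN floor instead.
 */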
/*
 * @westwood_fast_bw
 * It is called when we are in the fast path. In particular it is called
 * when header prediction is successful. In such a case the update is
 * straightforward and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        westwood_update_window(sk);

        w->bk += tp->snd_una - w->snd_una;
        w->snd_una = tp->snd_una;
        w->rtt_min = min(w->rtt, w->rtt_min);
}
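/*
 * Example (hypothetical numbers): if an in-sequence ACK advances
 * tp->snd_una from 10000 to 12920, w->bk grows by 2920 bytes (two
 * 1460-byte segments), exactly the data newly delivered.
 */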
/*
 * @westwood_acked_count
 * This function evaluates cumul_ack for updating bk in case of
 * delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        w->cumul_ack = tp->snd_una - w->snd_una;

        /* If cumul_ack is 0 this is a dupack, since it is not moving
         * tp->snd_una.
         */
        if (!w->cumul_ack) {
                w->accounted += tp->mss_cache;
                w->cumul_ack = tp->mss_cache;
        }

        if (w->cumul_ack > tp->mss_cache) {
                /* Partial or delayed ack */
                if (w->accounted >= w->cumul_ack) {
                        w->accounted -= w->cumul_ack;
                        w->cumul_ack = tp->mss_cache;
                } else {
                        w->cumul_ack -= w->accounted;
                        w->accounted = 0;
                }
        }
        w->snd_una = tp->snd_una;

        return w->cumul_ack;
}
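/*
 * Worked example (hypothetical numbers, mss_cache = 1000): three
 * dupacks arrive, each crediting 1000 bytes and leaving
 * accounted = 3000.  A partial ACK then moves snd_una by 4000 bytes:
 * since 4000 > accounted, only 4000 - 3000 = 1000 bytes are credited,
 * so the bytes already credited by dupacks are not counted twice.
 */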
/*
 * @tcp_westwood_bw_rttmin
 * Here the limit is evaluated as bandwidth estimate * RTTmin (we divide
 * by mss_cache to obtain it in packets). The result is clamped to be
 * >= 2, which avoids ever returning 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct westwood *w = inet_csk_ca(sk);

        return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
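/*
 * Worked example (hypothetical numbers, HZ=1000): bw_est = 1500
 * bytes/jiffy (12 Mbit/s), rtt_min = 50 jiffies, mss_cache = 1500:
 *
 *      (1500 * 50) / 1500 = 50 packets
 *
 * i.e. the estimated bandwidth-delay product expressed in segments.
 */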
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        switch (event) {
        case CA_EVENT_FAST_ACK:
                westwood_fast_bw(sk);
                break;
        case CA_EVENT_COMPLETE_CWR:
                tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                break;
        case CA_EVENT_FRTO:
                tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                break;
        case CA_EVENT_SLOW_ACK:
                westwood_update_window(sk);
                w->bk += westwood_acked_count(sk);
                w->rtt_min = min(w->rtt, w->rtt_min);
                break;
        default:
                break;
        }
}
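/*
 * Illustrative sequence (not from the original source): on each ACK the
 * stack reports FAST_ACK or SLOW_ACK, feeding the bandwidth sampler;
 * when recovery completes (COMPLETE_CWR), cwnd and ssthresh are set to
 * the measured bandwidth-delay product rather than to half of cwnd.
 */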
/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
                              struct sk_buff *skb)
{
        const struct westwood *ca = inet_csk_ca(sk);

        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                struct rtattr *rta;
                struct tcpvegas_info *info;

                rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
                info = RTA_DATA(rta);
                info->tcpv_enabled = 1;
                info->tcpv_rttcnt = 0;
                info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
                info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
rtattr_failure:
                ;
        }
}
static struct tcp_congestion_ops tcp_westwood = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .min_cwnd       = tcp_westwood_bw_rttmin,
        .cwnd_event     = tcp_westwood_event,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "westwood"
};
static int __init tcp_westwood_register(void)
{
        BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);
MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");