This source file includes the following definitions:
- tcp_westwood_init
- westwood_do_filter
- westwood_filter
- tcp_westwood_pkts_acked
- westwood_update_window
- update_rtt_min
- westwood_fast_bw
- westwood_acked_count
- tcp_westwood_bw_rttmin
- tcp_westwood_ack
- tcp_westwood_event
- tcp_westwood_info
- tcp_westwood_register
- tcp_westwood_unregister
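
/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP.
 *
 * Westwood+ samples the delivery rate once per RTT window from the amount
 * of data acked, low-pass filters the samples, and uses the resulting
 * bandwidth estimate to set ssthresh on loss (and cwnd as well after CWR)
 * to bw_est * rtt_min. The probing phase behaves exactly like Reno.
 */
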
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

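/* TCP Westwood+ private congestion-control state, kept in inet_csk_ca */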
struct westwood {
        u32    bw_ns_est;        /* first bandwidth estimate, lightly smoothed */
        u32    bw_est;           /* smoothed bandwidth estimate */
        u32    rtt_win_sx;       /* start (jiffies) of the current sample window */
        u32    bk;               /* bytes acked during the current window */
        u32    snd_una;          /* snapshot used to count newly acked bytes */
        u32    cumul_ack;        /* bytes acked by the last ack */
        u32    accounted;        /* bytes already credited to dupacks */
        u32    rtt;              /* last RTT sample, in jiffies */
        u32    rtt_min;          /* minimum observed RTT */
        u8     first_ack;        /* flag: waiting for the first ack */
        u8     reset_rtt_min;    /* reset rtt_min to the next RTT sample */
};

/* TCP Westwood+ constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)  /* 50 ms: shortest sample window */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)  /* deliberately conservative initial RTT */

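/*
 * tcp_westwood_init
 * Initializes the fields used by TCP Westwood+. It is called after the
 * initial SYN, so tp->snd_una is valid for actively opened connections;
 * passively opened connections carry no such guarantee.
 */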
static void tcp_westwood_init(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);

        w->bk = 0;
        w->bw_ns_est = 0;
        w->bw_est = 0;
        w->accounted = 0;
        w->cumul_ack = 0;
        w->reset_rtt_min = 1;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
        w->rtt_win_sx = tcp_jiffies32;
        w->snd_una = tcp_sk(sk)->snd_una;
        w->first_ack = 1;
}

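/*
 * westwood_do_filter
 * Low-pass filter with constant coefficients: new = (7 * old + sample) / 8,
 * i.e. an exponentially weighted moving average with gain 1/8.
 * For example, old = 800 and sample = 1600 give (7*800 + 1600) >> 3 = 900.
 */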
static inline u32 westwood_do_filter(u32 a, u32 b)
{
        return ((7 * a) + b) >> 3;
}

static void westwood_filter(struct westwood *w, u32 delta)
{
        /* If the filter is empty, fill it with the first bandwidth sample */
        if (w->bw_ns_est == 0 && w->bw_est == 0) {
                w->bw_ns_est = w->bk / delta;
                w->bw_est = w->bw_ns_est;
        } else {
                w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
                w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
        }
}

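/*
 * tcp_westwood_pkts_acked
 * Called after processing a group of packets; Westwood+ only needs the
 * most recent RTT sample.
 */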
static void tcp_westwood_pkts_acked(struct sock *sk,
                                    const struct ack_sample *sample)
{
        struct westwood *w = inet_csk_ca(sk);

        if (sample->rtt_us > 0)
                w->rtt = usecs_to_jiffies(sample->rtt_us);
}

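/*
 * westwood_update_window
 * Updates the Westwood+ bandwidth estimate and starts a new bandwidth
 * sample window.
 */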
static void westwood_update_window(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);
        s32 delta = tcp_jiffies32 - w->rtt_win_sx;

        /* Initialize w->snd_una with the first acked sequence number, to
         * fix the mismatch between tp->snd_una and w->snd_una for the
         * first bandwidth sample.
         */
        if (w->first_ack) {
                w->snd_una = tcp_sk(sk)->snd_una;
                w->first_ack = 0;
        }

        /* See if an RTT-sized window has elapsed. If the RTT is below
         * TCP_WESTWOOD_RTT_MIN (50 ms), keep accumulating the sample
         * instead of filtering: estimates over very short intervals are
         * too noisy to be useful.
         */
        if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
                westwood_filter(w, delta);

                w->bk = 0;
                w->rtt_win_sx = tcp_jiffies32;
        }
}

static inline void update_rtt_min(struct westwood *w)
{
        if (w->reset_rtt_min) {
                w->rtt_min = w->rtt;
                w->reset_rtt_min = 0;
        } else
                w->rtt_min = min(w->rtt, w->rtt_min);
}

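/*
 * westwood_fast_bw
 * Called on the fast path, when header prediction succeeds. In that case
 * the update is straightforward: every byte acked since the last snapshot
 * is credited to the current bandwidth sample.
 */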
static inline void westwood_fast_bw(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        westwood_update_window(sk);

        w->bk += tp->snd_una - w->snd_una;
        w->snd_una = tp->snd_una;
        update_rtt_min(w);
}

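/*
 * westwood_acked_count
 * Evaluates cumul_ack for computing bk in the presence of delayed or
 * partial acks.
 */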
static inline u32 westwood_acked_count(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        w->cumul_ack = tp->snd_una - w->snd_una;

        /* A cumul_ack of 0 means this is a dupack: it did not move
         * tp->snd_una, but it still signals one delivered segment. Count
         * one MSS now and remember it in 'accounted' so it is not counted
         * again when a later cumulative ack covers it.
         */
        if (!w->cumul_ack) {
                w->accounted += tp->mss_cache;
                w->cumul_ack = tp->mss_cache;
        }

        if (w->cumul_ack > tp->mss_cache) {
                /* Partial or delayed ack */
                if (w->accounted >= w->cumul_ack) {
                        w->accounted -= w->cumul_ack;
                        w->cumul_ack = tp->mss_cache;
                } else {
                        w->cumul_ack -= w->accounted;
                        w->accounted = 0;
                }
        }

        w->snd_una = tp->snd_una;

        return w->cumul_ack;
}

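/*
 * tcp_westwood_bw_rttmin
 * The window limit is bw_est * rtt_min, converted from bytes to packets
 * via mss_cache: bw_est is in bytes per jiffy and rtt_min in jiffies, so
 * their product is the amount of data the estimated rate sustains over
 * the minimum RTT. With (hypothetical) bw_est = 3000 bytes/jiffy,
 * rtt_min = 5 jiffies and mss_cache = 1500, the result is 10 packets.
 * The value is clamped to >= 2 so it can never be zero.
 */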
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct westwood *w = inet_csk_ca(sk);

        return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}

static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
{
        if (ack_flags & CA_ACK_SLOWPATH) {
                struct westwood *w = inet_csk_ca(sk);

                /* Slow path: dupacks and partial acks need the careful
                 * byte accounting in westwood_acked_count().
                 */
                westwood_update_window(sk);
                w->bk += westwood_acked_count(sk);

                update_rtt_min(w);
                return;
        }

        westwood_fast_bw(sk);
}

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        switch (event) {
        case CA_EVENT_COMPLETE_CWR:
                tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                break;
        case CA_EVENT_LOSS:
                tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                /* Update rtt_min when the next ack arrives */
                w->reset_rtt_min = 1;
                break;
        default:
                /* don't care */
                break;
        }
}

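/* Extract Westwood+ state for the tcp_info netlink diag interface; the
 * RTT figures are exported through the Vegas info slot.
 */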
static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
                                union tcp_cc_info *info)
{
        const struct westwood *ca = inet_csk_ca(sk);

        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                info->vegas.tcpv_enabled = 1;
                info->vegas.tcpv_rttcnt = 0;
                info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt);
                info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);

                *attr = INET_DIAG_VEGASINFO;
                return sizeof(struct tcpvegas_info);
        }
        return 0;
}

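/*
 * Westwood+ reuses Reno's slow start and congestion avoidance; the
 * bandwidth estimate only changes cwnd/ssthresh on congestion events.
 */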
static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .undo_cwnd      = tcp_reno_undo_cwnd,
        .cwnd_event     = tcp_westwood_event,
        .in_ack_event   = tcp_westwood_ack,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "westwood"
};

static int __init tcp_westwood_register(void)
{
        /* The private state must fit in the congestion-control area */
        BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");