#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>

int sysctl_tcp_nometrics_save __read_mostly;

enum tcp_metric_index {
	TCP_METRIC_RTT,
	TCP_METRIC_RTTVAR,
	TCP_METRIC_SSTHRESH,
	TCP_METRIC_CWND,
	TCP_METRIC_REORDERING,

	/* Always last.  */
	TCP_METRIC_MAX,
};

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX];
};

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}
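
/* Illustrative example (editor's sketch, not from the original file):
 * tcpm_lock is a bitmask indexed by enum tcp_metric_index. If the route
 * has RTAX_SSTHRESH locked, tcpm_suck_dst() below sets bit
 * TCP_METRIC_SSTHRESH (value 2 given the enum order above), so
 * tcp_metric_locked(tm, TCP_METRIC_SSTHRESH) evaluates
 * tm->tcpm_lock & (1 << 2), which is nonzero, and updates to that
 * metric are skipped.
 */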

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	u32 val;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
}

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;
	tm->tcpm_stamp = jiffies;

	tcpm_suck_dst(tm, dst);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst);
}
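
/* Illustrative example (editor's note): TCP_METRICS_TIMEOUT is
 * 60 * 60 * HZ jiffies, i.e. one hour regardless of HZ. With HZ == 1000
 * that is 3,600,000 jiffies; an entry untouched for longer than that is
 * refreshed from the current route metrics by tcpm_check_stamp().
 */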

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
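
/* Illustrative example (editor's sketch): TCP_METRICS_RECLAIM_PTR is a
 * sentinel, not a dereferenceable pointer. A lookup miss on a chain deeper
 * than TCP_METRICS_RECLAIM_DEPTH returns the sentinel, which a caller such
 * as tcp_get_metrics() decodes roughly like this:
 *
 *	tm = __tcp_get_metrics(&addr, net, hash);
 *	reclaim = false;
 *	if (tm == TCP_METRICS_RECLAIM_PTR) {
 *		reclaim = true;	// chain too long: recycle the oldest entry
 *		tm = NULL;
 *	}
 */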

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ((__force unsigned int) addr.addr.a6[0] ^
			(__force unsigned int) addr.addr.a6[1] ^
			(__force unsigned int) addr.addr.a6[2] ^
			(__force unsigned int) addr.addr.a6[3]);
		break;
	default:
		return NULL;
	}

	hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);
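
	/* Illustrative example (editor's note): the fold above mixes every
	 * byte of the hash into its low byte before masking. For
	 * hash = 0x12345678, hash >> 24 is 0x12, hash >> 16 is 0x1234 and
	 * hash >> 8 is 0x123456; XORing these back in stirs the high bytes
	 * into the low bits that survive the later
	 * "hash &= tcp_metrics_hash_mask".
	 */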

	net = dev_net(dst->dev);
	hash &= net->ipv4.tcp_metrics_hash_mask;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ((__force unsigned int) addr.addr.a6[0] ^
			(__force unsigned int) addr.addr.a6[1] ^
			(__force unsigned int) addr.addr.a6[2] ^
			(__force unsigned int) addr.addr.a6[3]);
		break;
	default:
		return NULL;
	}

	hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

	net = twsk_net(tw);
	hash &= net->ipv4.tcp_metrics_hash_mask;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ((__force unsigned int) addr.addr.a6[0] ^
			(__force unsigned int) addr.addr.a6[1] ^
			(__force unsigned int) addr.addr.a6[2] ^
			(__force unsigned int) addr.addr.a6[3]);
		break;
	default:
		return NULL;
	}

	hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

	net = dev_net(dst->dev);
	hash &= net->ipv4.tcp_metrics_hash_mask;

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one, store
	 * the new one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}
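
	/* Worked example (editor's note, same fixed-point units as the
	 * code): if the cached rtt is 120 and this session's tp->srtt is 80,
	 * then m = 40 and the cache moves down by m >> 3 to 115, a 1/8 EWMA
	 * step. For rttvar, if the deviation m is 10 and the cached var is
	 * 30, var decreases by (30 - 10) >> 2 to 25, a 1/4 step; a larger
	 * sample (m >= var) simply replaces var.
	 */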

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may be also invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val;

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	val = tcp_metric_get(tm, TCP_METRIC_RTT);
	if (val == 0 || tp->srtt == 0) {
		rcu_read_unlock();
		goto reset;
	}
	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is the time elapsed after a "normal" sized
	 * packet is sent until it is ACKed. In normal circumstances small
	 * packets force the peer to delay ACKs and the calculation stays
	 * correct. The algorithm is adaptive and, provided we follow specs,
	 * it NEVER underestimates RTT. BUT! If the peer tries clever tricks,
	 * sort of "quick acks" for long enough to push RTT down to a low
	 * value, and then abruptly stops doing so and starts delaying ACKs,
	 * expect trouble.
	 */
	val = msecs_to_jiffies(val);
	if (val > tp->srtt) {
		tp->srtt = val;
		tp->rtt_seq = tp->snd_nxt;
	}
	val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
	if (val > tp->mdev) {
		tp->mdev = val;
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
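
	/* Worked example (editor's sketch, same fixed point as
	 * tp->srtt/tp->mdev): suppose the cache holds rtt 800 and rttvar 200
	 * while the 3WHS measured only srtt = 100 and mdev = 25. Both cached
	 * values win, so the first RTO is computed from the cached, more
	 * conservative estimate instead of the tiny handshake sample.
	 */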
	rcu_read_unlock();

	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298's more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
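
/* Hedged usage sketch for tcp_peer_is_proven() above (editor's note; the
 * call sites live in the IPv4/IPv6 connection-request code and are not
 * shown in this file): with paws_check == true the return value answers
 * "do the cached peer timestamps pass a PAWS sanity check?", so a caller
 * doing timewait recycling may drop a request when this returns false.
 * With paws_check == false it answers "do we have RTT and timestamp
 * history for this peer at all?", which the listen path may use to decide
 * whether to trust a new request when the SYN queue is under pressure.
 */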

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

static unsigned long tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
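
/* Usage example (editor's note): the table size can be overridden at boot.
 * Passing "tcpmhash_entries=4096" on the kernel command line sizes the
 * per-netns hash to 4096 buckets instead of the RAM-based default chosen
 * in tcp_net_metrics_init() below.
 */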

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	size = slots * sizeof(struct tcpm_hash_bucket);

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	net->ipv4.tcp_metrics_hash_mask = (slots - 1);

	return 0;
}
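
/* Illustrative example (editor's note): masking with (slots - 1) assumes
 * slots is a power of two; e.g. slots = 16384 gives mask 0x3fff, so
 * "hash & mask" selects one of 16384 buckets. A non-power-of-two
 * tcpmhash_entries would leave some buckets unreachable, so boot-time
 * values should be powers of two.
 */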

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	register_pernet_subsys(&tcp_net_metrics_ops);
}