net/ipv4/tcp_metrics.c


DEFINITIONS

This source file includes the following definitions:
  1. tm_net
  2. tcp_metric_locked
  3. tcp_metric_get
  4. tcp_metric_set
  5. addr_same
  6. tcpm_suck_dst
  7. tcpm_check_stamp
  8. tcpm_new
  9. tcp_get_encode
  10. __tcp_get_metrics
  11. __tcp_get_metrics_req
  12. tcp_get_metrics
  13. tcp_update_metrics
  14. tcp_init_metrics
  15. tcp_peer_is_proven
  16. tcp_fastopen_cache_get
  17. tcp_fastopen_cache_set
  18. tcp_metrics_fill_info
  19. tcp_metrics_dump_info
  20. tcp_metrics_nl_dump
  21. __parse_nl_addr
  22. parse_nl_addr
  23. parse_nl_saddr
  24. tcp_metrics_nl_cmd_get
  25. tcp_metrics_flush_all
  26. tcp_metrics_nl_cmd_del
  27. set_tcpmhash_entries
  28. tcp_net_metrics_init
  29. tcp_net_metrics_exit_batch
  30. tcp_metrics_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 #include <linux/rcupdate.h>
   3 #include <linux/spinlock.h>
   4 #include <linux/jiffies.h>
   5 #include <linux/module.h>
   6 #include <linux/cache.h>
   7 #include <linux/slab.h>
   8 #include <linux/init.h>
   9 #include <linux/tcp.h>
  10 #include <linux/hash.h>
  11 #include <linux/tcp_metrics.h>
  12 #include <linux/vmalloc.h>
  13 
  14 #include <net/inet_connection_sock.h>
  15 #include <net/net_namespace.h>
  16 #include <net/request_sock.h>
  17 #include <net/inetpeer.h>
  18 #include <net/sock.h>
  19 #include <net/ipv6.h>
  20 #include <net/dst.h>
  21 #include <net/tcp.h>
  22 #include <net/genetlink.h>
  23 
  24 static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
  25                                                    const struct inetpeer_addr *daddr,
  26                                                    struct net *net, unsigned int hash);
  27 
  28 struct tcp_fastopen_metrics {
  29         u16     mss;
  30         u16     syn_loss:10,            /* Recurring Fast Open SYN losses */
  31                 try_exp:2;              /* Request w/ exp. option (once) */
  32         unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
  33         struct  tcp_fastopen_cookie     cookie;
  34 };
  35 
   36 /* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
   37  * The kernel only stores RTT and RTTVAR in usec resolution.
   38  */
  39 #define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
  40 
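/* Per include/uapi/linux/tcp_metrics.h the last two metric indices are
 * TCP_METRIC_RTT_US and TCP_METRIC_RTTVAR_US, so TCP_METRIC_MAX_KERNEL
 * ends at TCP_METRIC_REORDERING.  The kernel keeps one usec-resolution
 * copy of RTT/RTTVAR in the legacy slots of tcpm_vals[]; the *_US netlink
 * attributes are synthesized from them in tcp_metrics_fill_info().
 */
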
  41 struct tcp_metrics_block {
  42         struct tcp_metrics_block __rcu  *tcpm_next;
  43         possible_net_t                  tcpm_net;
  44         struct inetpeer_addr            tcpm_saddr;
  45         struct inetpeer_addr            tcpm_daddr;
  46         unsigned long                   tcpm_stamp;
  47         u32                             tcpm_lock;
  48         u32                             tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
  49         struct tcp_fastopen_metrics     tcpm_fastopen;
  50 
  51         struct rcu_head                 rcu_head;
  52 };
  53 
  54 static inline struct net *tm_net(struct tcp_metrics_block *tm)
  55 {
  56         return read_pnet(&tm->tcpm_net);
  57 }
  58 
  59 static bool tcp_metric_locked(struct tcp_metrics_block *tm,
  60                               enum tcp_metric_index idx)
  61 {
  62         return tm->tcpm_lock & (1 << idx);
  63 }
  64 
  65 static u32 tcp_metric_get(struct tcp_metrics_block *tm,
  66                           enum tcp_metric_index idx)
  67 {
  68         return tm->tcpm_vals[idx];
  69 }
  70 
  71 static void tcp_metric_set(struct tcp_metrics_block *tm,
  72                            enum tcp_metric_index idx,
  73                            u32 val)
  74 {
  75         tm->tcpm_vals[idx] = val;
  76 }
  77 
  78 static bool addr_same(const struct inetpeer_addr *a,
  79                       const struct inetpeer_addr *b)
  80 {
  81         return inetpeer_addr_cmp(a, b) == 0;
  82 }
  83 
  84 struct tcpm_hash_bucket {
  85         struct tcp_metrics_block __rcu  *chain;
  86 };
  87 
  88 static struct tcpm_hash_bucket  *tcp_metrics_hash __read_mostly;
  89 static unsigned int             tcp_metrics_hash_log __read_mostly;
  90 
  91 static DEFINE_SPINLOCK(tcp_metrics_lock);
  92 
  93 static void tcpm_suck_dst(struct tcp_metrics_block *tm,
  94                           const struct dst_entry *dst,
  95                           bool fastopen_clear)
  96 {
  97         u32 msval;
  98         u32 val;
  99 
 100         tm->tcpm_stamp = jiffies;
 101 
 102         val = 0;
 103         if (dst_metric_locked(dst, RTAX_RTT))
 104                 val |= 1 << TCP_METRIC_RTT;
 105         if (dst_metric_locked(dst, RTAX_RTTVAR))
 106                 val |= 1 << TCP_METRIC_RTTVAR;
 107         if (dst_metric_locked(dst, RTAX_SSTHRESH))
 108                 val |= 1 << TCP_METRIC_SSTHRESH;
 109         if (dst_metric_locked(dst, RTAX_CWND))
 110                 val |= 1 << TCP_METRIC_CWND;
 111         if (dst_metric_locked(dst, RTAX_REORDERING))
 112                 val |= 1 << TCP_METRIC_REORDERING;
 113         tm->tcpm_lock = val;
 114 
 115         msval = dst_metric_raw(dst, RTAX_RTT);
 116         tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
 117 
 118         msval = dst_metric_raw(dst, RTAX_RTTVAR);
 119         tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
 120         tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
 121         tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
 122         tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
 123         if (fastopen_clear) {
 124                 tm->tcpm_fastopen.mss = 0;
 125                 tm->tcpm_fastopen.syn_loss = 0;
 126                 tm->tcpm_fastopen.try_exp = 0;
 127                 tm->tcpm_fastopen.cookie.exp = false;
 128                 tm->tcpm_fastopen.cookie.len = 0;
 129         }
 130 }
 131 
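/* tcpm_suck_dst() above (re)seeds a cache entry from the route metrics:
 * metrics locked on the dst become locked bits in tcpm_lock, so
 * tcp_update_metrics() will not overwrite them later, and RTT/RTTVAR are
 * converted from the route's msec resolution to usec.  For illustration
 * only (approximate iproute2 syntax, not part of this file), something like
 * "ip route replace 198.51.100.0/24 dev eth0 rtt lock 300ms" would yield a
 * locked TCP_METRIC_RTT for matching peers.  The Fast Open state is wiped
 * only when fastopen_clear is true, i.e. for a brand-new entry from
 * tcpm_new(); the periodic refresh via tcpm_check_stamp() preserves it.
 */
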
 132 #define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)
 133 
 134 static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
 135 {
 136         if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
 137                 tcpm_suck_dst(tm, dst, false);
 138 }
 139 
 140 #define TCP_METRICS_RECLAIM_DEPTH       5
 141 #define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL
 142 
 143 #define deref_locked(p) \
 144         rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
 145 
 146 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 147                                           struct inetpeer_addr *saddr,
 148                                           struct inetpeer_addr *daddr,
 149                                           unsigned int hash)
 150 {
 151         struct tcp_metrics_block *tm;
 152         struct net *net;
 153         bool reclaim = false;
 154 
 155         spin_lock_bh(&tcp_metrics_lock);
 156         net = dev_net(dst->dev);
 157 
  158         /* While waiting for the spin-lock, the cache might have been
  159          * populated with this entry, so we have to check again.
  160          */
 161         tm = __tcp_get_metrics(saddr, daddr, net, hash);
 162         if (tm == TCP_METRICS_RECLAIM_PTR) {
 163                 reclaim = true;
 164                 tm = NULL;
 165         }
 166         if (tm) {
 167                 tcpm_check_stamp(tm, dst);
 168                 goto out_unlock;
 169         }
 170 
 171         if (unlikely(reclaim)) {
 172                 struct tcp_metrics_block *oldest;
 173 
 174                 oldest = deref_locked(tcp_metrics_hash[hash].chain);
 175                 for (tm = deref_locked(oldest->tcpm_next); tm;
 176                      tm = deref_locked(tm->tcpm_next)) {
 177                         if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
 178                                 oldest = tm;
 179                 }
 180                 tm = oldest;
 181         } else {
 182                 tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
 183                 if (!tm)
 184                         goto out_unlock;
 185         }
 186         write_pnet(&tm->tcpm_net, net);
 187         tm->tcpm_saddr = *saddr;
 188         tm->tcpm_daddr = *daddr;
 189 
 190         tcpm_suck_dst(tm, dst, true);
 191 
 192         if (likely(!reclaim)) {
 193                 tm->tcpm_next = tcp_metrics_hash[hash].chain;
 194                 rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
 195         }
 196 
 197 out_unlock:
 198         spin_unlock_bh(&tcp_metrics_lock);
 199         return tm;
 200 }
 201 
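/* Note on the reclaim path in tcpm_new() above: when __tcp_get_metrics()
 * returned TCP_METRICS_RECLAIM_PTR, the block with the stalest tcpm_stamp
 * in the bucket is reused in place instead of allocating a new one.  The
 * block stays linked while its fields are rewritten, so concurrent RCU
 * readers may briefly observe a mix of old and new values; only a freshly
 * kmalloc()'ed block needs the rcu_assign_pointer() publish at the end.
 */
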
 202 static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 203 {
 204         if (tm)
 205                 return tm;
 206         if (depth > TCP_METRICS_RECLAIM_DEPTH)
 207                 return TCP_METRICS_RECLAIM_PTR;
 208         return NULL;
 209 }
 210 
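/* tcp_get_encode() folds three outcomes into one pointer: a real block when
 * the lookup matched, NULL when nothing matched after walking at most
 * TCP_METRICS_RECLAIM_DEPTH entries (the caller may simply allocate), and
 * the 0x1 sentinel TCP_METRICS_RECLAIM_PTR when nothing matched in an
 * already long chain, telling tcpm_new() to recycle its oldest entry
 * rather than let the bucket grow without bound.
 */
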
 211 static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
 212                                                    const struct inetpeer_addr *daddr,
 213                                                    struct net *net, unsigned int hash)
 214 {
 215         struct tcp_metrics_block *tm;
 216         int depth = 0;
 217 
 218         for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 219              tm = rcu_dereference(tm->tcpm_next)) {
 220                 if (addr_same(&tm->tcpm_saddr, saddr) &&
 221                     addr_same(&tm->tcpm_daddr, daddr) &&
 222                     net_eq(tm_net(tm), net))
 223                         break;
 224                 depth++;
 225         }
 226         return tcp_get_encode(tm, depth);
 227 }
 228 
 229 static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 230                                                        struct dst_entry *dst)
 231 {
 232         struct tcp_metrics_block *tm;
 233         struct inetpeer_addr saddr, daddr;
 234         unsigned int hash;
 235         struct net *net;
 236 
 237         saddr.family = req->rsk_ops->family;
 238         daddr.family = req->rsk_ops->family;
 239         switch (daddr.family) {
 240         case AF_INET:
 241                 inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
 242                 inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
 243                 hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
 244                 break;
 245 #if IS_ENABLED(CONFIG_IPV6)
 246         case AF_INET6:
 247                 inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
 248                 inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
 249                 hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 250                 break;
 251 #endif
 252         default:
 253                 return NULL;
 254         }
 255 
 256         net = dev_net(dst->dev);
 257         hash ^= net_hash_mix(net);
 258         hash = hash_32(hash, tcp_metrics_hash_log);
 259 
 260         for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 261              tm = rcu_dereference(tm->tcpm_next)) {
 262                 if (addr_same(&tm->tcpm_saddr, &saddr) &&
 263                     addr_same(&tm->tcpm_daddr, &daddr) &&
 264                     net_eq(tm_net(tm), net))
 265                         break;
 266         }
 267         tcpm_check_stamp(tm, dst);
 268         return tm;
 269 }
 270 
 271 static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 272                                                  struct dst_entry *dst,
 273                                                  bool create)
 274 {
 275         struct tcp_metrics_block *tm;
 276         struct inetpeer_addr saddr, daddr;
 277         unsigned int hash;
 278         struct net *net;
 279 
 280         if (sk->sk_family == AF_INET) {
 281                 inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 282                 inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 283                 hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 284         }
 285 #if IS_ENABLED(CONFIG_IPV6)
 286         else if (sk->sk_family == AF_INET6) {
 287                 if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
 288                         inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
 289                         inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
 290                         hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 291                 } else {
 292                         inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
 293                         inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
 294                         hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 295                 }
 296         }
 297 #endif
 298         else
 299                 return NULL;
 300 
 301         net = dev_net(dst->dev);
 302         hash ^= net_hash_mix(net);
 303         hash = hash_32(hash, tcp_metrics_hash_log);
 304 
 305         tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
 306         if (tm == TCP_METRICS_RECLAIM_PTR)
 307                 tm = NULL;
 308         if (!tm && create)
 309                 tm = tcpm_new(dst, &saddr, &daddr, hash);
 310         else
 311                 tcpm_check_stamp(tm, dst);
 312 
 313         return tm;
 314 }
 315 
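/* Lookup key used by tcp_get_metrics() above: source address, destination
 * address and network namespace.  IPv6 sockets whose destination is a
 * v4-mapped address are keyed by the embedded IPv4 addresses, so such
 * flows share cache entries with plain AF_INET sockets to the same peer.
 * The bucket is a hash of the destination address, mixed with the netns
 * via net_hash_mix() and folded to tcp_metrics_hash_log bits by hash_32().
 */
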
  316 /* Save metrics learned by this TCP session.  This function is called
  317  * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
  318  * or goes from LAST-ACK to CLOSE.
  319  */
 320 void tcp_update_metrics(struct sock *sk)
 321 {
 322         const struct inet_connection_sock *icsk = inet_csk(sk);
 323         struct dst_entry *dst = __sk_dst_get(sk);
 324         struct tcp_sock *tp = tcp_sk(sk);
 325         struct net *net = sock_net(sk);
 326         struct tcp_metrics_block *tm;
 327         unsigned long rtt;
 328         u32 val;
 329         int m;
 330 
 331         sk_dst_confirm(sk);
 332         if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
 333                 return;
 334 
 335         rcu_read_lock();
 336         if (icsk->icsk_backoff || !tp->srtt_us) {
  337                 /* This session failed to estimate rtt. Why?
  338                  * Probably no packets returned in time.  Reset our
  339                  * results.
  340                  */
 341                 tm = tcp_get_metrics(sk, dst, false);
 342                 if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
 343                         tcp_metric_set(tm, TCP_METRIC_RTT, 0);
 344                 goto out_unlock;
 345         } else
 346                 tm = tcp_get_metrics(sk, dst, true);
 347 
 348         if (!tm)
 349                 goto out_unlock;
 350 
 351         rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 352         m = rtt - tp->srtt_us;
 353 
  354         /* If the newly calculated rtt is larger than the stored one,
  355          * store the new one.  Otherwise, use EWMA.  Remember, rtt
  356          * overestimation is always better than underestimation.
  357          */
 358         if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
 359                 if (m <= 0)
 360                         rtt = tp->srtt_us;
 361                 else
 362                         rtt -= (m >> 3);
 363                 tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
 364         }
 365 
 366         if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
 367                 unsigned long var;
 368 
 369                 if (m < 0)
 370                         m = -m;
 371 
 372                 /* Scale deviation to rttvar fixed point */
 373                 m >>= 1;
 374                 if (m < tp->mdev_us)
 375                         m = tp->mdev_us;
 376 
 377                 var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
 378                 if (m >= var)
 379                         var = m;
 380                 else
 381                         var -= (var - m) >> 2;
 382 
 383                 tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
 384         }
 385 
 386         if (tcp_in_initial_slowstart(tp)) {
  387                 /* Slow start has not finished yet. */
 388                 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 389                         val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 390                         if (val && (tp->snd_cwnd >> 1) > val)
 391                                 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 392                                                tp->snd_cwnd >> 1);
 393                 }
 394                 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 395                         val = tcp_metric_get(tm, TCP_METRIC_CWND);
 396                         if (tp->snd_cwnd > val)
 397                                 tcp_metric_set(tm, TCP_METRIC_CWND,
 398                                                tp->snd_cwnd);
 399                 }
 400         } else if (!tcp_in_slow_start(tp) &&
 401                    icsk->icsk_ca_state == TCP_CA_Open) {
 402                 /* Cong. avoidance phase, cwnd is reliable. */
 403                 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 404                         tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 405                                        max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
 406                 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 407                         val = tcp_metric_get(tm, TCP_METRIC_CWND);
 408                         tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
 409                 }
 410         } else {
  411                 /* Else slow start did not finish, cwnd is not meaningful
  412                  * and ssthresh may also be invalid.
  413                  */
 414                 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 415                         val = tcp_metric_get(tm, TCP_METRIC_CWND);
 416                         tcp_metric_set(tm, TCP_METRIC_CWND,
 417                                        (val + tp->snd_ssthresh) >> 1);
 418                 }
 419                 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 420                         val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 421                         if (val && tp->snd_ssthresh > val)
 422                                 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 423                                                tp->snd_ssthresh);
 424                 }
 425                 if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 426                         val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 427                         if (val < tp->reordering &&
 428                             tp->reordering != net->ipv4.sysctl_tcp_reordering)
 429                                 tcp_metric_set(tm, TCP_METRIC_REORDERING,
 430                                                tp->reordering);
 431                 }
 432         }
 433         tm->tcpm_stamp = jiffies;
 434 out_unlock:
 435         rcu_read_unlock();
 436 }
 437 
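/* Worked example for the RTT merge in tcp_update_metrics(), using
 * hypothetical values and an unlocked RTT metric: cached rtt = 80000 usec,
 * new tp->srtt_us = 40000.  Then m = 80000 - 40000 > 0, so the cache only
 * moves down by m >> 3: rtt = 80000 - 5000 = 75000 usec.  Had srtt_us been
 * 100000 instead, m would be <= 0 and the cache would jump straight to
 * 100000: overestimates are adopted at once, underestimates decay slowly,
 * matching the "overestimation is always better" comment above.
 */
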
 438 /* Initialize metrics on socket. */
 439 
 440 void tcp_init_metrics(struct sock *sk)
 441 {
 442         struct dst_entry *dst = __sk_dst_get(sk);
 443         struct tcp_sock *tp = tcp_sk(sk);
 444         struct tcp_metrics_block *tm;
 445         u32 val, crtt = 0; /* cached RTT scaled by 8 */
 446 
 447         sk_dst_confirm(sk);
 448         if (!dst)
 449                 goto reset;
 450 
 451         rcu_read_lock();
 452         tm = tcp_get_metrics(sk, dst, true);
 453         if (!tm) {
 454                 rcu_read_unlock();
 455                 goto reset;
 456         }
 457 
 458         if (tcp_metric_locked(tm, TCP_METRIC_CWND))
 459                 tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 460 
 461         val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 462         if (val) {
 463                 tp->snd_ssthresh = val;
 464                 if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 465                         tp->snd_ssthresh = tp->snd_cwnd_clamp;
 466         } else {
  467                 /* ssthresh may have been reduced unnecessarily during
  468                  * the 3WHS.  Restore it to its initial default.
  469                  */
 470                 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 471         }
 472         val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 473         if (val && tp->reordering != val)
 474                 tp->reordering = val;
 475 
 476         crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
 477         rcu_read_unlock();
 478 reset:
 479         /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
 480          * to seed the RTO for later data packets because SYN packets are
 481          * small. Use the per-dst cached values to seed the RTO but keep
 482          * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
 483          * Later the RTO will be updated immediately upon obtaining the first
 484          * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
 485          * influences the first RTO but not later RTT estimation.
 486          *
 487          * But if RTT is not available from the SYN (due to retransmits or
  488          * syn cookies) or the cache, force a conservative 3 second timeout.
  489          *
  490          * A bit of theory: RTT is the time that passes between sending a
  491          * "normal" sized packet and receiving its ACK.  In normal
  492          * circumstances sending small packets forces the peer to delay
  493          * ACKs, so the calculation is correct in that case too.  The
  494          * algorithm is adaptive and, provided we follow the specs, it
  495          * NEVER underestimates RTT.  BUT! If the peer plays clever tricks,
  496          * sending "quick acks" long enough to push RTT down to a low
  497          * value and then abruptly starting to delay ACKs, expect trouble.
 498          */
 499         if (crtt > tp->srtt_us) {
 500                 /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
 501                 crtt /= 8 * USEC_PER_SEC / HZ;
 502                 inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
 503         } else if (tp->srtt_us == 0) {
  504                 /* RFC6298: 5.7 We've failed to get a valid RTT sample from
  505                  * the 3WHS.  This is most likely due to retransmission,
  506                  * including a spurious one.  Reset the RTO back to 3 seconds
  507                  * from the more aggressive 1 second to avoid further
  508                  * spurious retransmissions.
  509                  */
 510                 tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
 511                 tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
 512 
 513                 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 514         }
 515 }
 516 
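/* Worked example for the RTO seeding in tcp_init_metrics(), assuming
 * HZ == 1000 and a hypothetical cached RTT of 50 ms: crtt is stored in
 * usec scaled by 8 (the same fixed point as srtt_us), so
 * crtt = 50000 * 8 = 400000.  Dividing by 8 * USEC_PER_SEC / HZ = 8000
 * undoes both the scaling and the usec-to-jiffies conversion, leaving
 * crtt = 50 jiffies.  With the default tcp_rto_min() of 200 ms this gives
 * icsk_rto = 50 + max(100, 200) = 250 jiffies, i.e. a 250 ms timeout for
 * the first data packet, before any real RTT sample exists.
 */
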
 517 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 518 {
 519         struct tcp_metrics_block *tm;
 520         bool ret;
 521 
 522         if (!dst)
 523                 return false;
 524 
 525         rcu_read_lock();
 526         tm = __tcp_get_metrics_req(req, dst);
 527         if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
 528                 ret = true;
 529         else
 530                 ret = false;
 531         rcu_read_unlock();
 532 
 533         return ret;
 534 }
 535 
 536 static DEFINE_SEQLOCK(fastopen_seqlock);
 537 
 538 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 539                             struct tcp_fastopen_cookie *cookie)
 540 {
 541         struct tcp_metrics_block *tm;
 542 
 543         rcu_read_lock();
 544         tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
 545         if (tm) {
 546                 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 547                 unsigned int seq;
 548 
 549                 do {
 550                         seq = read_seqbegin(&fastopen_seqlock);
 551                         if (tfom->mss)
 552                                 *mss = tfom->mss;
 553                         *cookie = tfom->cookie;
 554                         if (cookie->len <= 0 && tfom->try_exp == 1)
 555                                 cookie->exp = true;
 556                 } while (read_seqretry(&fastopen_seqlock, seq));
 557         }
 558         rcu_read_unlock();
 559 }
 560 
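/* The Fast Open state spans several fields (mss, cookie, SYN loss counters),
 * so readers use fastopen_seqlock to get a consistent snapshot:
 * tcp_fastopen_cache_get() above rereads until read_seqretry() reports that
 * no writer ran concurrently, and tcp_fastopen_cache_set() below takes the
 * write side.  Note that *mss is only overwritten when an MSS is actually
 * cached, so the caller's preloaded default survives a cache miss.
 */
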
 561 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 562                             struct tcp_fastopen_cookie *cookie, bool syn_lost,
 563                             u16 try_exp)
 564 {
 565         struct dst_entry *dst = __sk_dst_get(sk);
 566         struct tcp_metrics_block *tm;
 567 
 568         if (!dst)
 569                 return;
 570         rcu_read_lock();
 571         tm = tcp_get_metrics(sk, dst, true);
 572         if (tm) {
 573                 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 574 
 575                 write_seqlock_bh(&fastopen_seqlock);
 576                 if (mss)
 577                         tfom->mss = mss;
 578                 if (cookie && cookie->len > 0)
 579                         tfom->cookie = *cookie;
 580                 else if (try_exp > tfom->try_exp &&
 581                          tfom->cookie.len <= 0 && !tfom->cookie.exp)
 582                         tfom->try_exp = try_exp;
 583                 if (syn_lost) {
 584                         ++tfom->syn_loss;
 585                         tfom->last_syn_loss = jiffies;
 586                 } else
 587                         tfom->syn_loss = 0;
 588                 write_sequnlock_bh(&fastopen_seqlock);
 589         }
 590         rcu_read_unlock();
 591 }
 592 
 593 static struct genl_family tcp_metrics_nl_family;
 594 
 595 static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
 596         [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
 597         [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
 598                                             .len = sizeof(struct in6_addr), },
  599         /* The following attributes are not received for GET/DEL;
  600          * we keep them for reference.
  601          */
 602 #if 0
 603         [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
 604         [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
 605         [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
 606         [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
 607         [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
 608         [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
 609         [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
 610         [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
 611                                             .len = TCP_FASTOPEN_COOKIE_MAX, },
 612 #endif
 613 };
 614 
  615 /* Add attributes; the caller cancels its header on failure */
 616 static int tcp_metrics_fill_info(struct sk_buff *msg,
 617                                  struct tcp_metrics_block *tm)
 618 {
 619         struct nlattr *nest;
 620         int i;
 621 
 622         switch (tm->tcpm_daddr.family) {
 623         case AF_INET:
 624                 if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
 625                                     inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
 626                         goto nla_put_failure;
 627                 if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
 628                                     inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
 629                         goto nla_put_failure;
 630                 break;
 631         case AF_INET6:
 632                 if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
 633                                      inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
 634                         goto nla_put_failure;
 635                 if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
 636                                      inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
 637                         goto nla_put_failure;
 638                 break;
 639         default:
 640                 return -EAFNOSUPPORT;
 641         }
 642 
 643         if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
 644                           jiffies - tm->tcpm_stamp,
 645                           TCP_METRICS_ATTR_PAD) < 0)
 646                 goto nla_put_failure;
 647 
 648         {
 649                 int n = 0;
 650 
 651                 nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
 652                 if (!nest)
 653                         goto nla_put_failure;
 654                 for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
 655                         u32 val = tm->tcpm_vals[i];
 656 
 657                         if (!val)
 658                                 continue;
 659                         if (i == TCP_METRIC_RTT) {
 660                                 if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
 661                                                 val) < 0)
 662                                         goto nla_put_failure;
 663                                 n++;
 664                                 val = max(val / 1000, 1U);
 665                         }
 666                         if (i == TCP_METRIC_RTTVAR) {
 667                                 if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
 668                                                 val) < 0)
 669                                         goto nla_put_failure;
 670                                 n++;
 671                                 val = max(val / 1000, 1U);
 672                         }
 673                         if (nla_put_u32(msg, i + 1, val) < 0)
 674                                 goto nla_put_failure;
 675                         n++;
 676                 }
 677                 if (n)
 678                         nla_nest_end(msg, nest);
 679                 else
 680                         nla_nest_cancel(msg, nest);
 681         }
 682 
 683         {
 684                 struct tcp_fastopen_metrics tfom_copy[1], *tfom;
 685                 unsigned int seq;
 686 
 687                 do {
 688                         seq = read_seqbegin(&fastopen_seqlock);
 689                         tfom_copy[0] = tm->tcpm_fastopen;
 690                 } while (read_seqretry(&fastopen_seqlock, seq));
 691 
 692                 tfom = tfom_copy;
 693                 if (tfom->mss &&
 694                     nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
 695                                 tfom->mss) < 0)
 696                         goto nla_put_failure;
 697                 if (tfom->syn_loss &&
 698                     (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
 699                                 tfom->syn_loss) < 0 ||
 700                      nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
 701                                 jiffies - tfom->last_syn_loss,
 702                                 TCP_METRICS_ATTR_PAD) < 0))
 703                         goto nla_put_failure;
 704                 if (tfom->cookie.len > 0 &&
 705                     nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
 706                             tfom->cookie.len, tfom->cookie.val) < 0)
 707                         goto nla_put_failure;
 708         }
 709 
 710         return 0;
 711 
 712 nla_put_failure:
 713         return -EMSGSIZE;
 714 }
 715 
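/* Attribute layout produced by tcp_metrics_fill_info(): inside the nested
 * TCP_METRICS_ATTR_VALS the attribute type is the metric index plus one
 * (netlink attribute type 0 is reserved).  RTT and RTTVAR are emitted
 * twice: the stored usec value under TCP_METRIC_RTT_US + 1 and
 * TCP_METRIC_RTTVAR_US + 1, and the same value truncated to milliseconds
 * (but never below 1) under the legacy TCP_METRIC_RTT + 1 and
 * TCP_METRIC_RTTVAR + 1 types.
 */
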
 716 static int tcp_metrics_dump_info(struct sk_buff *skb,
 717                                  struct netlink_callback *cb,
 718                                  struct tcp_metrics_block *tm)
 719 {
 720         void *hdr;
 721 
 722         hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 723                           &tcp_metrics_nl_family, NLM_F_MULTI,
 724                           TCP_METRICS_CMD_GET);
 725         if (!hdr)
 726                 return -EMSGSIZE;
 727 
 728         if (tcp_metrics_fill_info(skb, tm) < 0)
 729                 goto nla_put_failure;
 730 
 731         genlmsg_end(skb, hdr);
 732         return 0;
 733 
 734 nla_put_failure:
 735         genlmsg_cancel(skb, hdr);
 736         return -EMSGSIZE;
 737 }
 738 
 739 static int tcp_metrics_nl_dump(struct sk_buff *skb,
 740                                struct netlink_callback *cb)
 741 {
 742         struct net *net = sock_net(skb->sk);
 743         unsigned int max_rows = 1U << tcp_metrics_hash_log;
 744         unsigned int row, s_row = cb->args[0];
 745         int s_col = cb->args[1], col = s_col;
 746 
 747         for (row = s_row; row < max_rows; row++, s_col = 0) {
 748                 struct tcp_metrics_block *tm;
 749                 struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
 750 
 751                 rcu_read_lock();
 752                 for (col = 0, tm = rcu_dereference(hb->chain); tm;
 753                      tm = rcu_dereference(tm->tcpm_next), col++) {
 754                         if (!net_eq(tm_net(tm), net))
 755                                 continue;
 756                         if (col < s_col)
 757                                 continue;
 758                         if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
 759                                 rcu_read_unlock();
 760                                 goto done;
 761                         }
 762                 }
 763                 rcu_read_unlock();
 764         }
 765 
 766 done:
 767         cb->args[0] = row;
 768         cb->args[1] = col;
 769         return skb->len;
 770 }
 771 
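/* Dump state lives in the netlink callback: cb->args[0] is the hash row and
 * cb->args[1] the position within that row's chain, so when the skb fills
 * up and tcp_metrics_dump_info() fails, the next invocation resumes at the
 * same bucket and skips the entries that were already emitted.
 */
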
 772 static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 773                            unsigned int *hash, int optional, int v4, int v6)
 774 {
 775         struct nlattr *a;
 776 
 777         a = info->attrs[v4];
 778         if (a) {
 779                 inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
 780                 if (hash)
 781                         *hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
 782                 return 0;
 783         }
 784         a = info->attrs[v6];
 785         if (a) {
 786                 struct in6_addr in6;
 787 
 788                 if (nla_len(a) != sizeof(struct in6_addr))
 789                         return -EINVAL;
 790                 in6 = nla_get_in6_addr(a);
 791                 inetpeer_set_addr_v6(addr, &in6);
 792                 if (hash)
 793                         *hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
 794                 return 0;
 795         }
 796         return optional ? 1 : -EAFNOSUPPORT;
 797 }
 798 
 799 static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
 800                          unsigned int *hash, int optional)
 801 {
 802         return __parse_nl_addr(info, addr, hash, optional,
 803                                TCP_METRICS_ATTR_ADDR_IPV4,
 804                                TCP_METRICS_ATTR_ADDR_IPV6);
 805 }
 806 
 807 static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
 808 {
 809         return __parse_nl_addr(info, addr, NULL, 0,
 810                                TCP_METRICS_ATTR_SADDR_IPV4,
 811                                TCP_METRICS_ATTR_SADDR_IPV6);
 812 }
 813 
 814 static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
 815 {
 816         struct tcp_metrics_block *tm;
 817         struct inetpeer_addr saddr, daddr;
 818         unsigned int hash;
 819         struct sk_buff *msg;
 820         struct net *net = genl_info_net(info);
 821         void *reply;
 822         int ret;
 823         bool src = true;
 824 
 825         ret = parse_nl_addr(info, &daddr, &hash, 0);
 826         if (ret < 0)
 827                 return ret;
 828 
 829         ret = parse_nl_saddr(info, &saddr);
 830         if (ret < 0)
 831                 src = false;
 832 
 833         msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 834         if (!msg)
 835                 return -ENOMEM;
 836 
 837         reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
 838                                   info->genlhdr->cmd);
 839         if (!reply)
 840                 goto nla_put_failure;
 841 
 842         hash ^= net_hash_mix(net);
 843         hash = hash_32(hash, tcp_metrics_hash_log);
 844         ret = -ESRCH;
 845         rcu_read_lock();
 846         for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 847              tm = rcu_dereference(tm->tcpm_next)) {
 848                 if (addr_same(&tm->tcpm_daddr, &daddr) &&
 849                     (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 850                     net_eq(tm_net(tm), net)) {
 851                         ret = tcp_metrics_fill_info(msg, tm);
 852                         break;
 853                 }
 854         }
 855         rcu_read_unlock();
 856         if (ret < 0)
 857                 goto out_free;
 858 
 859         genlmsg_end(msg, reply);
 860         return genlmsg_reply(msg, info);
 861 
 862 nla_put_failure:
 863         ret = -EMSGSIZE;
 864 
 865 out_free:
 866         nlmsg_free(msg);
 867         return ret;
 868 }
 869 
 870 static void tcp_metrics_flush_all(struct net *net)
 871 {
 872         unsigned int max_rows = 1U << tcp_metrics_hash_log;
 873         struct tcpm_hash_bucket *hb = tcp_metrics_hash;
 874         struct tcp_metrics_block *tm;
 875         unsigned int row;
 876 
 877         for (row = 0; row < max_rows; row++, hb++) {
 878                 struct tcp_metrics_block __rcu **pp;
 879                 bool match;
 880 
 881                 spin_lock_bh(&tcp_metrics_lock);
 882                 pp = &hb->chain;
 883                 for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 884                         match = net ? net_eq(tm_net(tm), net) :
 885                                 !refcount_read(&tm_net(tm)->count);
 886                         if (match) {
 887                                 *pp = tm->tcpm_next;
 888                                 kfree_rcu(tm, rcu_head);
 889                         } else {
 890                                 pp = &tm->tcpm_next;
 891                         }
 892                 }
 893                 spin_unlock_bh(&tcp_metrics_lock);
 894         }
 895 }
 896 
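/* tcp_metrics_flush_all() serves two callers: the netlink DEL command with
 * no address attribute passes a specific netns and removes only that
 * namespace's entries, while tcp_net_metrics_exit_batch() passes NULL,
 * which matches every entry whose netns refcount has dropped to zero, i.e.
 * entries belonging to the namespaces currently being dismantled.
 */
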
 897 static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
 898 {
 899         struct tcpm_hash_bucket *hb;
 900         struct tcp_metrics_block *tm;
 901         struct tcp_metrics_block __rcu **pp;
 902         struct inetpeer_addr saddr, daddr;
 903         unsigned int hash;
 904         struct net *net = genl_info_net(info);
 905         int ret;
 906         bool src = true, found = false;
 907 
 908         ret = parse_nl_addr(info, &daddr, &hash, 1);
 909         if (ret < 0)
 910                 return ret;
 911         if (ret > 0) {
 912                 tcp_metrics_flush_all(net);
 913                 return 0;
 914         }
 915         ret = parse_nl_saddr(info, &saddr);
 916         if (ret < 0)
 917                 src = false;
 918 
 919         hash ^= net_hash_mix(net);
 920         hash = hash_32(hash, tcp_metrics_hash_log);
 921         hb = tcp_metrics_hash + hash;
 922         pp = &hb->chain;
 923         spin_lock_bh(&tcp_metrics_lock);
 924         for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
 925                 if (addr_same(&tm->tcpm_daddr, &daddr) &&
 926                     (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
 927                     net_eq(tm_net(tm), net)) {
 928                         *pp = tm->tcpm_next;
 929                         kfree_rcu(tm, rcu_head);
 930                         found = true;
 931                 } else {
 932                         pp = &tm->tcpm_next;
 933                 }
 934         }
 935         spin_unlock_bh(&tcp_metrics_lock);
 936         if (!found)
 937                 return -ESRCH;
 938         return 0;
 939 }
 940 
 941 static const struct genl_ops tcp_metrics_nl_ops[] = {
 942         {
 943                 .cmd = TCP_METRICS_CMD_GET,
 944                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 945                 .doit = tcp_metrics_nl_cmd_get,
 946                 .dumpit = tcp_metrics_nl_dump,
 947         },
 948         {
 949                 .cmd = TCP_METRICS_CMD_DEL,
 950                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 951                 .doit = tcp_metrics_nl_cmd_del,
 952                 .flags = GENL_ADMIN_PERM,
 953         },
 954 };
 955 
 956 static struct genl_family tcp_metrics_nl_family __ro_after_init = {
 957         .hdrsize        = 0,
 958         .name           = TCP_METRICS_GENL_NAME,
 959         .version        = TCP_METRICS_GENL_VERSION,
 960         .maxattr        = TCP_METRICS_ATTR_MAX,
 961         .policy = tcp_metrics_nl_policy,
 962         .netnsok        = true,
 963         .module         = THIS_MODULE,
 964         .ops            = tcp_metrics_nl_ops,
 965         .n_ops          = ARRAY_SIZE(tcp_metrics_nl_ops),
 966 };
 967 
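/* Userspace reaches this family over generic netlink; iproute2 ships a
 * front end for it.  As an illustration (iproute2 command syntax, not part
 * of this file): "ip tcp_metrics show" walks the cache via
 * TCP_METRICS_CMD_GET dumps, and "ip tcp_metrics flush" issues
 * TCP_METRICS_CMD_DEL, which requires CAP_NET_ADMIN because of the
 * GENL_ADMIN_PERM flag above.
 */
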
 968 static unsigned int tcpmhash_entries;
 969 static int __init set_tcpmhash_entries(char *str)
 970 {
 971         ssize_t ret;
 972 
 973         if (!str)
 974                 return 0;
 975 
 976         ret = kstrtouint(str, 0, &tcpmhash_entries);
 977         if (ret)
 978                 return 0;
 979 
 980         return 1;
 981 }
 982 __setup("tcpmhash_entries=", set_tcpmhash_entries);
 983 
 984 static int __net_init tcp_net_metrics_init(struct net *net)
 985 {
 986         size_t size;
 987         unsigned int slots;
 988 
 989         if (!net_eq(net, &init_net))
 990                 return 0;
 991 
 992         slots = tcpmhash_entries;
 993         if (!slots) {
 994                 if (totalram_pages() >= 128 * 1024)
 995                         slots = 16 * 1024;
 996                 else
 997                         slots = 8 * 1024;
 998         }
 999 
1000         tcp_metrics_hash_log = order_base_2(slots);
1001         size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
1002 
1003         tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
1004         if (!tcp_metrics_hash)
1005                 return -ENOMEM;
1006 
1007         return 0;
1008 }
1009 
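/* Sizing note: assuming 4 KiB pages, totalram_pages() >= 128 * 1024
 * corresponds to roughly 512 MiB of RAM, so such machines get 16384 hash
 * buckets and smaller ones get 8192, unless tcpmhash_entries= overrides
 * this on the kernel command line.  The table is allocated once, for
 * init_net only; entries from other namespaces share it and are told apart
 * by the tcpm_net tag and the net_hash_mix() component of the hash.
 */
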
1010 static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
1011 {
1012         tcp_metrics_flush_all(NULL);
1013 }
1014 
1015 static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
1016         .init           =       tcp_net_metrics_init,
1017         .exit_batch     =       tcp_net_metrics_exit_batch,
1018 };
1019 
1020 void __init tcp_metrics_init(void)
1021 {
1022         int ret;
1023 
1024         ret = register_pernet_subsys(&tcp_net_metrics_ops);
1025         if (ret < 0)
1026                 panic("Could not allocate the tcp_metrics hash table\n");
1027 
1028         ret = genl_register_family(&tcp_metrics_nl_family);
1029         if (ret < 0)
1030                 panic("Could not register tcp_metrics generic netlink\n");
1031 }
