root/net/ipv6/ip6_flowlabel.c


DEFINITIONS

This source file includes the following definitions.
  1. __fl_lookup
  2. fl_lookup
  3. fl_shared_exclusive
  4. fl_free_rcu
  5. fl_free
  6. fl_release
  7. ip6_fl_gc
  8. ip6_fl_purge
  9. fl_intern
  10. __fl6_sock_lookup
  11. fl6_free_socklist
  12. fl6_merge_options
  13. check_linger
  14. fl6_renew
  15. fl_create
  16. mem_check
  17. fl_link
  18. ipv6_flowlabel_opt_get
  19. ipv6_flowlabel_opt
  20. ip6fl_get_first
  21. ip6fl_get_next
  22. ip6fl_get_idx
  23. ip6fl_seq_start
  24. ip6fl_seq_next
  25. ip6fl_seq_stop
  26. ip6fl_seq_show
  27. ip6_flowlabel_proc_init
  28. ip6_flowlabel_proc_fini
  29. ip6_flowlabel_proc_init
  30. ip6_flowlabel_proc_fini
  31. ip6_flowlabel_net_exit
  32. ip6_flowlabel_init
  33. ip6_flowlabel_cleanup

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      ip6_flowlabel.c         IPv6 flowlabel manager.
 *
 *      Authors:        Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>
#include <linux/jump_label_ratelimit.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <linux/uaccess.h>

#define FL_MIN_LINGER   6       /* Minimal linger. It is set to the six
                                   seconds specified in the old IPv6 RFC,
                                   which remains a reasonable value.
                                 */
#define FL_MAX_LINGER   150     /* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK 32
#define FL_MAX_SIZE     4096
#define FL_HASH_MASK    255
#define FL_HASH(l)      (ntohl(l)&FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Lock protecting the per-socket flow label lists */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

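/*
 * ipv6_flowlabel_exclusive is a deferred static key.  It is switched on
 * only once a label exists that is shared exclusively (per process, per
 * user or fully exclusive) or that carries options, so hot paths testing
 * it can skip flow label lookups while no such label is around.  The HZ
 * argument rate limits how quickly the key is switched back off.
 */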
DEFINE_STATIC_KEY_DEFERRED_FALSE(ipv6_flowlabel_exclusive, HZ);
EXPORT_SYMBOL(ipv6_flowlabel_exclusive);

#define for_each_fl_rcu(hash, fl)                               \
        for (fl = rcu_dereference_bh(fl_ht[(hash)]);            \
             fl != NULL;                                        \
             fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)                            \
        for (fl = rcu_dereference_bh(fl->next);                 \
             fl != NULL;                                        \
             fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)                             \
        for (sfl = rcu_dereference_bh(np->ipv6_fl_list);        \
             sfl != NULL;                                       \
             sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
        struct ip6_flowlabel *fl;

        for_each_fl_rcu(FL_HASH(label), fl) {
                if (fl->label == label && net_eq(fl->fl_net, net))
                        return fl;
        }
        return NULL;
}

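/*
 * Look up a label and take a reference to it.  atomic_inc_not_zero()
 * makes the lookup safe against a concurrent final put: an entry whose
 * refcount has already dropped to zero is treated as gone even though it
 * may still be visible on the RCU-protected hash chain.
 */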
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
        struct ip6_flowlabel *fl;

        rcu_read_lock_bh();
        fl = __fl_lookup(net, label);
        if (fl && !atomic_inc_not_zero(&fl->users))
                fl = NULL;
        rcu_read_unlock_bh();
        return fl;
}

static bool fl_shared_exclusive(struct ip6_flowlabel *fl)
{
        return fl->share == IPV6_FL_S_EXCL ||
               fl->share == IPV6_FL_S_PROCESS ||
               fl->share == IPV6_FL_S_USER;
}

static void fl_free_rcu(struct rcu_head *head)
{
        struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);

        if (fl->share == IPV6_FL_S_PROCESS)
                put_pid(fl->owner.pid);
        kfree(fl->opt);
        kfree(fl);
}


static void fl_free(struct ip6_flowlabel *fl)
{
        if (!fl)
                return;

        if (fl_shared_exclusive(fl) || fl->opt)
                static_branch_slow_dec_deferred(&ipv6_flowlabel_exclusive);

        call_rcu(&fl->rcu, fl_free_rcu);
}

static void fl_release(struct ip6_flowlabel *fl)
{
        spin_lock_bh(&ip6_fl_lock);

        fl->lastuse = jiffies;
        if (atomic_dec_and_test(&fl->users)) {
                unsigned long ttd = fl->lastuse + fl->linger;
                if (time_after(ttd, fl->expires))
                        fl->expires = ttd;
                ttd = fl->expires;
                if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
                        struct ipv6_txoptions *opt = fl->opt;
                        fl->opt = NULL;
                        kfree(opt);
                }
                if (!timer_pending(&ip6_fl_gc_timer) ||
                    time_after(ip6_fl_gc_timer.expires, ttd))
                        mod_timer(&ip6_fl_gc_timer, ttd);
        }
        spin_unlock_bh(&ip6_fl_lock);
}

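/*
 * Garbage collector: drop unreferenced labels whose lifetime has run out
 * and re-arm the timer for the earliest remaining expiry.
 */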
static void ip6_fl_gc(struct timer_list *unused)
{
        int i;
        unsigned long now = jiffies;
        unsigned long sched = 0;

        spin_lock(&ip6_fl_lock);

        for (i = 0; i <= FL_HASH_MASK; i++) {
                struct ip6_flowlabel *fl;
                struct ip6_flowlabel __rcu **flp;

                flp = &fl_ht[i];
                while ((fl = rcu_dereference_protected(*flp,
                                                       lockdep_is_held(&ip6_fl_lock))) != NULL) {
                        if (atomic_read(&fl->users) == 0) {
                                unsigned long ttd = fl->lastuse + fl->linger;
                                if (time_after(ttd, fl->expires))
                                        fl->expires = ttd;
                                ttd = fl->expires;
                                if (time_after_eq(now, ttd)) {
                                        *flp = fl->next;
                                        fl_free(fl);
                                        atomic_dec(&fl_size);
                                        continue;
                                }
                                if (!sched || time_before(ttd, sched))
                                        sched = ttd;
                        }
                        flp = &fl->next;
                }
        }
        if (!sched && atomic_read(&fl_size))
                sched = now + FL_MAX_LINGER;
        if (sched) {
                mod_timer(&ip6_fl_gc_timer, sched);
        }
        spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
        int i;

        spin_lock_bh(&ip6_fl_lock);
        for (i = 0; i <= FL_HASH_MASK; i++) {
                struct ip6_flowlabel *fl;
                struct ip6_flowlabel __rcu **flp;

                flp = &fl_ht[i];
                while ((fl = rcu_dereference_protected(*flp,
                                                       lockdep_is_held(&ip6_fl_lock))) != NULL) {
                        if (net_eq(fl->fl_net, net) &&
                            atomic_read(&fl->users) == 0) {
                                *flp = fl->next;
                                fl_free(fl);
                                atomic_dec(&fl_size);
                                continue;
                        }
                        flp = &fl->next;
                }
        }
        spin_unlock_bh(&ip6_fl_lock);
}

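/*
 * Insert a label into the hash table.  With label == 0 a free label is
 * picked at random.  If the requested label is already in use, a
 * reference to the existing entry is returned instead; on successful
 * insertion the function returns NULL.
 */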
static struct ip6_flowlabel *fl_intern(struct net *net,
                                       struct ip6_flowlabel *fl, __be32 label)
{
        struct ip6_flowlabel *lfl;

        fl->label = label & IPV6_FLOWLABEL_MASK;

        spin_lock_bh(&ip6_fl_lock);
        if (label == 0) {
                for (;;) {
                        fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
                        if (fl->label) {
                                lfl = __fl_lookup(net, fl->label);
                                if (!lfl)
                                        break;
                        }
                }
        } else {
                /*
                 * we dropped the ip6_fl_lock, so this entry could reappear
                 * and we need to recheck with it.
                 *
                 * OTOH no need to search the active socket first, like it is
                 * done in ipv6_flowlabel_opt - sock is locked, so a new entry
                 * with the same label can only appear on another sock
                 */
                lfl = __fl_lookup(net, fl->label);
                if (lfl) {
                        atomic_inc(&lfl->users);
                        spin_unlock_bh(&ip6_fl_lock);
                        return lfl;
                }
        }

        fl->lastuse = jiffies;
        fl->next = fl_ht[FL_HASH(fl->label)];
        rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
        atomic_inc(&fl_size);
        spin_unlock_bh(&ip6_fl_lock);
        return NULL;
}



/* Socket flowlabel lists */

struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label)
{
        struct ipv6_fl_socklist *sfl;
        struct ipv6_pinfo *np = inet6_sk(sk);

        label &= IPV6_FLOWLABEL_MASK;

        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl) {
                struct ip6_flowlabel *fl = sfl->fl;

                if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
                        fl->lastuse = jiffies;
                        rcu_read_unlock_bh();
                        return fl;
                }
        }
        rcu_read_unlock_bh();
        return NULL;
}
EXPORT_SYMBOL_GPL(__fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;

        if (!rcu_access_pointer(np->ipv6_fl_list))
                return;

        spin_lock_bh(&ip6_sk_fl_lock);
        while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
                                                lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
                np->ipv6_fl_list = sfl->next;
                spin_unlock_bh(&ip6_sk_fl_lock);

                fl_release(sfl->fl);
                kfree_rcu(sfl, rcu);

                spin_lock_bh(&ip6_sk_fl_lock);
        }
        spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */


/*
   This is the only difficult place: a flow label enforces equal headers
   up to and including the routing header, yet the user may still supply
   options following the rthdr.
 */
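/*
 * Concretely, the merge below behaves like this sketch: the
 * non-fragmentable options (hop-by-hop, dst0, routing header) come from
 * the label's own options whenever the label has any, while the dst1
 * options following the routing header always come from the
 * caller-supplied fopt:
 *
 *      opt_space->hopopt  = fl->opt ? fl->opt->hopopt  : NULL;
 *      opt_space->dst0opt = fl->opt ? fl->opt->dst0opt : NULL;
 *      opt_space->srcrt   = fl->opt ? fl->opt->srcrt   : NULL;
 *      opt_space->dst1opt = fopt->dst1opt;
 */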

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt)
{
        struct ipv6_txoptions *fl_opt = fl->opt;

        if (!fopt || fopt->opt_flen == 0)
                return fl_opt;

        if (fl_opt) {
                opt_space->hopopt = fl_opt->hopopt;
                opt_space->dst0opt = fl_opt->dst0opt;
                opt_space->srcrt = fl_opt->srcrt;
                opt_space->opt_nflen = fl_opt->opt_nflen;
        } else {
                if (fopt->opt_nflen == 0)
                        return fopt;
                opt_space->hopopt = NULL;
                opt_space->dst0opt = NULL;
                opt_space->srcrt = NULL;
                opt_space->opt_nflen = 0;
        }
        opt_space->dst1opt = fopt->dst1opt;
        opt_space->opt_flen = fopt->opt_flen;
        opt_space->tot_len = fopt->tot_len;
        return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);

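/*
 * Clamp a requested lifetime: values below FL_MIN_LINGER are raised to
 * the minimum, and values above FL_MAX_LINGER require CAP_NET_ADMIN.
 * Returns the lifetime converted to jiffies, or 0 if it is not allowed.
 */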
static unsigned long check_linger(unsigned long ttl)
{
        if (ttl < FL_MIN_LINGER)
                return FL_MIN_LINGER*HZ;
        if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
                return 0;
        return ttl*HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
        linger = check_linger(linger);
        if (!linger)
                return -EPERM;
        expires = check_linger(expires);
        if (!expires)
                return -EPERM;

        spin_lock_bh(&ip6_fl_lock);
        fl->lastuse = jiffies;
        if (time_before(fl->linger, linger))
                fl->linger = linger;
        if (time_before(expires, fl->linger))
                expires = fl->linger;
        if (time_before(fl->expires, fl->lastuse + expires))
                fl->expires = fl->lastuse + expires;
        spin_unlock_bh(&ip6_fl_lock);

        return 0;
}

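/*
 * Allocate and set up a new label from a userspace request.  Any
 * ancillary data following the in6_flowlabel_req in optval is parsed as
 * cmsgs; only non-fragmentable options (hop-by-hop, dst0, routing
 * header) may be attached to a label, so a non-zero opt_flen is
 * rejected.
 */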
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
          char __user *optval, int optlen, int *err_p)
{
        struct ip6_flowlabel *fl = NULL;
        int olen;
        int addr_type;
        int err;

        olen = optlen - CMSG_ALIGN(sizeof(*freq));
        err = -EINVAL;
        if (olen > 64 * 1024)
                goto done;

        err = -ENOMEM;
        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
        if (!fl)
                goto done;

        if (olen > 0) {
                struct msghdr msg;
                struct flowi6 flowi6;
                struct ipcm6_cookie ipc6;

                err = -ENOMEM;
                fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
                if (!fl->opt)
                        goto done;

                memset(fl->opt, 0, sizeof(*fl->opt));
                fl->opt->tot_len = sizeof(*fl->opt) + olen;
                err = -EFAULT;
                if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
                        goto done;

                msg.msg_controllen = olen;
                msg.msg_control = (void *)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));

                ipc6.opt = fl->opt;
                err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
                if (err)
                        goto done;
                err = -EINVAL;
                if (fl->opt->opt_flen)
                        goto done;
                if (fl->opt->opt_nflen == 0) {
                        kfree(fl->opt);
                        fl->opt = NULL;
                }
        }

        fl->fl_net = net;
        fl->expires = jiffies;
        err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
        if (err)
                goto done;
        fl->share = freq->flr_share;
        addr_type = ipv6_addr_type(&freq->flr_dst);
        if ((addr_type & IPV6_ADDR_MAPPED) ||
            addr_type == IPV6_ADDR_ANY) {
                err = -EINVAL;
                goto done;
        }
        fl->dst = freq->flr_dst;
        atomic_set(&fl->users, 1);
        switch (fl->share) {
        case IPV6_FL_S_EXCL:
        case IPV6_FL_S_ANY:
                break;
        case IPV6_FL_S_PROCESS:
                fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
                break;
        case IPV6_FL_S_USER:
                fl->owner.uid = current_euid();
                break;
        default:
                err = -EINVAL;
                goto done;
        }
        if (fl_shared_exclusive(fl) || fl->opt)
                static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
        return fl;

done:
        if (fl) {
                kfree(fl->opt);
                kfree(fl);
        }
        *err_p = err;
        return NULL;
}

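/*
 * Enforce the label limits: at most FL_MAX_SIZE labels system-wide and
 * FL_MAX_PER_SOCK per socket, with the tighter bounds waived for
 * CAP_NET_ADMIN.
 */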
static int mem_check(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;
        int room = FL_MAX_SIZE - atomic_read(&fl_size);
        int count = 0;

        if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
                return 0;

        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl)
                count++;
        rcu_read_unlock_bh();

        if (room <= 0 ||
            ((count >= FL_MAX_PER_SOCK ||
              (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
             !capable(CAP_NET_ADMIN)))
                return -ENOBUFS;

        return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
                struct ip6_flowlabel *fl)
{
        spin_lock_bh(&ip6_sk_fl_lock);
        sfl->fl = fl;
        sfl->next = np->ipv6_fl_list;
        rcu_assign_pointer(np->ipv6_fl_list, sfl);
        spin_unlock_bh(&ip6_sk_fl_lock);
}

int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;

        if (flags & IPV6_FL_F_REMOTE) {
                freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
                return 0;
        }

        if (np->repflow) {
                freq->flr_label = np->flow_label;
                return 0;
        }

        rcu_read_lock_bh();

        for_each_sk_fl_rcu(np, sfl) {
                if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
                        spin_lock_bh(&ip6_fl_lock);
                        freq->flr_label = sfl->fl->label;
                        freq->flr_dst = sfl->fl->dst;
                        freq->flr_share = sfl->fl->share;
                        freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
                        freq->flr_linger = sfl->fl->linger / HZ;

                        spin_unlock_bh(&ip6_fl_lock);
                        rcu_read_unlock_bh();
                        return 0;
                }
        }
        rcu_read_unlock_bh();

        return -ENOENT;
}

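/*
 * Handler for the IPV6_FLOWLABEL_MGR socket option.  As a rough
 * userspace sketch (names from the uapi <linux/in6.h>; "fd" and "dst"
 * are placeholders), acquiring a label looks like:
 *
 *      struct in6_flowlabel_req freq = {0};
 *
 *      freq.flr_action = IPV6_FL_A_GET;
 *      freq.flr_flags  = IPV6_FL_F_CREATE;     // create if not present
 *      freq.flr_share  = IPV6_FL_S_EXCL;       // exclusive to this owner
 *      freq.flr_label  = 0;                    // 0: let the kernel choose
 *      freq.flr_dst    = dst;                  // destination in6_addr
 *      setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *
 * On success the chosen label is copied back into optval (see the
 * copy_to_user() below); actually sending with it additionally requires
 * the IPV6_FLOWINFO_SEND option plus the label in sin6_flowinfo.
 */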
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
        int uninitialized_var(err);
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_flowlabel_req freq;
        struct ipv6_fl_socklist *sfl1 = NULL;
        struct ipv6_fl_socklist *sfl;
        struct ipv6_fl_socklist __rcu **sflp;
        struct ip6_flowlabel *fl, *fl1 = NULL;


        if (optlen < sizeof(freq))
                return -EINVAL;

        if (copy_from_user(&freq, optval, sizeof(freq)))
                return -EFAULT;

        switch (freq.flr_action) {
        case IPV6_FL_A_PUT:
                if (freq.flr_flags & IPV6_FL_F_REFLECT) {
                        if (sk->sk_protocol != IPPROTO_TCP)
                                return -ENOPROTOOPT;
                        if (!np->repflow)
                                return -ESRCH;
                        np->flow_label = 0;
                        np->repflow = 0;
                        return 0;
                }
                spin_lock_bh(&ip6_sk_fl_lock);
                for (sflp = &np->ipv6_fl_list;
                     (sfl = rcu_dereference_protected(*sflp,
                                                      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
                     sflp = &sfl->next) {
                        if (sfl->fl->label == freq.flr_label) {
                                if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                                        np->flow_label &= ~IPV6_FLOWLABEL_MASK;
                                *sflp = sfl->next;
                                spin_unlock_bh(&ip6_sk_fl_lock);
                                fl_release(sfl->fl);
                                kfree_rcu(sfl, rcu);
                                return 0;
                        }
                }
                spin_unlock_bh(&ip6_sk_fl_lock);
                return -ESRCH;

        case IPV6_FL_A_RENEW:
                rcu_read_lock_bh();
                for_each_sk_fl_rcu(np, sfl) {
                        if (sfl->fl->label == freq.flr_label) {
                                err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
                                rcu_read_unlock_bh();
                                return err;
                        }
                }
                rcu_read_unlock_bh();

                if (freq.flr_share == IPV6_FL_S_NONE &&
                    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
                        fl = fl_lookup(net, freq.flr_label);
                        if (fl) {
                                err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
                                fl_release(fl);
                                return err;
                        }
                }
                return -ESRCH;

        case IPV6_FL_A_GET:
                if (freq.flr_flags & IPV6_FL_F_REFLECT) {
                        struct net *net = sock_net(sk);
                        if (net->ipv6.sysctl.flowlabel_consistency) {
                                net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
                                return -EPERM;
                        }

                        if (sk->sk_protocol != IPPROTO_TCP)
                                return -ENOPROTOOPT;

                        np->repflow = 1;
                        return 0;
                }

                if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
                        return -EINVAL;

                if (net->ipv6.sysctl.flowlabel_state_ranges &&
                    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
                        return -ERANGE;

                fl = fl_create(net, sk, &freq, optval, optlen, &err);
                if (!fl)
                        return err;
                sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

                if (freq.flr_label) {
                        err = -EEXIST;
                        rcu_read_lock_bh();
                        for_each_sk_fl_rcu(np, sfl) {
                                if (sfl->fl->label == freq.flr_label) {
                                        if (freq.flr_flags&IPV6_FL_F_EXCL) {
                                                rcu_read_unlock_bh();
                                                goto done;
                                        }
                                        fl1 = sfl->fl;
                                        if (!atomic_inc_not_zero(&fl1->users))
                                                fl1 = NULL;
                                        break;
                                }
                        }
                        rcu_read_unlock_bh();

                        if (!fl1)
                                fl1 = fl_lookup(net, freq.flr_label);
                        if (fl1) {
recheck:
                                err = -EEXIST;
                                if (freq.flr_flags&IPV6_FL_F_EXCL)
                                        goto release;
                                err = -EPERM;
                                if (fl1->share == IPV6_FL_S_EXCL ||
                                    fl1->share != fl->share ||
                                    ((fl1->share == IPV6_FL_S_PROCESS) &&
                                     (fl1->owner.pid != fl->owner.pid)) ||
                                    ((fl1->share == IPV6_FL_S_USER) &&
                                     !uid_eq(fl1->owner.uid, fl->owner.uid)))
                                        goto release;

                                err = -ENOMEM;
                                if (!sfl1)
                                        goto release;
                                if (fl->linger > fl1->linger)
                                        fl1->linger = fl->linger;
                                if ((long)(fl->expires - fl1->expires) > 0)
                                        fl1->expires = fl->expires;
                                fl_link(np, sfl1, fl1);
                                fl_free(fl);
                                return 0;

release:
                                fl_release(fl1);
                                goto done;
                        }
                }
                err = -ENOENT;
                if (!(freq.flr_flags&IPV6_FL_F_CREATE))
                        goto done;

                err = -ENOMEM;
                if (!sfl1)
                        goto done;

                err = mem_check(sk);
                if (err != 0)
                        goto done;

                fl1 = fl_intern(net, fl, freq.flr_label);
                if (fl1)
                        goto recheck;

                if (!freq.flr_label) {
                        if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
                                         &fl->label, sizeof(fl->label))) {
                                /* Intentionally ignore fault. */
                        }
                }

                fl_link(np, sfl1, fl);
                return 0;

        default:
                return -EINVAL;
        }

done:
        fl_free(fl);
        kfree(sfl1);
        return err;
}

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
        struct seq_net_private p;
        struct pid_namespace *pid_ns;
        int bucket;
};

#define ip6fl_seq_private(seq)  ((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
        struct ip6_flowlabel *fl = NULL;
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        struct net *net = seq_file_net(seq);

        for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
                for_each_fl_rcu(state->bucket, fl) {
                        if (net_eq(fl->fl_net, net))
                                goto out;
                }
        }
        fl = NULL;
out:
        return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        struct net *net = seq_file_net(seq);

        for_each_fl_continue_rcu(fl) {
                if (net_eq(fl->fl_net, net))
                        goto out;
        }

try_again:
        if (++state->bucket <= FL_HASH_MASK) {
                for_each_fl_rcu(state->bucket, fl) {
                        if (net_eq(fl->fl_net, net))
                                goto out;
                }
                goto try_again;
        }
        fl = NULL;

out:
        return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
        struct ip6_flowlabel *fl = ip6fl_get_first(seq);
        if (fl)
                while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
                        --pos;
        return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

        state->pid_ns = proc_pid_ns(file_inode(seq->file));

        rcu_read_lock_bh();
        return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ip6_flowlabel *fl;

        if (v == SEQ_START_TOKEN)
                fl = ip6fl_get_first(seq);
        else
                fl = ip6fl_get_next(seq, v);
        ++*pos;
        return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock_bh();
}

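/*
 * Emit one line per label in /proc/net/ip6_flowlabel.  Illustrative
 * output (values made up; %pi6 prints the address as plain hex digits):
 *
 *      Label S Owner  Users  Linger Expires  Dst                              Opt
 *      12345 2 1234   1      60     59       20010db8000000000000000000000001 0
 */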
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
        } else {
                struct ip6_flowlabel *fl = v;
                seq_printf(seq,
                           "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
                           (unsigned int)ntohl(fl->label),
                           fl->share,
                           ((fl->share == IPV6_FL_S_PROCESS) ?
                            pid_nr_ns(fl->owner.pid, state->pid_ns) :
                            ((fl->share == IPV6_FL_S_USER) ?
                             from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
                             0)),
                           atomic_read(&fl->users),
                           fl->linger/HZ,
                           (long)(fl->expires - jiffies)/HZ,
                           &fl->dst,
                           fl->opt ? fl->opt->opt_nflen : 0);
        }
        return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
        .start  =       ip6fl_seq_start,
        .next   =       ip6fl_seq_next,
        .stop   =       ip6fl_seq_stop,
        .show   =       ip6fl_seq_show,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
        if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
                        &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
                return -ENOMEM;
        return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
        remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
        return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
        ip6_fl_purge(net);
        ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
        .init = ip6_flowlabel_proc_init,
        .exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
        return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
        static_key_deferred_flush(&ipv6_flowlabel_exclusive);
        del_timer(&ip6_fl_gc_timer);
        unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}
