root/kernel/taskstats.c


DEFINITIONS

This source file includes the following definitions; a hedged userspace usage sketch follows the list, and a second sketch covering exit-record listeners follows the source listing.
  1. prepare_reply
  2. send_reply
  3. send_cpu_listeners
  4. fill_stats
  5. fill_stats_for_pid
  6. fill_stats_for_tgid
  7. fill_tgid_exit
  8. add_del_listener
  9. parse
  10. mk_reply
  11. cgroupstats_user_cmd
  12. cmd_attr_register_cpumask
  13. cmd_attr_deregister_cpumask
  14. taskstats_packet_size
  15. cmd_attr_pid
  16. cmd_attr_tgid
  17. taskstats_user_cmd
  18. taskstats_tgid_alloc
  19. taskstats_exit
  20. taskstats_pre_doit
  21. taskstats_init_early
  22. taskstats_init
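
USERSPACE USAGE SKETCH

The commands listed above are driven from userspace over generic netlink; the reference client is getdelays.c under Documentation/accounting/ in the kernel tree. The fragment below is a hedged sketch, not part of this file: it hand-builds a TASKSTATS_CMD_GET request carrying TASKSTATS_CMD_ATTR_PID, which cmd_attr_pid() below answers with a struct taskstats. The helper name query_pid_stats and the struct ts_req wrapper are illustrative inventions; the genetlink family id is assumed to have been resolved already via the nlctrl CTRL_CMD_GETFAMILY command, and error handling plus parsing of the nested reply are omitted. Note that TASKSTATS_CMD_GET is registered with GENL_ADMIN_PERM, so the caller needs CAP_NET_ADMIN.

/*
 * Hedged userspace sketch (not part of kernel/taskstats.c): build and send
 * a TASKSTATS_CMD_GET request asking for the stats of a single pid.  The
 * wrapper type and helper name are illustrative only.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/taskstats.h>

struct ts_req {                         /* illustrative request buffer */
        struct nlmsghdr n;
        struct genlmsghdr g;
        char buf[256];
};

static int query_pid_stats(int sock, __u16 family_id, __u32 pid)
{
        struct ts_req req;
        struct nlattr *na;
        struct sockaddr_nl dst = { .nl_family = AF_NETLINK };  /* the kernel */

        memset(&req, 0, sizeof(req));
        req.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
        req.n.nlmsg_type = family_id;   /* resolved via CTRL_CMD_GETFAMILY */
        req.n.nlmsg_flags = NLM_F_REQUEST;
        req.g.cmd = TASKSTATS_CMD_GET;
        req.g.version = TASKSTATS_GENL_VERSION;

        /* Append TASKSTATS_CMD_ATTR_PID = pid; handled by cmd_attr_pid() */
        na = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.n.nlmsg_len));
        na->nla_type = TASKSTATS_CMD_ATTR_PID;
        na->nla_len = NLA_HDRLEN + sizeof(pid);
        memcpy((char *)na + NLA_HDRLEN, &pid, sizeof(pid));
        req.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);

        /*
         * The reply (see mk_reply()) nests TASKSTATS_TYPE_PID and a
         * TASKSTATS_TYPE_STATS attribute (struct taskstats) inside
         * TASKSTATS_TYPE_AGGR_PID.
         */
        return sendto(sock, &req, req.n.nlmsg_len, 0,
                      (struct sockaddr *)&dst, sizeof(dst));
}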

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * taskstats.c - Export per-task statistics to userland
   4  *
   5  * Copyright (C) Shailabh Nagar, IBM Corp. 2006
   6  *           (C) Balbir Singh,   IBM Corp. 2006
   7  */
   8 
   9 #include <linux/kernel.h>
  10 #include <linux/taskstats_kern.h>
  11 #include <linux/tsacct_kern.h>
  12 #include <linux/delayacct.h>
  13 #include <linux/cpumask.h>
  14 #include <linux/percpu.h>
  15 #include <linux/slab.h>
  16 #include <linux/cgroupstats.h>
  17 #include <linux/cgroup.h>
  18 #include <linux/fs.h>
  19 #include <linux/file.h>
  20 #include <linux/pid_namespace.h>
  21 #include <net/genetlink.h>
  22 #include <linux/atomic.h>
  23 #include <linux/sched/cputime.h>
  24 
  25 /*
  26  * Maximum length of a cpumask that can be specified in
  27  * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
  28  */
  29 #define TASKSTATS_CPUMASK_MAXLEN        (100+6*NR_CPUS)
  30 
  31 static DEFINE_PER_CPU(__u32, taskstats_seqnum);
  32 static int family_registered;
  33 struct kmem_cache *taskstats_cache;
  34 
  35 static struct genl_family family;
  36 
  37 static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
  38         [TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
  39         [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
  40         [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
  41         [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
  42 
  43 /*
  44  * We have to use TASKSTATS_CMD_ATTR_MAX here, as it is the maxattr in the family.
  45  * Make sure they are always aligned.
  46  */
  47 static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
  48         [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
  49 };
  50 
  51 struct listener {
  52         struct list_head list;
  53         pid_t pid;
  54         char valid;
  55 };
  56 
  57 struct listener_list {
  58         struct rw_semaphore sem;
  59         struct list_head list;
  60 };
  61 static DEFINE_PER_CPU(struct listener_list, listener_array);
  62 
  63 enum actions {
  64         REGISTER,
  65         DEREGISTER,
  66         CPU_DONT_CARE
  67 };
  68 
  69 static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
  70                                 size_t size)
  71 {
  72         struct sk_buff *skb;
  73         void *reply;
  74 
  75         /*
  76          * If new attributes are added, please revisit this allocation
  77          */
  78         skb = genlmsg_new(size, GFP_KERNEL);
  79         if (!skb)
  80                 return -ENOMEM;
  81 
  82         if (!info) {
  83                 int seq = this_cpu_inc_return(taskstats_seqnum) - 1;
  84 
  85                 reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
  86         } else
  87                 reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
  88         if (reply == NULL) {
  89                 nlmsg_free(skb);
  90                 return -EINVAL;
  91         }
  92 
  93         *skbp = skb;
  94         return 0;
  95 }
  96 
  97 /*
  98  * Send taskstats data in @skb back to the requester described by @info
  99  */
 100 static int send_reply(struct sk_buff *skb, struct genl_info *info)
 101 {
 102         struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
 103         void *reply = genlmsg_data(genlhdr);
 104 
 105         genlmsg_end(skb, reply);
 106 
 107         return genlmsg_reply(skb, info);
 108 }
 109 
 110 /*
 111  * Send taskstats data in @skb to the per-CPU exit-data listeners in @listeners
 112  */
 113 static void send_cpu_listeners(struct sk_buff *skb,
 114                                         struct listener_list *listeners)
 115 {
 116         struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
 117         struct listener *s, *tmp;
 118         struct sk_buff *skb_next, *skb_cur = skb;
 119         void *reply = genlmsg_data(genlhdr);
 120         int rc, delcount = 0;
 121 
 122         genlmsg_end(skb, reply);
 123 
 124         rc = 0;
 125         down_read(&listeners->sem);
 126         list_for_each_entry(s, &listeners->list, list) {
 127                 skb_next = NULL;
 128                 if (!list_is_last(&s->list, &listeners->list)) {
 129                         skb_next = skb_clone(skb_cur, GFP_KERNEL);
 130                         if (!skb_next)
 131                                 break;
 132                 }
 133                 rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
 134                 if (rc == -ECONNREFUSED) {
 135                         s->valid = 0;
 136                         delcount++;
 137                 }
 138                 skb_cur = skb_next;
 139         }
 140         up_read(&listeners->sem);
 141 
 142         if (skb_cur)
 143                 nlmsg_free(skb_cur);
 144 
 145         if (!delcount)
 146                 return;
 147 
 148         /* Delete invalidated entries */
 149         down_write(&listeners->sem);
 150         list_for_each_entry_safe(s, tmp, &listeners->list, list) {
 151                 if (!s->valid) {
 152                         list_del(&s->list);
 153                         kfree(s);
 154                 }
 155         }
 156         up_write(&listeners->sem);
 157 }
 158 
 159 static void fill_stats(struct user_namespace *user_ns,
 160                        struct pid_namespace *pid_ns,
 161                        struct task_struct *tsk, struct taskstats *stats)
 162 {
 163         memset(stats, 0, sizeof(*stats));
 164         /*
 165          * Each accounting subsystem adds calls to its functions to
 166          * fill in relevant parts of struct taskstats as follows
 167          *
 168          *      per-task-foo(stats, tsk);
 169          */
 170 
 171         delayacct_add_tsk(stats, tsk);
 172 
 173         /* fill in basic acct fields */
 174         stats->version = TASKSTATS_VERSION;
 175         stats->nvcsw = tsk->nvcsw;
 176         stats->nivcsw = tsk->nivcsw;
 177         bacct_add_tsk(user_ns, pid_ns, stats, tsk);
 178 
 179         /* fill in extended acct fields */
 180         xacct_add_tsk(stats, tsk);
 181 }
 182 
 183 static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
 184 {
 185         struct task_struct *tsk;
 186 
 187         tsk = find_get_task_by_vpid(pid);
 188         if (!tsk)
 189                 return -ESRCH;
 190         fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
 191         put_task_struct(tsk);
 192         return 0;
 193 }
 194 
 195 static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
 196 {
 197         struct task_struct *tsk, *first;
 198         unsigned long flags;
 199         int rc = -ESRCH;
 200         u64 delta, utime, stime;
 201         u64 start_time;
 202 
 203         /*
 204          * Add additional stats from live tasks except zombie thread group
 205          * leaders who are already counted with the dead tasks
 206          */
 207         rcu_read_lock();
 208         first = find_task_by_vpid(tgid);
 209 
 210         if (!first || !lock_task_sighand(first, &flags))
 211                 goto out;
 212 
 213         if (first->signal->stats)
 214                 memcpy(stats, first->signal->stats, sizeof(*stats));
 215         else
 216                 memset(stats, 0, sizeof(*stats));
 217 
 218         tsk = first;
 219         start_time = ktime_get_ns();
 220         do {
 221                 if (tsk->exit_state)
 222                         continue;
 223                 /*
 224                  * Each accounting subsystem can call its functions here to
 225                  * fill in relevant parts of struct taskstats as follows
 226                  *
 227                  *      per-task-foo(stats, tsk);
 228                  */
 229                 delayacct_add_tsk(stats, tsk);
 230 
 231                 /* calculate task elapsed time in nsec */
 232                 delta = start_time - tsk->start_time;
 233                 /* Convert to microseconds */
 234                 do_div(delta, NSEC_PER_USEC);
 235                 stats->ac_etime += delta;
 236 
 237                 task_cputime(tsk, &utime, &stime);
 238                 stats->ac_utime += div_u64(utime, NSEC_PER_USEC);
 239                 stats->ac_stime += div_u64(stime, NSEC_PER_USEC);
 240 
 241                 stats->nvcsw += tsk->nvcsw;
 242                 stats->nivcsw += tsk->nivcsw;
 243         } while_each_thread(first, tsk);
 244 
 245         unlock_task_sighand(first, &flags);
 246         rc = 0;
 247 out:
 248         rcu_read_unlock();
 249 
 250         stats->version = TASKSTATS_VERSION;
 251         /*
 252          * Accounting subsystems can also add calls here to modify
 253          * fields of taskstats.
 254          */
 255         return rc;
 256 }
 257 
 258 static void fill_tgid_exit(struct task_struct *tsk)
 259 {
 260         unsigned long flags;
 261 
 262         spin_lock_irqsave(&tsk->sighand->siglock, flags);
 263         if (!tsk->signal->stats)
 264                 goto ret;
 265 
 266         /*
 267          * Each accounting subsystem calls its functions here to
 268          * accumulate its per-task stats for tsk into the per-tgid structure
 269          *
 270          *      per-task-foo(tsk->signal->stats, tsk);
 271          */
 272         delayacct_add_tsk(tsk->signal->stats, tsk);
 273 ret:
 274         spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 275         return;
 276 }
 277 
 278 static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
 279 {
 280         struct listener_list *listeners;
 281         struct listener *s, *tmp, *s2;
 282         unsigned int cpu;
 283         int ret = 0;
 284 
 285         if (!cpumask_subset(mask, cpu_possible_mask))
 286                 return -EINVAL;
 287 
 288         if (current_user_ns() != &init_user_ns)
 289                 return -EINVAL;
 290 
 291         if (task_active_pid_ns(current) != &init_pid_ns)
 292                 return -EINVAL;
 293 
 294         if (isadd == REGISTER) {
 295                 for_each_cpu(cpu, mask) {
 296                         s = kmalloc_node(sizeof(struct listener),
 297                                         GFP_KERNEL, cpu_to_node(cpu));
 298                         if (!s) {
 299                                 ret = -ENOMEM;
 300                                 goto cleanup;
 301                         }
 302                         s->pid = pid;
 303                         s->valid = 1;
 304 
 305                         listeners = &per_cpu(listener_array, cpu);
 306                         down_write(&listeners->sem);
 307                         list_for_each_entry(s2, &listeners->list, list) {
 308                                 if (s2->pid == pid && s2->valid)
 309                                         goto exists;
 310                         }
 311                         list_add(&s->list, &listeners->list);
 312                         s = NULL;
 313 exists:
 314                         up_write(&listeners->sem);
 315                         kfree(s); /* nop if NULL */
 316                 }
 317                 return 0;
 318         }
 319 
 320         /* Deregister or cleanup */
 321 cleanup:
 322         for_each_cpu(cpu, mask) {
 323                 listeners = &per_cpu(listener_array, cpu);
 324                 down_write(&listeners->sem);
 325                 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
 326                         if (s->pid == pid) {
 327                                 list_del(&s->list);
 328                                 kfree(s);
 329                                 break;
 330                         }
 331                 }
 332                 up_write(&listeners->sem);
 333         }
 334         return ret;
 335 }
 336 
 337 static int parse(struct nlattr *na, struct cpumask *mask)
 338 {
 339         char *data;
 340         int len;
 341         int ret;
 342 
 343         if (na == NULL)
 344                 return 1;
 345         len = nla_len(na);
 346         if (len > TASKSTATS_CPUMASK_MAXLEN)
 347                 return -E2BIG;
 348         if (len < 1)
 349                 return -EINVAL;
 350         data = kmalloc(len, GFP_KERNEL);
 351         if (!data)
 352                 return -ENOMEM;
 353         nla_strlcpy(data, na, len);
 354         ret = cpulist_parse(data, mask);
 355         kfree(data);
 356         return ret;
 357 }
 358 
 359 static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
 360 {
 361         struct nlattr *na, *ret;
 362         int aggr;
 363 
 364         aggr = (type == TASKSTATS_TYPE_PID)
 365                         ? TASKSTATS_TYPE_AGGR_PID
 366                         : TASKSTATS_TYPE_AGGR_TGID;
 367 
 368         na = nla_nest_start_noflag(skb, aggr);
 369         if (!na)
 370                 goto err;
 371 
 372         if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
 373                 nla_nest_cancel(skb, na);
 374                 goto err;
 375         }
 376         ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
 377                                 sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
 378         if (!ret) {
 379                 nla_nest_cancel(skb, na);
 380                 goto err;
 381         }
 382         nla_nest_end(skb, na);
 383 
 384         return nla_data(ret);
 385 err:
 386         return NULL;
 387 }
 388 
 389 static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 390 {
 391         int rc = 0;
 392         struct sk_buff *rep_skb;
 393         struct cgroupstats *stats;
 394         struct nlattr *na;
 395         size_t size;
 396         u32 fd;
 397         struct fd f;
 398 
 399         na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
 400         if (!na)
 401                 return -EINVAL;
 402 
 403         fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
 404         f = fdget(fd);
 405         if (!f.file)
 406                 return 0;
 407 
 408         size = nla_total_size(sizeof(struct cgroupstats));
 409 
 410         rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
 411                                 size);
 412         if (rc < 0)
 413                 goto err;
 414 
 415         na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
 416                                 sizeof(struct cgroupstats));
 417         if (na == NULL) {
 418                 nlmsg_free(rep_skb);
 419                 rc = -EMSGSIZE;
 420                 goto err;
 421         }
 422 
 423         stats = nla_data(na);
 424         memset(stats, 0, sizeof(*stats));
 425 
 426         rc = cgroupstats_build(stats, f.file->f_path.dentry);
 427         if (rc < 0) {
 428                 nlmsg_free(rep_skb);
 429                 goto err;
 430         }
 431 
 432         rc = send_reply(rep_skb, info);
 433 
 434 err:
 435         fdput(f);
 436         return rc;
 437 }
 438 
 439 static int cmd_attr_register_cpumask(struct genl_info *info)
 440 {
 441         cpumask_var_t mask;
 442         int rc;
 443 
 444         if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 445                 return -ENOMEM;
 446         rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
 447         if (rc < 0)
 448                 goto out;
 449         rc = add_del_listener(info->snd_portid, mask, REGISTER);
 450 out:
 451         free_cpumask_var(mask);
 452         return rc;
 453 }
 454 
 455 static int cmd_attr_deregister_cpumask(struct genl_info *info)
 456 {
 457         cpumask_var_t mask;
 458         int rc;
 459 
 460         if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 461                 return -ENOMEM;
 462         rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
 463         if (rc < 0)
 464                 goto out;
 465         rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
 466 out:
 467         free_cpumask_var(mask);
 468         return rc;
 469 }
 470 
 471 static size_t taskstats_packet_size(void)
 472 {
 473         size_t size;
 474 
 475         size = nla_total_size(sizeof(u32)) +
 476                 nla_total_size_64bit(sizeof(struct taskstats)) +
 477                 nla_total_size(0);
 478 
 479         return size;
 480 }
 481 
 482 static int cmd_attr_pid(struct genl_info *info)
 483 {
 484         struct taskstats *stats;
 485         struct sk_buff *rep_skb;
 486         size_t size;
 487         u32 pid;
 488         int rc;
 489 
 490         size = taskstats_packet_size();
 491 
 492         rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 493         if (rc < 0)
 494                 return rc;
 495 
 496         rc = -EINVAL;
 497         pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
 498         stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
 499         if (!stats)
 500                 goto err;
 501 
 502         rc = fill_stats_for_pid(pid, stats);
 503         if (rc < 0)
 504                 goto err;
 505         return send_reply(rep_skb, info);
 506 err:
 507         nlmsg_free(rep_skb);
 508         return rc;
 509 }
 510 
 511 static int cmd_attr_tgid(struct genl_info *info)
 512 {
 513         struct taskstats *stats;
 514         struct sk_buff *rep_skb;
 515         size_t size;
 516         u32 tgid;
 517         int rc;
 518 
 519         size = taskstats_packet_size();
 520 
 521         rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 522         if (rc < 0)
 523                 return rc;
 524 
 525         rc = -EINVAL;
 526         tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
 527         stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
 528         if (!stats)
 529                 goto err;
 530 
 531         rc = fill_stats_for_tgid(tgid, stats);
 532         if (rc < 0)
 533                 goto err;
 534         return send_reply(rep_skb, info);
 535 err:
 536         nlmsg_free(rep_skb);
 537         return rc;
 538 }
 539 
 540 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 541 {
 542         if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
 543                 return cmd_attr_register_cpumask(info);
 544         else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
 545                 return cmd_attr_deregister_cpumask(info);
 546         else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
 547                 return cmd_attr_pid(info);
 548         else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
 549                 return cmd_attr_tgid(info);
 550         else
 551                 return -EINVAL;
 552 }
 553 
 554 static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 555 {
 556         struct signal_struct *sig = tsk->signal;
 557         struct taskstats *stats_new, *stats;
 558 
 559         /* Pairs with smp_store_release() below. */
 560         stats = smp_load_acquire(&sig->stats);
 561         if (stats || thread_group_empty(tsk))
 562                 return stats;
 563 
 564         /* No problem if kmem_cache_zalloc() fails */
 565         stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
 566 
 567         spin_lock_irq(&tsk->sighand->siglock);
 568         stats = sig->stats;
 569         if (!stats) {
 570                 /*
 571                  * Pairs with the smp_load_acquire() above and orders
 572                  * the kmem_cache_zalloc().
 573                  */
 574                 smp_store_release(&sig->stats, stats_new);
 575                 stats = stats_new;
 576                 stats_new = NULL;
 577         }
 578         spin_unlock_irq(&tsk->sighand->siglock);
 579 
 580         if (stats_new)
 581                 kmem_cache_free(taskstats_cache, stats_new);
 582 
 583         return stats;
 584 }
 585 
 586 /* Send pid data out on exit */
 587 void taskstats_exit(struct task_struct *tsk, int group_dead)
 588 {
 589         int rc;
 590         struct listener_list *listeners;
 591         struct taskstats *stats;
 592         struct sk_buff *rep_skb;
 593         size_t size;
 594         int is_thread_group;
 595 
 596         if (!family_registered)
 597                 return;
 598 
 599         /*
 600          * Size includes space for nested attributes
 601          */
 602         size = taskstats_packet_size();
 603 
 604         is_thread_group = !!taskstats_tgid_alloc(tsk);
 605         if (is_thread_group) {
 606                 /* PID + STATS + TGID + STATS */
 607                 size = 2 * size;
 608                 /* fill the tsk->signal->stats structure */
 609                 fill_tgid_exit(tsk);
 610         }
 611 
 612         listeners = raw_cpu_ptr(&listener_array);
 613         if (list_empty(&listeners->list))
 614                 return;
 615 
 616         rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
 617         if (rc < 0)
 618                 return;
 619 
 620         stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
 621                          task_pid_nr_ns(tsk, &init_pid_ns));
 622         if (!stats)
 623                 goto err;
 624 
 625         fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);
 626 
 627         /*
 628          * Doesn't matter if tsk is the leader or the last group member leaving
 629          */
 630         if (!is_thread_group || !group_dead)
 631                 goto send;
 632 
 633         stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
 634                          task_tgid_nr_ns(tsk, &init_pid_ns));
 635         if (!stats)
 636                 goto err;
 637 
 638         memcpy(stats, tsk->signal->stats, sizeof(*stats));
 639 
 640 send:
 641         send_cpu_listeners(rep_skb, listeners);
 642         return;
 643 err:
 644         nlmsg_free(rep_skb);
 645 }
 646 
 647 static const struct genl_ops taskstats_ops[] = {
 648         {
 649                 .cmd            = TASKSTATS_CMD_GET,
 650                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 651                 .doit           = taskstats_user_cmd,
 652                 /* policy enforced later */
 653                 .flags          = GENL_ADMIN_PERM | GENL_CMD_CAP_HASPOL,
 654         },
 655         {
 656                 .cmd            = CGROUPSTATS_CMD_GET,
 657                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 658                 .doit           = cgroupstats_user_cmd,
 659                 /* policy enforced later */
 660                 .flags          = GENL_CMD_CAP_HASPOL,
 661         },
 662 };
 663 
 664 static int taskstats_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
 665                               struct genl_info *info)
 666 {
 667         const struct nla_policy *policy = NULL;
 668 
 669         switch (ops->cmd) {
 670         case TASKSTATS_CMD_GET:
 671                 policy = taskstats_cmd_get_policy;
 672                 break;
 673         case CGROUPSTATS_CMD_GET:
 674                 policy = cgroupstats_cmd_get_policy;
 675                 break;
 676         default:
 677                 return -EINVAL;
 678         }
 679 
 680         return nlmsg_validate_deprecated(info->nlhdr, GENL_HDRLEN,
 681                                          TASKSTATS_CMD_ATTR_MAX, policy,
 682                                          info->extack);
 683 }
 684 
 685 static struct genl_family family __ro_after_init = {
 686         .name           = TASKSTATS_GENL_NAME,
 687         .version        = TASKSTATS_GENL_VERSION,
 688         .maxattr        = TASKSTATS_CMD_ATTR_MAX,
 689         .module         = THIS_MODULE,
 690         .ops            = taskstats_ops,
 691         .n_ops          = ARRAY_SIZE(taskstats_ops),
 692         .pre_doit       = taskstats_pre_doit,
 693 };
 694 
 695 /* Needed early in initialization */
 696 void __init taskstats_init_early(void)
 697 {
 698         unsigned int i;
 699 
 700         taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
 701         for_each_possible_cpu(i) {
 702                 INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
 703                 init_rwsem(&(per_cpu(listener_array, i).sem));
 704         }
 705 }
 706 
 707 static int __init taskstats_init(void)
 708 {
 709         int rc;
 710 
 711         rc = genl_register_family(&family);
 712         if (rc)
 713                 return rc;
 714 
 715         family_registered = 1;
 716         pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
 717         return 0;
 718 }
 719 
 720 /*
 721  * A late initcall ensures that the statistics-collection mechanisms are
 722  * initialized before the taskstats interface is registered.
 723  */
 724 late_initcall(taskstats_init);
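
LISTENER REGISTRATION SKETCH

The exit-record path above (add_del_listener(), taskstats_exit(), send_cpu_listeners()) is driven by the same genetlink command: a listener registers interest in a set of CPUs with TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, whose payload is a cpulist string interpreted by parse()/cpulist_parse(), and deregisters with TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK. The hedged sketch below reuses the includes and struct ts_req from the earlier sketch; the function name register_exit_listener and the "0-3" cpulist are illustrative only. Registration needs CAP_NET_ADMIN (GENL_ADMIN_PERM on the op) and, per add_del_listener(), the init user and pid namespaces.

/*
 * Hedged sketch (not part of kernel/taskstats.c): register this socket for
 * exit records from CPUs 0-3.  Assumes the headers and struct ts_req from
 * the earlier sketch.
 */
static int register_exit_listener(int sock, __u16 family_id)
{
        const char *cpus = "0-3";               /* cpulist format */
        struct ts_req req;
        struct nlattr *na;
        struct sockaddr_nl dst = { .nl_family = AF_NETLINK };

        memset(&req, 0, sizeof(req));
        req.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
        req.n.nlmsg_type = family_id;
        req.n.nlmsg_flags = NLM_F_REQUEST;
        req.g.cmd = TASKSTATS_CMD_GET;
        req.g.version = TASKSTATS_GENL_VERSION;

        /* Append TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = "0-3" (with NUL) */
        na = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.n.nlmsg_len));
        na->nla_type = TASKSTATS_CMD_ATTR_REGISTER_CPUMASK;
        na->nla_len = NLA_HDRLEN + strlen(cpus) + 1;
        memcpy((char *)na + NLA_HDRLEN, cpus, strlen(cpus) + 1);
        req.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);

        /*
         * Exit records then arrive on this socket as TASKSTATS_CMD_NEW
         * messages built by taskstats_exit() and fanned out by
         * send_cpu_listeners().
         */
        return sendto(sock, &req, req.n.nlmsg_len, 0,
                      (struct sockaddr *)&dst, sizeof(dst));
}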
