This source file includes the following definitions:
- task_pid
- task_pid_nr
- task_pid_nr_ns
- task_pid_vnr
- task_tgid_nr
- pid_alive
- task_pgrp_nr_ns
- task_pgrp_vnr
- task_session_nr_ns
- task_session_vnr
- task_tgid_nr_ns
- task_tgid_vnr
- task_ppid_nr_ns
- task_ppid_nr
- task_pgrp_nr
- task_state_index
- task_index_to_char
- task_state_to_char
- is_global_init
- is_percpu_thread
- TASK_PFA_TEST
- do_set_cpus_allowed
- set_cpus_allowed_ptr
- task_nice
- is_idle_task
- task_thread_info
- kick_process
- set_task_comm
- scheduler_ipi
- wait_task_inactive
- set_tsk_thread_flag
- clear_tsk_thread_flag
- update_tsk_thread_flag
- test_and_set_tsk_thread_flag
- test_and_clear_tsk_thread_flag
- test_tsk_thread_flag
- set_tsk_need_resched
- clear_tsk_need_resched
- test_tsk_need_resched
- _cond_resched
- cond_resched_rcu
- spin_needbreak
- need_resched
- task_cpu
- task_cpu
- set_task_cpu
- vcpu_is_preempted
- rseq_set_notify_resume
- rseq_handle_notify_resume
- rseq_signal_deliver
- rseq_preempt
- rseq_migrate
- rseq_fork
- rseq_execve
- rseq_set_notify_resume
- rseq_handle_notify_resume
- rseq_signal_deliver
- rseq_preempt
- rseq_migrate
- rseq_fork
- rseq_execve
- exit_umh
- rseq_syscall
   1 
   2 #ifndef _LINUX_SCHED_H
   3 #define _LINUX_SCHED_H
   4 
   5 
   6 
   7 
   8 
   9 
  10 #include <uapi/linux/sched.h>
  11 
  12 #include <asm/current.h>
  13 
  14 #include <linux/pid.h>
  15 #include <linux/sem.h>
  16 #include <linux/shm.h>
  17 #include <linux/kcov.h>
  18 #include <linux/mutex.h>
  19 #include <linux/plist.h>
  20 #include <linux/hrtimer.h>
  21 #include <linux/seccomp.h>
  22 #include <linux/nodemask.h>
  23 #include <linux/rcupdate.h>
  24 #include <linux/refcount.h>
  25 #include <linux/resource.h>
  26 #include <linux/latencytop.h>
  27 #include <linux/sched/prio.h>
  28 #include <linux/sched/types.h>
  29 #include <linux/signal_types.h>
  30 #include <linux/mm_types_task.h>
  31 #include <linux/task_io_accounting.h>
  32 #include <linux/posix-timers.h>
  33 #include <linux/rseq.h>
  34 
  35 
  36 struct audit_context;
  37 struct backing_dev_info;
  38 struct bio_list;
  39 struct blk_plug;
  40 struct capture_control;
  41 struct cfs_rq;
  42 struct fs_struct;
  43 struct futex_pi_state;
  44 struct io_context;
  45 struct mempolicy;
  46 struct nameidata;
  47 struct nsproxy;
  48 struct perf_event_context;
  49 struct pid_namespace;
  50 struct pipe_inode_info;
  51 struct rcu_node;
  52 struct reclaim_state;
  53 struct robust_list_head;
  54 struct root_domain;
  55 struct rq;
  56 struct sched_attr;
  57 struct sched_param;
  58 struct seq_file;
  59 struct sighand_struct;
  60 struct signal_struct;
  61 struct task_delay_info;
  62 struct task_group;
  63 
  64 
  65 
  66 
  67 
  68 
  69 
  70 
  71 
  72 
  73 
  74 
  75 
  76 #define TASK_RUNNING                    0x0000
  77 #define TASK_INTERRUPTIBLE              0x0001
  78 #define TASK_UNINTERRUPTIBLE            0x0002
  79 #define __TASK_STOPPED                  0x0004
  80 #define __TASK_TRACED                   0x0008
  81 
  82 #define EXIT_DEAD                       0x0010
  83 #define EXIT_ZOMBIE                     0x0020
  84 #define EXIT_TRACE                      (EXIT_ZOMBIE | EXIT_DEAD)
  85 
  86 #define TASK_PARKED                     0x0040
  87 #define TASK_DEAD                       0x0080
  88 #define TASK_WAKEKILL                   0x0100
  89 #define TASK_WAKING                     0x0200
  90 #define TASK_NOLOAD                     0x0400
  91 #define TASK_NEW                        0x0800
  92 #define TASK_STATE_MAX                  0x1000
  93 
  94 /* Convenience macros for the sake of set_current_state: */
  95 #define TASK_KILLABLE                   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
  96 #define TASK_STOPPED                    (TASK_WAKEKILL | __TASK_STOPPED)
  97 #define TASK_TRACED                     (TASK_WAKEKILL | __TASK_TRACED)
  98 
  99 #define TASK_IDLE                       (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
 100 
 101 /* Convenience macros for the sake of wake_up(): */
 102 #define TASK_NORMAL                     (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
 103 
 104 /* get_task_state(): */
 105 #define TASK_REPORT                     (TASK_RUNNING | TASK_INTERRUPTIBLE | \
 106                                          TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 107                                          __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 108                                          TASK_PARKED)
 109 
 110 #define task_is_traced(task)            ((task->state & __TASK_TRACED) != 0)
 111 
 112 #define task_is_stopped(task)           ((task->state & __TASK_STOPPED) != 0)
 113 
 114 #define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 115 
 116 #define task_contributes_to_load(task)  ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 117                                          (task->flags & PF_FROZEN) == 0 && \
 118                                          (task->state & TASK_NOLOAD) == 0)
 119 
 120 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 121 
 122 /*
 123  * Special states are those that do not use the normal wait-loop pattern. See
 124  * the comment with set_special_state().
 125  */
 126 #define is_special_task_state(state)                            \
 127         ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
 128 
 129 #define __set_current_state(state_value)                        \
 130         do {                                                    \
 131                 WARN_ON_ONCE(is_special_task_state(state_value));\
 132                 current->task_state_change = _THIS_IP_;         \
 133                 current->state = (state_value);                 \
 134         } while (0)
 135 
 136 #define set_current_state(state_value)                          \
 137         do {                                                    \
 138                 WARN_ON_ONCE(is_special_task_state(state_value));\
 139                 current->task_state_change = _THIS_IP_;         \
 140                 smp_store_mb(current->state, (state_value));    \
 141         } while (0)
 142 
 143 #define set_special_state(state_value)                                  \
 144         do {                                                            \
 145                 unsigned long flags; /* may shadow */                   \
 146                 WARN_ON_ONCE(!is_special_task_state(state_value));      \
 147                 raw_spin_lock_irqsave(&current->pi_lock, flags);        \
 148                 current->task_state_change = _THIS_IP_;                 \
 149                 current->state = (state_value);                         \
 150                 raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
 151         } while (0)
 152 #else
 153 
 154 
 155 
 156 
 157 
 158 
 159 
 160 
 161 
 162 
 163 
 164 
 165 
 166 
 167 
 168 
 169 
 170 
 171 
 172 
 173 
 174 
 175 
 176 
 177 
 178 
 179 
 180 
 181 
 182 
 183 
 184 
 185 
 186 
 187 
 188 
 189 
 190 #define __set_current_state(state_value)                                \
 191         current->state = (state_value)
 192 
 193 #define set_current_state(state_value)                                  \
 194         smp_store_mb(current->state, (state_value))
 195 
 196 
 197 
 198 
 199 
 200 
 201 
 202 #define set_special_state(state_value)                                  \
 203         do {                                                            \
 204                 unsigned long flags; /* may shadow */                   \
 205                 raw_spin_lock_irqsave(&current->pi_lock, flags);        \
 206                 current->state = (state_value);                         \
 207                 raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
 208         } while (0)
 209 
 210 #endif
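
Annotation (not part of the header): the canonical wait loop these state helpers exist for, as a minimal sketch; "my_condition" and the waker are hypothetical. set_current_state() uses smp_store_mb(), so the state store is ordered before the condition re-check and pairs with the barrier implied by the waker's wake_up():

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_condition)		/* hypothetical wakeup condition */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
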
 211 
 212 /* Task command name length: */
 213 #define TASK_COMM_LEN                   16
 214 
 215 extern void scheduler_tick(void);
 216 
 217 #define MAX_SCHEDULE_TIMEOUT            LONG_MAX
 218 
 219 extern long schedule_timeout(long timeout);
 220 extern long schedule_timeout_interruptible(long timeout);
 221 extern long schedule_timeout_killable(long timeout);
 222 extern long schedule_timeout_uninterruptible(long timeout);
 223 extern long schedule_timeout_idle(long timeout);
 224 asmlinkage void schedule(void);
 225 extern void schedule_preempt_disabled(void);
 226 asmlinkage void preempt_schedule_irq(void);
 227 
 228 extern int __must_check io_schedule_prepare(void);
 229 extern void io_schedule_finish(int token);
 230 extern long io_schedule_timeout(long timeout);
 231 extern void io_schedule(void);
 232 
 233 
 234 
 235 
 236 
 237 
 238 
 239 
 240 
 241 
 242 struct prev_cputime {
 243 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 244         u64                             utime;
 245         u64                             stime;
 246         raw_spinlock_t                  lock;
 247 #endif
 248 };
 249 
 250 enum vtime_state {
 251         /* Task is sleeping or running in a CPU with VTIME inactive: */
 252         VTIME_INACTIVE = 0,
 253         /* Task runs in userspace in a CPU with VTIME active: */
 254         VTIME_USER,
 255         /* Task runs in kernelspace in a CPU with VTIME active: */
 256         VTIME_SYS,
 257 };
 258 
 259 struct vtime {
 260         seqcount_t              seqcount;
 261         unsigned long long      starttime;
 262         enum vtime_state        state;
 263         u64                     utime;
 264         u64                     stime;
 265         u64                     gtime;
 266 };
 267 
 268 
 269 
 270 
 271 
 272 
 273 
 274 enum uclamp_id {
 275         UCLAMP_MIN = 0,
 276         UCLAMP_MAX,
 277         UCLAMP_CNT
 278 };
 279 
 280 #ifdef CONFIG_SMP
 281 extern struct root_domain def_root_domain;
 282 extern struct mutex sched_domains_mutex;
 283 #endif
 284 
 285 struct sched_info {
 286 #ifdef CONFIG_SCHED_INFO
 287         /* Cumulative counters: */
 288 
 289         /* # of times we have run on this CPU: */
 290         unsigned long                   pcount;
 291 
 292         /* Time spent waiting on a runqueue: */
 293         unsigned long long              run_delay;
 294 
 295         /* Timestamps: */
 296 
 297         /* When did we last run on a CPU? */
 298         unsigned long long              last_arrival;
 299 
 300         /* When were we last queued to run? */
 301         unsigned long long              last_queued;
 302 
 303 #endif /* CONFIG_SCHED_INFO */
 304 };
 305 
 306 
 307 
 308 
 309 
 310 
 311 
 312 
 313 # define SCHED_FIXEDPOINT_SHIFT         10
 314 # define SCHED_FIXEDPOINT_SCALE         (1L << SCHED_FIXEDPOINT_SHIFT)
 315 
 316 
 317 # define SCHED_CAPACITY_SHIFT           SCHED_FIXEDPOINT_SHIFT
 318 # define SCHED_CAPACITY_SCALE           (1L << SCHED_CAPACITY_SHIFT)
 319 
 320 struct load_weight {
 321         unsigned long                   weight;
 322         u32                             inv_weight;
 323 };
 324 
 325 
 326 
 327 
 328 
 329 
 330 
 331 
 332 
 333 
 334 
 335 
 336 
 337 
 338 
 339 
 340 
 341 
 342 
 343 
 344 
 345 
 346 
 347 struct util_est {
 348         unsigned int                    enqueued;
 349         unsigned int                    ewma;
 350 #define UTIL_EST_WEIGHT_SHIFT           2
 351 } __attribute__((__aligned__(sizeof(u64))));
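
Annotation: util_est.ewma holds an exponentially weighted moving average of the task's utilization sampled at dequeue time; with UTIL_EST_WEIGHT_SHIFT == 2 the newest sample carries weight w = 1/4:

	ewma(t) = w * last_util + (1 - w) * ewma(t - 1),  w = 1 / 2^UTIL_EST_WEIGHT_SHIFT = 1/4
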
 352 
 353 
 354 
 355 
 356 
 357 
 358 
 359 
 360 
 361 
 362 
 363 
 364 
 365 
 366 
 367 
 368 
 369 
 370 
 371 
 372 
 373 
 374 
 375 
 376 
 377 
 378 
 379 
 380 
 381 
 382 
 383 
 384 
 385 
 386 
 387 
 388 
 389 
 390 
 391 
 392 
 393 
 394 
 395 
 396 struct sched_avg {
 397         u64                             last_update_time;
 398         u64                             load_sum;
 399         u64                             runnable_load_sum;
 400         u32                             util_sum;
 401         u32                             period_contrib;
 402         unsigned long                   load_avg;
 403         unsigned long                   runnable_load_avg;
 404         unsigned long                   util_avg;
 405         struct util_est                 util_est;
 406 } ____cacheline_aligned;
 407 
 408 struct sched_statistics {
 409 #ifdef CONFIG_SCHEDSTATS
 410         u64                             wait_start;
 411         u64                             wait_max;
 412         u64                             wait_count;
 413         u64                             wait_sum;
 414         u64                             iowait_count;
 415         u64                             iowait_sum;
 416 
 417         u64                             sleep_start;
 418         u64                             sleep_max;
 419         s64                             sum_sleep_runtime;
 420 
 421         u64                             block_start;
 422         u64                             block_max;
 423         u64                             exec_max;
 424         u64                             slice_max;
 425 
 426         u64                             nr_migrations_cold;
 427         u64                             nr_failed_migrations_affine;
 428         u64                             nr_failed_migrations_running;
 429         u64                             nr_failed_migrations_hot;
 430         u64                             nr_forced_migrations;
 431 
 432         u64                             nr_wakeups;
 433         u64                             nr_wakeups_sync;
 434         u64                             nr_wakeups_migrate;
 435         u64                             nr_wakeups_local;
 436         u64                             nr_wakeups_remote;
 437         u64                             nr_wakeups_affine;
 438         u64                             nr_wakeups_affine_attempts;
 439         u64                             nr_wakeups_passive;
 440         u64                             nr_wakeups_idle;
 441 #endif
 442 };
 443 
 444 struct sched_entity {
 445         
 446         struct load_weight              load;
 447         unsigned long                   runnable_weight;
 448         struct rb_node                  run_node;
 449         struct list_head                group_node;
 450         unsigned int                    on_rq;
 451 
 452         u64                             exec_start;
 453         u64                             sum_exec_runtime;
 454         u64                             vruntime;
 455         u64                             prev_sum_exec_runtime;
 456 
 457         u64                             nr_migrations;
 458 
 459         struct sched_statistics         statistics;
 460 
 461 #ifdef CONFIG_FAIR_GROUP_SCHED
 462         int                             depth;
 463         struct sched_entity             *parent;
 464         
 465         struct cfs_rq                   *cfs_rq;
 466         
 467         struct cfs_rq                   *my_q;
 468 #endif
 469 
 470 #ifdef CONFIG_SMP
 471         
 472 
 473 
 474 
 475 
 476 
 477         struct sched_avg                avg;
 478 #endif
 479 };
 480 
 481 struct sched_rt_entity {
 482         struct list_head                run_list;
 483         unsigned long                   timeout;
 484         unsigned long                   watchdog_stamp;
 485         unsigned int                    time_slice;
 486         unsigned short                  on_rq;
 487         unsigned short                  on_list;
 488 
 489         struct sched_rt_entity          *back;
 490 #ifdef CONFIG_RT_GROUP_SCHED
 491         struct sched_rt_entity          *parent;
 492         
 493         struct rt_rq                    *rt_rq;
 494         
 495         struct rt_rq                    *my_q;
 496 #endif
 497 } __randomize_layout;
 498 
 499 struct sched_dl_entity {
 500         struct rb_node                  rb_node;
 501 
 502         
 503 
 504 
 505 
 506 
 507         u64                             dl_runtime;     /* Maximum runtime for each instance    */
 508         u64                             dl_deadline;    /* Relative deadline of each instance   */
 509         u64                             dl_period;      /* Separation of two instances (period) */
 510         u64                             dl_bw;          /* dl_runtime / dl_period               */
 511         u64                             dl_density;     /* dl_runtime / dl_deadline             */
 512 
 513         
 514 
 515 
 516 
 517 
 518         s64                             runtime;        /* Remaining runtime for this instance  */
 519         u64                             deadline;       /* Absolute deadline for this instance  */
 520         unsigned int                    flags;          /* Specifying the scheduler behaviour   */
 521 
 522         
 523 
 524 
 525 
 526 
 527 
 528 
 529 
 530 
 531 
 532 
 533 
 534 
 535 
 536 
 537 
 538 
 539 
 540 
 541 
 542 
 543 
 544 
 545 
 546         unsigned int                    dl_throttled      : 1;
 547         unsigned int                    dl_boosted        : 1;
 548         unsigned int                    dl_yielded        : 1;
 549         unsigned int                    dl_non_contending : 1;
 550         unsigned int                    dl_overrun        : 1;
 551 
 552         
 553 
 554 
 555 
 556         struct hrtimer                  dl_timer;
 557 
 558         
 559 
 560 
 561 
 562 
 563 
 564 
 565         struct hrtimer inactive_timer;
 566 };
 567 
 568 #ifdef CONFIG_UCLAMP_TASK
 569 
 570 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
 571 
 572 
 573 
 574 
 575 
 576 
 577 
 578 
 579 
 580 
 581 
 582 
 583 
 584 
 585 
 586 
 587 
 588 
 589 
 590 
 591 
 592 
 593 
 594 
 595 struct uclamp_se {
 596         unsigned int value              : bits_per(SCHED_CAPACITY_SCALE);
 597         unsigned int bucket_id          : bits_per(UCLAMP_BUCKETS);
 598         unsigned int active             : 1;
 599         unsigned int user_defined       : 1;
 600 };
 601 #endif 
 602 
 603 union rcu_special {
 604         struct {
 605                 u8                      blocked;
 606                 u8                      need_qs;
 607                 u8                      exp_hint; 
 608                 u8                      deferred_qs;
 609         } b; 
 610         u32 s; 
 611 };
 612 
 613 enum perf_event_task_context {
 614         perf_invalid_context = -1,
 615         perf_hw_context = 0,
 616         perf_sw_context,
 617         perf_nr_task_contexts,
 618 };
 619 
 620 struct wake_q_node {
 621         struct wake_q_node *next;
 622 };
 623 
 624 struct task_struct {
 625 #ifdef CONFIG_THREAD_INFO_IN_TASK
 626         
 627 
 628 
 629 
 630         struct thread_info              thread_info;
 631 #endif
 632         /* -1 unrunnable, 0 runnable, >0 stopped: */
 633         volatile long                   state;
 634 
 635         
 636 
 637 
 638 
 639         randomized_struct_fields_start
 640 
 641         void                            *stack;
 642         refcount_t                      usage;
 643         
 644         unsigned int                    flags;
 645         unsigned int                    ptrace;
 646 
 647 #ifdef CONFIG_SMP
 648         struct llist_node               wake_entry;
 649         int                             on_cpu;
 650 #ifdef CONFIG_THREAD_INFO_IN_TASK
 651         
 652         unsigned int                    cpu;
 653 #endif
 654         unsigned int                    wakee_flips;
 655         unsigned long                   wakee_flip_decay_ts;
 656         struct task_struct              *last_wakee;
 657 
 658         
 659 
 660 
 661 
 662 
 663 
 664 
 665         int                             recent_used_cpu;
 666         int                             wake_cpu;
 667 #endif
 668         int                             on_rq;
 669 
 670         int                             prio;
 671         int                             static_prio;
 672         int                             normal_prio;
 673         unsigned int                    rt_priority;
 674 
 675         const struct sched_class        *sched_class;
 676         struct sched_entity             se;
 677         struct sched_rt_entity          rt;
 678 #ifdef CONFIG_CGROUP_SCHED
 679         struct task_group               *sched_task_group;
 680 #endif
 681         struct sched_dl_entity          dl;
 682 
 683 #ifdef CONFIG_UCLAMP_TASK
 684         
 685         struct uclamp_se                uclamp_req[UCLAMP_CNT];
 686         
 687         struct uclamp_se                uclamp[UCLAMP_CNT];
 688 #endif
 689 
 690 #ifdef CONFIG_PREEMPT_NOTIFIERS
 691         
 692         struct hlist_head               preempt_notifiers;
 693 #endif
 694 
 695 #ifdef CONFIG_BLK_DEV_IO_TRACE
 696         unsigned int                    btrace_seq;
 697 #endif
 698 
 699         unsigned int                    policy;
 700         int                             nr_cpus_allowed;
 701         const cpumask_t                 *cpus_ptr;
 702         cpumask_t                       cpus_mask;
 703 
 704 #ifdef CONFIG_PREEMPT_RCU
 705         int                             rcu_read_lock_nesting;
 706         union rcu_special               rcu_read_unlock_special;
 707         struct list_head                rcu_node_entry;
 708         struct rcu_node                 *rcu_blocked_node;
 709 #endif 
 710 
 711 #ifdef CONFIG_TASKS_RCU
 712         unsigned long                   rcu_tasks_nvcsw;
 713         u8                              rcu_tasks_holdout;
 714         u8                              rcu_tasks_idx;
 715         int                             rcu_tasks_idle_cpu;
 716         struct list_head                rcu_tasks_holdout_list;
 717 #endif 
 718 
 719         struct sched_info               sched_info;
 720 
 721         struct list_head                tasks;
 722 #ifdef CONFIG_SMP
 723         struct plist_node               pushable_tasks;
 724         struct rb_node                  pushable_dl_tasks;
 725 #endif
 726 
 727         struct mm_struct                *mm;
 728         struct mm_struct                *active_mm;
 729 
 730         
 731         struct vmacache                 vmacache;
 732 
 733 #ifdef SPLIT_RSS_COUNTING
 734         struct task_rss_stat            rss_stat;
 735 #endif
 736         int                             exit_state;
 737         int                             exit_code;
 738         int                             exit_signal;
 739         
 740         int                             pdeath_signal;
 741         
 742         unsigned long                   jobctl;
 743 
 744         
 745         unsigned int                    personality;
 746 
 747         
 748         unsigned                        sched_reset_on_fork:1;
 749         unsigned                        sched_contributes_to_load:1;
 750         unsigned                        sched_migrated:1;
 751         unsigned                        sched_remote_wakeup:1;
 752 #ifdef CONFIG_PSI
 753         unsigned                        sched_psi_wake_requeue:1;
 754 #endif
 755 
 756         
 757         unsigned                        :0;
 758 
 759         
 760 
 761         
 762         unsigned                        in_execve:1;
 763         unsigned                        in_iowait:1;
 764 #ifndef TIF_RESTORE_SIGMASK
 765         unsigned                        restore_sigmask:1;
 766 #endif
 767 #ifdef CONFIG_MEMCG
 768         unsigned                        in_user_fault:1;
 769 #endif
 770 #ifdef CONFIG_COMPAT_BRK
 771         unsigned                        brk_randomized:1;
 772 #endif
 773 #ifdef CONFIG_CGROUPS
 774         
 775         unsigned                        no_cgroup_migration:1;
 776         
 777         unsigned                        frozen:1;
 778 #endif
 779 #ifdef CONFIG_BLK_CGROUP
 780         
 781         unsigned                        use_memdelay:1;
 782 #endif
 783 
 784         unsigned long                   atomic_flags; 
 785 
 786         struct restart_block            restart_block;
 787 
 788         pid_t                           pid;
 789         pid_t                           tgid;
 790 
 791 #ifdef CONFIG_STACKPROTECTOR
 792         
 793         unsigned long                   stack_canary;
 794 #endif
 795         
 796 
 797 
 798 
 799 
 800 
 801         
 802         struct task_struct __rcu        *real_parent;
 803 
 804         
 805         struct task_struct __rcu        *parent;
 806 
 807         
 808 
 809 
 810         struct list_head                children;
 811         struct list_head                sibling;
 812         struct task_struct              *group_leader;
 813 
 814         
 815 
 816 
 817 
 818 
 819 
 820         struct list_head                ptraced;
 821         struct list_head                ptrace_entry;
 822 
 823         
 824         struct pid                      *thread_pid;
 825         struct hlist_node               pid_links[PIDTYPE_MAX];
 826         struct list_head                thread_group;
 827         struct list_head                thread_node;
 828 
 829         struct completion               *vfork_done;
 830 
 831         
 832         int __user                      *set_child_tid;
 833 
 834         
 835         int __user                      *clear_child_tid;
 836 
 837         u64                             utime;
 838         u64                             stime;
 839 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 840         u64                             utimescaled;
 841         u64                             stimescaled;
 842 #endif
 843         u64                             gtime;
 844         struct prev_cputime             prev_cputime;
 845 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 846         struct vtime                    vtime;
 847 #endif
 848 
 849 #ifdef CONFIG_NO_HZ_FULL
 850         atomic_t                        tick_dep_mask;
 851 #endif
 852         
 853         unsigned long                   nvcsw;
 854         unsigned long                   nivcsw;
 855 
 856         
 857         u64                             start_time;
 858 
 859         
 860         u64                             real_start_time;
 861 
 862         
 863         unsigned long                   min_flt;
 864         unsigned long                   maj_flt;
 865 
 866         
 867         struct posix_cputimers          posix_cputimers;
 868 
 869         
 870 
 871         
 872         const struct cred __rcu         *ptracer_cred;
 873 
 874         
 875         const struct cred __rcu         *real_cred;
 876 
 877         
 878         const struct cred __rcu         *cred;
 879 
 880 #ifdef CONFIG_KEYS
 881         
 882         struct key                      *cached_requested_key;
 883 #endif
 884 
 885         
 886 
 887 
 888 
 889 
 890 
 891 
 892         char                            comm[TASK_COMM_LEN];
 893 
 894         struct nameidata                *nameidata;
 895 
 896 #ifdef CONFIG_SYSVIPC
 897         struct sysv_sem                 sysvsem;
 898         struct sysv_shm                 sysvshm;
 899 #endif
 900 #ifdef CONFIG_DETECT_HUNG_TASK
 901         unsigned long                   last_switch_count;
 902         unsigned long                   last_switch_time;
 903 #endif
 904         
 905         struct fs_struct                *fs;
 906 
 907         
 908         struct files_struct             *files;
 909 
 910         
 911         struct nsproxy                  *nsproxy;
 912 
 913         
 914         struct signal_struct            *signal;
 915         struct sighand_struct           *sighand;
 916         sigset_t                        blocked;
 917         sigset_t                        real_blocked;
 918         
 919         sigset_t                        saved_sigmask;
 920         struct sigpending               pending;
 921         unsigned long                   sas_ss_sp;
 922         size_t                          sas_ss_size;
 923         unsigned int                    sas_ss_flags;
 924 
 925         struct callback_head            *task_works;
 926 
 927 #ifdef CONFIG_AUDIT
 928 #ifdef CONFIG_AUDITSYSCALL
 929         struct audit_context            *audit_context;
 930 #endif
 931         kuid_t                          loginuid;
 932         unsigned int                    sessionid;
 933 #endif
 934         struct seccomp                  seccomp;
 935 
 936         
 937         u64                             parent_exec_id;
 938         u64                             self_exec_id;
 939 
 940         
 941         spinlock_t                      alloc_lock;
 942 
 943         
 944         raw_spinlock_t                  pi_lock;
 945 
 946         struct wake_q_node              wake_q;
 947 
 948 #ifdef CONFIG_RT_MUTEXES
 949         
 950         struct rb_root_cached           pi_waiters;
 951         
 952         struct task_struct              *pi_top_task;
 953         
 954         struct rt_mutex_waiter          *pi_blocked_on;
 955 #endif
 956 
 957 #ifdef CONFIG_DEBUG_MUTEXES
 958         
 959         struct mutex_waiter             *blocked_on;
 960 #endif
 961 
 962 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 963         int                             non_block_count;
 964 #endif
 965 
 966 #ifdef CONFIG_TRACE_IRQFLAGS
 967         unsigned int                    irq_events;
 968         unsigned long                   hardirq_enable_ip;
 969         unsigned long                   hardirq_disable_ip;
 970         unsigned int                    hardirq_enable_event;
 971         unsigned int                    hardirq_disable_event;
 972         int                             hardirqs_enabled;
 973         int                             hardirq_context;
 974         unsigned long                   softirq_disable_ip;
 975         unsigned long                   softirq_enable_ip;
 976         unsigned int                    softirq_disable_event;
 977         unsigned int                    softirq_enable_event;
 978         int                             softirqs_enabled;
 979         int                             softirq_context;
 980 #endif
 981 
 982 #ifdef CONFIG_LOCKDEP
 983 # define MAX_LOCK_DEPTH                 48UL
 984         u64                             curr_chain_key;
 985         int                             lockdep_depth;
 986         unsigned int                    lockdep_recursion;
 987         struct held_lock                held_locks[MAX_LOCK_DEPTH];
 988 #endif
 989 
 990 #ifdef CONFIG_UBSAN
 991         unsigned int                    in_ubsan;
 992 #endif
 993 
 994         
 995         void                            *journal_info;
 996 
 997         
 998         struct bio_list                 *bio_list;
 999 
1000 #ifdef CONFIG_BLOCK
1001         
1002         struct blk_plug                 *plug;
1003 #endif
1004 
1005         
1006         struct reclaim_state            *reclaim_state;
1007 
1008         struct backing_dev_info         *backing_dev_info;
1009 
1010         struct io_context               *io_context;
1011 
1012 #ifdef CONFIG_COMPACTION
1013         struct capture_control          *capture_control;
1014 #endif
1015         
1016         unsigned long                   ptrace_message;
1017         kernel_siginfo_t                *last_siginfo;
1018 
1019         struct task_io_accounting       ioac;
1020 #ifdef CONFIG_PSI
1021         
1022         unsigned int                    psi_flags;
1023 #endif
1024 #ifdef CONFIG_TASK_XACCT
1025         
1026         u64                             acct_rss_mem1;
1027         
1028         u64                             acct_vm_mem1;
1029         
1030         u64                             acct_timexpd;
1031 #endif
1032 #ifdef CONFIG_CPUSETS
1033         
1034         nodemask_t                      mems_allowed;
1035         
1036         seqcount_t                      mems_allowed_seq;
1037         int                             cpuset_mem_spread_rotor;
1038         int                             cpuset_slab_spread_rotor;
1039 #endif
1040 #ifdef CONFIG_CGROUPS
1041         
1042         struct css_set __rcu            *cgroups;
1043         
1044         struct list_head                cg_list;
1045 #endif
1046 #ifdef CONFIG_X86_CPU_RESCTRL
1047         u32                             closid;
1048         u32                             rmid;
1049 #endif
1050 #ifdef CONFIG_FUTEX
1051         struct robust_list_head __user  *robust_list;
1052 #ifdef CONFIG_COMPAT
1053         struct compat_robust_list_head __user *compat_robust_list;
1054 #endif
1055         struct list_head                pi_state_list;
1056         struct futex_pi_state           *pi_state_cache;
1057         struct mutex                    futex_exit_mutex;
1058         unsigned int                    futex_state;
1059 #endif
1060 #ifdef CONFIG_PERF_EVENTS
1061         struct perf_event_context       *perf_event_ctxp[perf_nr_task_contexts];
1062         struct mutex                    perf_event_mutex;
1063         struct list_head                perf_event_list;
1064 #endif
1065 #ifdef CONFIG_DEBUG_PREEMPT
1066         unsigned long                   preempt_disable_ip;
1067 #endif
1068 #ifdef CONFIG_NUMA
1069         
1070         struct mempolicy                *mempolicy;
1071         short                           il_prev;
1072         short                           pref_node_fork;
1073 #endif
1074 #ifdef CONFIG_NUMA_BALANCING
1075         int                             numa_scan_seq;
1076         unsigned int                    numa_scan_period;
1077         unsigned int                    numa_scan_period_max;
1078         int                             numa_preferred_nid;
1079         unsigned long                   numa_migrate_retry;
1080         
1081         u64                             node_stamp;
1082         u64                             last_task_numa_placement;
1083         u64                             last_sum_exec_runtime;
1084         struct callback_head            numa_work;
1085 
1086         
1087 
1088 
1089 
1090 
1091 
1092 
1093 
1094         struct numa_group __rcu         *numa_group;
1095 
1096         
1097 
1098 
1099 
1100 
1101 
1102 
1103 
1104 
1105 
1106 
1107 
1108 
1109 
1110         unsigned long                   *numa_faults;
1111         unsigned long                   total_numa_faults;
1112 
1113         
1114 
1115 
1116 
1117 
1118 
1119         unsigned long                   numa_faults_locality[3];
1120 
1121         unsigned long                   numa_pages_migrated;
1122 #endif 
1123 
1124 #ifdef CONFIG_RSEQ
1125         struct rseq __user *rseq;
1126         u32 rseq_sig;
1127         /*
1128          * RmW on rseq_event_mask must be performed atomically
1129          * with respect to preemption.
1130          */
1131         unsigned long rseq_event_mask;
1132 #endif
1133 
1134         struct tlbflush_unmap_batch     tlb_ubc;
1135 
1136         union {
1137                 refcount_t              rcu_users;
1138                 struct rcu_head         rcu;
1139         };
1140 
1141         
1142         struct pipe_inode_info          *splice_pipe;
1143 
1144         struct page_frag                task_frag;
1145 
1146 #ifdef CONFIG_TASK_DELAY_ACCT
1147         struct task_delay_info          *delays;
1148 #endif
1149 
1150 #ifdef CONFIG_FAULT_INJECTION
1151         int                             make_it_fail;
1152         unsigned int                    fail_nth;
1153 #endif
1154         
1155 
1156 
1157 
1158         int                             nr_dirtied;
1159         int                             nr_dirtied_pause;
1160         
1161         unsigned long                   dirty_paused_when;
1162 
1163 #ifdef CONFIG_LATENCYTOP
1164         int                             latency_record_count;
1165         struct latency_record           latency_record[LT_SAVECOUNT];
1166 #endif
1167         
1168 
1169 
1170 
1171         u64                             timer_slack_ns;
1172         u64                             default_timer_slack_ns;
1173 
1174 #ifdef CONFIG_KASAN
1175         unsigned int                    kasan_depth;
1176 #endif
1177 
1178 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1179         
1180         int                             curr_ret_stack;
1181         int                             curr_ret_depth;
1182 
1183         
1184         struct ftrace_ret_stack         *ret_stack;
1185 
1186         
1187         unsigned long long              ftrace_timestamp;
1188 
1189         
1190 
1191 
1192 
1193         atomic_t                        trace_overrun;
1194 
1195         
1196         atomic_t                        tracing_graph_pause;
1197 #endif
1198 
1199 #ifdef CONFIG_TRACING
1200         
1201         unsigned long                   trace;
1202 
1203         
1204         unsigned long                   trace_recursion;
1205 #endif 
1206 
1207 #ifdef CONFIG_KCOV
1208         
1209         unsigned int                    kcov_mode;
1210 
1211         
1212         unsigned int                    kcov_size;
1213 
1214         
1215         void                            *kcov_area;
1216 
1217         
1218         struct kcov                     *kcov;
1219 #endif
1220 
1221 #ifdef CONFIG_MEMCG
1222         struct mem_cgroup               *memcg_in_oom;
1223         gfp_t                           memcg_oom_gfp_mask;
1224         int                             memcg_oom_order;
1225 
1226         
1227         unsigned int                    memcg_nr_pages_over_high;
1228 
1229         
1230         struct mem_cgroup               *active_memcg;
1231 #endif
1232 
1233 #ifdef CONFIG_BLK_CGROUP
1234         struct request_queue            *throttle_queue;
1235 #endif
1236 
1237 #ifdef CONFIG_UPROBES
1238         struct uprobe_task              *utask;
1239 #endif
1240 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1241         unsigned int                    sequential_io;
1242         unsigned int                    sequential_io_avg;
1243 #endif
1244 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1245         unsigned long                   task_state_change;
1246 #endif
1247         int                             pagefault_disabled;
1248 #ifdef CONFIG_MMU
1249         struct task_struct              *oom_reaper_list;
1250 #endif
1251 #ifdef CONFIG_VMAP_STACK
1252         struct vm_struct                *stack_vm_area;
1253 #endif
1254 #ifdef CONFIG_THREAD_INFO_IN_TASK
1255         
1256         refcount_t                      stack_refcount;
1257 #endif
1258 #ifdef CONFIG_LIVEPATCH
1259         int patch_state;
1260 #endif
1261 #ifdef CONFIG_SECURITY
1262         
1263         void                            *security;
1264 #endif
1265 
1266 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1267         unsigned long                   lowest_stack;
1268         unsigned long                   prev_lowest_stack;
1269 #endif
1270 
1271         
1272 
1273 
1274 
1275         randomized_struct_fields_end
1276 
1277         /* CPU-specific state of this task: */
1278         struct thread_struct            thread;
1279 
1280         /*
1281          * WARNING: on x86, 'thread_struct' contains a variable-sized
1282          * structure.  It *MUST* be at the end of 'task_struct'.
1283          *
1284          * Do not put anything below here!
1285          */
1286 };
1287 
1288 static inline struct pid *task_pid(struct task_struct *task)
1289 {
1290         return task->thread_pid;
1291 }
1292 
1293 /*
1294  * the helpers to get the task's different pids as they are seen
1295  * from various namespaces
1296  *
1297  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1298  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1299  *                     current.
1300  * task_xid_nr_ns()  : id seen from the ns specified;
1301  *
1302  * see also pid_nr() etc in include/linux/pid.h
1303  */
1304 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1305 
1306 static inline pid_t task_pid_nr(struct task_struct *tsk)
1307 {
1308         return tsk->pid;
1309 }
1310 
1311 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1312 {
1313         return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1314 }
1315 
1316 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1317 {
1318         return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1319 }
1320 
1321 
1322 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1323 {
1324         return tsk->tgid;
1325 }
1326 
1327 
1328 
1329 
1330 
1331 
1332 
1333 
1334 
1335 
1336 
1337 static inline int pid_alive(const struct task_struct *p)
1338 {
1339         return p->thread_pid != NULL;
1340 }
1341 
1342 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1343 {
1344         return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1345 }
1346 
1347 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1348 {
1349         return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1350 }
1351 
1352 
1353 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1354 {
1355         return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1356 }
1357 
1358 static inline pid_t task_session_vnr(struct task_struct *tsk)
1359 {
1360         return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1361 }
1362 
1363 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1364 {
1365         return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1366 }
1367 
1368 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1369 {
1370         return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1371 }
1372 
1373 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1374 {
1375         pid_t pid = 0;
1376 
1377         rcu_read_lock();
1378         if (pid_alive(tsk))
1379                 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1380         rcu_read_unlock();
1381 
1382         return pid;
1383 }
1384 
1385 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1386 {
1387         return task_ppid_nr_ns(tsk, &init_pid_ns);
1388 }
1389 
1390 
1391 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1392 {
1393         return task_pgrp_nr_ns(tsk, &init_pid_ns);
1394 }
1395 
1396 #define TASK_REPORT_IDLE        (TASK_REPORT + 1)
1397 #define TASK_REPORT_MAX         (TASK_REPORT_IDLE << 1)
1398 
1399 static inline unsigned int task_state_index(struct task_struct *tsk)
1400 {
1401         unsigned int tsk_state = READ_ONCE(tsk->state);
1402         unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1403 
1404         BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1405 
1406         if (tsk_state == TASK_IDLE)
1407                 state = TASK_REPORT_IDLE;
1408 
1409         return fls(state);
1410 }
1411 
1412 static inline char task_index_to_char(unsigned int state)
1413 {
1414         static const char state_char[] = "RSDTtXZPI";
1415 
1416         BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1417 
1418         return state_char[state];
1419 }
1420 
1421 static inline char task_state_to_char(struct task_struct *tsk)
1422 {
1423         return task_index_to_char(task_state_index(tsk));
1424 }
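
Annotation: task_state_index() collapses the state bitmask to the bit index of its single reported bit via fls(), and task_index_to_char() maps that index into "RSDTtXZPI". Working through the defines above:

	TASK_RUNNING         (0x0000) -> fls(0) = 0 -> 'R'
	TASK_INTERRUPTIBLE   (0x0001) -> fls(1) = 1 -> 'S'
	TASK_UNINTERRUPTIBLE (0x0002) -> fls(2) = 2 -> 'D'
	__TASK_STOPPED       (0x0004) -> fls(4) = 3 -> 'T'
	__TASK_TRACED        (0x0008) -> fls(8) = 4 -> 't'
	EXIT_DEAD            (0x0010) -> 5 -> 'X',  EXIT_ZOMBIE (0x0020) -> 6 -> 'Z'
	TASK_PARKED          (0x0040) -> 7 -> 'P',  TASK_IDLE -> TASK_REPORT_IDLE (0x0080) -> 8 -> 'I'
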
1425 
1426 
1427 
1428 
1429 
1430 
1431 
1432 
1433 
1434 
1435 static inline int is_global_init(struct task_struct *tsk)
1436 {
1437         return task_tgid_nr(tsk) == 1;
1438 }
1439 
1440 extern struct pid *cad_pid;
1441 
1442 
1443 
1444 
1445 #define PF_IDLE                 0x00000002      /* I am an IDLE thread */
1446 #define PF_EXITING              0x00000004      /* Getting shut down */
1447 #define PF_VCPU                 0x00000010      /* I'm a virtual CPU */
1448 #define PF_WQ_WORKER            0x00000020      /* I'm a workqueue worker */
1449 #define PF_FORKNOEXEC           0x00000040      /* Forked but didn't exec */
1450 #define PF_MCE_PROCESS          0x00000080      /* Process policy on mce errors */
1451 #define PF_SUPERPRIV            0x00000100      /* Used super-user privileges */
1452 #define PF_DUMPCORE             0x00000200      /* Dumped core */
1453 #define PF_SIGNALED             0x00000400      /* Killed by a signal */
1454 #define PF_MEMALLOC             0x00000800      /* Allocating memory */
1455 #define PF_NPROC_EXCEEDED       0x00001000      /* set_user() noticed that RLIMIT_NPROC was exceeded */
1456 #define PF_USED_MATH            0x00002000      /* If unset the fpu must be initialized before use */
1457 #define PF_USED_ASYNC           0x00004000      /* Used async_schedule*(), used by module init */
1458 #define PF_NOFREEZE             0x00008000      /* This thread should not be frozen */
1459 #define PF_FROZEN               0x00010000      /* Frozen for system suspend */
1460 #define PF_KSWAPD               0x00020000      /* I am kswapd */
1461 #define PF_MEMALLOC_NOFS        0x00040000      /* All allocation requests will inherit GFP_NOFS */
1462 #define PF_MEMALLOC_NOIO        0x00080000      /* All allocation requests will inherit GFP_NOIO */
1463 #define PF_LESS_THROTTLE        0x00100000      /* Throttle me less: I clean memory */
1464 #define PF_KTHREAD              0x00200000      /* I am a kernel thread */
1465 #define PF_RANDOMIZE            0x00400000      /* Randomize virtual address space */
1466 #define PF_SWAPWRITE            0x00800000      /* Allowed to write to swap */
1467 #define PF_MEMSTALL             0x01000000      /* Stalled due to lack of memory */
1468 #define PF_UMH                  0x02000000      /* I'm an usermodehelper process */
1469 #define PF_NO_SETAFFINITY       0x04000000      /* Userland is not allowed to meddle with cpus_mask */
1470 #define PF_MCE_EARLY            0x08000000      /* Early kill for mce process policy */
1471 #define PF_MEMALLOC_NOCMA       0x10000000      /* All allocation request will have _GFP_MOVABLE cleared */
1472 #define PF_FREEZER_SKIP         0x40000000      /* Freezer should not count it as freezable */
1473 #define PF_SUSPEND_TASK         0x80000000      /* This thread called freeze_processes() and should not be frozen */
1474 
1475 
1476 
1477 
1478 
1479 
1480 
1481 
1482 
1483 
1484 
1485 
1486 #define clear_stopped_child_used_math(child)    do { (child)->flags &= ~PF_USED_MATH; } while (0)
1487 #define set_stopped_child_used_math(child)      do { (child)->flags |= PF_USED_MATH; } while (0)
1488 #define clear_used_math()                       clear_stopped_child_used_math(current)
1489 #define set_used_math()                         set_stopped_child_used_math(current)
1490 
1491 #define conditional_stopped_child_used_math(condition, child) \
1492         do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1493 
1494 #define conditional_used_math(condition)        conditional_stopped_child_used_math(condition, current)
1495 
1496 #define copy_to_stopped_child_used_math(child) \
1497         do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1498 
1499 
1500 #define tsk_used_math(p)                        ((p)->flags & PF_USED_MATH)
1501 #define used_math()                             tsk_used_math(current)
1502 
1503 static inline bool is_percpu_thread(void)
1504 {
1505 #ifdef CONFIG_SMP
1506         return (current->flags & PF_NO_SETAFFINITY) &&
1507                 (current->nr_cpus_allowed  == 1);
1508 #else
1509         return true;
1510 #endif
1511 }
1512 
1513 
1514 #define PFA_NO_NEW_PRIVS                0       /* May not gain new privileges. */
1515 #define PFA_SPREAD_PAGE                 1       /* Spread page cache over cpuset */
1516 #define PFA_SPREAD_SLAB                 2       /* Spread some slab caches over cpuset */
1517 #define PFA_SPEC_SSB_DISABLE            3       /* Speculative Store Bypass disabled */
1518 #define PFA_SPEC_SSB_FORCE_DISABLE      4       /* Speculative Store Bypass force disabled */
1519 #define PFA_SPEC_IB_DISABLE             5       /* Indirect branch speculation restricted */
1520 #define PFA_SPEC_IB_FORCE_DISABLE       6       /* Indirect branch speculation permanently restricted */
1521 #define PFA_SPEC_SSB_NOEXEC             7       /* Speculative Store Bypass clear on execve() */
1522 
1523 #define TASK_PFA_TEST(name, func)                                       \
1524         static inline bool task_##func(struct task_struct *p)           \
1525         { return test_bit(PFA_##name, &p->atomic_flags); }
1526 
1527 #define TASK_PFA_SET(name, func)                                        \
1528         static inline void task_set_##func(struct task_struct *p)       \
1529         { set_bit(PFA_##name, &p->atomic_flags); }
1530 
1531 #define TASK_PFA_CLEAR(name, func)                                      \
1532         static inline void task_clear_##func(struct task_struct *p)     \
1533         { clear_bit(PFA_##name, &p->atomic_flags); }
1534 
1535 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1536 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1537 
1538 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1539 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1540 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1541 
1542 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1543 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1544 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1545 
1546 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1547 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1548 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1549 
1550 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1551 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1552 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1553 
1554 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1555 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1556 
1557 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1558 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1559 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1560 
1561 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1562 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
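
Annotation: each TASK_PFA_* invocation above expands to a one-line inline helper; for example, TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) generates:

	static inline bool task_no_new_privs(struct task_struct *p)
	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }

Note that no TASK_PFA_CLEAR() is instantiated for NO_NEW_PRIVS or the *_FORCE_DISABLE flags: those flags are sticky by design and can never be cleared once set.
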
1563 
1564 static inline void
1565 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1566 {
1567         current->flags &= ~flags;
1568         current->flags |= orig_flags & flags;
1569 }
1570 
1571 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1572 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1573 #ifdef CONFIG_SMP
1574 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1575 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1576 #else
1577 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1578 {
1579 }
1580 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1581 {
1582         if (!cpumask_test_cpu(0, new_mask))
1583                 return -EINVAL;
1584         return 0;
1585 }
1586 #endif
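
Annotation: a hedged usage sketch of the SMP affinity API; "p" and "cpu" are hypothetical. cpumask_of() builds a single-CPU mask, and set_cpus_allowed_ptr() returns -EINVAL when the requested mask is not usable:

	int ret = set_cpus_allowed_ptr(p, cpumask_of(cpu));	/* pin p to one CPU */
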
1587 
1588 extern int yield_to(struct task_struct *p, bool preempt);
1589 extern void set_user_nice(struct task_struct *p, long nice);
1590 extern int task_prio(const struct task_struct *p);
1591 
1592 
1593 
1594 
1595 
1596 
1597 
1598 static inline int task_nice(const struct task_struct *p)
1599 {
1600         return PRIO_TO_NICE((p)->static_prio);
1601 }
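
Annotation: PRIO_TO_NICE() maps the kernel's static priority range [100..139] back to the user-visible nice range [-20..19], i.e. task_nice() returns static_prio - DEFAULT_PRIO (120); the default static_prio of 120 therefore reads as nice 0.
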
1602 
1603 extern int can_nice(const struct task_struct *p, const int nice);
1604 extern int task_curr(const struct task_struct *p);
1605 extern int idle_cpu(int cpu);
1606 extern int available_idle_cpu(int cpu);
1607 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1608 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1609 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1610 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1611 extern struct task_struct *idle_task(int cpu);
1612 
1613 
1614 
1615 
1616 
1617 
1618 
1619 static inline bool is_idle_task(const struct task_struct *p)
1620 {
1621         return !!(p->flags & PF_IDLE);
1622 }
1623 
1624 extern struct task_struct *curr_task(int cpu);
1625 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1626 
1627 void yield(void);
1628 
1629 union thread_union {
1630 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1631         struct task_struct task;
1632 #endif
1633 #ifndef CONFIG_THREAD_INFO_IN_TASK
1634         struct thread_info thread_info;
1635 #endif
1636         unsigned long stack[THREAD_SIZE/sizeof(long)];
1637 };
1638 
1639 #ifndef CONFIG_THREAD_INFO_IN_TASK
1640 extern struct thread_info init_thread_info;
1641 #endif
1642 
1643 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1644 
1645 #ifdef CONFIG_THREAD_INFO_IN_TASK
1646 static inline struct thread_info *task_thread_info(struct task_struct *task)
1647 {
1648         return &task->thread_info;
1649 }
1650 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1651 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1652 #endif
1653 
1654 
1655 
1656 
1657 
1658 
1659 
1660 
1661 
1662 
1663 
1664 
1665 extern struct task_struct *find_task_by_vpid(pid_t nr);
1666 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1667 
1668 
1669 
1670 
1671 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1672 
1673 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1674 extern int wake_up_process(struct task_struct *tsk);
1675 extern void wake_up_new_task(struct task_struct *tsk);
1676 
1677 #ifdef CONFIG_SMP
1678 extern void kick_process(struct task_struct *tsk);
1679 #else
1680 static inline void kick_process(struct task_struct *tsk) { }
1681 #endif
1682 
1683 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1684 
1685 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1686 {
1687         __set_task_comm(tsk, from, false);
1688 }
1689 
1690 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1691 #define get_task_comm(buf, tsk) ({                      \
1692         BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);     \
1693         __get_task_comm(buf, sizeof(buf), tsk);         \
1694 })
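
Annotation: the BUILD_BUG_ON() in get_task_comm() forces the destination to be a real TASK_COMM_LEN-byte array, not a pointer (whose sizeof would be the pointer size). A minimal sketch:

	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);	/* snapshots tsk->comm under task_lock() */
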
1695 
1696 #ifdef CONFIG_SMP
1697 void scheduler_ipi(void);
1698 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1699 #else
1700 static inline void scheduler_ipi(void) { }
1701 static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1702 {
1703         return 1;
1704 }
1705 #endif
1706 
1707 
1708 
1709 
1710 
1711 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1712 {
1713         set_ti_thread_flag(task_thread_info(tsk), flag);
1714 }
1715 
1716 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1717 {
1718         clear_ti_thread_flag(task_thread_info(tsk), flag);
1719 }
1720 
1721 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1722                                           bool value)
1723 {
1724         update_ti_thread_flag(task_thread_info(tsk), flag, value);
1725 }
1726 
1727 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1728 {
1729         return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1730 }
1731 
1732 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1733 {
1734         return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1735 }
1736 
1737 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1738 {
1739         return test_ti_thread_flag(task_thread_info(tsk), flag);
1740 }
1741 
1742 static inline void set_tsk_need_resched(struct task_struct *tsk)
1743 {
1744         set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1745 }
1746 
1747 static inline void clear_tsk_need_resched(struct task_struct *tsk)
1748 {
1749         clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1750 }
1751 
1752 static inline int test_tsk_need_resched(struct task_struct *tsk)
1753 {
1754         return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
1755 }
1756 
1757 
1758 
1759 
1760 
1761 
1762 
1763 #ifndef CONFIG_PREEMPTION
1764 extern int _cond_resched(void);
1765 #else
1766 static inline int _cond_resched(void) { return 0; }
1767 #endif
1768 
1769 #define cond_resched() ({                       \
1770         ___might_sleep(__FILE__, __LINE__, 0);  \
1771         _cond_resched();                        \
1772 })
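
Annotation: cond_resched() is the voluntary preemption point for long kernel-side loops; a hedged sketch with a hypothetical body:

	for (i = 0; i < nr_items; i++) {
		process_one(i);		/* hypothetical unit of work */
		cond_resched();		/* reschedule here if needed */
	}

As the definitions above show, under CONFIG_PREEMPTION _cond_resched() statically returns 0, since the kernel can already preempt at any point.
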
1773 
1774 extern int __cond_resched_lock(spinlock_t *lock);
1775 
1776 #define cond_resched_lock(lock) ({                              \
1777         ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1778         __cond_resched_lock(lock);                              \
1779 })
1780 
1781 static inline void cond_resched_rcu(void)
1782 {
1783 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1784         rcu_read_unlock();
1785         cond_resched();
1786         rcu_read_lock();
1787 #endif
1788 }
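
Annotation: cond_resched_rcu() may briefly exit the RCU read-side critical section to reschedule, so the caller must not hold pointers into RCU-protected data across the call; a hedged sketch with hypothetical helpers:

	rcu_read_lock();
	while (more_work()) {		/* hypothetical */
		handle_one();		/* must re-validate RCU state each pass */
		cond_resched_rcu();	/* may drop and re-acquire rcu_read_lock() */
	}
	rcu_read_unlock();
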
1789 
1790 /*
1791  * Does a critical section need to be broken due to another
1792  * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
1793  * but a general need for low latency)
1794  */
1795 static inline int spin_needbreak(spinlock_t *lock)
1796 {
1797 #ifdef CONFIG_PREEMPTION
1798         return spin_is_contended(lock);
1799 #else
1800         return 0;
1801 #endif
1802 }
1803 
1804 static __always_inline bool need_resched(void)
1805 {
1806         return unlikely(tif_need_resched());
1807 }
1808 
1809 
1810 
1811 
1812 #ifdef CONFIG_SMP
1813 
1814 static inline unsigned int task_cpu(const struct task_struct *p)
1815 {
1816 #ifdef CONFIG_THREAD_INFO_IN_TASK
1817         return READ_ONCE(p->cpu);
1818 #else
1819         return READ_ONCE(task_thread_info(p)->cpu);
1820 #endif
1821 }
1822 
1823 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1824 
1825 #else
1826 
1827 static inline unsigned int task_cpu(const struct task_struct *p)
1828 {
1829         return 0;
1830 }
1831 
1832 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1833 {
1834 }
1835 
1836 #endif 
1837 
1838 
1839 
1840 
1841 
1842 
1843 
1844 
1845 
1846 #ifndef vcpu_is_preempted
1847 static inline bool vcpu_is_preempted(int cpu)
1848 {
1849         return false;
1850 }
1851 #endif
1852 
1853 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1854 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1855 
1856 #ifndef TASK_SIZE_OF
1857 #define TASK_SIZE_OF(tsk)       TASK_SIZE
1858 #endif
1859 
1860 #ifdef CONFIG_RSEQ
1861 
1862 
1863 
1864 
1865 
1866 enum rseq_event_mask_bits {
1867         RSEQ_EVENT_PREEMPT_BIT  = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
1868         RSEQ_EVENT_SIGNAL_BIT   = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
1869         RSEQ_EVENT_MIGRATE_BIT  = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
1870 };
1871 
1872 enum rseq_event_mask {
1873         RSEQ_EVENT_PREEMPT      = (1U << RSEQ_EVENT_PREEMPT_BIT),
1874         RSEQ_EVENT_SIGNAL       = (1U << RSEQ_EVENT_SIGNAL_BIT),
1875         RSEQ_EVENT_MIGRATE      = (1U << RSEQ_EVENT_MIGRATE_BIT),
1876 };
1877 
1878 static inline void rseq_set_notify_resume(struct task_struct *t)
1879 {
1880         if (t->rseq)
1881                 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1882 }
1883 
1884 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
1885 
1886 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1887                                              struct pt_regs *regs)
1888 {
1889         if (current->rseq)
1890                 __rseq_handle_notify_resume(ksig, regs);
1891 }
1892 
1893 static inline void rseq_signal_deliver(struct ksignal *ksig,
1894                                        struct pt_regs *regs)
1895 {
1896         preempt_disable();
1897         __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
1898         preempt_enable();
1899         rseq_handle_notify_resume(ksig, regs);
1900 }
1901 
1902 
1903 static inline void rseq_preempt(struct task_struct *t)
1904 {
1905         __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
1906         rseq_set_notify_resume(t);
1907 }
1908 
1909 
1910 static inline void rseq_migrate(struct task_struct *t)
1911 {
1912         __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
1913         rseq_set_notify_resume(t);
1914 }
1915 
1916 /*
1917  * If parent process has a registered restartable sequences area, the
1918  * child inherits. Only applies when forking a process, not a thread.
1919  */
1920 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1921 {
1922         if (clone_flags & CLONE_VM) {
1923                 t->rseq = NULL;
1924                 t->rseq_sig = 0;
1925                 t->rseq_event_mask = 0;
1926         } else {
1927                 t->rseq = current->rseq;
1928                 t->rseq_sig = current->rseq_sig;
1929                 t->rseq_event_mask = current->rseq_event_mask;
1930         }
1931 }
1932 
1933 static inline void rseq_execve(struct task_struct *t)
1934 {
1935         t->rseq = NULL;
1936         t->rseq_sig = 0;
1937         t->rseq_event_mask = 0;
1938 }
1939 
1940 #else
1941 
1942 static inline void rseq_set_notify_resume(struct task_struct *t)
1943 {
1944 }
1945 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1946                                              struct pt_regs *regs)
1947 {
1948 }
1949 static inline void rseq_signal_deliver(struct ksignal *ksig,
1950                                        struct pt_regs *regs)
1951 {
1952 }
1953 static inline void rseq_preempt(struct task_struct *t)
1954 {
1955 }
1956 static inline void rseq_migrate(struct task_struct *t)
1957 {
1958 }
1959 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1960 {
1961 }
1962 static inline void rseq_execve(struct task_struct *t)
1963 {
1964 }
1965 
1966 #endif
1967 
1968 void __exit_umh(struct task_struct *tsk);
1969 
1970 static inline void exit_umh(struct task_struct *tsk)
1971 {
1972         if (unlikely(tsk->flags & PF_UMH))
1973                 __exit_umh(tsk);
1974 }
1975 
1976 #ifdef CONFIG_DEBUG_RSEQ
1977 
1978 void rseq_syscall(struct pt_regs *regs);
1979 
1980 #else
1981 
1982 static inline void rseq_syscall(struct pt_regs *regs)
1983 {
1984 }
1985 
1986 #endif
1987 
1988 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
1989 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
1990 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
1991 
1992 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
1993 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
1994 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
1995 
1996 int sched_trace_rq_cpu(struct rq *rq);
1997 
1998 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
1999 
2000 #endif