root/include/asm-generic/preempt.h


DEFINITIONS

This source file includes the following definitions; a simplified usage sketch follows the list.
  1. preempt_count
  2. preempt_count_ptr
  3. preempt_count_set
  4. set_preempt_need_resched
  5. clear_preempt_need_resched
  6. test_preempt_need_resched
  7. __preempt_count_add
  8. __preempt_count_sub
  9. __preempt_count_dec_and_test
  10. should_resched
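
USAGE SKETCH

These helpers are the raw building blocks that <linux/preempt.h> wraps into
preempt_disable()/preempt_enable(). Roughly, and ignoring the intermediate
preempt_count_inc()/preempt_count_dec_and_test() wrappers plus the optional
tracing/debug hooks, the composition under CONFIG_PREEMPTION looks like this
(simplified sketch, not the literal upstream macros):

#define preempt_disable() \
do { \
        __preempt_count_add(1); \
        barrier(); \
} while (0)

#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(__preempt_count_dec_and_test())) \
                __preempt_schedule(); \
} while (0)

The barrier() calls keep the compiler from moving accesses out of the critical
section, and __preempt_schedule() (defined near the bottom of this file when
CONFIG_PREEMPTION is set) reschedules as soon as the count drops back to zero
with TIF_NEED_RESCHED pending.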

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED (0)

static __always_inline int preempt_count(void)
{
        return READ_ONCE(current_thread_info()->preempt_count);
}

static __always_inline volatile int *preempt_count_ptr(void)
{
        return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
        *preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

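/*
 * The need_resched "folding" hooks below are no-ops in this generic version:
 * with a plain thread_info-based preempt count there is no folded
 * PREEMPT_NEED_RESCHED bit to maintain, so TIF_NEED_RESCHED is tested
 * directly where it matters (see __preempt_count_dec_and_test() and
 * should_resched() below).
 */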
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
        return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        *preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
        *preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
        /*
         * Because load-store architectures cannot do per-cpu atomic
         * operations, we cannot use PREEMPT_NEED_RESCHED here; it might get
         * lost.
         */
        return !--*preempt_count_ptr() && tif_need_resched();
}
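
/*
 * Architectures with native per-CPU atomics (x86, for example) override this
 * generic header and fold PREEMPT_NEED_RESCHED into their per-CPU preempt
 * count, so a single decrement-and-test covers both "count reached zero" and
 * "reschedule needed"; the generic version above has to re-read
 * TIF_NEED_RESCHED explicitly instead.
 */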

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
        return unlikely(preempt_count() == preempt_offset &&
                        tif_need_resched());
}

#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */
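
For context, here is a rough sketch of a typical should_resched() caller,
modelled on the scheduler's cond_resched() path (simplified, not the literal
upstream code; example_cond_resched() is an illustrative name, and
preempt_schedule_common() stands in for the scheduler-internal reschedule
helper the real code calls):

static inline int example_cond_resched(void)
{
        /* should_resched(0): preempt_count() is 0 and TIF_NEED_RESCHED is set */
        if (should_resched(0)) {
                preempt_schedule_common();      /* scheduler-internal helper (assumed here) */
                return 1;
        }
        return 0;
}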
