root/arch/arm64/include/asm/preempt.h

DEFINITIONS

This source file includes the following definitions.
  1. preempt_count
  2. preempt_count_set
  3. set_preempt_need_resched
  4. clear_preempt_need_resched
  5. test_preempt_need_resched
  6. __preempt_count_add
  7. __preempt_count_sub
  8. __preempt_count_dec_and_test
  9. should_resched
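
These helpers rely on the arm64 thread_info layout, in which a 64-bit
preempt_count overlays a 32-bit nesting count and a 32-bit need-resched
flag. A minimal sketch of that union, as declared (modulo the big-endian
case, which swaps the two halves) in arch/arm64/include/asm/thread_info.h:

        union {
                u64     preempt_count;          /* full 64-bit view */
                struct {
                        u32     count;          /* preemption nesting depth */
                        u32     need_resched;   /* 0 => reschedule pending */
                } preempt;
        };

Note the inversion: need_resched is 0 when a reschedule is pending, so
"count zero and reschedule pending" reads back as the single 64-bit value
zero, and PREEMPT_ENABLED (BIT(32)) is the quiescent state.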

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_NEED_RESCHED    BIT(32)
#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)

static inline int preempt_count(void)
{
        return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
        /* Preserve existing value of PREEMPT_NEED_RESCHED */
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

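/*
 * Note that the need_resched field is inverted: 0 means a reschedule
 * is pending, 1 means it is not. This keeps "count is zero and a
 * reschedule is due" equal to the all-zeroes 64-bit value, which
 * __preempt_count_dec_and_test() can then detect with a single
 * comparison.
 */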
static inline void set_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
        return !current_thread_info()->preempt.need_resched;
}

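/*
 * The preempt count is only modified by the owning task; any interrupt
 * that nests on top restores it before returning. A plain, non-atomic
 * read-modify-write of the low 32-bit word is therefore sufficient
 * here.
 */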
static inline void __preempt_count_add(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc += val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc -= val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline bool __preempt_count_dec_and_test(void)
{
        struct thread_info *ti = current_thread_info();
        u64 pc = READ_ONCE(ti->preempt_count);

        /* Update only the count field, leaving need_resched unchanged */
        WRITE_ONCE(ti->preempt.count, --pc);

        /*
         * If we wrote back all zeroes, then we're preemptible and in
         * need of a reschedule. Otherwise, we need to reload the
         * preempt_count in case the need_resched flag was cleared by an
         * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
         * pair.
         */
        return !pc || !READ_ONCE(ti->preempt_count);
}

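/*
 * pc is the full 64-bit value, so the equality below only holds when
 * the upper word is zero (i.e. a reschedule is pending, given the
 * inverted flag) and the count itself matches preempt_offset.
 */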
static inline bool should_resched(int preempt_offset)
{
        u64 pc = READ_ONCE(current_thread_info()->preempt_count);
        return pc == preempt_offset;
}

#ifdef CONFIG_PREEMPT
void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
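
EXAMPLE

The single-comparison trick can be demonstrated outside the kernel. A
minimal user-space sketch (the union and values below model the layout
for illustration only; little-endian assumed):

        #include <stdint.h>
        #include <stdio.h>

        /* Model of the arm64 preempt_count overlay described above. */
        union preempt_u {
                uint64_t preempt_count;
                struct {
                        uint32_t count;         /* nesting depth */
                        uint32_t need_resched;  /* 0 => resched pending */
                } preempt;
        };

        int main(void)
        {
                union preempt_u p;

                p.preempt.count = 2;            /* two nested disables */
                p.preempt.need_resched = 0;     /* reschedule requested */

                p.preempt.count--;              /* first preempt_enable() */
                printf("zero after first dec?  %d\n", p.preempt_count == 0);

                p.preempt.count--;              /* second preempt_enable() */
                printf("zero after second dec? %d\n", p.preempt_count == 0);
                return 0;
        }

Only the second decrement leaves the whole 64-bit word zero: exactly the
"preemptible and need to reschedule" state that
__preempt_count_dec_and_test() folds into one test.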
