/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * Documentation/RCU.
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

/*
 * Tiny RCU tracks no grace-period state, so there is nothing to sample
 * and nothing to conditionally wait for: the cookie is always zero and
 * the cond_*() variants reduce to the might_sleep() debugging check.
 * (See the usage sketch at the end of this file.)
 */
static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}

/*
 * With only one CPU there is a single callback list, so posting one
 * callback and waiting for it implies that all earlier callbacks have
 * already been invoked.
 */
static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}

/* A context switch is a quiescent state for RCU-sched. */
static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Tiny RCU needs none of the following machinery: with only one
 * non-preemptible CPU, there are no other CPUs to force into a
 * quiescent state, no grace-period kthreads to display, and no
 * read-side state to clean up at task exit.
 */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit(void)
{
}

static inline void exit_rcu(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}

#endif /* __LINUX_TINY_H */
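
/*
 * Usage sketch, illustrative only: the polled grace-period interface
 * declared above works unchanged on top of these stubs.  Because Tiny
 * RCU runs its readers non-preemptibly on a single CPU, any grace
 * period needed since the cookie was sampled has necessarily elapsed
 * by the time an updater runs in sleepable context, so the cookie is
 * always zero and cond_synchronize_rcu() degenerates to might_sleep().
 * The names "gp_cookie", "publish_new_version", and "old_ptr" below
 * are hypothetical, not kernel APIs:
 *
 *	unsigned long gp_cookie;
 *
 *	gp_cookie = get_state_synchronize_rcu();
 *	old_ptr = publish_new_version();   // updater's own work
 *	cond_synchronize_rcu(gp_cookie);   // just might_sleep() under Tiny RCU
 *	kfree(old_ptr);                    // pre-existing readers have finished
 */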