root/arch/powerpc/include/asm/hw_irq.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. irq_soft_mask_return
  2. irq_soft_mask_set
  3. irq_soft_mask_set_return
  4. irq_soft_mask_or_return
  5. arch_local_save_flags
  6. arch_local_irq_disable
  7. arch_local_irq_enable
  8. arch_local_irq_save
  9. arch_irqs_disabled_flags
  10. arch_irqs_disabled
  11. lazy_irq_pending
  12. may_hard_irq_enable
  13. arch_irq_disabled_regs
  14. arch_local_save_flags
  15. arch_local_irq_restore
  16. arch_local_irq_save
  17. arch_local_irq_disable
  18. arch_local_irq_enable
  19. arch_irqs_disabled_flags
  20. arch_irqs_disabled
  21. arch_irq_disabled_regs
  22. may_hard_irq_enable

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 /*
   3  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   4  */
   5 #ifndef _ASM_POWERPC_HW_IRQ_H
   6 #define _ASM_POWERPC_HW_IRQ_H
   7 
   8 #ifdef __KERNEL__
   9 
  10 #include <linux/errno.h>
  11 #include <linux/compiler.h>
  12 #include <asm/ptrace.h>
  13 #include <asm/processor.h>
  14 
  15 #ifdef CONFIG_PPC64
  16 
  17 /*
  18  * PACA flags in paca->irq_happened.
  19  *
  20  * These bits are set when interrupts occur while soft-disabled
  21  * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
  22  * is set whenever we manually hard disable.
  23  */
  24 #define PACA_IRQ_HARD_DIS       0x01
  25 #define PACA_IRQ_DBELL          0x02
  26 #define PACA_IRQ_EE             0x04
  27 #define PACA_IRQ_DEC            0x08 /* Or FIT */
  28 #define PACA_IRQ_EE_EDGE        0x10 /* BookE only */
  29 #define PACA_IRQ_HMI            0x20
  30 #define PACA_IRQ_PMI            0x40
  31 
  32 /*
  33  * Some soft-masked interrupts must be hard masked until they are replayed
  34  * (e.g., because the soft-masked handler does not clear the exception).
  35  */
  36 #ifdef CONFIG_PPC_BOOK3S
  37 #define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI)
  38 #else
  39 #define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
  40 #endif
  41 
  42 /*
  43  * Mask levels for paca->irq_soft_mask. IRQS_DISABLED is the standard
  44  * local_irq_disable() level; IRQS_PMI_DISABLED additionally masks
  45  * performance monitor interrupts.
  46  */
  45 #define IRQS_ENABLED            0
  46 #define IRQS_DISABLED           1 /* local_irq_disable() interrupts */
  47 #define IRQS_PMI_DISABLED       2
  48 #define IRQS_ALL_DISABLED       (IRQS_DISABLED | IRQS_PMI_DISABLED)
  49 
  50 #endif /* CONFIG_PPC64 */
  51 
  52 #ifndef __ASSEMBLY__
  53 
  54 extern void replay_system_reset(void);
  55 extern void __replay_interrupt(unsigned int vector);
  56 
  57 extern void timer_interrupt(struct pt_regs *);
  58 extern void timer_broadcast_interrupt(void);
  59 extern void performance_monitor_exception(struct pt_regs *regs);
  60 extern void WatchdogException(struct pt_regs *regs);
  61 extern void unknown_exception(struct pt_regs *regs);
  62 
  63 #ifdef CONFIG_PPC64
  64 #include <asm/paca.h>
  65 
/*
 * Read the current soft-interrupt mask byte from the PACA.
 *
 * GPR13 holds the PACA pointer; the "i" constraint folds the field offset
 * into the lbz displacement, so this is a single byte load with no other
 * memory traffic. notrace keeps tracer instrumentation out of this
 * low-level irqflags helper.
 */
  66 static inline notrace unsigned long irq_soft_mask_return(void)
  67 {
  68         unsigned long flags;
  69 
  70         asm volatile(
  71                 "lbz %0,%1(13)"
  72                 : "=r" (flags)
  73                 : "i" (offsetof(struct paca_struct, irq_soft_mask)));
  74 
  75         return flags;
  76 }
  77 
  78 /*
  79  * The "memory" clobber acts as both a compiler barrier
  80  * for the critical section and as a clobber because
  81  * we changed paca->irq_soft_mask
  82  */
/*
 * Store @mask into paca->irq_soft_mask with a single stb via GPR13.
 * See the comment above regarding the "memory" clobber.
 */
  83 static inline notrace void irq_soft_mask_set(unsigned long mask)
  84 {
  85 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
  86         /*
  87          * The irq mask must always include the STD bit if any are set.
  88          *
  89          * This is because interrupts don't get replayed until the standard
  90          * interrupt (local_irq_disable()) is unmasked.
  91          *
  92          * Other masks must only provide additional masking beyond
  93          * the standard, and they are also not replayed until the
  94          * standard interrupt becomes unmasked.
  95          *
  96          * This could be changed, but it will require partial
  97          * unmasks to be replayed, among other things. For now, take
  98          * the simple approach.
  99          */
 100         WARN_ON(mask && !(mask & IRQS_DISABLED));
 101 #endif
 102 
 103         asm volatile(
 104                 "stb %0,%1(13)"
 105                 :
 106                 : "r" (mask),
 107                   "i" (offsetof(struct paca_struct, irq_soft_mask))
 108                 : "memory");
 109 }
 110 
/*
 * Atomically (w.r.t. this CPU) replace paca->irq_soft_mask with @mask and
 * return the previous mask: lbz loads the old byte, stb stores the new one.
 * "=&r" (earlyclobber) keeps the result register distinct from the inputs.
 */
 111 static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
 112 {
 113         unsigned long flags;
 114 
 115 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
 116         WARN_ON(mask && !(mask & IRQS_DISABLED));
 117 #endif
 118 
 119         asm volatile(
 120                 "lbz %0,%1(13); stb %2,%1(13)"
 121                 : "=&r" (flags)
 122                 : "i" (offsetof(struct paca_struct, irq_soft_mask)),
 123                   "r" (mask)
 124                 : "memory");
 125 
 126         return flags;
 127 }
 128 
/*
 * OR @mask into paca->irq_soft_mask and return the previous mask
 * (load, or, store — all through GPR13).
 *
 * The debug check runs on the combined old|new value after the update;
 * it verifies the resulting mask still includes the standard
 * IRQS_DISABLED bit whenever any bit is set.
 */
 129 static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 130 {
 131         unsigned long flags, tmp;
 132 
 133         asm volatile(
 134                 "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
 135                 : "=&r" (flags), "=r" (tmp)
 136                 : "i" (offsetof(struct paca_struct, irq_soft_mask)),
 137                   "r" (mask)
 138                 : "memory");
 139 
 140 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
 141         WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
 142 #endif
 143 
 144         return flags;
 145 }
 146 
/* 64-bit: "flags" for the generic irqflags API are the PACA soft mask. */
 147 static inline unsigned long arch_local_save_flags(void)
 148 {
 149         return irq_soft_mask_return();
 150 }
 151 
/* 64-bit: soft-disable only — sets the mask byte, MSR[EE] is untouched. */
 152 static inline void arch_local_irq_disable(void)
 153 {
 154         irq_soft_mask_set(IRQS_DISABLED);
 155 }
 156 
 157 extern void arch_local_irq_restore(unsigned long);
 158 
/*
 * 64-bit: enable goes through the out-of-line arch_local_irq_restore()
 * rather than a plain mask store, so that anything recorded in
 * paca->irq_happened while soft-masked can be dealt with on the way out.
 */
 159 static inline void arch_local_irq_enable(void)
 160 {
 161         arch_local_irq_restore(IRQS_ENABLED);
 162 }
 163 
/* 64-bit: soft-disable and return the previous soft mask in one step. */
 164 static inline unsigned long arch_local_irq_save(void)
 165 {
 166         return irq_soft_mask_set_return(IRQS_DISABLED);
 167 }
 168 
 169 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 170 {
 171         return flags & IRQS_DISABLED;
 172 }
 173 
 174 static inline bool arch_irqs_disabled(void)
 175 {
 176         return arch_irqs_disabled_flags(arch_local_save_flags());
 177 }
 178 
 179 #ifdef CONFIG_PPC_BOOK3S
 180 /*
 181  * To support disabling and enabling of irqs together with PMIs, a set of
 182  * new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 183  * macros are added. These macros are implemented using generic
 184  * linux local_irq_* code from include/linux/irqflags.h.
 185  */
/* OR both the standard and PMI disable bits into the soft mask; the
 * previous mask is returned in @flags for a later restore. */
 186 #define raw_local_irq_pmu_save(flags)                                   \
 187         do {                                                            \
 188                 typecheck(unsigned long, flags);                        \
 189                 flags = irq_soft_mask_or_return(IRQS_DISABLED | \
 190                                 IRQS_PMI_DISABLED);                     \
 191         } while(0)
 192 
/* Restore a mask saved by raw_local_irq_pmu_save(). */
 193 #define raw_local_irq_pmu_restore(flags)                                \
 194         do {                                                            \
 195                 typecheck(unsigned long, flags);                        \
 196                 arch_local_irq_restore(flags);                          \
 197         } while(0)
 198 
/* With irq tracing enabled, keep the trace_hardirqs_* state in sync. */
 199 #ifdef CONFIG_TRACE_IRQFLAGS
 200 #define powerpc_local_irq_pmu_save(flags)                       \
 201          do {                                                   \
 202                 raw_local_irq_pmu_save(flags);                  \
 203                 trace_hardirqs_off();                           \
 204         } while(0)
 205 #define powerpc_local_irq_pmu_restore(flags)                    \
 206         do {                                                    \
 207                 if (raw_irqs_disabled_flags(flags)) {           \
 208                         raw_local_irq_pmu_restore(flags);       \
 209                         trace_hardirqs_off();                   \
 210                 } else {                                        \
 211                         trace_hardirqs_on();                    \
 212                         raw_local_irq_pmu_restore(flags);       \
 213                 }                                               \
 214         } while(0)
 215 #else
 216 #define powerpc_local_irq_pmu_save(flags)                       \
 217         do {                                                    \
 218                 raw_local_irq_pmu_save(flags);                  \
 219         } while(0)
 220 #define powerpc_local_irq_pmu_restore(flags)                    \
 221         do {                                                    \
 222                 raw_local_irq_pmu_restore(flags);               \
 223         } while (0)
 224 #endif  /* CONFIG_TRACE_IRQFLAGS */
 225 
 226 #endif /* CONFIG_PPC_BOOK3S */
 227 
/*
 * Hard toggle of MSR[EE]: BookE has the wrteei instruction; other 64-bit
 * CPUs rewrite the MSR with __mtmsrd (note __hard_irq_disable keeps
 * MSR_RI set so the kernel remains recoverable).
 */
 228 #ifdef CONFIG_PPC_BOOK3E
 229 #define __hard_irq_enable()     asm volatile("wrteei 1" : : : "memory")
 230 #define __hard_irq_disable()    asm volatile("wrteei 0" : : : "memory")
 231 #else
 232 #define __hard_irq_enable()     __mtmsrd(MSR_EE|MSR_RI, 1)
 233 #define __hard_irq_disable()    __mtmsrd(MSR_RI, 1)
 234 #endif
 235 
/*
 * Fully disable interrupts: hard-disable MSR[EE], soft-mask everything
 * (including PMIs), and record PACA_IRQ_HARD_DIS so the replay logic
 * knows EE is off. On the transition from soft-enabled, stash r1 in
 * paca->saved_r1 (the stdx) and tell lockdep irqs went off.
 */
 236 #define hard_irq_disable()      do {                                    \
 237         unsigned long flags;                                            \
 238         __hard_irq_disable();                                           \
 239         flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);            \
 240         local_paca->irq_happened |= PACA_IRQ_HARD_DIS;                  \
 241         if (!arch_irqs_disabled_flags(flags)) {                         \
 242                 asm ("stdx %%r1, 0, %1 ;"                               \
 243                      : "=m" (local_paca->saved_r1)                      \
 244                      : "b" (&local_paca->saved_r1));                    \
 245                 trace_hardirqs_off();                                   \
 246         }                                                               \
 247 } while(0)
 248 
/*
 * True if an interrupt arrived while soft-masked and is waiting to be
 * replayed. PACA_IRQ_HARD_DIS is ignored: it is bookkeeping for the
 * hard-disable state, not a pending interrupt.
 */
 249 static inline bool lazy_irq_pending(void)
 250 {
 251         return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
 252 }
 253 
 254 /*
 255  * This is called by asynchronous interrupts to conditionally
 256  * re-enable hard interrupts after having cleared the source
 257  * of the interrupt. They are kept disabled if there is a different
 258  * soft-masked interrupt pending that requires hard masking.
 259  */
 260 static inline void may_hard_irq_enable(void)
 261 {
 262         if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
                /* No longer hard-disabled once EE is turned back on. */
 263                 get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
 264                 __hard_irq_enable();
 265         }
 266 }
 267 
/*
 * 64-bit: was the interrupted context in @regs running with local irqs
 * soft-disabled? regs->softe holds the saved soft mask.
 */
 268 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 269 {
 270         return (regs->softe & IRQS_DISABLED);
 271 }
 272 
 273 extern bool prep_irq_for_idle(void);
 274 extern bool prep_irq_for_idle_irqsoff(void);
 275 extern void irq_set_pending_from_srr1(unsigned long srr1);
 276 
 277 #define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
 278 
 279 extern void force_external_irq_replay(void);
 280 
 281 #else /* CONFIG_PPC64 */
 282 
/* 32-bit: no soft masking — irq state lives directly in the MSR. */
 283 #define SET_MSR_EE(x)   mtmsr(x)
 284 
/* 32-bit: "flags" for the generic irqflags API are simply the MSR. */
 285 static inline unsigned long arch_local_save_flags(void)
 286 {
 287         return mfmsr();
 288 }
 289 
/*
 * 32-bit: restore a saved MSR value. BookE only updates MSR[EE] via
 * wrtee (taking EE from @flags); other CPUs rewrite the whole MSR.
 */
 290 static inline void arch_local_irq_restore(unsigned long flags)
 291 {
 292 #if defined(CONFIG_BOOKE)
 293         asm volatile("wrtee %0" : : "r" (flags) : "memory");
 294 #else
 295         mtmsr(flags);
 296 #endif
 297 }
 298 
/*
 * 32-bit: save the current MSR, then disable external interrupts.
 * BookE: wrteei 0; 8xx: dedicated EID SPR; otherwise clear MSR[EE].
 */
 299 static inline unsigned long arch_local_irq_save(void)
 300 {
 301         unsigned long flags = arch_local_save_flags();
 302 #ifdef CONFIG_BOOKE
 303         asm volatile("wrteei 0" : : : "memory");
 304 #elif defined(CONFIG_PPC_8xx)
 305         wrtspr(SPRN_EID);
 306 #else
 307         SET_MSR_EE(flags & ~MSR_EE);
 308 #endif
 309         return flags;
 310 }
 311 
/*
 * 32-bit: disable external interrupts without returning the old state.
 * The generic fallback reuses arch_local_irq_save() and drops the result.
 */
 312 static inline void arch_local_irq_disable(void)
 313 {
 314 #ifdef CONFIG_BOOKE
 315         asm volatile("wrteei 0" : : : "memory");
 316 #elif defined(CONFIG_PPC_8xx)
 317         wrtspr(SPRN_EID);
 318 #else
 319         arch_local_irq_save();
 320 #endif
 321 }
 322 
/*
 * 32-bit: enable external interrupts. BookE: wrteei 1; 8xx: EIE SPR;
 * otherwise read-modify-write the MSR to set MSR[EE].
 */
 323 static inline void arch_local_irq_enable(void)
 324 {
 325 #ifdef CONFIG_BOOKE
 326         asm volatile("wrteei 1" : : : "memory");
 327 #elif defined(CONFIG_PPC_8xx)
 328         wrtspr(SPRN_EIE);
 329 #else
 330         unsigned long msr = mfmsr();
 331         SET_MSR_EE(msr | MSR_EE);
 332 #endif
 333 }
 334 
 335 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 336 {
 337         return (flags & MSR_EE) == 0;
 338 }
 339 
 340 static inline bool arch_irqs_disabled(void)
 341 {
 342         return arch_irqs_disabled_flags(arch_local_save_flags());
 343 }
 344 
/* 32-bit has no lazy masking: hard disable is just a plain disable. */
 345 #define hard_irq_disable()              arch_local_irq_disable()
 346 
/* Was the interrupted context in @regs running with MSR[EE] clear? */
 347 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 348 {
 349         return !(regs->msr & MSR_EE);
 350 }
 351 
/* 32-bit has no soft-mask replay, so there is nothing to re-enable. */
 352 static inline void may_hard_irq_enable(void) { }
 353 
 354 #endif /* CONFIG_PPC64 */
 355 
 356 #define ARCH_IRQ_INIT_FLAGS     IRQ_NOREQUEST
 357 
 358 /*
 359  * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 360  * or should we not care like we do now ? --BenH.
 361  */
 362 struct irq_chip;
 363 
 364 #endif  /* __ASSEMBLY__ */
 365 #endif  /* __KERNEL__ */
 366 #endif  /* _ASM_POWERPC_HW_IRQ_H */

/* [<][>][^][v][top][bottom][index][help] */