root/arch/sparc/include/asm/processor_64.h

DEFINITIONS

This source file includes the following definitions:
  1. prefetch
  2. prefetchw

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/asm/processor.h
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __ASM_SPARC64_PROCESSOR_H
#define __ASM_SPARC64_PROCESSOR_H

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/page.h>

/*
 * Userspace lives in its very own context and cannot reference us. Note
 * that TASK_SIZE is a misnomer: it really gives the maximum user
 * virtual address that the kernel will allocate out.
 *
 * XXX No longer using virtual page tables, kill this upper limit...
 */
#define VA_BITS         44
#ifndef __ASSEMBLY__
#define VPTE_SIZE       (1UL << (VA_BITS - PAGE_SHIFT + 3))
#else
#define VPTE_SIZE       (1 << (VA_BITS - PAGE_SHIFT + 3))
#endif

#define TASK_SIZE_OF(tsk) \
        (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
         (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#define TASK_SIZE \
        (test_thread_flag(TIF_32BIT) ? \
         (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
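/* A worked example of the arithmetic above (illustrative only, assuming
 * the usual sparc64 PAGE_SHIFT of 13, i.e. 8KB pages):
 *
 *   VPTE_SIZE        = 1UL << (44 - 13 + 3) = 1UL << 34
 *   64-bit TASK_SIZE = -VPTE_SIZE           = 0xfffffffc00000000UL
 *   32-bit TASK_SIZE = 1UL << 32            = 0x0000000100000000UL
 */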
#ifdef __KERNEL__

#define STACK_TOP32     ((1UL << 32UL) - PAGE_SIZE)
#define STACK_TOP64     (0x0000080000000000UL - (1UL << 32UL))

#define STACK_TOP       (test_thread_flag(TIF_32BIT) ? \
                         STACK_TOP32 : STACK_TOP64)

#define STACK_TOP_MAX   STACK_TOP64

#endif
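/* For reference, the values these work out to (illustrative only,
 * assuming 8KB pages so PAGE_SIZE == 0x2000):
 *
 *   STACK_TOP32 = (1UL << 32) - 0x2000          = 0x00000000ffffe000UL
 *   STACK_TOP64 = 0x80000000000UL - (1UL << 32) = 0x000007ff00000000UL
 */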

#ifndef __ASSEMBLY__

typedef struct {
        unsigned char seg;
} mm_segment_t;

/* The SPARC processor-specific thread struct. */
/* XXX This should die, everything can go into thread_info now. */
struct thread_struct {
#ifdef CONFIG_DEBUG_SPINLOCK
        /* How many spinlocks are held by this thread.
         * Used with spinlock debugging to catch tasks
         * sleeping illegally with locks held.
         */
        int smp_lock_count;
        unsigned int smp_lock_pc;
#else
        int dummy; /* f'in gcc bug... */
#endif
};

#endif /* !(__ASSEMBLY__) */

#ifndef CONFIG_DEBUG_SPINLOCK
#define INIT_THREAD  {                  \
        0,                              \
}
#else /* CONFIG_DEBUG_SPINLOCK */
#define INIT_THREAD  {                                  \
/* smp_lock_count, smp_lock_pc, */                      \
   0,              0,                                   \
}
#endif /* !(CONFIG_DEBUG_SPINLOCK) */
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/fpumacro.h>

struct task_struct;

/* On a uniprocessor, processes see TSO semantics even when running in RMO. */
#ifdef CONFIG_SMP
#define TSTATE_INITIAL_MM       TSTATE_TSO
#else
#define TSTATE_INITIAL_MM       TSTATE_RMO
#endif

/* Do the necessary setup to start up a newly executed thread. */
#define start_thread(regs, pc, sp) \
do { \
        unsigned long __asi = ASI_PNF; \
        regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
        regs->tpc = ((pc & (~3)) - 4); \
        regs->tnpc = regs->tpc + 4; \
        regs->y = 0; \
        set_thread_wstate(1 << 3); \
        /* Drop our reference to any inherited user trap handler table. */ \
        if (current_thread_info()->utraps) { \
                if (*(current_thread_info()->utraps) < 2) \
                        kfree(current_thread_info()->utraps); \
                else \
                        (*(current_thread_info()->utraps))--; \
                current_thread_info()->utraps = NULL; \
        } \
        /* Zero the saved %g0-%g7/%o0-%o7 slots, placing the biased \
         * initial stack pointer in the %o6 (UREG_FP) slot at 0x70. \
         */ \
        __asm__ __volatile__( \
        "stx            %%g0, [%0 + %2 + 0x00]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x08]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x10]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x18]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x20]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x28]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x30]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x38]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x40]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x48]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x50]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x58]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x60]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x68]\n\t" \
        "stx            %1,   [%0 + %2 + 0x70]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x78]\n\t" \
        "wrpr           %%g0, (1 << 3), %%wstate\n\t" \
        : \
        : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
          "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
        fprs_write(0);  \
        current_thread_info()->xfsr[0] = 0;     \
        current_thread_info()->fpsaved[0] = 0;  \
        regs->tstate &= ~TSTATE_PEF;    \
} while (0)
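/* A plain-C sketch of the inline assembly above (illustrative only; the
 * original performs these stores by hand together with the %wstate
 * write):
 */
#if 0
{
        int i;

        /* Clear the %g0-%g7 and %o0-%o7 slots of the saved user state. */
        for (i = 0; i < 16; i++)
                regs->u_regs[i] = 0;
        /* ...then install the biased initial stack pointer in %o6. */
        regs->u_regs[UREG_FP] = sp - sizeof(struct reg_window) - STACK_BIAS;
}
#endif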

#define start_thread32(regs, pc, sp) \
do { \
        unsigned long __asi = ASI_PNF; \
        pc &= 0x00000000ffffffffUL; \
        sp &= 0x00000000ffffffffUL; \
        regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
        regs->tpc = ((pc & (~3)) - 4); \
        regs->tnpc = regs->tpc + 4; \
        regs->y = 0; \
        set_thread_wstate(2 << 3); \
        if (current_thread_info()->utraps) { \
                if (*(current_thread_info()->utraps) < 2) \
                        kfree(current_thread_info()->utraps); \
                else \
                        (*(current_thread_info()->utraps))--; \
                current_thread_info()->utraps = NULL; \
        } \
        __asm__ __volatile__( \
        "stx            %%g0, [%0 + %2 + 0x00]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x08]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x10]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x18]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x20]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x28]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x30]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x38]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x40]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x48]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x50]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x58]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x60]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x68]\n\t" \
        "stx            %1,   [%0 + %2 + 0x70]\n\t" \
        "stx            %%g0, [%0 + %2 + 0x78]\n\t" \
        "wrpr           %%g0, (2 << 3), %%wstate\n\t" \
        : \
        : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
          "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
        fprs_write(0);  \
        current_thread_info()->xfsr[0] = 0;     \
        current_thread_info()->fpsaved[0] = 0;  \
        regs->tstate &= ~TSTATE_PEF;    \
} while (0)
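/* start_thread32() mirrors start_thread() above, with the 32-bit
 * differences visible in the code: pc and sp are truncated to 32 bits,
 * TSTATE_AM masks user-visible addresses to 32 bits, the window state
 * is 2 rather than 1 (32-bit register windows), and the initial frame
 * is a struct reg_window32 with no STACK_BIAS applied.
 */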

/* Free all resources held by a thread. */
#define release_thread(tsk)             do { } while (0)

unsigned long get_wchan(struct task_struct *task);

#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])

/* Please see the commentary in asm/backoff.h for a description of
 * what these instructions are doing and how they have been chosen.
 * To make a long story short, we are trying to yield the current cpu
 * strand during busy loops.
 */
#ifdef  BUILD_VDSO
#define cpu_relax()     asm volatile("\n99:\n\t"                        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     ::: "memory")
#else /* ! BUILD_VDSO */
#define cpu_relax()     asm volatile("\n99:\n\t"                        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     ".section  .pause_3insn_patch,\"ax\"\n\t"\
                                     ".word     99b\n\t"                \
                                     "wr        %%g0, 128, %%asr27\n\t" \
                                     "nop\n\t"                          \
                                     "nop\n\t"                          \
                                     ".previous"                        \
                                     ::: "memory")
#endif
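/* Typical use of cpu_relax() (an illustrative sketch; shared_flag is a
 * hypothetical variable that another strand will set):
 */
#if 0
while (!READ_ONCE(shared_flag))
        cpu_relax();
#endif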

/* Prefetch support.  This is tuned for UltraSPARC-III and later.
 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
 * a shallower prefetch queue than later chips.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
        /* We do not use the read prefetch mnemonic because that
         * prefetches into the prefetch-cache, which is only accessible
         * by floating point operations on UltraSPARC-III and later.
         * By contrast, "#one_write" prefetches into the L2 cache
         * in shared state.
         */
        __asm__ __volatile__("prefetch [%0], #one_write"
                             : /* no outputs */
                             : "r" (x));
}

static inline void prefetchw(const void *x)
{
        /* The optimal prefetch to use for writes is "#n_writes".
         * This brings the cacheline into the L2 cache in "owned"
         * state.
         */
        __asm__ __volatile__("prefetch [%0], #n_writes"
                             : /* no outputs */
                             : "r" (x));
}

#define spin_lock_prefetch(x)   prefetchw(x)
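/* Typical use of prefetch() (an illustrative sketch; struct list_node
 * and do_work() are hypothetical): overlap memory latency with work by
 * fetching the next node while handling the current one.  SPARC
 * prefetches are nonfaulting, so prefetching the NULL tail is fine.
 */
#if 0
struct list_node *p;

for (p = head; p; p = p->next) {
        prefetch(p->next);
        do_work(p);
}
#endif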

#define HAVE_ARCH_PICK_MMAP_LAYOUT

int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);

#endif /* !(__ASSEMBLY__) */

#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
