root/arch/ia64/include/asm/asmmacro.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H

/*
 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */


#define ENTRY(name)                             \
        .align 32;                              \
        .proc name;                             \
name:

#define ENTRY_MIN_ALIGN(name)                   \
        .align 16;                              \
        .proc name;                             \
name:

#define GLOBAL_ENTRY(name)                      \
        .global name;                           \
        ENTRY(name)

#define END(name)                               \
        .endp name
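
/*
 * Typical usage (a minimal sketch; "my_func" is an illustrative name,
 * not something defined by this header):
 *
 *      GLOBAL_ENTRY(my_func)
 *              ...function body...
 *              br.ret.sptk.many rp
 *      END(my_func)
 *
 * ENTRY() aligns and opens a local procedure, GLOBAL_ENTRY() also
 * exports the symbol, and END() closes the .proc/.endp pair that the
 * assembler uses for its unwind bookkeeping.
 */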

/*
 * Helper macros to make unwind directives more readable:
 */

/* prologue_gr: */
#define ASM_UNW_PRLG_RP                 0x8
#define ASM_UNW_PRLG_PFS                0x4
#define ASM_UNW_PRLG_PSP                0x2
#define ASM_UNW_PRLG_PR                 0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs)    (32+(ninputs))
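
/*
 * Example (a sketch of the usual pattern): the mask names the preserved
 * resources saved in the prologue, and GRSAVE() names the first stacked
 * general register available for those saves in a function with the
 * given number of inputs:
 *
 *      .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
 */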

/*
 * Helper macros for accessing user memory.
 *
 * When adding any new .section/.previous entries here, make sure to
 * also add them to the DISCARD section in arch/ia64/kernel/gate.lds.S,
 * or unpleasant things will happen.
 */

        .section "__ex_table", "a"              // declare section & section attributes
        .previous

# define EX(y,x...)                             \
        .xdata4 "__ex_table", 99f-., y-.;       \
  [99:] x
# define EXCLR(y,x...)                          \
        .xdata4 "__ex_table", 99f-., y-.+4;     \
  [99:] x
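
/*
 * Example (a minimal sketch; ".Lfault" is an illustrative fixup label):
 * tag a user-memory access so that a fault on it continues at the
 * fixup code instead of oopsing:
 *
 *      EX(.Lfault, ld8 r8=[r32])
 *
 * Each table entry records two self-relative offsets: the tagged
 * instruction (label 99) and its handler.  EXCLR's "+4" sets a flag
 * bit that the fault handler uses to additionally zero the value
 * register.
 */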

/*
 * Tag MCA recoverable instruction ranges.
 */

        .section "__mca_table", "a"             // declare section & section attributes
        .previous

# define MCA_RECOVER_RANGE(y)                   \
        .xdata4 "__mca_table", y-., 99f-.;      \
  [99:]
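
/*
 * Example (a sketch): the argument is the start label of the range and
 * the macro itself marks the end, so it is placed right after the last
 * instruction that may take a recoverable machine check:
 *
 *      1:      ld8 r18=[r17]
 *              ;;
 *              MCA_RECOVER_RANGE(1b)
 */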

/*
 * Mark instructions that need a load of a virtual address patched to be
 * a load of a physical address.  We use this either in performance-critical
 * paths (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */
        .section ".data..patch.vtop", "a"       // declare section & section attributes
        .previous

#define LOAD_PHYSICAL(pr, reg, obj)             \
[1:](pr)movl reg = obj;                         \
        .xdata4 ".data..patch.vtop", 1b-.
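
/*
 * Example (a sketch; the symbol name is illustrative): take the address
 * of a kernel object in code that may run with data translation off;
 * the patch machinery rewrites the movl's immediate from the virtual
 * to the physical address:
 *
 *      LOAD_PHYSICAL(p0, r2, some_kernel_object)
 */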

/*
 * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
 * we'll patch out the workaround bundles with NOPs, so their impact is minimal.
 */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
        .section ".data..patch.mckinley_e9", "a"
        .previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN                                    \
        .xdata4 ".data..patch.mckinley_e9", 1f-.;       \
1:{ .mib;                                               \
        nop.m 0;                                        \
        mov r16=ar.pfs;                                 \
        br.call.sptk.many b7=2f;;                       \
  };                                                    \
2:{ .mib;                                               \
        nop.m 0;                                        \
        mov ar.pfs=r16;                                 \
        br.ret.sptk.many b6;;                           \
  }
#else
# define FSYS_RETURN    br.ret.sptk.many b6
#endif
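
/*
 * Usage (a sketch): fast system call paths return to user level with
 *
 *      FSYS_RETURN
 *
 * rather than a bare "br.ret.sptk.many b6".  With the workaround
 * compiled in, this emits the call/return bundle pair above and records
 * its address, so boot code can patch the extra work out with NOPs on
 * CPUs that do not need it.
 */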

/*
 * If the physical stack register size differs from DEF_NUM_STACK_REG,
 * dynamically patch the kernel with the correct size.
 */
        .section ".data..patch.phys_stack_reg", "a"
        .previous
#define LOAD_PHYS_STACK_REG_SIZE(reg)                   \
[1:]    adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;        \
        .xdata4 ".data..patch.phys_stack_reg", 1b-.
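
/*
 * Example (a sketch): load the default size constant
 * (IA64_NUM_PHYS_STACK_REG*8+8) into a scratch register:
 *
 *      LOAD_PHYS_STACK_REG_SIZE(r3)
 *
 * The patch list records the adds instruction so that boot code can
 * rewrite the immediate when the CPU's physical stacked-register count
 * differs from the default.
 */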

/*
 * Up until early 2004, use of .align within a function caused bad unwind info.
 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available, or into
 * nothing otherwise.
 */
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n)  .align n
#else
# define TEXT_ALIGN(n)
#endif
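
/*
 * Example (a sketch): align a branch target without risking the bad
 * unwind info that .align inside a function produced on old assemblers:
 *
 *      TEXT_ALIGN(32)
 *      .Lloop:
 */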

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data              .serialize.data
# define dv_serialize_instruction       .serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
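
/*
 * Example (a sketch): on assemblers that understand the .serialize.*
 * directives, note an explicit serialization point so the assembler's
 * dependency-violation checks do not warn about the surrounding code:
 *
 *      srlz.d
 *      dv_serialize_data
 */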

#endif /* _ASM_IA64_ASMMACRO_H */
