root/arch/powerpc/include/asm/asm-prototypes.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. ucall_norets
  2. kvmppc_save_tm_hv
  3. kvmppc_restore_tm_hv

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 #ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
   3 #define _ASM_POWERPC_ASM_PROTOTYPES_H
   4 /*
   5  * This file is for prototypes of C functions that are only called
   6  * from asm, and any associated variables.
   7  *
   8  * Copyright 2016, Daniel Axtens, IBM Corporation.
   9  */
  10 
  11 #include <linux/threads.h>
  12 #include <asm/cacheflush.h>
  13 #include <asm/checksum.h>
  14 #include <linux/uaccess.h>
  15 #include <asm/epapr_hcalls.h>
  16 #include <asm/dcr.h>
  17 #include <asm/mmu_context.h>
  18 #include <asm/ultravisor-api.h>
  19 
  20 #include <uapi/asm/ucontext.h>
  21 
  22 /* SMP */
  23 extern struct task_struct *current_set[NR_CPUS];
  24 extern struct task_struct *secondary_current;
  25 void start_secondary(void *unused);
  26 
  27 /* kexec */
  28 struct paca_struct;
  29 struct kimage;
  30 extern struct paca_struct kexec_paca;
  31 void kexec_copy_flush(struct kimage *image);
  32 
  33 /* pseries hcall tracing */
  34 extern struct static_key hcall_tracepoint_key;
  35 void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
  36 void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
  37 
  38 /* Ultravisor */
  39 #if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
  40 long ucall_norets(unsigned long opcode, ...);
  41 #else
     /*
      * Stub for kernels built without ultravisor support (neither POWERNV
      * host nor secure-VM guest): every ucall simply reports that the
      * ultravisor is unavailable. U_NOT_AVAILABLE comes from
      * <asm/ultravisor-api.h>, included above.
      */
  42 static inline long ucall_norets(unsigned long opcode, ...)
  43 {
  44         return U_NOT_AVAILABLE;
  45 }
  46 #endif
  47 
  48 /* OPAL */
  49 int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
  50                     int64_t a4, int64_t a5, int64_t a6, int64_t a7,
  51                     int64_t opcode, uint64_t msr);
  52 
  53 /* VMX copying */
  54 int enter_vmx_usercopy(void);
  55 int exit_vmx_usercopy(void);
  56 int enter_vmx_ops(void);
  57 void *exit_vmx_ops(void *dest);
  58 
  59 /* Traps */
  60 long machine_check_early(struct pt_regs *regs);
  61 long hmi_exception_realmode(struct pt_regs *regs);
  62 void SMIException(struct pt_regs *regs);
  63 void handle_hmi_exception(struct pt_regs *regs);
  64 void instruction_breakpoint_exception(struct pt_regs *regs);
  65 void RunModeException(struct pt_regs *regs);
  66 void single_step_exception(struct pt_regs *regs);
  67 void program_check_exception(struct pt_regs *regs);
  68 void alignment_exception(struct pt_regs *regs);
  69 void StackOverflow(struct pt_regs *regs);
  70 void kernel_fp_unavailable_exception(struct pt_regs *regs);
  71 void altivec_unavailable_exception(struct pt_regs *regs);
  72 void vsx_unavailable_exception(struct pt_regs *regs);
  73 void fp_unavailable_tm(struct pt_regs *regs);
  74 void altivec_unavailable_tm(struct pt_regs *regs);
  75 void vsx_unavailable_tm(struct pt_regs *regs);
  76 void facility_unavailable_exception(struct pt_regs *regs);
  77 void TAUException(struct pt_regs *regs);
  78 void altivec_assist_exception(struct pt_regs *regs);
  79 void unrecoverable_exception(struct pt_regs *regs);
  80 void kernel_bad_stack(struct pt_regs *regs);
  81 void system_reset_exception(struct pt_regs *regs);
  82 void machine_check_exception(struct pt_regs *regs);
  83 void emulation_assist_interrupt(struct pt_regs *regs);
  84 long do_slb_fault(struct pt_regs *regs, unsigned long ea);
  85 void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
  86 
  87 /* signals, syscalls and interrupts */
  88 long sys_swapcontext(struct ucontext __user *old_ctx,
  89                     struct ucontext __user *new_ctx,
  90                     long ctx_size);
  91 #ifdef CONFIG_PPC32
  92 long sys_debug_setcontext(struct ucontext __user *ctx,
  93                           int ndbg, struct sig_dbg_op __user *dbg);
  94 int
  95 ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp);
  96 unsigned long __init early_init(unsigned long dt_ptr);
  97 void __init machine_init(u64 dt_ptr);
  98 #endif
  99 
 100 long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
 101                       u32 len_high, u32 len_low);
 102 long sys_switch_endian(void);
 103 notrace unsigned int __check_irq_replay(void);
 104 void notrace restore_interrupts(void);
 105 
 106 /* ptrace */
 107 long do_syscall_trace_enter(struct pt_regs *regs);
 108 void do_syscall_trace_leave(struct pt_regs *regs);
 109 
 110 /* process */
 111 void restore_math(struct pt_regs *regs);
 112 void restore_tm_state(struct pt_regs *regs);
 113 
 114 /* prom_init (OpenFirmware) */
 115 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 116                                unsigned long pp,
 117                                unsigned long r6, unsigned long r7,
 118                                unsigned long kbase);
 119 
 120 /* setup */
 121 void __init early_setup(unsigned long dt_ptr);
 122 void early_setup_secondary(void);
 123 
 124 /* time */
 125 void accumulate_stolen_time(void);
 126 
 127 /* misc runtime */
 128 extern u64 __bswapdi2(u64);
 129 extern s64 __lshrdi3(s64, int);
 130 extern s64 __ashldi3(s64, int);
 131 extern s64 __ashrdi3(s64, int);
 132 extern int __cmpdi2(s64, s64);
 133 extern int __ucmpdi2(u64, u64);
 134 
 135 /* tracing */
 136 void _mcount(void);
 137 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
 138                                                 unsigned long sp);
 139 
 140 void pnv_power9_force_smt4_catch(void);
 141 void pnv_power9_force_smt4_release(void);
 142 
 143 /* Transaction memory related */
 144 void tm_enable(void);
 145 void tm_disable(void);
 146 void tm_abort(uint8_t cause);
 147 
 148 struct kvm_vcpu;
 149 void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 150 void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 151 
 152 /* Patch sites */
 153 extern s32 patch__call_flush_count_cache;
 154 extern s32 patch__flush_count_cache_return;
 155 extern s32 patch__flush_link_stack_return;
 156 extern s32 patch__call_kvm_flush_link_stack;
 157 extern s32 patch__memset_nocache, patch__memcpy_nocache;
 158 
 159 extern long flush_count_cache;
 160 extern long kvm_flush_link_stack;
 161 
 162 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 163 void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
 164 void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
 165 #else
     /*
      * No-op stubs when transactional-memory support is not configured, so
      * HV KVM callers need no #ifdefs of their own. Signatures must match
      * the asm-implemented versions above exactly.
      */
 166 static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
 167                                      bool preserve_nv) { }
 168 static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
 169                                         bool preserve_nv) { }
 170 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 171 
 172 void kvmhv_save_host_pmu(void);
 173 void kvmhv_load_host_pmu(void);
 174 void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
 175 void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
 176 
 177 int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
 178 
 179 long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
 180 long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
 181                         unsigned long dabrx);
 182 
 183 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */

/* [<][>][^][v][top][bottom][index][help] */