root/arch/powerpc/kernel/vector.S

/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
        li      r4,VRSTATE_VSCR
        lvx     v0,r4,r3
        mtvscr  v0
        REST_32VRS(0,r4,r3)
        blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
        SAVE_32VRS(0, r4, r3)
        mfvscr  v0
        li      r4, VRSTATE_VSCR
        stvx    v0, r4, r3
        blr
EXPORT_SYMBOL(store_vr_state)
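
/*
 * C-level sketch (illustrative, not taken from this file): assuming the
 * usual prototypes for these exported symbols and the thread_vr_state
 * layout implied above (v0-v31 at offset 0, the VSCR image at
 * VRSTATE_VSCR), a caller that already has MSR_VEC enabled would use
 * them roughly as:
 *
 *      void load_vr_state(struct thread_vr_state *vr);
 *      void store_vr_state(struct thread_vr_state *vr);
 *
 *      store_vr_state(&current->thread.vr_state);   // spill VMX state
 *      ...
 *      load_vr_state(&current->thread.vr_state);    // bring it back
 */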

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
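/*
 * Rough C-level equivalent of the code below (illustrative sketch only;
 * the field names are the thread_struct members this file references
 * through asm-offsets):
 *
 *      if (mfspr(SPRN_VRSAVE) == 0)
 *              mtspr(SPRN_VRSAVE, -1);          // keep glibc's flag set
 *      current->thread.load_vec++;              // may wrap; that is fine
 *      current->thread.used_vr = 1;
 *      load_vr_state(&current->thread.vr_state);
 *      // and the saved MSR gains MSR_VEC so the task can use VMX on return
 */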
_GLOBAL(load_up_altivec)
        mfmsr   r5                      /* grab the current MSR */
        oris    r5,r5,MSR_VEC@h
        MTMSRD(r5)                      /* enable use of AltiVec now */
        isync

        /*
         * While userspace in general ignores VRSAVE, glibc uses it as a boolean
         * to optimise userspace context save/restore. Whenever we take an
         * altivec unavailable exception we must set VRSAVE to something non
         * zero. Set it to all 1s. See also the programming note in the ISA.
         */
        mfspr   r4,SPRN_VRSAVE
        cmpwi   0,r4,0
        bne+    1f
        li      r4,-1
        mtspr   SPRN_VRSAVE,r4
1:
        /* enable use of VMX after return */
#ifdef CONFIG_PPC32
        mfspr   r5,SPRN_SPRG_THREAD             /* current task's THREAD (phys) */
        oris    r9,r9,MSR_VEC@h
#else
        ld      r4,PACACURRENT(r13)
        addi    r5,r4,THREAD            /* Get THREAD */
        oris    r12,r12,MSR_VEC@h
        std     r12,_MSR(r1)
#endif
        /* Don't care if r4 overflows, this is desired behaviour */
        lbz     r4,THREAD_LOAD_VEC(r5)
        addi    r4,r4,1
        stb     r4,THREAD_LOAD_VEC(r5)
        addi    r6,r5,THREAD_VRSTATE
        li      r4,1
        li      r10,VRSTATE_VSCR
        stw     r4,THREAD_USED_VR(r5)
        lvx     v0,r10,r6
        mtvscr  v0
        REST_32VRS(0,r4,r6)
        /* restore registers and return */
        blr

/*
 * save_altivec(tsk)
 * Save the vector registers to its thread_struct
 */
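/*
 * If the thread has a non-NULL vr_save_area pointer (THREAD_VRSAVEAREA),
 * the registers are saved there; otherwise they go into thread.vr_state.
 * Either destination uses the same layout as load/store_vr_state above:
 * v0-v31 first, then the VSCR image at VRSTATE_VSCR.
 */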
_GLOBAL(save_altivec)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
        PPC_LCMPI       0,r7,0
        bne     2f
        addi    r7,r3,THREAD_VRSTATE
2:      SAVE_32VRS(0,r4,r7)
        mfvscr  v0
        li      r4,VRSTATE_VSCR
        stvx    v0,r4,r7
        blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
        andi.   r5,r12,MSR_FP
        beql+   load_up_fpu             /* skip if already loaded */
        andis.  r5,r12,MSR_VEC@h
        beql+   load_up_altivec         /* skip if already loaded */
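        /*
         * Note the beql+ idiom above: andi./andis. set CR0[EQ] when the
         * tested MSR bit is clear, so load_up_fpu/load_up_altivec are
         * called (branch-and-link) only when that state has not been
         * loaded yet; if the bit is already set, the call is skipped.
         */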

        ld      r4,PACACURRENT(r13)
        addi    r4,r4,THREAD            /* Get THREAD */
        li      r6,1
        stw     r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
        /* enable use of VSX after return */
        oris    r12,r12,MSR_VSX@h
        std     r12,_MSR(r1)
        b       fast_exception_return

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
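/*
 * Each routine below processes four single-precision lanes with the
 * scalar FPU: r3 points at the destination vector, r4/r5 (and r6 for
 * the fused multiply-add forms) at the sources, and the CTR loop steps
 * 4 bytes per lane.  As an illustrative C sketch only (this prototype
 * is hypothetical, not the kernel's), vaddfp behaves like:
 *
 *      void vaddfp(float *dst, const float *a, const float *b)
 *      {
 *              int i;
 *
 *              for (i = 0; i < 4; i++)
 *                      dst[i] = a[i] + b[i];
 *      }
 */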
#ifdef CONFIG_PPC32
        .data
fpzero:
        .long   0
fpone:
        .long   0x3f800000      /* 1.0 in single-precision FP */
fphalf:
        .long   0x3f000000      /* 0.5 in single-precision FP */

#define LDCONST(fr, name)       \
        lis     r11,name@ha;    \
        lfs     fr,name@l(r11)
#else

        .section ".toc","aw"
fpzero:
        .tc     FD_0_0[TC],0
fpone:
        .tc     FD_3ff00000_0[TC],0x3ff0000000000000    /* 1.0 */
fphalf:
        .tc     FD_3fe00000_0[TC],0x3fe0000000000000    /* 0.5 */

#define LDCONST(fr, name)       \
        lfd     fr,name@toc(r2)
#endif

        .text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
        stwu    r1,-64(r1)
#else
        stdu    r1,-64(r1)
#endif
        mfmsr   r10
        ori     r11,r10,MSR_FP
        mtmsr   r11
        isync
        stfd    fr0,24(r1)
        stfd    fr1,16(r1)
        stfd    fr31,8(r1)
        LDCONST(fr1, fpzero)
        mffs    fr31
        MTFSF_L(fr1)
        blr

fpdisable:
        mtlr    r12
        MTFSF_L(fr31)
        lfd     fr31,8(r1)
        lfd     fr1,16(r1)
        lfd     fr0,24(r1)
        mtmsr   r10
        isync
        addi    r1,r1,64
        blr
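
/*
 * Calling convention for fpenable/fpdisable, as used by the routines
 * below: the caller does "mflr r12; bl fpenable".  fpenable builds a
 * 64-byte frame, enables MSR_FP (old MSR kept in r10), saves
 * fr0/fr1/fr31 at 24/16/8(r1), stashes the old FPSCR in fr31 and clears
 * FPSCR.  The caller finishes with "b fpdisable", which restores FPSCR,
 * the saved FPRs and the original MSR, pops the frame and returns
 * through the LR value held in r12.
 */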

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
        mflr    r12
        bl      fpenable
        li      r0,4
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        lfsx    fr1,r5,r6
        fadds   fr0,fr0,fr1
        stfsx   fr0,r3,r6
        addi    r6,r6,4
        bdnz    1b
        b       fpdisable

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
        mflr    r12
        bl      fpenable
        li      r0,4
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        lfsx    fr1,r5,r6
        fsubs   fr0,fr0,fr1
        stfsx   fr0,r3,r6
        addi    r6,r6,4
        bdnz    1b
        b       fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
        mflr    r12
        bl      fpenable
        stfd    fr2,32(r1)
        li      r0,4
        mtctr   r0
        li      r7,0
1:      lfsx    fr0,r4,r7
        lfsx    fr1,r5,r7
        lfsx    fr2,r6,r7
        fmadds  fr0,fr0,fr2,fr1
        stfsx   fr0,r3,r7
        addi    r7,r7,4
        bdnz    1b
        lfd     fr2,32(r1)
        b       fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
        mflr    r12
        bl      fpenable
        stfd    fr2,32(r1)
        li      r0,4
        mtctr   r0
        li      r7,0
1:      lfsx    fr0,r4,r7
        lfsx    fr1,r5,r7
        lfsx    fr2,r6,r7
        fnmsubs fr0,fr0,fr2,fr1
        stfsx   fr0,r3,r7
        addi    r7,r7,4
        bdnz    1b
        lfd     fr2,32(r1)
        b       fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
        mflr    r12
        bl      fpenable
        li      r0,4
        LDCONST(fr1, fpone)
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        fdivs   fr0,fr1,fr0
        stfsx   fr0,r3,r6
        addi    r6,r6,4
        bdnz    1b
        b       fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
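/*
 * The update computed in the loop below (see the per-line comments)
 * is the Newton-Raphson step for f(r) = 1/r^2 - s:
 *
 *      r(n+1) = r(n) + 0.5 * r(n) * (1 - s * r(n)^2)
 *
 * Starting from r(0) = frsqrte(s), each step roughly doubles the number
 * of correct bits, so two steps bring the hardware estimate up to
 * single-precision accuracy.
 */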
_GLOBAL(vrsqrtefp)
        mflr    r12
        bl      fpenable
        stfd    fr2,32(r1)
        stfd    fr3,40(r1)
        stfd    fr4,48(r1)
        stfd    fr5,56(r1)
        li      r0,4
        LDCONST(fr4, fpone)
        LDCONST(fr5, fphalf)
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        frsqrte fr1,fr0         /* r = frsqrte(s) */
        fmuls   fr3,fr1,fr0     /* r * s */
        fmuls   fr2,fr1,fr5     /* r * 0.5 */
        fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
        fmadds  fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
        fmuls   fr3,fr1,fr0     /* r * s */
        fmuls   fr2,fr1,fr5     /* r * 0.5 */
        fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
        fmadds  fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
        stfsx   fr1,r3,r6
        addi    r6,r6,4
        bdnz    1b
        lfd     fr5,56(r1)
        lfd     fr4,48(r1)
        lfd     fr3,40(r1)
        lfd     fr2,32(r1)
        b       fpdisable
