arch/sparc/lib/U3memcpy.S

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 /* U3memcpy.S: UltraSparc-III optimized memcpy.
   3  *
   4  * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
   5  */
   6 
   7 #ifdef __KERNEL__
   8 #include <linux/linkage.h>
   9 #include <asm/visasm.h>
  10 #include <asm/asi.h>
  11 #define GLOBAL_SPARE    %g7
  12 #else
  13 #define ASI_BLK_P 0xf0
  14 #define FPRS_FEF  0x04
  15 #ifdef MEMCPY_DEBUG
  16 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
  17                      clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
  18 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
  19 #else
  20 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
  21 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
  22 #endif
  23 #define GLOBAL_SPARE    %g5
  24 #endif
  25 
  26 #ifndef EX_LD
  27 #define EX_LD(x,y)      x
  28 #endif
  29 #ifndef EX_LD_FP
  30 #define EX_LD_FP(x,y)   x
  31 #endif
  32 
  33 #ifndef EX_ST
  34 #define EX_ST(x,y)      x
  35 #endif
  36 #ifndef EX_ST_FP
  37 #define EX_ST_FP(x,y)   x
  38 #endif
  39 
  40 #ifndef LOAD
  41 #define LOAD(type,addr,dest)    type [addr], dest
  42 #endif
  43 
  44 #ifndef STORE
  45 #define STORE(type,src,addr)    type src, [addr]
  46 #endif
  47 
  48 #ifndef STORE_BLK
  49 #define STORE_BLK(src,addr)     stda src, [addr] ASI_BLK_P
  50 #endif
  51 
  52 #ifndef FUNC_NAME
  53 #define FUNC_NAME       U3memcpy
  54 #endif
  55 
  56 #ifndef PREAMBLE
  57 #define PREAMBLE
  58 #endif
  59 
  60 #ifndef XCC
  61 #define XCC xcc
  62 #endif
  63 
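        /* The defaults above are only for the standalone memcpy build.
         * Wrappers such as U3copy_{from,to}_user.S redefine FUNC_NAME,
         * LOAD/STORE and the EX_* macros before including this file, so
         * this one body also serves as the user-space copy routines.
         */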
  64         .register       %g2,#scratch
  65         .register       %g3,#scratch
  66 
  67         /* Special/non-trivial issues of this code:
  68          *
  69          * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
  70          * 2) Only low 32 FPU registers are used so that only the
  71          *    lower half of the FPU register set is dirtied by this
  72          *    code.  This is especially important in the kernel.
  73          * 3) This code never prefetches cachelines past the end
  74          *    of the source buffer.
  75          */
  76 
  77         .text
  78 #ifndef EX_RETVAL
  79 #define EX_RETVAL(x)    x
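/* Fault fixup targets.  Each U3_retl_* label computes how many bytes
 * were left uncopied and returns that count in %o0; the *_fp variants
 * first branch to __restore_fp to release the FPU state taken by
 * VISEntryHalf.  These labels are only reached when the EX_LD/EX_ST
 * macros are overridden to install exception table entries, as the
 * user-copy wrappers do.
 */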
  80 __restore_fp:
  81         VISExitHalf
  82         retl
  83          nop
  84 ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
  85         add     %g1, 1, %g1
  86         add     %g2, %g1, %g2
  87         ba,pt   %xcc, __restore_fp
  88          add    %o2, %g2, %o0
  89 ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
  90 ENTRY(U3_retl_o2_plus_g2_fp)
  91         ba,pt   %xcc, __restore_fp
  92          add    %o2, %g2, %o0
  93 ENDPROC(U3_retl_o2_plus_g2_fp)
  94 ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
  95         add     %g2, 8, %g2
  96         ba,pt   %xcc, __restore_fp
  97          add    %o2, %g2, %o0
  98 ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
  99 ENTRY(U3_retl_o2)
 100         retl
 101          mov    %o2, %o0
 102 ENDPROC(U3_retl_o2)
 103 ENTRY(U3_retl_o2_plus_1)
 104         retl
 105          add    %o2, 1, %o0
 106 ENDPROC(U3_retl_o2_plus_1)
 107 ENTRY(U3_retl_o2_plus_4)
 108         retl
 109          add    %o2, 4, %o0
 110 ENDPROC(U3_retl_o2_plus_4)
 111 ENTRY(U3_retl_o2_plus_8)
 112         retl
 113          add    %o2, 8, %o0
 114 ENDPROC(U3_retl_o2_plus_8)
 115 ENTRY(U3_retl_o2_plus_g1_plus_1)
 116         add     %g1, 1, %g1
 117         retl
 118          add    %o2, %g1, %o0
 119 ENDPROC(U3_retl_o2_plus_g1_plus_1)
 120 ENTRY(U3_retl_o2_fp)
 121         ba,pt   %xcc, __restore_fp
 122          mov    %o2, %o0
 123 ENDPROC(U3_retl_o2_fp)
 124 ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
 125         sll     %o3, 6, %o3
 126         add     %o3, 0x80, %o3
 127         ba,pt   %xcc, __restore_fp
 128          add    %o2, %o3, %o0
 129 ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
 130 ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
 131         sll     %o3, 6, %o3
 132         add     %o3, 0x40, %o3
 133         ba,pt   %xcc, __restore_fp
 134          add    %o2, %o3, %o0
 135 ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
 136 ENTRY(U3_retl_o2_plus_GS_plus_0x10)
 137         add     GLOBAL_SPARE, 0x10, GLOBAL_SPARE
 138         retl
 139          add    %o2, GLOBAL_SPARE, %o0
 140 ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
 141 ENTRY(U3_retl_o2_plus_GS_plus_0x08)
 142         add     GLOBAL_SPARE, 0x08, GLOBAL_SPARE
 143         retl
 144          add    %o2, GLOBAL_SPARE, %o0
 145 ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
 146 ENTRY(U3_retl_o2_and_7_plus_GS)
 147         and     %o2, 7, %o2
 148         retl
 149          add    %o2, GLOBAL_SPARE, %o0
 150 ENDPROC(U3_retl_o2_and_7_plus_GS)
 151 ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
 152         add     GLOBAL_SPARE, 8, GLOBAL_SPARE
 153         and     %o2, 7, %o2
 154         retl
 155          add    %o2, GLOBAL_SPARE, %o0
 156 ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
 157 #endif
 158 
 159         .align          64
 160 
 161         /* The cheetah's flexible spine, oversized liver, enlarged heart,
 162          * slender muscular body, and claws make it the swiftest hunter
 163          * in Africa and the fastest animal on land.  Can reach speeds
 164          * of up to 2.4GB per second.
 165          */
 166 
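        /* Dispatch by length: len >= 192 uses the VIS block-copy path
         * below, 16 <= len < 192 branches to less_than_192, and
         * 0 < len < 16 branches to less_than_16.
         */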
 167         .globl  FUNC_NAME
 168         .type   FUNC_NAME,#function
 169 FUNC_NAME:      /* %o0=dst, %o1=src, %o2=len */
 170         srlx            %o2, 31, %g2
 171         cmp             %g2, 0
 172 
  173         /* software trap 5 "Range Check" if len >= 0x80000000 */
 174         tne             %xcc, 5
 175         PREAMBLE
 176         mov             %o0, %o4
 177 
 178         /* if len == 0 */
 179         cmp             %o2, 0
 180         be,pn           %XCC, end_return
 181          or             %o0, %o1, %o3
 182 
 183         /* if len < 16 */
 184         cmp             %o2, 16
 185         blu,a,pn        %XCC, less_than_16
 186          or             %o3, %o2, %o3
 187 
 188         /* if len < 192 */
 189         cmp             %o2, (3 * 64)
 190         blu,pt          %XCC, less_than_192
 191          andcc          %o3, 0x7, %g0
 192 
 193         /* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
 194          * o5 from here until we hit VISExitHalf.
 195          */
 196         VISEntryHalf
 197 
  198         /* Is 'dst' already aligned on a 64-byte boundary? */
 199         andcc           %o0, 0x3f, %g2
 200         be,pt           %XCC, 2f
 201 
 202         /* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
 203          * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
 204          * subtract this from 'len'.
 205          */
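        /* For example, if dst & 0x3f == 0x2b then %g2 = 0x40 - 0x2b = 0x15:
         * %g1 = 0x15 & 7 = 5 bytes are copied singly below, then
         * %g2 = 0x15 & 0x38 = 0x10 bytes move as two 8-byte stores,
         * leaving dst 64-byte aligned.
         */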
 206          sub            %o0, %o1, GLOBAL_SPARE
 207         sub             %g2, 0x40, %g2
 208         sub             %g0, %g2, %g2
 209         sub             %o2, %g2, %o2
 210         andcc           %g2, 0x7, %g1
 211         be,pt           %icc, 2f
 212          and            %g2, 0x38, %g2
 213 
 214 1:      subcc           %g1, 0x1, %g1
 215         EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1)
 216         EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1)
 217         bgu,pt          %XCC, 1b
 218          add            %o1, 0x1, %o1
 219 
 220         add             %o1, GLOBAL_SPARE, %o0
 221 
 222 2:      cmp             %g2, 0x0
 223         and             %o1, 0x7, %g1
 224         be,pt           %icc, 3f
 225          alignaddr      %o1, %g0, %o1
 226 
 227         EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2)
 228 1:      EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2)
 229         add             %o1, 0x8, %o1
 230         subcc           %g2, 0x8, %g2
 231         faligndata      %f4, %f6, %f0
 232         EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8)
 233         be,pn           %icc, 3f
 234          add            %o0, 0x8, %o0
 235 
 236         EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2)
 237         add             %o1, 0x8, %o1
 238         subcc           %g2, 0x8, %g2
 239         faligndata      %f6, %f4, %f2
 240         EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8)
 241         bne,pt          %icc, 1b
 242          add            %o0, 0x8, %o0
 243 
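        /* dst is now 64-byte aligned.  Round len down to a multiple of
         * 64 in GLOBAL_SPARE, prime the prefetch queue, then preload the
         * first 64 source bytes into %f0-%f14 and start forming aligned
         * output in %f16-%f26 before entering the block loop.
         */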
 244 3:      LOAD(prefetch, %o1 + 0x000, #one_read)
 245         LOAD(prefetch, %o1 + 0x040, #one_read)
 246         andn            %o2, (0x40 - 1), GLOBAL_SPARE
 247         LOAD(prefetch, %o1 + 0x080, #one_read)
 248         LOAD(prefetch, %o1 + 0x0c0, #one_read)
 249         LOAD(prefetch, %o1 + 0x100, #one_read)
 250         EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2)
 251         LOAD(prefetch, %o1 + 0x140, #one_read)
 252         EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2)
 253         LOAD(prefetch, %o1 + 0x180, #one_read)
 254         EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2)
 255         LOAD(prefetch, %o1 + 0x1c0, #one_read)
 256         faligndata      %f0, %f2, %f16
 257         EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2)
 258         faligndata      %f2, %f4, %f18
 259         EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2)
 260         faligndata      %f4, %f6, %f20
 261         EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2)
 262         faligndata      %f6, %f8, %f22
 263 
 264         EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2)
 265         faligndata      %f8, %f10, %f24
 266         EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2)
 267         faligndata      %f10, %f12, %f26
 268         EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
 269 
 270         subcc           GLOBAL_SPARE, 0x80, GLOBAL_SPARE
 271         add             %o1, 0x40, %o1
 272         bgu,pt          %XCC, 1f
 273          srl            GLOBAL_SPARE, 6, %o3
 274         ba,pt           %xcc, 2f
 275          nop
 276 
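        /* Main loop: each iteration writes one fully aligned 64-byte
         * block with a single block store while loading and aligning the
         * next 64 source bytes with faligndata.  %o3 holds the remaining
         * iteration count; the final two blocks are written by the tail
         * code at 2: below.
         */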
 277         .align          64
 278 1:
 279         EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 280         faligndata      %f12, %f14, %f28
 281         EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 282         faligndata      %f14, %f0, %f30
 283         EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 284         EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 285         faligndata      %f0, %f2, %f16
 286         add             %o0, 0x40, %o0
 287 
 288         EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 289         faligndata      %f2, %f4, %f18
 290         EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 291         faligndata      %f4, %f6, %f20
 292         EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 293         subcc           %o3, 0x01, %o3
 294         faligndata      %f6, %f8, %f22
 295         EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 296 
 297         faligndata      %f8, %f10, %f24
 298         EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 299         LOAD(prefetch, %o1 + 0x1c0, #one_read)
 300         faligndata      %f10, %f12, %f26
 301         bg,pt           %XCC, 1b
 302          add            %o1, 0x40, %o1
 303 
 304         /* Finally we copy the last full 64-byte block. */
 305 2:
 306         EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 307         faligndata      %f12, %f14, %f28
 308         EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 309         faligndata      %f14, %f0, %f30
 310         EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
 311         EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 312         faligndata      %f0, %f2, %f16
 313         EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 314         faligndata      %f2, %f4, %f18
 315         EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 316         faligndata      %f4, %f6, %f20
 317         EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 318         faligndata      %f6, %f8, %f22
 319         EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 320         faligndata      %f8, %f10, %f24
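        /* Only touch the doubleword at src + 0x40 when src was misaligned
         * (%g1 != 0).  With an 8-byte aligned src that word is not needed
         * by the final faligndata and may lie past the end of the buffer.
         */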
 321         cmp             %g1, 0
 322         be,pt           %XCC, 1f
 323          add            %o0, 0x40, %o0
 324         EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 325 1:      faligndata      %f10, %f12, %f26
 326         faligndata      %f12, %f14, %f28
 327         faligndata      %f14, %f0, %f30
 328         EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
 329         add             %o0, 0x40, %o0
 330         add             %o1, 0x40, %o1
 331         membar          #Sync
 332 
 333         /* Now we copy the (len modulo 64) bytes at the end.
 334          * Note how we borrow the %f0 loaded above.
 335          *
 336          * Also notice how this code is careful not to perform a
 337          * load past the end of the src buffer.
 338          */
 339         and             %o2, 0x3f, %o2
 340         andcc           %o2, 0x38, %g2
 341         be,pn           %XCC, 2f
 342          subcc          %g2, 0x8, %g2
 343         be,pn           %XCC, 2f
 344          cmp            %g1, 0
 345 
 346         sub             %o2, %g2, %o2
 347         be,a,pt         %XCC, 1f
 348          EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2)
 349 
 350 1:      EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2)
 351         add             %o1, 0x8, %o1
 352         subcc           %g2, 0x8, %g2
 353         faligndata      %f0, %f2, %f8
 354         EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
 355         be,pn           %XCC, 2f
 356          add            %o0, 0x8, %o0
 357         EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2)
 358         add             %o1, 0x8, %o1
 359         subcc           %g2, 0x8, %g2
 360         faligndata      %f2, %f0, %f8
 361         EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
 362         bne,pn          %XCC, 1b
 363          add            %o0, 0x8, %o0
 364 
 365         /* If anything is left, we copy it one byte at a time.
  366          * Note that %g1 is (src & 0x7) saved above before the
 367          * alignaddr was performed.
 368          */
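        /* %o3 is set to (dst - src) below, so the stores can address the
         * destination as %o1 + %o3 while only %o1 is advanced.
         */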
 369 2:
 370         cmp             %o2, 0
 371         add             %o1, %g1, %o1
 372         VISExitHalf
 373         be,pn           %XCC, end_return
 374          sub            %o0, %o1, %o3
 375 
 376         andcc           %g1, 0x7, %g0
 377         bne,pn          %icc, 90f
 378          andcc          %o2, 0x8, %g0
 379         be,pt           %icc, 1f
 380          nop
 381         EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
 382         EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
 383         add             %o1, 0x8, %o1
 384         sub             %o2, 8, %o2
 385 
 386 1:      andcc           %o2, 0x4, %g0
 387         be,pt           %icc, 1f
 388          nop
 389         EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
 390         EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
 391         add             %o1, 0x4, %o1
 392         sub             %o2, 4, %o2
 393 
 394 1:      andcc           %o2, 0x2, %g0
 395         be,pt           %icc, 1f
 396          nop
 397         EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
 398         EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
 399         add             %o1, 0x2, %o1
 400         sub             %o2, 2, %o2
 401 
 402 1:      andcc           %o2, 0x1, %g0
 403         be,pt           %icc, end_return
 404          nop
 405         EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
 406         ba,pt           %xcc, end_return
 407          EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
 408 
 409         .align          64
 410         /* 16 <= len < 192 */
 411 less_than_192:
 412         bne,pn          %XCC, 75f
 413          sub            %o0, %o1, %o3
 414 
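        /* Both src and dst are 8-byte aligned whenever we get here:
         * copy 16 bytes per iteration with integer loads/stores, then
         * mop up the 8-byte, 4-byte and sub-4-byte remainders.
         */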
 415 72:
 416         andn            %o2, 0xf, GLOBAL_SPARE
 417         and             %o2, 0xf, %o2
 418 1:      subcc           GLOBAL_SPARE, 0x10, GLOBAL_SPARE
 419         EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
 420         EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
 421         EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
 422         add             %o1, 0x8, %o1
 423         EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
 424         bgu,pt          %XCC, 1b
 425          add            %o1, 0x8, %o1
 426 73:     andcc           %o2, 0x8, %g0
 427         be,pt           %XCC, 1f
 428          nop
 429         sub             %o2, 0x8, %o2
 430         EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
 431         EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
 432         add             %o1, 0x8, %o1
 433 1:      andcc           %o2, 0x4, %g0
 434         be,pt           %XCC, 1f
 435          nop
 436         sub             %o2, 0x4, %o2
 437         EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
 438         EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
 439         add             %o1, 0x4, %o1
 440 1:      cmp             %o2, 0
 441         be,pt           %XCC, end_return
 442          nop
 443         ba,pt           %xcc, 90f
 444          nop
 445 
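        /* src and/or dst is not 8-byte aligned: byte-copy until dst is
         * aligned, then either rejoin the aligned loops above or, if src
         * is still offset, shift-and-merge doublewords in the loop at 8:.
         */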
 446 75:
 447         andcc           %o0, 0x7, %g1
 448         sub             %g1, 0x8, %g1
 449         be,pn           %icc, 2f
 450          sub            %g0, %g1, %g1
 451         sub             %o2, %g1, %o2
 452 
 453 1:      subcc           %g1, 1, %g1
 454         EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
 455         EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
 456         bgu,pt          %icc, 1b
 457          add            %o1, 1, %o1
 458 
 459 2:      add             %o1, %o3, %o0
 460         andcc           %o1, 0x7, %g1
 461         bne,pt          %icc, 8f
 462          sll            %g1, 3, %g1
 463 
 464         cmp             %o2, 16
 465         bgeu,pt         %icc, 72b
 466          nop
 467         ba,a,pt         %xcc, 73b
 468 
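        /* %g1 = (src & 7) * 8 and %o3 = 64 - %g1.  %g2 carries the
         * previous source doubleword shifted left by %g1 bits; each newly
         * loaded doubleword is shifted right by %o3 bits and OR'd in, so
         * dst is always written with aligned 8-byte stores.
         */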
 469 8:      mov             64, %o3
 470         andn            %o1, 0x7, %o1
 471         EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
 472         sub             %o3, %g1, %o3
 473         andn            %o2, 0x7, GLOBAL_SPARE
 474         sllx            %g2, %g1, %g2
 475 1:      EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
 476         subcc           GLOBAL_SPARE, 0x8, GLOBAL_SPARE
 477         add             %o1, 0x8, %o1
 478         srlx            %g3, %o3, %o5
 479         or              %o5, %g2, %o5
 480         EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
 481         add             %o0, 0x8, %o0
 482         bgu,pt          %icc, 1b
 483          sllx           %g3, %g1, %g2
 484 
 485         srl             %g1, 3, %g1
 486         andcc           %o2, 0x7, %o2
 487         be,pn           %icc, end_return
 488          add            %o1, %g1, %o1
 489         ba,pt           %xcc, 90f
 490          sub            %o0, %o1, %o3
 491 
 492         .align          64
 493         /* 0 < len < 16 */
 494 less_than_16:
 495         andcc           %o3, 0x3, %g0
 496         bne,pn          %XCC, 90f
 497          sub            %o0, %o1, %o3
 498 
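        /* dst, src and len are all multiples of four (their OR was tested
         * above), so the whole copy can be done a word at a time.
         */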
 499 1:
 500         subcc           %o2, 4, %o2
 501         EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
 502         EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
 503         bgu,pt          %XCC, 1b
 504          add            %o1, 4, %o1
 505 
 506 end_return:
 507         retl
 508          mov            EX_RETVAL(%o4), %o0
 509 
 510         .align          32
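        /* Common fallback: copy the remaining bytes one at a time. */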
 511 90:
 512         subcc           %o2, 1, %o2
 513         EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
 514         EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
 515         bgu,pt          %XCC, 90b
 516          add            %o1, 1, %o1
 517         retl
 518          mov            EX_RETVAL(%o4), %o0
 519 
 520         .size           FUNC_NAME, .-FUNC_NAME
