/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

        .text
        .align  5

ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart         )
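/*
 * On entry: r0 = destination, r1 = fill byte, r2 = number of bytes.
 * ip is used as the running destination pointer so that r0 can be
 * returned intact.
 */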
        ands    r3, r0, #3              @ 1 unaligned?
        mov     ip, r0                  @ preserve r0 as return value
        bne     6f                      @ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:      orr     r1, r1, r1, lsl #8
        orr     r1, r1, r1, lsl #16
        mov     r3, r1
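/*
 * The fill byte has been replicated into all four byte lanes of r1 and
 * copied to r3.  __memset32/__memset64 enter at 7: below with their
 * fill words already in r1/r3.
 */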
7:      cmp     r2, #16
        blt     4f

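/*
 * CALGN() is defined in asm/assembler.h and expands its argument only
 * when the kernel is built for a CPU where cacheline-aligning the
 * destination is worthwhile, so this picks the simple loop otherwise.
 */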
#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
        stmfd   sp!, {r8, lr}
UNWIND( .fnend              )
UNWIND( .fnstart            )
UNWIND( .save {r8, lr}      )
        mov     r8, r1
        mov     lr, r3
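/*
 * r1, r3, r8 and lr all hold the fill pattern, so each stmia below
 * stores 16 bytes and one loop iteration stores 64.
 */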

2:      subs    r2, r2, #64
        stmiage ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
        stmiage ip!, {r1, r3, r8, lr}
        stmiage ip!, {r1, r3, r8, lr}
        stmiage ip!, {r1, r3, r8, lr}
        bgt     2b
        ldmfdeq sp!, {r8, pc}           @ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
        tst     r2, #32
        stmiane ip!, {r1, r3, r8, lr}
        stmiane ip!, {r1, r3, r8, lr}
        tst     r2, #16
        stmiane ip!, {r1, r3, r8, lr}
        ldmfd   sp!, {r8, lr}
UNWIND( .fnend              )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

        stmfd   sp!, {r4-r8, lr}
UNWIND( .fnend                 )
UNWIND( .fnstart               )
UNWIND( .save {r4-r8, lr}      )
        mov     r4, r1
        mov     r5, r3
        mov     r6, r1
        mov     r7, r3
        mov     r8, r1
        mov     lr, r3
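/*
 * r1, r3-r8 and lr (eight registers) all hold the fill pattern,
 * giving one 32-byte cache line per stmia.
 */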

        cmp     r2, #96
        tstgt   ip, #31
        ble     3f

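/*
 * More than 96 bytes to go and ip not yet 32-byte aligned: store up to
 * the next 32-byte boundary first so the main loop below writes whole
 * cache lines.
 */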
        and     r8, ip, #31
        rsb     r8, r8, #32
        sub     r2, r2, r8
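        @ r8 = bytes to the next 32-byte boundary; shifting it left by 28
        @ puts its 16-byte bit in C, its 8-byte bit in N and its 4-byte bit
        @ at bit 30, so each chunk below is stored conditionally.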
        movs    r8, r8, lsl #(32 - 4)
        stmiacs ip!, {r4, r5, r6, r7}
        stmiami ip!, {r4, r5}
        tst     r8, #(1 << 30)
        mov     r8, r1
        strne   r1, [ip], #4

3:      subs    r2, r2, #64
        stmiage ip!, {r1, r3-r8, lr}
        stmiage ip!, {r1, r3-r8, lr}
        bgt     3b
        ldmfdeq sp!, {r4-r8, pc}

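        @ fewer than 64 bytes left; only bits of r2 are tested from here
        @ on, so there is no need to correct the count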
        tst     r2, #32
        stmiane ip!, {r1, r3-r8, lr}
        tst     r2, #16
        stmiane ip!, {r4-r7}
        ldmfd   sp!, {r4-r8, lr}
UNWIND( .fnend                 )

#endif

UNWIND( .fnstart            )
4:      tst     r2, #8
        stmiane ip!, {r1, r3}
        tst     r2, #4
        strne   r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:      tst     r2, #2
        strbne  r1, [ip], #1
        strbne  r1, [ip], #1
        tst     r2, #1
        strbne  r1, [ip], #1
        ret     lr

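/*
 * Unaligned destination: ip is misaligned by r3 (1, 2 or 3) bytes.
 * If at least 4 bytes remain, store 4 - r3 bytes to reach a word
 * boundary and rejoin the aligned code at 1:; otherwise finish at 5:.
 */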
6:      subs    r2, r2, #4              @ 1 do we have enough
        blt     5b                      @ 1 bytes to align with?
        cmp     r3, #2                  @ 1
        strblt  r1, [ip], #1            @ 1
        strble  r1, [ip], #1            @ 1
        strb    r1, [ip], #1            @ 1
        add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
        b       1b
UNWIND( .fnend   )
ENDPROC(memset)
ENDPROC(mmioset)

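/*
 * __memset32 copies its 32-bit pattern into r3 and falls through into
 * __memset64, which branches back to 7: inside memset with the two
 * fill words already in r1 and r3.
 */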
ENTRY(__memset32)
UNWIND( .fnstart         )
        mov     r3, r1                  @ copy r1 to r3 and fall into memset64
UNWIND( .fnend   )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart         )
        mov     ip, r0                  @ preserve r0 as return value
        b       7b                      @ jump into the middle of memset
UNWIND( .fnend   )
ENDPROC(__memset64)