/linux-4.1.27/arch/mn10300/lib/ |
H A D | memset.S | 26 mov d1,(16,sp) 39 extbu d1 40 mov_asl d1,d3,8,d1 41 or_asl d1,d3,8,d1 42 or_asl d1,d3,8,d1 43 or d3,d1 53 mov d1,(a0+) 54 mov d1,(a0+) 55 mov d1,(a0+) 56 mov d1,(a0+) 57 mov d1,(a0+) 58 mov d1,(a0+) 59 mov d1,(a0+) 60 mov d1,(a0+) 72 mov d1,(a0+) 73 mov d1,(a0+) 74 mov d1,(a0+) 75 mov d1,(a0+) 84 mov d1,(a0+) 86 mov d1,(a0+) 88 mov d1,(a0+) 94 mov (20,sp),d1 95 cmp d2,d1 113 movbu d1,(a0)
|
H A D | do_csum.S | 25 mov d1,d2 # count 28 clr d1 # accumulator 42 add d0,d1 51 add d0,d1 69 add d0,d1 70 addc e0,d1 71 addc e1,d1 72 addc e3,d1 77 addc d0,d1 78 addc e0,d1 79 addc e1,d1 80 addc e3,d1 81 addc +0,d1 97 add d0,d1 98 addc e0,d1 99 addc e1,d1 100 addc e3,d1 101 addc +0,d1 114 add d0,d1 115 addc +0,d1 118 add d0,d1 119 addc +0,d1 122 add d0,d1 123 addc +0,d1 137 add d0,d1 138 addc +0,d1 143 and d1,d0 144 asl +16,d1 145 add d1,d0
|
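The do_csum.S hits above end with the usual step of folding a 32-bit ones'-complement accumulator down to 16 bits. A minimal C sketch of that fold; this is the generic version, the exact instruction sequence in do_csum.S differs, and the helper name is mine:

/*
 * Fold a 32-bit ones'-complement accumulator into 16 bits by adding the
 * carries back into the low half.  Two rounds are enough: after the first
 * add the value fits in 17 bits, so the second add cannot carry again.
 */
static unsigned short csum_fold32(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)sum;
}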
H A D | memmove.S | 26 cmp d1,d0 32 mov d1,(16,sp) 35 add d1,d2,a1 # src end 42 or d0,d1,d3 59 mov (a1),d1 61 mov d1,(a0) 66 mov (a1),d1 68 mov d1,(a0) 73 mov (a1),d1 75 mov d1,(a0) 80 mov (a1),d1 82 mov d1,(a0) 97 mov (a1),d1 99 mov d1,(a0)
|
H A D | __lshrdi3.S | 38 asl a1,d1,a1 # get underflow from MSW -> LSW 40 or_lsr a1,d0,a0,d1 # insert underflow into LSW and 47 lsr a0,d1,d0 48 clr d1
|
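__lshrdi3.S (and its siblings __ashldi3.S and __ashrdi3.S below) implement 64-bit shifts with 32-bit registers by moving the "underflow" bits of the most significant word into the least significant word, as the comments in the hits say. A hedged C sketch of the logical-right-shift case; the struct and function names are illustrative, not the real libgcc ABI:

/* 64-bit value as two 32-bit words, shifted right logically by 'count'
 * (0 <= count < 64).  For 0 < count < 32 the bits that fall off the MSW
 * are OR-ed into the top of the LSW; for count >= 32 the MSW simply
 * becomes the LSW. */
struct u64_words { unsigned int lo, hi; };

static struct u64_words lshrdi3_sketch(struct u64_words v, unsigned int count)
{
	struct u64_words r;

	if (count == 0) {
		r = v;
	} else if (count < 32) {
		r.lo = (v.lo >> count) | (v.hi << (32 - count)); /* insert underflow */
		r.hi = v.hi >> count;
	} else {
		r.lo = v.hi >> (count - 32);
		r.hi = 0;
	}
	return r;
}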
H A D | memcpy.S | 26 mov d1,(16,sp) 29 mov d1,a1 # src 36 or d0,d1,d3 51 mov (a1+),d1 59 mov d1,(a0+) 79 mov (a1+),d1 83 mov d1,(a0+) 107 mov (20,sp),d1 108 cmp d2,d1
|
H A D | __ashldi3.S | 33 asl a0,d1 39 or_asl a1,d1,a0,d0 # insert overflow into MSW and 46 asl a0,d0,d1
|
H A D | __ashrdi3.S | 37 asl a1,d1,a1 # get underflow from MSW -> LSW 39 or_asr a1,d0,a0,d1 # insert underflow into LSW and 46 asr a0,d1,d0 48 mov mdr,d1
|
H A D | __ucmpdi2.S | 31 subc a1,d1 # may clear Z, never sets it
|
/linux-4.1.27/lib/ |
H A D | rational.c | 35 unsigned long n, d, n0, d0, n1, d1; rational_best_approximation() local 38 n0 = d1 = 0; rational_best_approximation() 42 if ((n1 > max_numerator) || (d1 > max_denominator)) { rational_best_approximation() 44 d1 = d0; rational_best_approximation() 56 t = d0 + a * d1; rational_best_approximation() 57 d0 = d1; rational_best_approximation() 58 d1 = t; rational_best_approximation() 61 *best_denominator = d1; rational_best_approximation()
|
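The rational.c hits show the continued-fraction walk used by rational_best_approximation(): two previous convergents are carried in (n0, d0) and (n1, d1) and updated with the next continued-fraction digit until a limit would be exceeded. A simplified C sketch of the same idea; it returns the last full convergent within the limits, whereas the kernel version also handles the boundary case more carefully, so details differ from lib/rational.c:

/* Find n/d close to num/den with n <= max_n and d <= max_d, by building
 * continued-fraction convergents n1/d1 until one would exceed the limits. */
static void best_ratio(unsigned long num, unsigned long den,
		       unsigned long max_n, unsigned long max_d,
		       unsigned long *best_n, unsigned long *best_d)
{
	unsigned long n = num, d = den;
	unsigned long n0 = 0, d0 = 1;	/* convergent k-2 */
	unsigned long n1 = 1, d1 = 0;	/* convergent k-1 */

	for (;;) {
		unsigned long a, t;

		if (d == 0)		/* exact: n1/d1 == num/den */
			break;
		a = n / d;		/* next continued-fraction digit */
		t = n0 + a * n1;	/* candidate numerator */
		if (t > max_n || d0 + a * d1 > max_d)
			break;		/* would exceed the caller's limits */
		n0 = n1; n1 = t;
		t = d0 + a * d1;
		d0 = d1; d1 = t;
		t = n % d;		/* continue with the remainder */
		n = d; d = t;
	}
	*best_n = n1;
	*best_d = d1;
}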
H A D | test-hexdump.c | 25 "4c", "d1", "19", "99", "43", "b1", "af", "0c",
|
H A D | checksum.c | 23 * specify d0 and d1 as scratch registers. Letting gcc
|
/linux-4.1.27/arch/x86/boot/compressed/ |
H A D | string.c | 6 int d0, d1, d2; memcpy() local 11 : "=&c" (d0), "=&D" (d1), "=&S" (d2) memcpy() 20 long d0, d1, d2; memcpy() local 25 : "=&c" (d0), "=&D" (d1), "=&S" (d2) memcpy()
|
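In the string.c hits, d0, d1 and d2 are dummy output operands: they exist only so that GCC knows %ecx, %edi and %esi are modified by the rep-string sequence (much as the lib/checksum.c comment above describes for d0/d1 as scratch registers on m68k). A hedged 32-bit sketch of the idiom; it follows the usual rep movs pattern rather than claiming to be the exact kernel routine:

static void *memcpy_rep(void *dest, const void *src, unsigned int n)
{
	int d0, d1, d2;		/* dummy outputs: tell gcc ecx/edi/esi are clobbered */

	asm volatile("rep ; movsl\n\t"	/* copy n/4 dwords */
		     "movl %4,%%ecx\n\t"
		     "rep ; movsb"	/* copy the remaining n%4 bytes */
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
		     : "memory");
	return dest;
}

The "0", "1" and "2" matching constraints pre-load the counted/indexed registers, and the "memory" clobber keeps GCC from caching values across the copy.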
/linux-4.1.27/drivers/video/fbdev/core/ |
H A D | cfbcopyarea.c | 107 unsigned long d0, d1; bitcpy() local 127 d1 = FB_READL(src + 1); bitcpy() 128 d1 = fb_rev_pixels_in_long(d1, bswapmask); bitcpy() 129 d0 = d0 >> right | d1 << left; bitcpy() 145 d1 = d0; bitcpy() 150 d1 = FB_READL(src++); bitcpy() 151 d1 = fb_rev_pixels_in_long(d1, bswapmask); bitcpy() 153 d0 = d0 >> right | d1 << left; bitcpy() 158 d0 = d1; bitcpy() 165 d1 = FB_READL(src++); bitcpy() 166 FB_WRITEL(d0 >> right | d1 << left, dst++); bitcpy() 167 d0 = d1; bitcpy() 168 d1 = FB_READL(src++); bitcpy() 169 FB_WRITEL(d0 >> right | d1 << left, dst++); bitcpy() 170 d0 = d1; bitcpy() 171 d1 = FB_READL(src++); bitcpy() 172 FB_WRITEL(d0 >> right | d1 << left, dst++); bitcpy() 173 d0 = d1; bitcpy() 174 d1 = FB_READL(src++); bitcpy() 175 FB_WRITEL(d0 >> right | d1 << left, dst++); bitcpy() 176 d0 = d1; bitcpy() 180 d1 = FB_READL(src++); bitcpy() 181 d1 = fb_rev_pixels_in_long(d1, bswapmask); bitcpy() 182 d0 = d0 >> right | d1 << left; bitcpy() 185 d0 = d1; bitcpy() 195 d1 = FB_READL(src); bitcpy() 196 d1 = fb_rev_pixels_in_long(d1, bitcpy() 198 d0 = d0 >> right | d1 << left; bitcpy() 280 unsigned long d0, d1; bitcpy_rev() local 299 d1 = FB_READL(src - 1); bitcpy_rev() 300 d1 = fb_rev_pixels_in_long(d1, bswapmask); bitcpy_rev() 301 d0 = d0 << left | d1 >> right; bitcpy_rev() 318 d1 = d0; bitcpy_rev() 322 d1 = FB_READL(src--); bitcpy_rev() 323 d1 = fb_rev_pixels_in_long(d1, bswapmask); bitcpy_rev() 324 d0 = d0 << left | d1 >> right; bitcpy_rev() 331 d0 = d1; bitcpy_rev() 339 d1 = FB_READL(src--); bitcpy_rev() 340 FB_WRITEL(d0 << left | d1 >> right, dst--); bitcpy_rev() 341 d0 = d1; bitcpy_rev() 342 d1 = FB_READL(src--); bitcpy_rev() 343 FB_WRITEL(d0 << left | d1 >> right, dst--); bitcpy_rev() 344 d0 = d1; bitcpy_rev() 345 d1 = FB_READL(src--); bitcpy_rev() 346 FB_WRITEL(d0 << left | d1 >> right, dst--); bitcpy_rev() 347 d0 = d1; bitcpy_rev() 348 d1 = FB_READL(src--); bitcpy_rev() 349 FB_WRITEL(d0 << left | d1 >> right, dst--); bitcpy_rev() 350 d0 = d1; bitcpy_rev() 354 d1 = FB_READL(src--); bitcpy_rev() 355 d1 = fb_rev_pixels_in_long(d1, bswapmask); bitcpy_rev() 356 d0 = d0 << left | d1 >> right; bitcpy_rev() 359 d0 = d1; bitcpy_rev() 369 d1 = FB_READL(src); bitcpy_rev() 370 d1 = fb_rev_pixels_in_long(d1, bitcpy_rev() 372 d0 = d0 << left | d1 >> right; bitcpy_rev()
|
H A D | syscopyarea.c | 76 unsigned long d0, d1; bitcpy() local 96 d1 = *src; bitcpy() 97 *dst = comp(d0 >> right | d1 << left, *dst, bitcpy() 117 d1 = *src++; bitcpy() 118 *dst = comp(d0 >> right | d1 << left, *dst, bitcpy() 120 d0 = d1; bitcpy() 129 d1 = *src++; bitcpy() 130 *dst++ = d0 >> right | d1 << left; bitcpy() 131 d0 = d1; bitcpy() 132 d1 = *src++; bitcpy() 133 *dst++ = d0 >> right | d1 << left; bitcpy() 134 d0 = d1; bitcpy() 135 d1 = *src++; bitcpy() 136 *dst++ = d0 >> right | d1 << left; bitcpy() 137 d0 = d1; bitcpy() 138 d1 = *src++; bitcpy() 139 *dst++ = d0 >> right | d1 << left; bitcpy() 140 d0 = d1; bitcpy() 144 d1 = *src++; bitcpy() 145 *dst++ = d0 >> right | d1 << left; bitcpy() 146 d0 = d1; bitcpy() 156 d1 = *src; bitcpy() 157 d0 = d0 >> right | d1 << left; bitcpy() 253 unsigned long d0, d1; bitcpy_rev() local 260 d1 = d0; bitcpy_rev() 264 d1 = *src--; bitcpy_rev() 265 d0 = d0 << left | d1 >> right; bitcpy_rev() 271 d0 = d1; bitcpy_rev() 279 d1 = *src--; bitcpy_rev() 280 *dst-- = d0 << left | d1 >> right; bitcpy_rev() 281 d0 = d1; bitcpy_rev() 282 d1 = *src--; bitcpy_rev() 283 *dst-- = d0 << left | d1 >> right; bitcpy_rev() 284 d0 = d1; bitcpy_rev() 285 d1 = *src--; bitcpy_rev() 286 *dst-- = d0 << left | d1 >> right; bitcpy_rev() 287 d0 = d1; bitcpy_rev() 288 d1 = *src--; bitcpy_rev() 289 *dst-- = d0 << left | d1 >> right; bitcpy_rev() 290 d0 = d1; bitcpy_rev() 294 d1 = *src--; bitcpy_rev() 295 *dst-- = d0 << left | d1 >> right; bitcpy_rev() 296 d0 = d1; bitcpy_rev() 306 d1 = *src; bitcpy_rev() 307 d0 = d0 << left | d1 >> right; bitcpy_rev()
|
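Both bitcpy() implementations above revolve around one idiom: when source and destination are misaligned by some bit shift, each destination word is spliced together from the previously read source word (d0) and the next one (d1), then the pair is rolled forward. A stripped-down C sketch of that inner loop, with no partial first/last-word masking and no pixel reversal; the function name is mine and the bit-order convention is the one implied by the d0 >> right | d1 << left expression in the hits:

/* Copy 'words' full destination words from a source stream that is offset
 * 'shift' bits ahead of the destination, 0 < shift < BITS_PER_LONG. */
static void copy_shifted(unsigned long *dst, const unsigned long *src,
			 unsigned int shift, unsigned long words)
{
	const unsigned int right = shift;
	const unsigned int left  = 8 * sizeof(unsigned long) - shift;
	unsigned long d0 = *src++;		/* last word read */
	unsigned long d1;

	while (words--) {
		d1 = *src++;				/* next source word */
		*dst++ = d0 >> right | d1 << left;	/* splice the halves */
		d0 = d1;				/* roll the pair forward */
	}
}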
/linux-4.1.27/arch/m68k/fpsp040/ |
H A D | round.S | 24 | d1(high word) contains rounding precision: 28 | d1(low word) contains rounding mode: 55 swap %d1 |set up d1.w for round prec. 64 movel (%a1,%d1.w*4),%a1 67 | Jump table indexed by rounding mode in d1.w. All following assumes 81 swap %d1 |set up d1 for round prec. 86 movel (%a1,%d1.w*4),%a1 94 swap %d1 |set up d1 for round prec. 99 movel (%a1,%d1.w*4),%a1 106 swap %d1 |set up d1 for round prec. 116 swap %d1 |set up d1 for round prec. 120 movel (%a1,%d1.w*4),%a1 126 | Input: d1 = PREC:ROUND 134 | Notes: the ext_grs uses the round PREC, and therefore has to swap d1 135 | prior to usage, and needs to restore d1 to original. 138 swap %d1 |have d1.w point to round precision 139 cmpiw #0,%d1 145 cmpiw #1,%d1 175 swap %d1 |restore d1 to original 262 movel (%a1,%d1.w*4),%a1 276 | bit of the mantissa (msb in d1). 279 | bit of the mantissa (msb in d1) unless this would mean the exponent 281 | exponent (d0) is set to 0 and the mantissa (d1 & d2) is not 312 movel LOCAL_HI(%a0),%d1 315 bfffo %d1{#0:#32},%d3 |get the distance to the first 1 327 lsll %d0,%d1 |shift ms mant by count 332 orl %d6,%d1 |shift the ls mant bits into the ms mant 336 movel %d1,LOCAL_HI(%a0) 366 movel LOCAL_LO(%a0),%d1 |d1 has ls mant 369 movel %d1,%d6 |copy ls mant into d6 374 lsrl %d6,%d1 |shift off all bits but those that will 376 orl %d1,%d0 |shift the ls mant bits into the ms mant 387 movel LOCAL_LO(%a0),%d1 |d1 has ls mant 389 bfffo %d1{#0:#32},%d7 |find first 1 in ls mant to d7) 391 lsll %d7,%d1 |shift first 1 to integer bit in ms mant 393 movel %d1,LOCAL_HI(%a0) |store exp 428 clrl %d1 |load d1 with ext threshold 431 tstb %d1 |check for inex 440 movew #dbl_thresh,%d1 |put copy of threshold in d1 441 movel %d1,%d0 |copy d1 into d0 448 tstb %d1 |check flag 453 movew #sgl_thresh,%d1 |put copy of threshold in d1 454 movel %d1,%d0 |copy d1 into d0 461 tstb %d1 |check flag 475 movew %d1,LOCAL_EX(%a0) |load exp with threshold 476 movel #0,LOCAL_HI(%a0) |set d1 = 0 (ms mantissa) 490 | d1{15:0} denormalization threshold 494 | d1.b inexact flag: all ones means inexact result 514 movel %d1,%d0 |copy the denorm threshold 515 subw LOCAL_EX(%a0),%d1 |d1 = threshold - uns exponent 516 bles no_lp |d1 <= 0 517 cmpw #32,%d1 518 blts case_1 |0 = d1 < 32 519 cmpw #64,%d1 520 blts case_2 |32 <= d1 < 64 521 bra case_3 |d1 >= 64 526 clrb %d1 |set no inex2 reported 530 | case (0<d1<32) 536 subw %d1,%d0 |d0 = 32 - d1 538 bfextu %d2{%d1:%d0},%d2 |d2 = new LOCAL_HI 539 bfextu LOCAL_HI(%a0){%d0:#32},%d1 |d1 = new LOCAL_LO 542 movel %d1,LOCAL_LO(%a0) |store new LOCAL_LO 543 clrb %d1 547 st %d1 559 | case (32<=d1<64) 564 subw #32,%d1 |d1 now between 0 and 32 566 subw %d1,%d0 |d0 = 32 - d1 568 bfextu %d2{%d1:%d0},%d2 |d2 = new LOCAL_LO 569 bfextu LOCAL_HI(%a0){%d0:#32},%d1 |d1 = new G,R,S 570 bftst %d1{#2:#30} 574 movel %d1,%d0 575 clrb %d1 578 movel %d1,%d0 580 st %d1 594 | d1 >= 64 Force the exponent to be the denorm threshold with the 604 cmpw #64,%d1 606 cmpw #65,%d1 609 | Shift value is out of range. Set d1 for inex2 flag and 615 st %d1 620 bfextu %d0{#2:#30},%d1 626 bfextu %d0{#1:#31},%d1 631 tstl %d1 637 clrb %d1 642 st %d1
|
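The dnrm_lp code in round.S splits the denormalization shift into three cases on the shift count in d1 (0 < d1 < 32, 32 <= d1 < 64, d1 >= 64) because the 64-bit mantissa lives in two 32-bit registers, and it records every bit shifted out so inexactness can still be reported. A rough C equivalent of that case split; the names and interface are mine, and the real routine also preserves guard/round bits for the later rounding step:

/* Shift the mantissa hi:lo right by 'count' bits; return non-zero if any
 * one-bits were shifted out (the "inexact"/sticky information). */
static int denorm_shift(unsigned int *hi, unsigned int *lo, unsigned int count)
{
	int sticky;

	if (count == 0)
		return 0;
	if (count < 32) {
		sticky = (*lo << (32 - count)) != 0;	/* bits lost from lo */
		*lo = (*lo >> count) | (*hi << (32 - count));
		*hi >>= count;
	} else if (count < 64) {
		sticky = *lo != 0 ||
			 (count > 32 && (*hi << (64 - count)) != 0);
		*lo = *hi >> (count - 32);
		*hi = 0;
	} else {					/* everything shifts out */
		sticky = (*hi | *lo) != 0;
		*hi = *lo = 0;
	}
	return sticky;
}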
H A D | sto_res.S | 38 moveql #7,%d1 39 subl %d0,%d1 |d1 = 7- (dest. reg. no.) 41 bsetl %d1,%d0 |d0 is dynamic register mask 71 moveql #7,%d1 72 subl %d0,%d1 |d1 = 7- (dest. reg. no.) 74 bsetl %d1,%d0 |d0 is dynamic register mask
|
H A D | scale.S | 51 clrl %d1 52 movew FPTEMP(%a6),%d1 |get dest exponent 54 andil #0x7fff,%d1 |strip sign 98 tstw %d1 |check for denorm 100 addl %d0,%d1 |add src to dest exp 102 cmpil #0x7fff,%d1 |test for overflow 106 orw #0x8000,%d1 108 movew %d1,FPTEMP(%a6) |result in FPTEMP 115 orw #0x8000,%d1 125 orw #0x8000,%d1 129 movew %d1,ETEMP(%a6) |input expected in ETEMP 136 movew %d1,FPTEMP(%a6) |result in FPTEMP 147 addl %d0,%d1 |add src to dest 153 orw #0x8000,%d1 155 movew %d1,FPTEMP(%a6) |result in FPTEMP 167 cmpiw #0xffc0,%d1 |lower bound for normalization 169 movew %d1,%d0 |use d0 for exp 171 movel FPTEMP_HI(%a6),%d1 176 lsrl #1,%d1 |while shifting the 197 movel %d1,FPTEMP_HI(%a6) 200 tstl %d1 302 movew FPTEMP_EX(%a6),%d1 319 addw %d0,%d1 |dst is normalized; add src 322 orl #0x8000,%d1 324 movemw %d1,FPTEMP_EX(%a6) 340 orl #0x8000,%d1 342 movemw %d1,ETEMP_EX(%a6) 357 orl #0x8000,%d1 364 movew %d1,ETEMP(%a6) |result in ETEMP 367 movew %d1,ETEMP(%a6) |result in ETEMP
|
H A D | x_snan.S | 45 moveml %d0-%d1/%a0-%a1,USER_DA(%a6) 72 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 105 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 133 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 164 movel ETEMP_HI(%a6),%d1 169 bsetl #30,%d1 176 movel %d1,-(%a7) |move the snan onto the stack 180 movel (%a7)+,%d1 |clear off stack 186 movel ETEMP_HI(%a6),%d1 191 bsetl #30,%d1 198 movel %d1,-(%a7) |move the snan onto the stack 202 movel (%a7)+,%d1 |clear off stack 208 movel ETEMP_HI(%a6),%d1 213 bsetl #30,%d1 219 movel %d1,-(%a7) |move the snan onto the stack 223 movel (%a7)+,%d1 |clear off stack 233 movel %d1,L_SCR1(%a6) |data 236 movel %d0,%d1 237 andil #0x7,%d1 |d1 now holds register number 244 orl #0x8,%d1 247 orl #0x10,%d1
|
H A D | x_fline.S | 49 moveml %d0-%d1/%a0-%a1,USER_DA(%a6) 59 bfextu %d0{#4:#3},%d1 |extract coprocessor id 60 cmpib #1,%d1 |check if cpid=1 62 bfextu %d0{#16:#6},%d1 63 cmpib #0x17,%d1 |check if it is an FMOVECR encoding 86 movel #4,%d1 87 addl %d1,EXC_PC(%a6) |increment stacked pc value to next inst 93 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 |restore data registers 97 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 |restore data registers
|
H A D | srem_mod.S | 158 movew SignY(%a6),%d1 159 eorl %d0,%d1 160 andil #0x00008000,%d1 161 movew %d1,SignQ(%a6) | ...sign(Q) obtained 163 movel -8(%a0),%d1 168 tstl %d1 172 movel %d2,%d1 176 bfffo %d1{#0:#32},%d6 177 lsll %d6,%d1 184 bfffo %d1{#0:#32},%d6 186 lsll %d6,%d1 192 orl %d7,%d1 | ...(D0,D1,D2) normalized 227 cmpl %d4,%d1 | ...compare hi(R) and hi(Y) 243 subxl %d4,%d1 | ...hi(R) - hi(Y) 254 roxll #1,%d1 | ...hi(R) = 2hi(R) + carry 267 tstl %d1 271 movel %d2,%d1 275 bfffo %d1{#0:#32},%d6 276 lsll %d6,%d1 283 bfffo %d1{#0:#32},%d6 286 lsll %d6,%d1 292 orl %d7,%d1 | ...(D0,D1,D2) normalized 301 movel %d1,R_Hi(%a6) 312 movel %d1,R_Hi(%a6) 339 cmpl %d4,%d1
|
H A D | gen_except.S | 76 bfextu CMDREG1B(%a6){#13:#1},%d1 |extract bit 2 77 lsll #5,%d1 78 swap %d1 79 orl %d1,%d0 |put it in the right place 80 bfextu CMDREG1B(%a6){#10:#3},%d1 |extract bit 3,4,5 81 lsll #2,%d1 82 swap %d1 83 orl %d1,%d0 |put them in the right place 155 bfffo %d0{#24:#8},%d1 |test for first set bit 157 subib #24,%d1 |normalize bit offset to 0-8 158 movel (%a0,%d1.w*4),%a0 |load routine address based 195 bfextu CMDREG1B(%a6){#13:#1},%d1 |extract bit 2 196 lsll #5,%d1 197 swap %d1 198 orl %d1,%d0 |put it in the right place 199 bfextu CMDREG1B(%a6){#10:#3},%d1 |extract bit 3,4,5 200 lsll #2,%d1 201 swap %d1 202 orl %d1,%d0 |put them in the right place 231 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 307 bfextu CMDREG1B(%a6){#13:#1},%d1 |extract bit 2 308 lsll #5,%d1 309 swap %d1 310 orl %d1,%d0 |put it in the right place 311 bfextu CMDREG1B(%a6){#10:#3},%d1 |extract bit 3,4,5 312 lsll #2,%d1 313 swap %d1 314 orl %d1,%d0 |put them in the right place 366 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 431 movel %d1,USER_D1(%a6) | save d1 432 movew #8,%d1 | place unimp frame instead 434 dbra %d1,loop40 435 movel USER_D1(%a6),%d1 | restore d1 446 movel %d1,USER_D1(%a6) | save d1 447 movew #10,%d1 | place unimp frame instead 449 dbra %d1,loop41 450 movel USER_D1(%a6),%d1 | restore d1
|
H A D | x_store.S | 154 movel LOCAL_HI(%a1),%d1 |get ms mantissa 155 bfextu %d1{#1:#20},%d1 |get upper 20 bits of ms 156 orl %d1,%d0 |put these bits in ms word of double 158 movel LOCAL_HI(%a1),%d1 |get ms mantissa 160 lsll %d0,%d1 |put lower 11 bits in upper bits 161 movel %d1,LOCAL_HI(%a1) |build lower lword in memory 162 movel LOCAL_LO(%a1),%d1 |get ls mantissa 163 bfextu %d1{#0:#21},%d0 |get ls 21 bits of double 222 movel LOCAL_HI(%a1),%d1 |get ms mantissa 223 bfextu %d1{#1:#23},%d1 |get upper 23 bits of ms 224 orl %d1,%d0 |put these bits in ms word of single 238 movel %d0,%d1 |d1 has size:reg formatted for reg_dest 239 orl #0x10,%d1 |reg_dest wants size added to reg#
|
H A D | x_unfl.S | 48 moveml %d0-%d1/%a0-%a1,USER_DA(%a6) 73 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 108 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 127 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 134 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 163 movew CMDREG3B(%a6),%d1 |check for fsgldiv or fsglmul 164 andiw #0x7f,%d1 165 cmpiw #0x30,%d1 |check for sgldiv 167 cmpiw #0x33,%d1 |check for sglmul 189 | Set up d1 for round subroutine d1 contains the PREC/MODE 192 bfextu FPCR_MODE(%a6){#2:#2},%d1 |get mode from FPCR 193 | ;mode in lower d1 194 addl (%a7)+,%d1 |merge PREC/MODE 204 | d1{01:00} has rounding mode 205 | d1{17:16} has rounding precision
|
H A D | scosh.S | 74 fmovel %d1,%FPCR 93 movel %d1,-(%sp) 94 clrl %d1 98 movel (%sp)+,%d1 103 fmovel %d1,%FPCR 116 movel %d1,-(%sp) 117 clrl %d1
|
H A D | do_func.S | 80 bfextu STAG(%a6){#0:#3},%d1 82 addl %d1,%d0 |combine for final index into table 86 movel USER_FPCR(%a6),%d1 87 andl #0xFF,%d1 | discard all but rounding mode/prec 276 bfextu DTAG(%a6){#0:#3},%d1 |dtag = d1 282 bclrl #2,%d1 284 lslb #2,%d1 285 orb %d0,%d1 |d1{3:2} = dtag, d1{1:0} = stag 292 movel (%a1,%d1.w*4),%a1 302 moveb ETEMP(%a6),%d1 |get sign of src op 304 eorb %d0,%d1 |get exor of sign bits 305 btstl #7,%d1 |test for sign 314 moveb ETEMP(%a6),%d1 |get sign of src op 316 eorb %d0,%d1 |get exor of sign bits 317 btstl #7,%d1 |test for sign 357 bfextu DTAG(%a6){#0:#3},%d1 |dtag = d1 362 bclr #2,%d1 364 lslb #2,%d1 365 orb %d0,%d1 |d1{3:2} = dtag, d1{1:0} = stag 372 movel (%a1,%d1.w*4),%a1 382 moveb ETEMP(%a6),%d1 |get sign of src op 384 eorb %d0,%d1 |get exor of sign bits 385 btstl #7,%d1 |test for sign 394 moveb ETEMP(%a6),%d1 |get sign of src op 396 eorb %d0,%d1 |get exor of sign bits 397 btstl #7,%d1 |test for sign 435 bfextu DTAG(%a6){#0:#3},%d1 |dtag in d1 437 bclrl #2,%d1 |alias denorm into norm 438 lslb #2,%d1 439 orb %d0,%d1 |d1{4:2} = dtag, d1{1:0} = stag 449 movel (%a1,%d1.w*4),%a1 |load a1 with label depending on tag
|
H A D | stanh.S | 105 movel %d1,-(%a7) 106 clrl %d1 109 movel (%a7)+,%d1 117 fmovel %d1,%FPCR |restore users exceptions 141 movel %d1,-(%a7) 142 clrl %d1 145 movel (%a7)+,%d1 157 fmovel %d1,%FPCR |restore users exceptions 165 fmovel %d1,%FPCR |restore users exceptions 179 fmovel %d1,%FPCR |restore users exceptions
|
H A D | x_unsupp.S | 45 moveml %d0-%d1/%a0-%a1,USER_DA(%a6) 64 movel USER_FPSR(%a6),%d1 67 andl #0xFF00FF,%d1 |clear all but aexcs and qbyte 70 andl #0x0FFF40FF,%d1 |clear all but cc's, snan bit, aexcs, and qbyte 72 movel %d1,USER_FPSR(%a6)
|
H A D | decbin.S | 125 | 2. Calculate absolute value of exponent in d1 by mul and add. 134 | (*) d1: accumulator for binary exponent 151 clrl %d1 |zero d1 for accumulator 153 mulul #TEN,%d1 |mul partial product by one digit place 155 addl %d0,%d1 |d1 = d1 + d0 160 negl %d1 |negate before subtracting 162 subl #16,%d1 |sub to compensate for shift of mant 164 negl %d1 |now negative, make pos and set SE 168 movel %d1,L_SCR1(%a6) |save exp in memory 180 | (*) d1: lword counter 191 moveql #1,%d1 |word counter, init to 1 206 movel (%a0,%d1.L*4),%d4 |load mantissa longword into d4 216 | then inc d1 (=2) to point to the next long word and reset d3 to 0 223 addql #1,%d1 |inc lw pointer in mantissa 224 cmpl #2,%d1 |test for last lw 270 | (*) d1: zero count 286 movel L_SCR1(%a6),%d1 |load expA for range test 287 cmpl #27,%d1 |test is with 27 291 clrl %d1 |zero count reg 295 addql #1,%d1 |inc zero count 299 addql #8,%d1 |and inc count by 8 309 addql #1,%d1 |inc digit counter 312 movel %d1,%d0 |copy counter to d2 313 movel L_SCR1(%a6),%d1 |get adjusted exp from memory 314 subl %d0,%d1 |subtract count from exp 316 negl %d1 |now its neg; get abs 343 clrl %d1 |clr counter 348 addql #8,%d1 |inc counter by 8 357 addql #1,%d1 |inc digit counter 360 movel %d1,%d0 |copy counter to d0 361 movel L_SCR1(%a6),%d1 |get adjusted exp from memory 362 subl %d0,%d1 |subtract count from exp 364 negl %d1 |take abs of exp and clr SE 394 | ( ) d1: exponent 401 | ( ) d1: exponent 454 movel %d1,%d0 |copy exp to d0;use d0
|
H A D | res_func.S | 207 movew LOCAL_EX(%a0),%d1 |compare exponent to double threshold 208 andw #0x7fff,%d1 209 cmpw #0x3c01,%d1 211 bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode 212 orl #0x00020000,%d1 |or in rprec (double) 221 movew LOCAL_EX(%a0),%d1 |check for overflow 222 andw #0x7fff,%d1 223 cmpw #0x43ff,%d1 231 movew LOCAL_EX(%a0),%d1 232 andw #0x7fff,%d1 233 cmpw #0x3f81,%d1 235 bfextu FPCR_MODE(%a6){#2:#2},%d1 236 orl #0x00010000,%d1 245 movew LOCAL_EX(%a0),%d1 246 andw #0x7FFF,%d1 247 cmpw #0x407f,%d1 268 bfextu FPCR_MODE(%a6){#2:#2},%d1 273 swap %d1 274 moveb FPCR_MODE(%a6),%d1 275 lsrb #6,%d1 276 swap %d1 279 orl #0x00020000,%d1 282 orl #0x00010000,%d1 500 bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode 503 cmpib #3,%d1 |check for rp 507 cmpib #2,%d1 |check for rm 515 bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode 518 cmpib #3,%d1 |check for rp 522 cmpib #2,%d1 |check for rm 706 bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg) 707 subl %d1,%d0 |subtract dest from src 712 movew FPTEMP_EX(%a6),%d1 713 eorw %d1,%d0 745 bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg) 746 subl %d1,%d0 |subtract src from dest 751 movew FPTEMP_EX(%a6),%d1 752 eorw %d1,%d0 810 bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg) 811 subl %d1,%d0 |subtract dest from src 819 bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg) 820 subl %d1,%d0 |subtract src from dest 835 movew FPTEMP_EX(%a6),%d1 836 eorw %d1,%d0 854 fmovel %fpsr,%d1 855 orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd 858 movel USER_FPCR(%a6),%d1 859 andil #0xc0,%d1 860 lsrl #6,%d1 |put precision in upper word 861 swap %d1 862 orl %d0,%d1 |set up for round call 881 fmovel %fpsr,%d1 882 orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd 886 movel USER_FPCR(%a6),%d1 887 andil #0xc0,%d1 888 lsrl #6,%d1 |put precision in upper word 889 swap %d1 890 orl %d0,%d1 |set up for round call 910 movel USER_FPCR(%a6),%d1 911 andil #0xc0,%d1 912 lsrl #6,%d1 |put precision in upper word 913 swap %d1 914 orl %d0,%d1 |set up for round call 936 movel USER_FPCR(%a6),%d1 937 andil #0xc0,%d1 938 lsrl #6,%d1 |put precision in upper word 939 swap %d1 940 orl %d0,%d1 |set up for round call 985 bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg) 986 subl %d1,%d0 |subtract src from dest 994 bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg) 995 subl %d1,%d0 |subtract dest from src 1010 movew FPTEMP_EX(%a6),%d1 1011 eorw %d1,%d0 1028 fmovel %fpsr,%d1 1029 orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd 1033 movel USER_FPCR(%a6),%d1 1034 andil #0xc0,%d1 1035 lsrl #6,%d1 |put precision in upper word 1036 swap %d1 1037 orl %d0,%d1 |set up for round call 1056 fmovel %fpsr,%d1 1057 orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd 1061 movel USER_FPCR(%a6),%d1 1062 andil #0xc0,%d1 1063 lsrl #6,%d1 |put precision in upper word 1064 swap %d1 1065 orl %d0,%d1 |set up for round call 1085 movel USER_FPCR(%a6),%d1 1086 andil #0xc0,%d1 1087 lsrl #6,%d1 |put precision in upper word 1088 swap %d1 1089 orl %d0,%d1 |set up for round call 1117 movel USER_FPCR(%a6),%d1 1118 andil #0xc0,%d1 1119 lsrl #6,%d1 |put precision in upper word 1120 swap %d1 1121 orl %d0,%d1 |set up for round call 1166 bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg) 1167 subl %d1,%d0 |subtract dest from src 1177 bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg) 1178 subl %d1,%d0 |subtract src from dest 1204 bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg) 1205 addl %d1,%d0 |subtract dest from src 1212 
bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg) 1213 addl %d1,%d0 |subtract src from dest 1226 movew FPTEMP_EX(%a6),%d1 1227 eorw %d1,%d0 1293 movel #7,%d1 1294 subl %d0,%d1 1296 bsetl %d1,%d0 1380 movel #7,%d1 1381 subl %d0,%d1 1383 bsetl %d1,%d0 1438 bfextu CMDREG1B(%a6){#3:#3},%d1 |put source specifier in d1 1440 movel %a0@(%d1:l:4),%a0 1505 movel USER_FPCR(%a6),%d1 |use user's rounding mode 1506 andil #0x30,%d1 1507 fmovel %d1,%fpcr 1509 fmovel %fpsr,%d1 1510 orl %d1,USER_FPSR(%a6) |capture inex2/ainex if set 1551 movel USER_FPCR(%a6),%d1 |use user's rounding mode 1552 andil #0x30,%d1 1553 fmovel %d1,%fpcr 1555 fmovel %fpsr,%d1 1556 orl %d1,USER_FPSR(%a6) |capture inex2/ainex if set 1596 movel USER_FPCR(%a6),%d1 |use user's rounding mode 1597 andil #0x30,%d1 1598 fmovel %d1,%fpcr 1600 fmovel %fpsr,%d1 1601 orl %d1,USER_FPSR(%a6) |capture inex2/ainex if set 1630 bfextu FPCR_MODE(%a6){#2:#2},%d1 | d1 is the rounding mode 1631 cmpb #2,%d1 1670 movel (%sp)+,%d1 |get size 1671 cmpil #4,%d1 |most frequent case 1673 cmpil #2,%d1 1680 movel %d0,%d1 |reg_dest expects size:reg in d1 1725 bfextu FPCR_MODE(%a6){#2:#2},%d1 |rnd mode in d1 1727 addl %d0,%d1 |d1 has PREC/MODE info 1736 bfextu CMDREG1B(%a6){#3:#3},%d1 |extract destination format 1740 cmpb #2,%d1 1774 movel #sgl_thresh,%d1 |load in single denorm threshold 1775 bsrl dpspdnrm |expects d1 to have the proper 1787 movel #dbl_thresh,%d1 |load in double precision threshold 1789 bsrl dpspdnrm |expects d1 to have proper 1891 | d1 is the single precision or double precision 1905 bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rounding mode 1906 swap %d1 1907 movew 2(%a7),%d1 |set rounding precision 1908 swap %d1 |at this point d1 has PREC/MODE info
|
H A D | sgetem.S | 95 movel LOCAL_LO(%a0),%d1 |load ls mant in d1 98 movel %d1,LOCAL_LO(%a0) |put ls mant back on stack 107 | ls mantissa part in d1 109 | shifted bits in d0 and d1 114 tstl %d1 |test if any bits set in ls mant 119 exg %d0,%d1 |shift ls mant to ms mant 129 movel %d1,%d6 |save ls mant in d6 130 lsll %d3,%d1 |shift ls mant by count
|
H A D | slog2.S | 120 movel %d1,-(%sp) 121 clrl %d1 133 movel %d1,-(%sp) 134 clrl %d1 147 movel %d1,-(%sp) 148 clrl %d1 172 fmovel %d1,%fpcr 177 movel %d1,-(%sp) 178 clrl %d1
|
H A D | smovecr.S | 46 bfextu USER_FPCR(%a6){#26:#2},%d1 |get rmode 64 tstb %d1 |offset is zero, check for rmode 66 cmpib #0x3,%d1 |check for rp 79 tstb %d1 |check for rmode 81 cmpib #0x3,%d1 |check for rp 100 tstb %d1 |check for rmode 102 cmpib #0x3,%d1 |check for rp 129 movel %d1,L_SCR1(%a6) |load mode for round call 130 bfextu USER_FPCR(%a6){#24:#2},%d1 |get precision 131 tstl %d1 |check if extended precision 142 swap %d1 |rnd prec in upper word of d1 143 addl L_SCR1(%a6),%d1 |merge rmode in low word of d1
|
H A D | ssinh.S | 90 moveml %a1/%d1,-(%sp) 92 clrl %d1 95 moveml (%sp)+,%a1/%d1 107 fmovel %d1,%fpcr 125 movel %d1,-(%sp) 126 clrl %d1
|
H A D | x_bsun.S | 32 moveml %d0-%d1/%a0-%a1,USER_DA(%a6) 39 moveml USER_DA(%a6),%d0-%d1/%a0-%a1
|
H A D | sacos.S | 58 fmovel %d1,%fpcr | ...load user's rounding mode/precision 82 movel %d1,-(%sp) |save original users fpcr 83 clrl %d1 103 fmovel %d1,%FPCR 110 fmovel %d1,%FPCR
|
H A D | binstr.S | 30 | d1. 50 | d1: temp used to form the digit 87 | A3. Multiply d2:d3 by 8; extract msbs into d1. 89 bfextu %d2{#0:#3},%d1 |copy 3 msbs of d2 into d1 95 | A4. Multiply d4:d5 by 2; add carry out to d1. 100 addxw %d6,%d1 |add in extend from mul by 2 108 addxw %d6,%d1 |add in extend from add to d1 118 addw %d1,%d7 |add in ls digit to d7b 126 movew %d1,%d7 |put new digit in d7b
|
H A D | x_operr.S | 67 moveml %d0-%d1/%a0-%a1,USER_DA(%a6) 101 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 109 moveql #4,%d1 |write size to d1 156 moveql #2,%d1 |write size to d1 183 moveql #1,%d1 |write size to d1 243 | This routine stores the data in d0, for the given size in d1, 249 movel %d1,-(%a7) |save register size 251 movel (%a7)+,%d1 259 cmpil #4,%d1 261 cmpil #2,%d1 268 movel %d0,%d1 |format size:reg for reg_dest 278 movel %d1,%d0 |put size in d0 312 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 338 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 349 moveml USER_DA(%a6),%d0-%d1/%a0-%a1
|
H A D | skeleton.S | 151 moveml %d0/%d1,USER_DA(%a6) 155 moveml USER_DA(%a6),%d0/%d1 418 movel %d1,-(%sp) |preserve d1 just in case 424 movel (%sp)+,%d1 457 movel %d1,-(%sp) |preserve d1 just in case 463 movel (%sp)+,%d1 476 movel #1,%d1 479 | movec %d1,%DFC | set dfc for user data space 481 moveb (%a0)+,%d1 | fetch supervisor byte 483 movesb %d1,(%a1)+ | write user byte 492 movel #1,%d1 494 | movec %d1,%SFC | set sfc for user space 497 movesb (%a0)+,%d1 | fetch user byte 498 moveb %d1,(%a1)+ | write supervisor byte
|
H A D | bugfix.S | 201 bfextu CMDREG3B(%a6){#6:#3},%d1 |get 3rd dest 202 cmpb %d0,%d1 209 cmpb %d0,%d1 |cmp 1st dest with 3rd dest 214 bfextu CMDREG2B(%a6){#6:#3},%d1 |get 2nd dest 215 cmpb %d0,%d1 |cmp 1st dest with 2nd dest 218 cmpb %d0,%d1 |cmp 1st src with 2nd dest 226 movel #7,%d1 227 subl %d0,%d1 229 bsetl %d1,%d0 271 movel (%a7)+,%d1 |save return address from bsr 318 movel %d1,-(%a7) |return bsr return address 320 movel #7,%d1 321 subl %d0,%d1 323 bsetl %d1,%d0 361 bfextu CMDREG3B(%a6){#6:#3},%d1 |get 3rd dest 362 cmpb %d0,%d1 |cmp 1st dest with 3rd dest 364 bfextu CMDREG2B(%a6){#6:#3},%d1 |get 2nd dest 365 cmpb %d0,%d1 |cmp 1st dest with 2nd dest 397 movel (%a7)+,%d1 |save return address from bsr 447 movel %d1,-(%a7)
|
H A D | get_op.S | 281 movew #0x3f81,%d1 |write bias for sgl denorm 284 movew #0x3c01,%d1 |write the bias for a dbl denorm 288 bset #15,%d1 |set sign bit because it is negative 290 movew %d1,ETEMP_EX(%a6) 293 movew CMDREG1B(%a6),%d1 294 andw #0xe3ff,%d1 |clear out source specifier 295 orw #0x0800,%d1 |set source specifier to extended prec 296 movew %d1,CMDREG1B(%a6) |write back to the command word in stack 318 movel #7,%d1 319 subl %d0,%d1 321 bsetl %d1,%d0 |set up d0 as a dynamic register mask 496 bfextu %d0{#20:#12},%d1 |get exponent into d1 497 cmpiw #0x0fff,%d1 |test for inf or NaN 499 bfextu %d0{#17:#3},%d1 |get SE and y bits into d1 500 cmpiw #7,%d1 |SE and y bits must be on for special 548 bfextu %d0{#20:#12},%d1 |get exponent into d1 549 cmpiw #0x0fff,%d1 |test for inf or NaN 551 bfextu %d0{#17:#3},%d1 |get SE and y bits into d1 552 cmpiw #7,%d1 |SE and y bits must be on for special 636 movew ETEMP_EX(%a6),%d1 637 andiw #0x7fff,%d1 |strip sign 638 cmpw #0x7fff,%d1 640 movel ETEMP_HI(%a6),%d1 642 movel ETEMP_LO(%a6),%d1 653 tstw %d1 662 cmpiw #0x3fff,%d1
|
H A D | sint.S | 78 bfextu FPCR_MODE(%a6){#2:#2},%d1 |use user's mode for rounding 81 movel %d1,L_SCR1(%a6) |save mode bits 133 bfextu FPCR_MODE(%a6){#2:#2},%d1 |use user's mode for rounding 136 movel %d1,L_SCR1(%a6) |save mode bits 211 movel #0x403e,%d1 |set threshold for dnrm_lp 218 movel L_SCR1(%a6),%d1 |use selected rounding mode 224 | ; d1 PREC/MODE info
|
H A D | satanh.S | 90 movel %d1,-(%sp) 91 clrl %d1
|
H A D | ssin.S | 280 fmovel %d1,%FPCR |restore users exceptions 344 fmovel %d1,%FPCR |restore users exceptions 363 fmovel %d1,%FPCR |restore users exceptions 371 fmovel %d1,%FPCR |restore users exceptions 649 movel %d1,-(%sp) |restore users mode & precision 650 andil #0xff,%d1 |mask off all exceptions 651 fmovel %d1,%FPCR 717 movel %d1,-(%sp) |save users mode & precision 718 andil #0xff,%d1 |mask off all exceptions 719 fmovel %d1,%FPCR 736 movel %d1,-(%sp) |save users mode & precision 737 andil #0xff,%d1 |mask off all exceptions 738 fmovel %d1,%FPCR
|
H A D | x_ovfl.S | 60 moveml %d0-%d1/%a0-%a1,USER_DA(%a6) 85 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 116 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 136 moveml USER_DA(%a6),%d0-%d1/%a0-%a1 143 moveml USER_DA(%a6),%d0-%d1/%a0-%a1
|
H A D | bindec.S | 113 | d1: scratch 188 movel 4(%a0),%d1 193 roxll #1,%d1 194 tstl %d1 205 movel %d1,4(%a0) 401 bfextu USER_FPCR(%a6){#26:#2},%d1 |get initial rmode bits 402 lslw #1,%d1 |put them in bits 2:1 403 addw %d5,%d1 |add in LAMBDA 404 lslw #1,%d1 |put them in bits 3:1 407 addql #1,%d1 |if neg, set bit 0 410 moveb (%a2,%d1),%d3 |load d3 with new rmode 586 moveml %d0-%d1/%a0-%a1,-(%a7) |save regs used by sintd0 601 moveml (%a7)+,%d0-%d1/%a0-%a1 |restore regs used by sint 728 | d1: x/0 773 clrl %d1 |put zero in d1 for addx 775 addxl %d1,%d2 |continue inc 801 | d1: x/scratch (0);shift count for final exponent packing 858 clrl %d1 |put zero in d1 for addx 860 addxl %d1,%d2 |continue inc 866 movel #12,%d1 |use d1 for shift count 867 lsrl %d1,%d0 |shift d0 right by 12 869 lsrl %d1,%d0 |shift d0 right by 12
|
H A D | sasin.S | 99 fmovel %d1,%FPCR
|
H A D | x_unimp.S | 46 moveml %d0-%d1/%a0-%a1,USER_DA(%a6)
|
H A D | satan.S | 336 fmovel %d1,%FPCR |restore users exceptions 387 fmovel %d1,%FPCR |restore users exceptions 396 fmovel %d1,%FPCR |restore users exceptions 447 fmovel %d1,%FPCR |restore users exceptions 467 fmovel %d1,%fpcr 473 fmovel %d1,%fpcr
|
H A D | util.S | 183 bfextu FPCR_MODE(%a6){#2:#2},%d1 |set round mode 184 orl %d1,%d0 |index is fmt:mode in d0{3:0} 513 bfextu FPCR_MODE(%a6){#2:#2},%d1 |set round mode 514 orl %d1,%d0 |index is fmt:mode in d0{3:0} 632 | d1: data size and dest register number formatted as: 672 movel %a0@(%d1:l:4),%a0
|
H A D | setox.S | 456 fmovel %d1,%fpcr 566 fmovel %d1,%FPCR | ...restore user FPCR 573 fmovel %d1,%FPCR 611 fmovel %d1,%FPCR 761 fmovel %d1,%FPCR 780 fmovel %d1,%FPCR 793 fmovel %d1,%FPCR 847 fmovel %d1,%FPCR 859 fmovel %d1,%FPCR
|
H A D | stwotox.S | 199 fmovel %d1,%fpcr | ...set user's rounding mode/precision 280 fmovel %d1,%FPCR |restore users exceptions 303 fmovel %d1,%fpcr | ...set user's rounding mode/precision 418 fmovel %d1,%FPCR |restore users exceptions
|
H A D | slogn.S | 417 fmovel %d1,%fpcr 466 fmovel %d1,%fpcr 491 fmovel %d1,%fpcr 582 fmovel %d1,%fpcr 588 fmovel %d1,%fpcr
|
H A D | fpsp.h | 20 | movem.l d0-d1/a0-a1,USER_DA(a6) 50 | movem.l USER_DA(a6),d0-d1/a0-a1 69 | The registers d0, d1, a0, a1 and fp0-fp3 are always saved and variable
|
H A D | stan.S | 246 fmovel %d1,%fpcr |restore users exceptions 286 fmovel %d1,%fpcr |restore users exceptions 300 fmovel %d1,%fpcr |restore users exceptions
|
H A D | kernel_ex.S | 293 fmovel %d1,%fpcr |restore user's rmode/prec 319 fmovel %d1,%fpcr |restore user's rmode/prec 338 fmovel %d1,%fpcr |restore user's rmode/prec 354 fmovel %d1,%fpcr |restore user's rmode/prec
|
/linux-4.1.27/arch/mn10300/kernel/ |
H A D | gdb-low.S | 36 mov d1,a1 39 movbu (a0),d1 41 movbu d1,(a1) 48 mov d1,a1 51 movhu (a0),d1 53 movhu d1,(a1) 60 mov d1,a1 63 mov (a0),d1 65 mov d1,(a1) 79 mov d1,a1 90 mov d1,a1 101 mov d1,a1
|
H A D | mn10300-watchdog-low.S | 39 lsr 2,d1 57 clr d1 61 inc d1 62 cmp NR_CPUS, d1
|
H A D | switch_to.S | 38 mov d1,a1 103 mov a2,d1 104 add 1,d1 117 mov a2,d1 118 add 1,d1 150 mov a2,d1 151 add 1,d1 165 mov a2,d1 166 add 1,d1
|
H A D | fpu-low.S | 166 mov epsw,d1 180 mov d1,epsw 201 mov (a1),d1 /* get epsw of user context */ 204 btst EPSW_nSL,d1 207 or EPSW_FE,d1 208 mov d1,(sp) 209 mov (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1 211 or __THREAD_HAS_FPU,d1 212 mov d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2) 230 btst __THREAD_USING_FPU,d1
|
H A D | head.S | 141 cmp 0xabadcafe,d1 201 mov 0xed00d1ed,d1 238 mov 0xfd00d1ed,d1 383 mov 0x1,d1 384 asl d0,d1 386 bset d1,(a0)
|
H A D | asm-offsets.c | 21 OFFSET(SIGCONTEXT_d1, sigcontext, d1); foo() 33 OFFSET(REG_D1, pt_regs, d1); foo()
|
H A D | gdb-io-serial-low.S | 79 mov 0x280,d1
|
H A D | gdb-io-ttysm-low.S | 81 mov 0x280,d1
|
H A D | entry.S | 144 mov d2,d1 154 mov (REG_D1,fp),d1 262 mov (BCBEAR),d1 # destination of erroneous access 324 mov a2,d1 # arg 1: exception number 382 mov a2,d1 389 mov a2,d1 # arg 1: exception number 419 mov d1,(d0) 420 mov 4,d1
|
H A D | signal.c | 49 COPY(d1); COPY(d2); COPY(d3); restore_sigcontext() 164 COPY(d0); COPY(d1); COPY(d2); COPY(d3); setup_sigcontext() 249 regs->d1 = (unsigned long) &frame->sc; setup_frame() 315 regs->d1 = (long) &frame->info; setup_rt_frame()
|
H A D | kgdb.c | 37 gdb_regs[GDB_FR_D1] = regs->d1; pt_regs_to_gdb_regs() 89 regs->d1 = gdb_regs[GDB_FR_D1]; gdb_regs_to_pt_regs()
|
H A D | traps.c | 307 printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", show_registers_only() 308 regs->d0, regs->d1, regs->d2, regs->d3); show_registers_only()
|
/linux-4.1.27/arch/mn10300/mm/ |
H A D | cache-inv-by-reg.S | 111 mov PAGE_SIZE,d1 113 add d0,d1 121 btst ~L1_CACHE_TAG_MASK,d1 142 add d2,d1 143 and L1_CACHE_TAG_MASK,d1 # round end addr up 146 sub d0,d1,d2 # calculate the total size 148 mov d1,a1 # A1 = end address 159 mov d2,d1 170 * d1 = alignsize; 172 mov L1_CACHE_BYTES,d1 173 lsr 1,d1 175 add d1,d1 176 mov d1,d0 193 * d1 = invsize 195 cmp d2,d1 197 mov d2,d1 203 and d1,e0 205 mov d0,d1 213 mov d1,d0 228 sub d1,d2 # decrease size remaining 229 add d1,a2 # increase next start address 265 mov PAGE_SIZE,d1 267 add d0,d1 285 sub d0,d1,d3 296 mov d1,a1 303 mov (a0),d1 304 btst ICIVCR_ICIVBSY,d1 312 mov d2,d1 313 add -1,d1 314 not d1 315 mov d1,(ICIVMR) 317 and d1,d0,a2 330 mov (a0),d1 331 btst ICIVCR_ICIVBSY,d1
|
H A D | cache-flush-by-reg.S | 58 LOCAL_CLI_SAVE(d1) 83 LOCAL_IRQ_RESTORE(d1) 106 mov PAGE_SIZE,d1 108 add d0,d1 124 sub d0,d1,d3 133 mov d1,a1 # a1 = end 140 mov (a0),d1 141 btst DCPGCR_DCPGBSY,d1 145 mov d2,d1 146 add -1,d1 147 not d1 # d1 = mask = ~(alignsize-1) 148 mov d1,(DCPGMR) 150 and d1,d0,a2 # a2 = mask & start 160 mov (a0),d1 161 btst DCPGCR_DCPGBSY,d1 194 LOCAL_CLI_SAVE(d1) 216 LOCAL_IRQ_RESTORE(d1) 239 mov PAGE_SIZE,d1 241 add d0,d1 257 sub d0,d1,d3 266 mov d1,a1 # a1 = end 273 mov (a0),d1 274 btst DCPGCR_DCPGBSY,d1 278 mov d2,d1 279 add -1,d1 280 not d1 # d1 = mask = ~(alignsize-1) 281 mov d1,(DCPGMR) 283 and d1,d0,a2 # a2 = mask & start 293 mov (a0),d1 294 btst DCPGCR_DCPGBSY,d1
|
H A D | cache-dbg-flush-by-reg.S | 41 mov epsw,d1 84 mov d1,epsw 101 movhu (CHCTR),d1 102 btst CHCTR_DCEN|CHCTR_ICEN,d1 104 btst CHCTR_DCEN,d1 110 mov d0,d1 118 or L1_CACHE_TAG_VALID,d1 121 mov d1,(L1_CACHE_WAYDISP*0,a0) 132 LOCAL_CLI_SAVE(d1) 156 LOCAL_IRQ_RESTORE(d1)
|
H A D | cache-flush-by-tag.S | 61 mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries 72 add -1,d1 96 mov PAGE_SIZE,d1 98 add d0,d1 106 sub d0,d1,a0 118 add L1_CACHE_BYTES,d1 # round end addr up 119 and L1_CACHE_TAG_MASK,d1 128 sub a1,d1 129 lsr L1_CACHE_SHIFT,d1 # total number of entries to 142 add -1,d1 166 mov L1_CACHE_NENTRIES,d1 176 add -1,d1 200 mov PAGE_SIZE,d1 202 add d0,d1 210 sub d0,d1,a0 221 add L1_CACHE_BYTES,d1 # round end addr up 222 and L1_CACHE_TAG_MASK,d1 232 sub a1,d1 233 lsr L1_CACHE_SHIFT,d1 # total number of entries to 243 add -1,d1
|
H A D | cache-dbg-inv-by-tag.S | 55 mov L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_VALID,d1 73 and d1,d0 79 and d1,d0 85 and d1,d0 91 and d1,d0
|
H A D | tlb-mn10300.S | 159 mov (MMUCTR),d1 160 mov d1,(MMUCTR) 167 movhu (MMUFCR_IFC),d1 168 or 0x00010000,d1 # it's an instruction fetch 196 mov (MMUCTR),d1 197 mov d1,(MMUCTR) 204 movhu (MMUFCR_DFC),d1
|
H A D | cache-inv-by-tag.S | 119 mov PAGE_SIZE,d1 121 add d0,d1 129 btst ~L1_CACHE_TAG_MASK,d1 146 add L1_CACHE_BYTES,d1 # round end addr up 147 and L1_CACHE_TAG_MASK,d1 162 sub a1,d1 163 lsr L1_CACHE_SHIFT,d1 # total number of entries to 247 add -1,d1 248 btst mn10300_local_dcache_inv_range_intr_interval,d1 269 add 0,d1
|
H A D | cache-dbg-flush-by-tag.S | 88 movhu (CHCTR),d1 89 btst CHCTR_DCEN|CHCTR_ICEN,d1 91 btst CHCTR_DCEN,d1
|
H A D | cache-dbg-inv-by-reg.S | 40 LOCAL_CLI_SAVE(d1) 65 LOCAL_IRQ_RESTORE(d1)
|
/linux-4.1.27/arch/m68k/kernel/ |
H A D | relocate_kernel.S | 15 movel %sp@(12),%d1 /* d1 = cpu_mmu_flags */ 20 btst #MMU_BASE + MMUB_68851,%d1 40 btst #MMU_BASE + MMUB_68030,%d1 43 btst #MMU_BASE + MMUB_68040,%d1 76 btst #MMU_BASE + MMUB_68060,%d1 121 btst #CPUB_68020,%d1 133 btst #CPUB_68030,%d1 136 btst #CPUB_68040,%d1 150 btst #CPUB_68060,%d1
|
H A D | sun3-head.S | 52 movsb %a0@, %d1 53 movsb %d1, %a1@ 54 cmpib #SUN3_INVALID_PMEG, %d1
|
H A D | head.S | 661 clrl %d1 668 bset #CPUTYPE_060,%d1 669 bset #CPUTYPE_0460,%d1 677 bset #CPUTYPE_040,%d1 678 bset #CPUTYPE_0460,%d1 686 bset #CPUTYPE_020,%d1 693 movel %d1,%a0@ 711 clrl %d1 717 * d1 := cacheable write-through 736 movel #_PAGE_CACHE040W,%d1 744 movel %d1,%a0@ 916 movel #16*1024*1024,%d1 917 cmpl %d0,%d1 919 lsrl #1,%d1 920 cmpl %d0,%d1 922 lsrl #1,%d1 925 movel %d1,%a0@ 926 mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\ 1203 moveq #ROOT_INDEX_SHIFT, %d1 1204 lsrl %d1,%d0 1208 moveq #PTR_INDEX_SHIFT, %d1 1209 lsrl %d1,%d0 1214 moveq #PAGE_INDEX_SHIFT, %d1 1215 lsrl %d1,%d0 1223 movel #((0x200000 >> 13)-1), %d1 1231 dbra %d1,1b 1519 func_start get_bi_record,%d1 1716 movel #0,%d1 1740 moveml %d0-%d1,%sp@- 1742 movel %d6,%d1 1743 andil #0xfffff4e0,%d1 1746 moveml %sp@+,%d0-%d1 1754 addq #1,%d1 1755 cmpib #128,%d1 1765 movel %d0,%d1 1766 andiw #0x8000,%d1 /* is it valid ? */ 1769 movel %d0,%d1 1770 andil #0xff000000,%d1 /* Get the address */ 1771 putn %d1 1773 putn %d1 1779 movel %d0,%d1 1780 andiw #0x8000,%d1 /* is it valid ? */ 1783 movel %d0,%d1 1784 andil #0xff000000,%d1 /* Get the address */ 1785 putn %d1 1787 putn %d1 1844 movel #0,%d1 1881 addq #1,%d1 1882 cmpib #128,%d1 1897 moveml %d0-%d1,%sp@- 1899 movel %d6,%d1 1902 moveml %sp@+,%d0-%d1 1931 cmpl %a0@(mmu_next_physical),%d1 1937 putn %d1 1939 movel %d1,%d6 1945 movel %d1,%a0@(mmu_next_physical) 1997 func_start mmu_map_tt,%d0/%d1/%a0,4 2010 bfffo ARG3{#0,#32},%d1 2011 cmpw #8,%d1 2017 lsrl %d1,%d0 2022 movel %d0,%d1 2023 notl %d1 2024 andl ARG2,%d1 2029 orl %d0,%d1 2030 clrw %d1 2037 orw #TTR_ENABLE+TTR_RWM+TTR_FCB2+TTR_FCM1+TTR_FCM0,%d1 2041 orw #TTR_CI,%d1 2044 dputn %d1 2045 movel %d1,%a0@ 2058 orw #TTR_ENABLE+TTR_KERNELMODE,%d1 2059 orl ARG4,%d1 2060 dputn %d1 2065 movec %d1,%itt0 2066 movec %d1,%dtt0 2068 1: movec %d1,%itt1 2069 movec %d1,%dtt1 2159 moveq #ROOT_INDEX_SHIFT,%d1 2160 lsrl %d1,%d0 2166 moveq #PTR_INDEX_SHIFT,%d1 2167 lsrl %d1,%d0 2174 moveq #PAGE_INDEX_SHIFT,%d1 2175 lsrl %d1,%d0 2202 moveq #ROOT_INDEX_SHIFT,%d1 2203 lsrl %d1,%d0 2242 moveq #PTR_INDEX_SHIFT,%d1 2243 lsrl %d1,%d0 2297 moveq #ROOT_INDEX_SHIFT,%d1 2298 lsrl %d1,%d0 2304 moveq #PTR_INDEX_SHIFT,%d1 2305 lsrl %d1,%d0 2312 moveq #PAGE_INDEX_SHIFT,%d1 2313 lsrl %d1,%d0 2333 func_start mmu_temp_map,%d0/%d1/%a0/%a1 2345 moveq #ROOT_INDEX_SHIFT,%d1 2346 lsrl %d1,%d0 2372 moveq #PTR_INDEX_SHIFT,%d1 2373 lsrl %d1,%d0 2402 moveq #PAGE_INDEX_SHIFT,%d1 2403 lsrl %d1,%d0 2817 func_start serial_init,%d0/%d1/%a0/%a1 2986 func_start serial_putc,%d0/%d1/%a0/%a1 3076 movel vme_brdtype,%d1 3078 cmpi #VME_TYPE_MVME162,%d1 3080 cmpi #VME_TYPE_MVME172,%d1 3102 moveb M167_PCTPIACKR,%d1 3103 moveb M167_CYLICR,%d1 3172 movel %pc@(L(uart_scode)),%d1 /* Check the scode */ 3174 cmpi #256,%d1 /* APCI scode? 
*/ 3176 1: moveb %a1@(DCALSR),%d1 /* Output to DCA */ 3177 andb #0x20,%d1 3181 2: moveb %a1@(APCILSR),%d1 /* Output to APCI */ 3182 andb #0x20,%d1 3220 moveq #7,%d1 3235 dbra %d1,1b 3252 moveml %d0/%d1/%a0,%sp@- 3256 movel %sp@(22),%d1 /* fetch parameter */ 3265 subq #1,%d1 3271 moveml %sp@+,%d0/%d1/%a0 3380 movel %d4,%d1 /* screen height in pixels */ 3381 divul %a0@(FONT_DESC_HEIGHT),%d1 /* d1 = max num rows */ 3384 movel %d1,%a2@(Lconsole_struct_num_rows) 3409 clrl %d1 /* start at the top */ 3417 console_plot_pixel %d0,%d1,%d2 3420 console_plot_pixel %d0,%d1,%d2 3425 addq #1,%d1 3530 movel %a0@(Lconsole_struct_num_rows),%d1 3531 cmpl %d1,%d0 3558 * d1 = cursor row to draw the character 3564 movel %a0@(Lconsole_struct_num_columns),%d1 3565 cmpl %d1,%d0 3569 movel %a0@(Lconsole_struct_cur_row),%d1 3585 * d1 = pixel coordinate, y 3593 mulul %a0@(FONT_DESC_HEIGHT),%d1 3606 console_plot_pixel %d0,%d1,%d2 3612 addq #1,%d1 3621 * d1 = y coordinate 3630 movel ARG2,%d1 3631 mulul %pc@(L(mac_rowbytes)),%d1 3637 * d1 = y coord 3648 addal %d1,%a1 3665 addal %d1,%a1 3687 addal %d1,%a1 3715 addal %d1,%a1 3729 addal %d1,%a1
|
H A D | entry.S | 101 | so that %d1 contains the previous task 104 movel %d1,%sp@- 111 movel %d1,%sp@- 194 GET_CURRENT(%d1) 195 movel %d1,%a1 369 /* Return previous task in %d1 */ 370 movel %curptr,%d1
|
H A D | asm-offsets.c | 47 DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); main()
|
H A D | process.c | 98 regs->d3, regs->d2, regs->d1); show_regs() 128 return do_fork(regs->d1, regs->d2, 0, m68k_clone()
|
/linux-4.1.27/arch/powerpc/math-emu/ |
H A D | udivmodti4.c | 11 _FP_W_TYPE d1, _FP_W_TYPE d0) _fp_udivmodti4() 16 if (d1 == 0) _fp_udivmodti4() 117 if (d1 > n1) _fp_udivmodti4() 132 count_leading_zeros (bm, d1); _fp_udivmodti4() 135 /* From (n1 >= d1) /\ (the most significant bit of d1 is set), _fp_udivmodti4() 142 n1 >= d1 (true due to program flow). */ _fp_udivmodti4() 143 if (n1 > d1 || n0 >= d0) _fp_udivmodti4() 146 sub_ddmmss (n1, n0, n1, n0, d1, d0); _fp_udivmodti4() 164 d1 = (d1 << bm) | (d0 >> b); _fp_udivmodti4() 170 udiv_qrnnd (q0, n1, n2, n1, d1); _fp_udivmodti4() 176 sub_ddmmss (m1, m0, m1, m0, d1, d0); _fp_udivmodti4() 9 _fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2], _FP_W_TYPE n1, _FP_W_TYPE n0, _FP_W_TYPE d1, _FP_W_TYPE d0) _fp_udivmodti4() argument
|
/linux-4.1.27/arch/m68k/math-emu/ |
H A D | fp_movem.S | 46 | %d1 contains the mask and count of the register list 58 2: move.l %d0,%d1 59 swap %d1 61 1: addq.w #1,%d1 | count the # of registers in 62 2: lsr.b #1,%d0 | register list and keep it in %d1 65 printf PDECODE,"#%08x",1,%d1 132 swap %d1 | get fpu register list 157 3: lsl.b #1,%d1 176 4: lsl.b #1,%d1 207 move.l %d0,%d1 208 swap %d1 210 1: addq.w #1,%d1 214 printf PDECODE,"#%08x",1,%d1 234 move.w %d0,%d1 240 move.w %d1,%d0 301 lea (%a0,%d1.w*4),%a1 306 swap %d1 307 lsl.l #5,%d1 312 3: lsl.b #1,%d1 319 swap %d1 | get fpu register list 320 lsl.l #5,%d1 331 3: lsl.b #1,%d1 341 4: lsl.b #1,%d1 350 moveq #3,%d1 351 and.l %d0,%d1 352 move.w %d1,(FPD_RND,FPDATA) 354 moveq #3,%d1 355 and.l %d0,%d1 356 move.w %d1,(FPD_PREC,FPDATA)
|
H A D | fp_util.S | 49 * something here. %d0 and %d1 is always usable, sometimes %d2 (or 99 clr.l %d1 | sign defaults to zero 103 moveq #1,%d1 105 1: swap %d1 106 move.w #0x3fff+31,%d1 107 move.l %d1,(%a0)+ | set sign / exp 130 move.l %d0,%d1 132 lsr.l #8,%d1 | exponent / sign 133 lsr.l #7,%d1 134 lsr.w #8,%d1 136 cmp.w #0xff,%d1 | NaN / Inf? 139 add.w #0x3fff-0x7f,%d1 | re-bias the exponent. 140 9: move.l %d1,(%a0)+ | fp_ext.sign, fp_ext.exp 152 move.w #0x4000-0x7f,%d1 157 move.w #0x7fff,%d1 163 getuser.l %a1@(4),%d1,fp_err_ua2,%a1 164 printf PCONV,"d2e: %p%p -> %p(",3,%d0,%d1,%a0 167 move.l %d0,%d1 170 lsr.l #8,%d1 | exponent / sign 171 lsr.l #7,%d1 172 lsr.w #5,%d1 174 cmp.w #0x7ff,%d1 | NaN / Inf? 177 add.w #0x3fff-0x3ff,%d1 | re-bias the exponent. 178 9: move.l %d1,(%a0)+ | fp_ext.sign, fp_ext.exp 181 move.l %d0,%d1 186 lsr.l %d0,%d1 187 or.l %d1,-(%a0) 197 move.w #0x4000-0x3ff,%d1 202 move.w #0x7fff,%d1 288 bfffo %d0{#0,#32},%d1 290 sub.w %d1,%d2 293 add.w %d2,%d1 296 move.w %d1,%d2 302 move.l %d0,%d1 307 lsr.l %d2,%d1 308 or.l %d1,-(%a0) 329 bfffo %d0{#0,#32},%d1 330 add.w #32,%d1 332 sub.w %d1,%d2 335 add.w %d2,%d1 338 ext.l %d1 343 lsl.l %d1,%d0 | lower lword needs only to be shifted 349 neg.w %d1 350 add.w #32,%d1 351 bfins %d0,(%a0){%d1,#8} 354 1: neg.w %d1 | lower lword is splitted between 355 bfins %d0,(%a0){%d1,#32} | higher and lower lword 359 move.w %d1,%d2 364 bfffo %d0{#24,#8},%d1 365 add.w #40,%d1 367 sub.w %d1,%d2 370 add.w %d2,%d1 373 ext.l %d1 375 cmp.w #8,%d1 378 sub.w #64,%d1 380 add.w #24,%d1 381 lsl.l %d1,%d0 384 1: neg.w %d1 385 bfins %d0,(%a0){%d1,#8} 387 2: lsl.l %d1,%d0 393 1: move.l %d0,%d1 | lower lword is splitted between 396 move.l %d1,%d0 512 bfffo %d0{#0,#32},%d1 514 sub.w %d1,%d2 517 add.w %d2,%d1 521 move.w %d1,%d2 527 move.l %d0,%d1 532 lsr.l %d2,%d1 533 or.l %d1,-(%a0) 554 bfffo %d0{#0,#32},%d1 555 add.w #32,%d1 557 sub.w %d1,%d2 560 add.w %d2,%d1 564 ext.l %d1 567 sub.w #32,%d1 569 lsl.l %d1,%d0 | lower lword needs only to be shifted 575 neg.w %d1 576 add.w #32,%d1 577 bfins %d0,(%a0){%d1,#8} 580 1: neg.w %d1 | lower lword is splitted between 581 bfins %d0,(%a0){%d1,#32} | higher and lower lword 585 move.w %d1,%d2 590 bfffo %d0{#24,#8},%d1 591 add.w #40,%d1 593 sub.w %d1,%d2 596 add.w %d2,%d1 599 ext.l %d1 601 cmp.w #8,%d1 604 sub.w #64,%d1 606 add.w #24,%d1 607 lsl.l %d1,%d0 610 1: neg.w %d1 611 bfins %d0,(%a0){%d1,#8} 613 2: lsl.l %d1,%d0 631 3: move.l %d0,%d1 632 lsl.l #1,%d1 676 moveq #21,%d1 677 lsl.l %d1,%d0 | keep 11 low bits. 713 | only happens when %d1 was 0xfffff800, it is now zero, so 747 move.l %d0,%d1 754 lsl.l %d2,%d1 755 or.l %d1,%d0 756 move.l (%a0),%d1 759 lsl.l %d2,%d1 769 move.l %d0,%d1 778 lsl.l %d2,%d1 901 | Overflow. This means that the %d1 was 0xffffff00, so it 933 move.l %d0,%d1 939 lsl.l %d2,%d1 1061 | Overflow. 
This means that the %d1 was 0xffffff00, so it 1148 move.l %d0,%d1 1149 lsl.l %d2,%d1 1177 tst.l %d1 | test guard bit 1181 lsl.l #1,%d1 | check low bits 1204 move.l (4,%a0),%d1 | test guard bit 1206 lsl.l #1,%d1 | check low bits 1284 move.l %d0,%d1 1291 lsl.l %d0,%d1 1295 or.l %d1,%d0 1299 getuser.l %a1@(0),%d1,fp_err_ua2,%a1 1300 printf PCONV,"%p(%08x%08x)\n",3,%a1,%d0,%d1 1309 move.l (%a0)+,%d1 1310 cmp.w #0x7fff,%d1 1312 move.w #0xff,%d1 1315 1: sub.w #0x3fff-0x7f,%d1 1318 clr.w %d1 1319 2: lsl.w #8,%d1 1320 lsl.l #7,%d1 1321 lsl.l #8,%d1 1324 or.l %d1,%d0 1379 moveq #FPSR_CC_Z-24,%d1 1386 2: moveq #FPSR_CC_NAN-24,%d1 1392 moveq #FPSR_CC_INF-24,%d1 1393 8: bset %d1,%d0 1431 move.l %d0,%d1 1432 lsr.l #4,%d1 1433 or.l %d0,%d1 1434 and.b #0x08,%d1 1437 or.l %d1,%d0 1438 move.l %d2,%d1 1439 lsr.l #4,%d1 1440 or.b #0xdf,%d1 1441 and.b %d1,%d0 1442 move.l %d2,%d1 1443 lsr.l #7,%d1 1444 and.b #0x80,%d1 1445 or.b %d1,%d0
|
H A D | fp_cond.S | 83 swap %d1 | test condition in %d1 84 tst.w %d1 86 move.l %d0,%d1 102 move.w %d0,%d1 103 swap %d1 116 move.w %d0,%d1 | save register nr 118 swap %d1 119 move.b %d1,%d0 120 swap %d1 165 swap %d1 166 putuser.b %d1,(%a0),fp_err_ua1,%a0 171 #define tst_NAN btst #24,%d1 172 #define tst_Z btst #26,%d1 173 #define tst_N btst #27,%d1 176 move.l (FPD_FPSR,FPDATA),%d1 181 bset #15,%d1 182 bset #7,%d1 183 move.l %d1,(FPD_FPSR,FPDATA)
|
H A D | fp_move.S | 48 move.w %d0,%d1 | store data size twice in %d1 49 swap %d1 | one can be trashed below 50 move.w %d0,%d1 54 move.b (%a0,%d1.w),%d0 75 move.w %d0,%d1 83 swap %d1 84 move.l %d1,%d2 86 jmp ([0f:w,%pc,%d1.w*4]) 98 move.l %d0,%d1 102 move.b %d1,%d0 103 move.w %d2,%d1 110 move.l %d0,%d1 114 move.w %d1,%d0 115 move.l %d2,%d1 123 move.w %d2,%d1 131 move.w %d2,%d1 181 move.l %d1,%d2 | save size
|
H A D | fp_decode.h | 48 * d1 - upper 16bit are reserved for caller 157 move.w %d2,%d1 | scale factor 158 rol.w #7,%d1 159 and.w #3,%d1 160 debug move.l "%d1,-(%sp)" 161 debug ext.l "%d1" 162 printf PDECODE,":%d",1,%d1 163 debug move.l "(%sp)+,%d1" 164 lsl.l %d1,%d0 231 move.w #6,%d1 236 cmp.w #6,%d1 239 move.w #4,%d1 252 lea (%a0,%d1.w*4),%a0 254 lea (%a0,%d1.w*8),%a0 257 add.w (fp_datasize,%d1.w*2),%a0 272 neg.w %d1 273 lea (%a0,%d1.w*4),%a0 274 add.w %d1,%d1 275 lea (%a0,%d1.w*4),%a0 279 neg.w %d1 280 lea (%a0,%d1.w*4),%a0 284 sub.w (fp_datasize,%d1.w*2),%a0
|
H A D | fp_scan.S | 46 | %d1 - operand size 106 | sort out fmovecr, keep data size in %d1 110 move.w %d0,%d1 | store data size twice in %d1 111 swap %d1 | one can be trashed below 112 move.w %d0,%d1 116 move.b (%a0,%d1.w),%d0 146 jmp ([0f:w,%pc,%d1.w*4]) 225 move.w (fp_datasize,%d1.w*2),%d0 229 movem.l %d0/%d1,-(%sp) 231 clr.l %d1 233 1: getuser.b (%a1)+,%d1,fp_err_ua1,%a1 234 printf PDECODE,"%02x",1,%d1 236 movem.l (%sp)+,%d0/%d1 244 swap %d1 246 jmp ([0f:w,%pc,%d1.w*4]) 348 move.l %d0,%d1 350 add.l %d1,%d0 353 addq.l #1,%d1 354 lsl.l %d1,%d0
|
H A D | fp_emu.h | 70 : "a1", "d1", "d2", "memory"); \ 114 : "a1", "d1", "d2", "memory"); \ 124 : "a1", "d1", "d2", "memory"); \
|
H A D | fp_entry.S | 131 printf PREGISTER,"{d1->%08x}",1,%d0 165 jmp ([0f:w,%pc,%d1.w*4]) 180 printf PREGISTER,"{d1<-%08x}",1,%d0
|
/linux-4.1.27/arch/m68k/lib/ |
H A D | mulsi3.S | 68 #define d1 REG (d1) define 91 movew sp@(6), d1 /* x1 -> d1 */ 92 muluw sp@(8), d1 /* x1*y0 */ 94 addw d1, d0 96 addl d1, d0 100 movew sp@(6), d1 /* x1 -> d1 */ 101 muluw sp@(10), d1 /* x1*y1 */ 102 addl d1, d0
|
H A D | modsi3.S | 68 #define d1 REG (d1) define 89 movel sp@(8), d1 /* d1 = divisor */ 91 movel d1, sp@- 95 movel sp@(8), d1 /* d1 = divisor */ 97 movel d1, sp@- 102 mulsl d1,d0 104 movel sp@(4), d1 /* d1 = dividend */ 105 subl d0, d1 /* d1 = a - (a/b)*b */ 106 movel d1, d0
|
H A D | umodsi3.S | 68 #define d1 REG (d1) define 89 movel sp@(8), d1 /* d1 = divisor */ 91 movel d1, sp@- 95 movel sp@(8), d1 /* d1 = divisor */ 97 movel d1, sp@- 102 mulsl d1,d0 104 movel sp@(4), d1 /* d1 = dividend */ 105 subl d0, d1 /* d1 = a - (a/b)*b */ 106 movel d1, d0
|
H A D | udivsi3.S | 68 #define d1 REG (d1) define 91 movel sp@(12), d1 /* d1 = divisor */ 94 cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */ 99 divu d1, d2 /* high quotient in lower word */ 103 divu d1, d2 /* low quotient */ 107 L3: movel d1, d2 /* use d2 as divisor backup */ 108 L4: lsrl IMM (1), d1 /* shift divisor */ 110 cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */ 112 divu d1, d0 /* now we have 16 bit divisor */ 118 movel d2, d1 119 mulu d0, d1 /* low part, 32 bits */ 125 addl d2, d1 /* add parts */ 127 cmpl sp@(8), d1 /* compare the sum with the dividend */ 141 movel a6@(12),d1 147 subl d1,d3
|
H A D | divsi3.S | 68 #define d1 REG (d1) define 92 movel sp@(12), d1 /* d1 = divisor */ 94 negl d1 109 L2: movel d1, sp@-
|
/linux-4.1.27/include/asm-generic/ |
H A D | xor.h | 109 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_2() local 111 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_2() 119 d1 ^= p2[1]; xor_32regs_2() 127 p1[1] = d1; xor_32regs_2() 146 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_3() local 148 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_3() 156 d1 ^= p2[1]; xor_32regs_3() 164 d1 ^= p3[1]; xor_32regs_3() 172 p1[1] = d1; xor_32regs_3() 192 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_4() local 194 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_4() 202 d1 ^= p2[1]; xor_32regs_4() 210 d1 ^= p3[1]; xor_32regs_4() 218 d1 ^= p4[1]; xor_32regs_4() 226 p1[1] = d1; xor_32regs_4() 247 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_5() local 249 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_5() 257 d1 ^= p2[1]; xor_32regs_5() 265 d1 ^= p3[1]; xor_32regs_5() 273 d1 ^= p4[1]; xor_32regs_5() 281 d1 ^= p5[1]; xor_32regs_5() 289 p1[1] = d1; xor_32regs_5() 440 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_p_2() local 446 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_p_2() 454 d1 ^= p2[1]; xor_32regs_p_2() 462 p1[1] = d1; xor_32regs_p_2() 487 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_p_3() local 494 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_p_3() 502 d1 ^= p2[1]; xor_32regs_p_3() 510 d1 ^= p3[1]; xor_32regs_p_3() 518 p1[1] = d1; xor_32regs_p_3() 545 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_p_4() local 553 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_p_4() 561 d1 ^= p2[1]; xor_32regs_p_4() 569 d1 ^= p3[1]; xor_32regs_p_4() 577 d1 ^= p4[1]; xor_32regs_p_4() 585 p1[1] = d1; xor_32regs_p_4() 614 register long d0, d1, d2, d3, d4, d5, d6, d7; xor_32regs_p_5() local 623 d1 = p1[1]; /* ... in bursts, if possible. */ xor_32regs_p_5() 631 d1 ^= p2[1]; xor_32regs_p_5() 639 d1 ^= p3[1]; xor_32regs_p_5() 647 d1 ^= p4[1]; xor_32regs_p_5() 655 d1 ^= p5[1]; xor_32regs_p_5() 663 p1[1] = d1; xor_32regs_p_5()
|
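The xor_32regs_*() hits all follow one template: read eight longs into d0..d7, XOR the corresponding words of each source block into them, write them back, and advance. Keeping eight values live encourages the compiler to issue loads and stores in bursts. A minimal two-block sketch of the pattern, assuming 'bytes' is a multiple of 8*sizeof(long); the function name is mine:

static void xor_two_blocks(unsigned long bytes, unsigned long *p1,
			   const unsigned long *p2)
{
	long lines = bytes / (sizeof(unsigned long) * 8);

	do {
		register long d0 = p1[0];	/* Pull the first eight words ... */
		register long d1 = p1[1];	/* ... in bursts, if possible.    */
		register long d2 = p1[2];
		register long d3 = p1[3];
		register long d4 = p1[4];
		register long d5 = p1[5];
		register long d6 = p1[6];
		register long d7 = p1[7];
		d0 ^= p2[0];
		d1 ^= p2[1];
		d2 ^= p2[2];
		d3 ^= p2[3];
		d4 ^= p2[4];
		d5 ^= p2[5];
		d6 ^= p2[6];
		d7 ^= p2[7];
		p1[0] = d0;			/* Store the result back. */
		p1[1] = d1;
		p1[2] = d2;
		p1[3] = d3;
		p1[4] = d4;
		p1[5] = d5;
		p1[6] = d6;
		p1[7] = d7;
		p1 += 8;
		p2 += 8;
	} while (--lines > 0);
}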
/linux-4.1.27/arch/x86/lib/ |
H A D | strstr_32.c | 5 int d0, d1; strstr() local 26 : "=a" (__res), "=&c" (d0), "=&S" (d1) strstr()
|
H A D | string_32.c | 19 int d0, d1, d2; strcpy() local 24 : "=&S" (d0), "=&D" (d1), "=&a" (d2) strcpy() 34 int d0, d1, d2, d3; strncpy() local 44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) strncpy() 54 int d0, d1, d2, d3; strcat() local 62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) strcat() 72 int d0, d1, d2, d3; strncat() local 85 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) strncat() 96 int d0, d1; strcmp() local 108 : "=a" (res), "=&S" (d0), "=&D" (d1) strcmp() 120 int d0, d1, d2; strncmp() local 133 : "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) strncmp()
|
H A D | memcpy_32.c | 25 int d0,d1,d2,d3,d4,d5; memmove() local 198 : "=&c" (d0), "=&S" (d1), "=&D" (d2), memmove()
|
H A D | usercopy_32.c | 103 int d0, d1; __copy_user_intel() local 198 : "=&c"(size), "=&D" (d0), "=&S" (d1) __copy_user_intel() 207 int d0, d1; __copy_user_zeroing_intel() local 290 : "=&c"(size), "=&D" (d0), "=&S" (d1) __copy_user_zeroing_intel() 304 int d0, d1; __copy_user_zeroing_intel_nocache() local 389 : "=&c"(size), "=&D" (d0), "=&S" (d1) __copy_user_zeroing_intel_nocache() 398 int d0, d1; __copy_user_intel_nocache() local 477 : "=&c"(size), "=&D" (d0), "=&S" (d1) __copy_user_intel_nocache()
|
H A D | mmx_32.c | 338 int d0, d1; slow_zero_page() local 344 : "=&c" (d0), "=&D" (d1) slow_zero_page() 360 int d0, d1, d2; slow_copy_page() local 365 : "=&c" (d0), "=&D" (d1), "=&S" (d2) slow_copy_page()
|
/linux-4.1.27/arch/m68k/ifpsp060/src/ |
H A D | fplsp.S | 565 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 579 mov.b %d0,%d1 586 tst.b %d1 591 cmpi.b %d1,&ZERO # is operand a ZERO? 596 cmpi.b %d1,&INF # is operand an INF? 601 cmpi.b %d1,&QNAN # is operand a QNAN? 612 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 622 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 636 mov.b %d0,%d1 643 mov.b %d1,STAG(%a6) 644 tst.b %d1 649 cmpi.b %d1,&ZERO # is operand a ZERO? 654 cmpi.b %d1,&INF # is operand an INF? 659 cmpi.b %d1,&QNAN # is operand a QNAN? 670 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 680 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 695 mov.b %d0,%d1 702 tst.b %d1 707 cmpi.b %d1,&ZERO # is operand a ZERO? 712 cmpi.b %d1,&INF # is operand an INF? 717 cmpi.b %d1,&QNAN # is operand a QNAN? 728 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 742 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 756 mov.b %d0,%d1 763 tst.b %d1 768 cmpi.b %d1,&ZERO # is operand a ZERO? 773 cmpi.b %d1,&INF # is operand an INF? 778 cmpi.b %d1,&QNAN # is operand a QNAN? 789 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 799 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 813 mov.b %d0,%d1 820 mov.b %d1,STAG(%a6) 821 tst.b %d1 826 cmpi.b %d1,&ZERO # is operand a ZERO? 831 cmpi.b %d1,&INF # is operand an INF? 836 cmpi.b %d1,&QNAN # is operand a QNAN? 847 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 857 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 872 mov.b %d0,%d1 879 tst.b %d1 884 cmpi.b %d1,&ZERO # is operand a ZERO? 889 cmpi.b %d1,&INF # is operand an INF? 894 cmpi.b %d1,&QNAN # is operand a QNAN? 905 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 919 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 933 mov.b %d0,%d1 940 tst.b %d1 945 cmpi.b %d1,&ZERO # is operand a ZERO? 950 cmpi.b %d1,&INF # is operand an INF? 955 cmpi.b %d1,&QNAN # is operand a QNAN? 966 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 976 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 990 mov.b %d0,%d1 997 mov.b %d1,STAG(%a6) 998 tst.b %d1 1003 cmpi.b %d1,&ZERO # is operand a ZERO? 1008 cmpi.b %d1,&INF # is operand an INF? 1013 cmpi.b %d1,&QNAN # is operand a QNAN? 1024 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1034 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1049 mov.b %d0,%d1 1056 tst.b %d1 1061 cmpi.b %d1,&ZERO # is operand a ZERO? 1066 cmpi.b %d1,&INF # is operand an INF? 1071 cmpi.b %d1,&QNAN # is operand a QNAN? 1082 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1096 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1110 mov.b %d0,%d1 1117 tst.b %d1 1122 cmpi.b %d1,&ZERO # is operand a ZERO? 1127 cmpi.b %d1,&INF # is operand an INF? 1132 cmpi.b %d1,&QNAN # is operand a QNAN? 1143 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1153 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1167 mov.b %d0,%d1 1174 mov.b %d1,STAG(%a6) 1175 tst.b %d1 1180 cmpi.b %d1,&ZERO # is operand a ZERO? 1185 cmpi.b %d1,&INF # is operand an INF? 1190 cmpi.b %d1,&QNAN # is operand a QNAN? 1201 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1211 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1226 mov.b %d0,%d1 1233 tst.b %d1 1238 cmpi.b %d1,&ZERO # is operand a ZERO? 1243 cmpi.b %d1,&INF # is operand an INF? 1248 cmpi.b %d1,&QNAN # is operand a QNAN? 1259 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1273 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1287 mov.b %d0,%d1 1294 tst.b %d1 1299 cmpi.b %d1,&ZERO # is operand a ZERO? 1304 cmpi.b %d1,&INF # is operand an INF? 1309 cmpi.b %d1,&QNAN # is operand a QNAN? 
1320 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1330 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1344 mov.b %d0,%d1 1351 mov.b %d1,STAG(%a6) 1352 tst.b %d1 1357 cmpi.b %d1,&ZERO # is operand a ZERO? 1362 cmpi.b %d1,&INF # is operand an INF? 1367 cmpi.b %d1,&QNAN # is operand a QNAN? 1378 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1388 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1403 mov.b %d0,%d1 1410 tst.b %d1 1415 cmpi.b %d1,&ZERO # is operand a ZERO? 1420 cmpi.b %d1,&INF # is operand an INF? 1425 cmpi.b %d1,&QNAN # is operand a QNAN? 1436 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1450 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1464 mov.b %d0,%d1 1471 tst.b %d1 1476 cmpi.b %d1,&ZERO # is operand a ZERO? 1481 cmpi.b %d1,&INF # is operand an INF? 1486 cmpi.b %d1,&QNAN # is operand a QNAN? 1497 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1507 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1521 mov.b %d0,%d1 1528 mov.b %d1,STAG(%a6) 1529 tst.b %d1 1534 cmpi.b %d1,&ZERO # is operand a ZERO? 1539 cmpi.b %d1,&INF # is operand an INF? 1544 cmpi.b %d1,&QNAN # is operand a QNAN? 1555 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1565 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1580 mov.b %d0,%d1 1587 tst.b %d1 1592 cmpi.b %d1,&ZERO # is operand a ZERO? 1597 cmpi.b %d1,&INF # is operand an INF? 1602 cmpi.b %d1,&QNAN # is operand a QNAN? 1613 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1627 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1641 mov.b %d0,%d1 1648 tst.b %d1 1653 cmpi.b %d1,&ZERO # is operand a ZERO? 1658 cmpi.b %d1,&INF # is operand an INF? 1663 cmpi.b %d1,&QNAN # is operand a QNAN? 1674 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1684 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1698 mov.b %d0,%d1 1705 mov.b %d1,STAG(%a6) 1706 tst.b %d1 1711 cmpi.b %d1,&ZERO # is operand a ZERO? 1716 cmpi.b %d1,&INF # is operand an INF? 1721 cmpi.b %d1,&QNAN # is operand a QNAN? 1732 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1742 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1757 mov.b %d0,%d1 1764 tst.b %d1 1769 cmpi.b %d1,&ZERO # is operand a ZERO? 1774 cmpi.b %d1,&INF # is operand an INF? 1779 cmpi.b %d1,&QNAN # is operand a QNAN? 1790 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1804 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1818 mov.b %d0,%d1 1825 tst.b %d1 1830 cmpi.b %d1,&ZERO # is operand a ZERO? 1835 cmpi.b %d1,&INF # is operand an INF? 1840 cmpi.b %d1,&QNAN # is operand a QNAN? 1851 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1861 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1875 mov.b %d0,%d1 1882 mov.b %d1,STAG(%a6) 1883 tst.b %d1 1888 cmpi.b %d1,&ZERO # is operand a ZERO? 1893 cmpi.b %d1,&INF # is operand an INF? 1898 cmpi.b %d1,&QNAN # is operand a QNAN? 1909 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1919 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1934 mov.b %d0,%d1 1941 tst.b %d1 1946 cmpi.b %d1,&ZERO # is operand a ZERO? 1951 cmpi.b %d1,&INF # is operand an INF? 1956 cmpi.b %d1,&QNAN # is operand a QNAN? 1967 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1981 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1995 mov.b %d0,%d1 2002 tst.b %d1 2007 cmpi.b %d1,&ZERO # is operand a ZERO? 2012 cmpi.b %d1,&INF # is operand an INF? 2017 cmpi.b %d1,&QNAN # is operand a QNAN? 
2028 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2038 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2052 mov.b %d0,%d1 2059 mov.b %d1,STAG(%a6) 2060 tst.b %d1 2065 cmpi.b %d1,&ZERO # is operand a ZERO? 2070 cmpi.b %d1,&INF # is operand an INF? 2075 cmpi.b %d1,&QNAN # is operand a QNAN? 2086 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2096 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2111 mov.b %d0,%d1 2118 tst.b %d1 2123 cmpi.b %d1,&ZERO # is operand a ZERO? 2128 cmpi.b %d1,&INF # is operand an INF? 2133 cmpi.b %d1,&QNAN # is operand a QNAN? 2144 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2158 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2172 mov.b %d0,%d1 2179 tst.b %d1 2184 cmpi.b %d1,&ZERO # is operand a ZERO? 2189 cmpi.b %d1,&INF # is operand an INF? 2194 cmpi.b %d1,&QNAN # is operand a QNAN? 2205 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2215 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2229 mov.b %d0,%d1 2236 mov.b %d1,STAG(%a6) 2237 tst.b %d1 2242 cmpi.b %d1,&ZERO # is operand a ZERO? 2247 cmpi.b %d1,&INF # is operand an INF? 2252 cmpi.b %d1,&QNAN # is operand a QNAN? 2263 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2273 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2288 mov.b %d0,%d1 2295 tst.b %d1 2300 cmpi.b %d1,&ZERO # is operand a ZERO? 2305 cmpi.b %d1,&INF # is operand an INF? 2310 cmpi.b %d1,&QNAN # is operand a QNAN? 2321 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2335 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2349 mov.b %d0,%d1 2356 tst.b %d1 2361 cmpi.b %d1,&ZERO # is operand a ZERO? 2366 cmpi.b %d1,&INF # is operand an INF? 2371 cmpi.b %d1,&QNAN # is operand a QNAN? 2382 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2392 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2406 mov.b %d0,%d1 2413 mov.b %d1,STAG(%a6) 2414 tst.b %d1 2419 cmpi.b %d1,&ZERO # is operand a ZERO? 2424 cmpi.b %d1,&INF # is operand an INF? 2429 cmpi.b %d1,&QNAN # is operand a QNAN? 2440 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2450 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2465 mov.b %d0,%d1 2472 tst.b %d1 2477 cmpi.b %d1,&ZERO # is operand a ZERO? 2482 cmpi.b %d1,&INF # is operand an INF? 2487 cmpi.b %d1,&QNAN # is operand a QNAN? 2498 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2512 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2526 mov.b %d0,%d1 2533 tst.b %d1 2538 cmpi.b %d1,&ZERO # is operand a ZERO? 2543 cmpi.b %d1,&INF # is operand an INF? 2548 cmpi.b %d1,&QNAN # is operand a QNAN? 2559 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2569 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2583 mov.b %d0,%d1 2590 mov.b %d1,STAG(%a6) 2591 tst.b %d1 2596 cmpi.b %d1,&ZERO # is operand a ZERO? 2601 cmpi.b %d1,&INF # is operand an INF? 2606 cmpi.b %d1,&QNAN # is operand a QNAN? 2617 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2627 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2642 mov.b %d0,%d1 2649 tst.b %d1 2654 cmpi.b %d1,&ZERO # is operand a ZERO? 2659 cmpi.b %d1,&INF # is operand an INF? 2664 cmpi.b %d1,&QNAN # is operand a QNAN? 2675 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2689 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2703 mov.b %d0,%d1 2710 tst.b %d1 2715 cmpi.b %d1,&ZERO # is operand a ZERO? 2720 cmpi.b %d1,&INF # is operand an INF? 2725 cmpi.b %d1,&QNAN # is operand a QNAN? 
2736 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2746 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2760 mov.b %d0,%d1 2767 mov.b %d1,STAG(%a6) 2768 tst.b %d1 2773 cmpi.b %d1,&ZERO # is operand a ZERO? 2778 cmpi.b %d1,&INF # is operand an INF? 2783 cmpi.b %d1,&QNAN # is operand a QNAN? 2794 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2804 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2819 mov.b %d0,%d1 2826 tst.b %d1 2831 cmpi.b %d1,&ZERO # is operand a ZERO? 2836 cmpi.b %d1,&INF # is operand an INF? 2841 cmpi.b %d1,&QNAN # is operand a QNAN? 2852 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2866 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2880 mov.b %d0,%d1 2887 tst.b %d1 2892 cmpi.b %d1,&ZERO # is operand a ZERO? 2897 cmpi.b %d1,&INF # is operand an INF? 2902 cmpi.b %d1,&QNAN # is operand a QNAN? 2913 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2923 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2937 mov.b %d0,%d1 2944 mov.b %d1,STAG(%a6) 2945 tst.b %d1 2950 cmpi.b %d1,&ZERO # is operand a ZERO? 2955 cmpi.b %d1,&INF # is operand an INF? 2960 cmpi.b %d1,&QNAN # is operand a QNAN? 2971 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2981 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2996 mov.b %d0,%d1 3003 tst.b %d1 3008 cmpi.b %d1,&ZERO # is operand a ZERO? 3013 cmpi.b %d1,&INF # is operand an INF? 3018 cmpi.b %d1,&QNAN # is operand a QNAN? 3029 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3043 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3057 mov.b %d0,%d1 3064 tst.b %d1 3069 cmpi.b %d1,&ZERO # is operand a ZERO? 3074 cmpi.b %d1,&INF # is operand an INF? 3079 cmpi.b %d1,&QNAN # is operand a QNAN? 3090 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3100 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3114 mov.b %d0,%d1 3121 mov.b %d1,STAG(%a6) 3122 tst.b %d1 3127 cmpi.b %d1,&ZERO # is operand a ZERO? 3132 cmpi.b %d1,&INF # is operand an INF? 3137 cmpi.b %d1,&QNAN # is operand a QNAN? 3148 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3158 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3173 mov.b %d0,%d1 3180 tst.b %d1 3185 cmpi.b %d1,&ZERO # is operand a ZERO? 3190 cmpi.b %d1,&INF # is operand an INF? 3195 cmpi.b %d1,&QNAN # is operand a QNAN? 3206 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3220 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3234 mov.b %d0,%d1 3241 tst.b %d1 3246 cmpi.b %d1,&ZERO # is operand a ZERO? 3251 cmpi.b %d1,&INF # is operand an INF? 3256 cmpi.b %d1,&QNAN # is operand a QNAN? 3267 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3277 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3291 mov.b %d0,%d1 3298 mov.b %d1,STAG(%a6) 3299 tst.b %d1 3304 cmpi.b %d1,&ZERO # is operand a ZERO? 3309 cmpi.b %d1,&INF # is operand an INF? 3314 cmpi.b %d1,&QNAN # is operand a QNAN? 3325 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3335 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3350 mov.b %d0,%d1 3357 tst.b %d1 3362 cmpi.b %d1,&ZERO # is operand a ZERO? 3367 cmpi.b %d1,&INF # is operand an INF? 3372 cmpi.b %d1,&QNAN # is operand a QNAN? 3383 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3397 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3411 mov.b %d0,%d1 3418 tst.b %d1 3423 cmpi.b %d1,&ZERO # is operand a ZERO? 3428 cmpi.b %d1,&INF # is operand an INF? 3433 cmpi.b %d1,&QNAN # is operand a QNAN? 
3444 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3454 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3468 mov.b %d0,%d1 3475 mov.b %d1,STAG(%a6) 3476 tst.b %d1 3481 cmpi.b %d1,&ZERO # is operand a ZERO? 3486 cmpi.b %d1,&INF # is operand an INF? 3491 cmpi.b %d1,&QNAN # is operand a QNAN? 3502 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3512 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3527 mov.b %d0,%d1 3534 tst.b %d1 3539 cmpi.b %d1,&ZERO # is operand a ZERO? 3544 cmpi.b %d1,&INF # is operand an INF? 3549 cmpi.b %d1,&QNAN # is operand a QNAN? 3560 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3574 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3588 mov.b %d0,%d1 3595 tst.b %d1 3600 cmpi.b %d1,&ZERO # is operand a ZERO? 3605 cmpi.b %d1,&INF # is operand an INF? 3610 cmpi.b %d1,&QNAN # is operand a QNAN? 3621 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3631 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3645 mov.b %d0,%d1 3652 mov.b %d1,STAG(%a6) 3653 tst.b %d1 3658 cmpi.b %d1,&ZERO # is operand a ZERO? 3663 cmpi.b %d1,&INF # is operand an INF? 3668 cmpi.b %d1,&QNAN # is operand a QNAN? 3679 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3689 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3704 mov.b %d0,%d1 3711 tst.b %d1 3716 cmpi.b %d1,&ZERO # is operand a ZERO? 3721 cmpi.b %d1,&INF # is operand an INF? 3726 cmpi.b %d1,&QNAN # is operand a QNAN? 3737 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3751 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3765 mov.b %d0,%d1 3772 tst.b %d1 3777 cmpi.b %d1,&ZERO # is operand a ZERO? 3782 cmpi.b %d1,&INF # is operand an INF? 3787 cmpi.b %d1,&QNAN # is operand a QNAN? 3798 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3808 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3822 mov.b %d0,%d1 3829 mov.b %d1,STAG(%a6) 3830 tst.b %d1 3835 cmpi.b %d1,&ZERO # is operand a ZERO? 3840 cmpi.b %d1,&INF # is operand an INF? 3845 cmpi.b %d1,&QNAN # is operand a QNAN? 3856 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3866 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3881 mov.b %d0,%d1 3888 tst.b %d1 3893 cmpi.b %d1,&ZERO # is operand a ZERO? 3898 cmpi.b %d1,&INF # is operand an INF? 3903 cmpi.b %d1,&QNAN # is operand a QNAN? 3914 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3928 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3942 mov.b %d0,%d1 3949 tst.b %d1 3954 cmpi.b %d1,&ZERO # is operand a ZERO? 3959 cmpi.b %d1,&INF # is operand an INF? 3964 cmpi.b %d1,&QNAN # is operand a QNAN? 3975 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3985 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3999 mov.b %d0,%d1 4006 mov.b %d1,STAG(%a6) 4007 tst.b %d1 4012 cmpi.b %d1,&ZERO # is operand a ZERO? 4017 cmpi.b %d1,&INF # is operand an INF? 4022 cmpi.b %d1,&QNAN # is operand a QNAN? 4033 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4043 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4058 mov.b %d0,%d1 4065 tst.b %d1 4070 cmpi.b %d1,&ZERO # is operand a ZERO? 4075 cmpi.b %d1,&INF # is operand an INF? 4080 cmpi.b %d1,&QNAN # is operand a QNAN? 4091 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4105 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4119 mov.b %d0,%d1 4126 tst.b %d1 4131 cmpi.b %d1,&ZERO # is operand a ZERO? 4136 cmpi.b %d1,&INF # is operand an INF? 4141 cmpi.b %d1,&QNAN # is operand a QNAN? 
4152 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4164 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4178 mov.b %d0,%d1 4185 mov.b %d1,STAG(%a6) 4186 tst.b %d1 4191 cmpi.b %d1,&ZERO # is operand a ZERO? 4196 cmpi.b %d1,&INF # is operand an INF? 4201 cmpi.b %d1,&QNAN # is operand a QNAN? 4212 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4224 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4239 mov.b %d0,%d1 4246 tst.b %d1 4251 cmpi.b %d1,&ZERO # is operand a ZERO? 4256 cmpi.b %d1,&INF # is operand an INF? 4261 cmpi.b %d1,&QNAN # is operand a QNAN? 4272 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4288 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4308 mov.l %d0,%d1 4318 tst.b %d1 4323 cmpi.b %d1,&ZERO # is operand a ZERO? 4328 cmpi.b %d1,&INF # is operand an INF? 4333 cmpi.b %d1,&QNAN # is operand a QNAN? 4344 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4354 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4374 mov.l %d0,%d1 4384 tst.b %d1 4389 cmpi.b %d1,&ZERO # is operand a ZERO? 4394 cmpi.b %d1,&INF # is operand an INF? 4399 cmpi.b %d1,&QNAN # is operand a QNAN? 4410 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4420 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4442 mov.l %d0,%d1 4452 tst.b %d1 4457 cmpi.b %d1,&ZERO # is operand a ZERO? 4462 cmpi.b %d1,&INF # is operand an INF? 4467 cmpi.b %d1,&QNAN # is operand a QNAN? 4478 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4492 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4512 mov.l %d0,%d1 4522 tst.b %d1 4527 cmpi.b %d1,&ZERO # is operand a ZERO? 4532 cmpi.b %d1,&INF # is operand an INF? 4537 cmpi.b %d1,&QNAN # is operand a QNAN? 4548 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4558 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4578 mov.l %d0,%d1 4588 tst.b %d1 4593 cmpi.b %d1,&ZERO # is operand a ZERO? 4598 cmpi.b %d1,&INF # is operand an INF? 4603 cmpi.b %d1,&QNAN # is operand a QNAN? 4614 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4624 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4646 mov.l %d0,%d1 4656 tst.b %d1 4661 cmpi.b %d1,&ZERO # is operand a ZERO? 4666 cmpi.b %d1,&INF # is operand an INF? 4671 cmpi.b %d1,&QNAN # is operand a QNAN? 4682 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4696 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4716 mov.l %d0,%d1 4726 tst.b %d1 4731 cmpi.b %d1,&ZERO # is operand a ZERO? 4736 cmpi.b %d1,&INF # is operand an INF? 4741 cmpi.b %d1,&QNAN # is operand a QNAN? 4752 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4762 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4782 mov.l %d0,%d1 4792 tst.b %d1 4797 cmpi.b %d1,&ZERO # is operand a ZERO? 4802 cmpi.b %d1,&INF # is operand an INF? 4807 cmpi.b %d1,&QNAN # is operand a QNAN? 4818 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4828 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4850 mov.l %d0,%d1 4860 tst.b %d1 4865 cmpi.b %d1,&ZERO # is operand a ZERO? 4870 cmpi.b %d1,&INF # is operand an INF? 4875 cmpi.b %d1,&QNAN # is operand a QNAN? 4886 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 5032 mov.l (%a0),%d1 # put exp in hi word 5033 mov.w 4(%a0),%d1 # fetch hi(man) 5034 and.l &0x7FFFFFFF,%d1 # strip sign 5036 cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)? 5041 cmp.l %d1,&0x4004BC7E # is |X| < 15 PI? 
5055 mov.l INT(%a6),%d1 # make a copy of N 5056 asl.l &4,%d1 # N *= 16 5057 add.l %d1,%a1 # tbl_addr = a1 + (N*16) 5068 mov.l INT(%a6),%d1 5069 add.l ADJN(%a6),%d1 # SEE IF D0 IS ODD OR EVEN 5070 ror.l &1,%d1 # D0 WAS ODD IFF D0 IS NEGATIVE 5071 cmp.l %d1,&0 5094 ror.l &1,%d1 5095 and.l &0x80000000,%d1 5097 eor.l %d1,X(%a6) # X IS NOW R'= SGN*R 5148 ror.l &1,%d1 5149 and.l &0x80000000,%d1 5154 eor.l %d1,X(%a6) # X IS NOW S'= SGN*S 5155 and.l &0x80000000,%d1 5159 or.l &0x3F800000,%d1 # D0 IS SGN IN SINGLE 5160 mov.l %d1,POSNEG1(%a6) 5195 cmp.l %d1,&0x3FFF8000 5199 mov.l ADJN(%a6),%d1 5200 cmp.l %d1,&0 5209 mov.b &FMOV_OP,%d1 # last inst is MOVE 5242 mov.l (%a0),%d1 5243 mov.w 4(%a0),%d1 5244 and.l &0x7FFFFFFF,%d1 # COMPACTIFY X 5246 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)? 5251 cmp.l %d1,&0x4004BC7E # |X| < 15 PI? 5267 mov.l INT(%a6),%d1 5268 asl.l &4,%d1 5269 add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2 5277 mov.l INT(%a6),%d1 5278 ror.l &1,%d1 5279 cmp.l %d1,&0 # D0 < 0 IFF N IS ODD 5294 mov.l %d1,%d2 5297 eor.l %d1,%d2 5307 ror.l &1,%d1 5308 and.l &0x80000000,%d1 5310 eor.l %d1,POSNEG1(%a6) 5320 eor.l %d1,SPRIME(%a6) 5370 ror.l &1,%d1 5371 and.l &0x80000000,%d1 5376 eor.l %d1,RPRIME(%a6) 5377 eor.l %d1,SPRIME(%a6) 5381 or.l &0x3F800000,%d1 5382 mov.l %d1,POSNEG1(%a6) 5432 cmp.l %d1,&0x3FFF8000 5445 mov.b &FMOV_OP,%d1 # last inst is MOVE 5474 cmp.l %d1,&0x7ffeffff # is arg dangerously large? 5504 mov.w INARG(%a6),%d1 5505 mov.l %d1,%a1 # save a copy of D0 5506 and.l &0x00007FFF,%d1 5507 sub.l &0x00003FFF,%d1 # d0 = K 5508 cmp.l %d1,&28 5511 sub.l &27,%d1 # d0 = L := K-27 5515 clr.l %d1 # d0 = L := 0 5526 sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI) 5550 mov.l %d1,%d2 # d2 = L 5557 add.l &0x00003FDD,%d1 5558 mov.w %d1,FP_SCR1_EX(%a6) 5562 mov.b ENDFLAG(%a6),%d1 5587 cmp.b %d1,&0 5600 mov.l ADJN(%a6),%d1 5601 cmp.l %d1,&4 5762 mov.l (%a0),%d1 5763 mov.w 4(%a0),%d1 5764 and.l &0x7FFFFFFF,%d1 5766 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)? 5770 cmp.l %d1,&0x4004BC7E # |X| < 15 PI? 5782 fmov.l %fp1,%d1 # CONVERT TO INTEGER 5784 asl.l &4,%d1 5785 add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2 5791 ror.l &5,%d1 5792 and.l &0x80000000,%d1 # D0 WAS ODD IFF D0 < 0 5797 cmp.l %d1,&0 5878 cmp.l %d1,&0x3FFF8000 5884 mov.b &FMOV_OP,%d1 # last inst is MOVE 5905 cmp.l %d1,&0x7ffeffff # is arg dangerously large? 5935 mov.w INARG(%a6),%d1 5936 mov.l %d1,%a1 # save a copy of D0 5937 and.l &0x00007FFF,%d1 5938 sub.l &0x00003FFF,%d1 # d0 = K 5939 cmp.l %d1,&28 5942 sub.l &27,%d1 # d0 = L := K-27 5946 clr.l %d1 # d0 = L := 0 5957 sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI) 5981 mov.l %d1,%d2 # d2 = L 5988 add.l &0x00003FDD,%d1 5989 mov.w %d1,FP_SCR1_EX(%a6) 5993 mov.b ENDFLAG(%a6),%d1 6018 cmp.b %d1,&0 6031 mov.l INT(%a6),%d1 6032 ror.l &1,%d1 6244 mov.l (%a0),%d1 6245 mov.w 4(%a0),%d1 6247 and.l &0x7FFFFFFF,%d1 6249 cmp.l %d1,&0x3FFB8000 # |X| >= 1/16? 6254 cmp.l %d1,&0x4002FFFF # |X| < 16 ? 6296 mov.l %d1,%d2 # THE EXP AND 16 BITS OF X 6297 and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION 6301 add.l %d2,%d1 # THE 7 BITS IDENTIFYING F 6302 asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|) 6304 add.l %d1,%a1 # ADDRESS OF ATAN(|F|) 6308 mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. 
AGAIN 6309 and.l &0x80000000,%d1 # SIGN(F) 6310 or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|) 6346 cmp.l %d1,&0x3FFF8000 6356 cmp.l %d1,&0x3FD78000 6399 mov.b &FMOV_OP,%d1 # last inst is MOVE 6407 cmp.l %d1,&0x40638000 6526 mov.l (%a0),%d1 6527 mov.w 4(%a0),%d1 6528 and.l &0x7FFFFFFF,%d1 6529 cmp.l %d1,&0x3FFF8000 6537 cmp.l %d1,&0x3FD78000 6567 mov.l (%a0),%d1 6568 and.l &0x80000000,%d1 # SIGN BIT OF X 6569 or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT 6570 mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT 6578 mov.b &FMOV_OP,%d1 # last inst is MOVE 6627 mov.l (%a0),%d1 # pack exp w/ upper 16 fraction 6628 mov.w 4(%a0),%d1 6629 and.l &0x7FFFFFFF,%d1 6630 cmp.l %d1,&0x3FFF8000 7105 mov.l (%a0),%d1 # load part of input X 7106 and.l &0x7FFF0000,%d1 # biased expo. of X 7107 cmp.l %d1,&0x3FBE0000 # 2^(-65) 7113 mov.w 4(%a0),%d1 # expo. and partial sig. of |X| 7114 cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits 7127 fmov.l %fp0,%d1 # N = int( X * 64/log2 ) 7129 fmov.l %d1,%fp0 # convert to floating-format 7131 mov.l %d1,L_SCR1(%a6) # save N temporarily 7132 and.l &0x3F,%d1 # D0 is J = N mod 64 7133 lsl.l &4,%d1 7134 add.l %d1,%a1 # address of 2^(J/64) 7135 mov.l L_SCR1(%a6),%d1 7136 asr.l &6,%d1 # D0 is M 7137 add.w &0x3FFF,%d1 # biased expo. of 2^(M) 7169 mov.w %d1,SCALE(%a6) # SCALE is 2^(M) in extended 7193 mov.l ADJFLAG(%a6),%d1 7196 tst.l %d1 7202 mov.b &FMUL_OP,%d1 # last inst is MUL 7215 cmp.l %d1,&0x400CB27C # 16480 log2 7224 fmov.l %fp0,%d1 # N = int( X * 64/log2 ) 7226 fmov.l %d1,%fp0 # convert to floating-format 7227 mov.l %d1,L_SCR1(%a6) # save N temporarily 7228 and.l &0x3F,%d1 # D0 is J = N mod 64 7229 lsl.l &4,%d1 7230 add.l %d1,%a1 # address of 2^(J/64) 7231 mov.l L_SCR1(%a6),%d1 7232 asr.l &6,%d1 # D0 is K 7233 mov.l %d1,L_SCR1(%a6) # save K temporarily 7234 asr.l &1,%d1 # D0 is M1 7235 sub.l %d1,L_SCR1(%a6) # a1 is M 7236 add.w &0x3FFF,%d1 # biased expo. of 2^(M1) 7237 mov.w %d1,ADJSCALE(%a6) # ADJSCALE := 2^(M1) 7240 mov.l L_SCR1(%a6),%d1 # D0 is M 7241 add.w &0x3FFF,%d1 # biased expo. of 2^(M) 7269 mov.l (%a0),%d1 # load part of input X 7270 and.l &0x7FFF0000,%d1 # biased expo. of X 7271 cmp.l %d1,&0x3FFD0000 # 1/4 7278 mov.w 4(%a0),%d1 # expo. and partial sig. of |X| 7279 cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits 7291 fmov.l %fp0,%d1 # N = int( X * 64/log2 ) 7293 fmov.l %d1,%fp0 # convert to floating-format 7295 mov.l %d1,L_SCR1(%a6) # save N temporarily 7296 and.l &0x3F,%d1 # D0 is J = N mod 64 7297 lsl.l &4,%d1 7298 add.l %d1,%a1 # address of 2^(J/64) 7299 mov.l L_SCR1(%a6),%d1 7300 asr.l &6,%d1 # D0 is M 7301 mov.l %d1,L_SCR1(%a6) # save a copy of M 7311 add.w &0x3FFF,%d1 # D0 is biased expo. of 2^M 7330 mov.w %d1,SC(%a6) # SC is 2^(M) in extended 7335 mov.l L_SCR1(%a6),%d1 # D0 is M 7336 neg.w %d1 # D0 is -M 7338 add.w &0x3FFF,%d1 # biased expo. of 2^(-M) 7343 or.w &0x8000,%d1 # signed/expo. 
of -2^(-M) 7344 mov.w %d1,ONEBYSC(%a6) # OnebySc is -2^(-M) 7363 mov.l L_SCR1(%a6),%d1 # retrieve M 7364 cmp.l %d1,&63 7374 cmp.l %d1,&-3 7397 cmp.l %d1,&0x3FBE0000 # 2^(-65) 7402 cmp.l %d1,&0x00330000 # 2^(-16312) 7410 mov.b &FADD_OP,%d1 # last inst is ADD 7423 mov.b &FMUL_OP,%d1 # last inst is MUL 7480 mov.l (%a0),%d1 7481 cmp.l %d1,&0 7617 mov.l (%a0),%d1 7618 mov.w 4(%a0),%d1 7619 and.l &0x7FFFFFFF,%d1 7620 cmp.l %d1,&0x400CB167 7641 mov.b &FADD_OP,%d1 # last inst is ADD 7646 cmp.l %d1,&0x400CB2B3 7662 mov.b &FMUL_OP,%d1 # last inst is MUL 7729 mov.l (%a0),%d1 7730 mov.w 4(%a0),%d1 7731 mov.l %d1,%a1 # save (compacted) operand 7732 and.l &0x7FFFFFFF,%d1 7733 cmp.l %d1,&0x400CB167 7754 mov.l %a1,%d1 7755 and.l &0x80000000,%d1 7756 or.l &0x3F000000,%d1 7758 mov.l %d1,-(%sp) 7761 mov.b &FMUL_OP,%d1 # last inst is MUL 7766 cmp.l %d1,&0x400CB2B3 7772 mov.l %a1,%d1 7773 and.l &0x80000000,%d1 7774 or.l &0x7FFB0000,%d1 7775 mov.l %d1,-(%sp) # EXTENDED FMT 7787 mov.b &FMUL_OP,%d1 # last inst is MUL 7855 mov.l (%a0),%d1 7856 mov.w 4(%a0),%d1 7857 mov.l %d1,X(%a6) 7858 and.l &0x7FFFFFFF,%d1 7859 cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)? 7861 cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2? 7867 mov.l X(%a6),%d1 7868 mov.l %d1,SGN(%a6) 7869 and.l &0x7FFF0000,%d1 7870 add.l &0x00010000,%d1 # EXPONENT OF 2|X| 7871 mov.l %d1,X(%a6) 7885 mov.l SGN(%a6),%d1 7887 eor.l %d1,V(%a6) 7894 cmp.l %d1,&0x3FFF8000 7897 cmp.l %d1,&0x40048AA1 7904 mov.l X(%a6),%d1 7905 mov.l %d1,SGN(%a6) 7906 and.l &0x7FFF0000,%d1 7907 add.l &0x00010000,%d1 # EXPO OF 2|X| 7908 mov.l %d1,X(%a6) # Y = 2|X| 7910 mov.l SGN(%a6),%d1 7920 mov.l SGN(%a6),%d1 7923 eor.l &0xC0000000,%d1 # -SIGN(X)*2 7924 fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT 7927 mov.l SGN(%a6),%d1 7928 or.l &0x3F800000,%d1 # SGN 7929 fmov.s %d1,%fp0 # SGN IN SGL FMT 7932 mov.b &FADD_OP,%d1 # last inst is ADD 7938 mov.b &FMOV_OP,%d1 # last inst is MOVE 7944 mov.l X(%a6),%d1 7945 and.l &0x80000000,%d1 7946 or.l &0x3F800000,%d1 7947 fmov.s %d1,%fp0 7948 and.l &0x80000000,%d1 7949 eor.l &0x80800000,%d1 # -SIGN(X)*EPS 7952 fadd.s %d1,%fp0 8221 mov.l (%a0),%d1 8222 mov.w 4(%a0),%d1 8228 cmp.l %d1,&0 # CHECK IF X IS NEGATIVE 8231 cmp.l %d1,&0x3ffef07d # IS X < 15/16? 8233 cmp.l %d1,&0x3fff8841 # IS X > 17/16? 8249 asr.l &8,%d1 8250 asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X 8251 sub.l &0x3FFF,%d1 # THIS IS K 8252 add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM. 8254 fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT 8261 mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F 8262 and.l &0x7E000000,%d1 8263 asr.l &8,%d1 8264 asr.l &8,%d1 8265 asr.l &4,%d1 # SHIFTED 20, D0 IS THE DISPLACEMENT 8266 add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F 8447 mov.b &FMOV_OP,%d1 # last inst is MOVE 8458 mov.l X(%a6),%d1 8459 cmp.l %d1,&0 8461 cmp.l %d1,&0x3ffe8000 # IS BOUNDS [1/2,3/2]? 
8463 cmp.l %d1,&0x3fffc000 8471 cmp.l %d1,&0x3ffef07d 8473 cmp.l %d1,&0x3fff8841 8497 cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1 8505 mov.l FFRAC(%a6),%d1 8506 and.l &0x7E000000,%d1 8507 asr.l &8,%d1 8508 asr.l &8,%d1 8509 asr.l &4,%d1 # D0 CONTAINS DISPLACEMENT FOR 1/F 8514 add.l %d1,%a0 8523 mov.l FFRAC(%a6),%d1 8524 and.l &0x7E000000,%d1 8525 asr.l &8,%d1 8526 asr.l &8,%d1 8527 asr.l &4,%d1 8531 add.l %d1,%a0 # A0 IS ADDRESS OF 1/F 8537 cmp.l %d1,&0 8601 mov.l (%a0),%d1 8602 mov.w 4(%a0),%d1 8603 and.l &0x7FFFFFFF,%d1 8604 cmp.l %d1,&0x3FFF8000 8616 mov.l (%a0),%d1 8617 and.l &0x80000000,%d1 8618 or.l &0x3F000000,%d1 # SIGN(X)*HALF 8619 mov.l %d1,-(%sp) 8630 mov.b &FMUL_OP,%d1 # last inst is MUL 8743 mov.l (%a0),%d1 8755 mov.l (%a0),%d1 8767 mov.l (%a0),%d1 8770 mov.l 8(%a0),%d1 8773 mov.l 4(%a0),%d1 8774 and.l &0x7FFFFFFF,%d1 8778 mov.w (%a0),%d1 8779 and.l &0x00007FFF,%d1 8780 sub.l &0x3FFF,%d1 8783 fmov.l %d1,%fp0 8800 mov.l (%a0),%d1 8988 mov.l (%a0),%d1 8989 mov.w 4(%a0),%d1 8991 and.l &0x7FFFFFFF,%d1 8993 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)? 8998 cmp.l %d1,&0x400D80C0 # |X| > 16480? 9011 mov.l INT(%a6),%d1 9012 mov.l %d1,%d2 9013 and.l &0x3F,%d1 # D0 IS J 9014 asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64) 9015 add.l %d1,%a1 # ADDRESS FOR 2^(J/64) 9017 mov.l %d2,%d1 9018 asr.l &1,%d1 # D0 IS M 9019 sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J 9040 add.w %d1,FACT1(%a6) 9042 add.w %d1,FACT2(%a6) 9048 cmp.l %d1,&0x3FFF8000 9060 mov.l X(%a6),%d1 9061 cmp.l %d1,&0 9075 mov.l (%a0),%d1 9076 or.l &0x00800001,%d1 9077 fadd.s %d1,%fp0 9085 mov.l (%a0),%d1 9086 mov.w 4(%a0),%d1 9088 and.l &0x7FFFFFFF,%d1 9090 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)? 9095 cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ? 9108 mov.l INT(%a6),%d1 9109 mov.l %d1,%d2 9110 and.l &0x3F,%d1 # D0 IS J 9111 asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64) 9112 add.l %d1,%a1 # ADDRESS FOR 2^(J/64) 9114 mov.l %d2,%d1 9115 asr.l &1,%d1 # D0 IS M 9116 sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J 9144 add.w %d1,FACT1(%a6) 9145 add.w %d1,FACT2(%a6) 9189 mov.b &FMUL_OP,%d1 # last inst is MUL 9199 mov.l (%a0),%d1 9200 or.l &0x00800001,%d1 9201 fadd.s %d1,%fp0 9224 mov.w DST_EX(%a1),%d1 # get dst exponent 9226 andi.l &0x00007fff,%d1 # strip sign from dst exp 9274 mov.l &0x80000000,%d1 # load normalized mantissa 9279 lsr.l %d0,%d1 # no; bit stays in upper lw 9281 mov.l %d1,-(%sp) # insert new high mantissa 9286 lsr.l %d0,%d1 # make low mantissa longword 9287 mov.l %d1,-(%sp) # insert new low mantissa 9307 mov.b &FMUL_OP,%d1 # last inst is MUL 9332 mov.b &FMOV_OP,%d1 # last inst is MOVE 9336 mov.l (%sp)+,%d0 # load control bits into d1 9481 mov.w SignY(%a6),%d1 9482 eor.l %d0,%d1 9483 and.l &0x00008000,%d1 9484 mov.w %d1,SignQ(%a6) # sign(Q) obtained 9486 mov.l DST_HI(%a1),%d1 9491 tst.l %d1 9495 mov.l %d2,%d1 9499 bfffo %d1{&0:&32},%d6 9500 lsl.l %d6,%d1 9507 bfffo %d1{&0:&32},%d6 9509 lsl.l %d6,%d1 9515 or.l %d7,%d1 # (D0,D1,D2) normalized 9550 cmp.l %d1,%d4 # compare hi(R) and hi(Y) 9566 subx.l %d4,%d1 # hi(R) - hi(Y) 9577 roxl.l &1,%d1 # hi(R) = 2hi(R) + carry 9590 tst.l %d1 9594 mov.l %d2,%d1 9598 bfffo %d1{&0:&32},%d6 9599 lsl.l %d6,%d1 9606 bfffo %d1{&0:&32},%d6 9609 lsl.l %d6,%d1 9615 or.l %d7,%d1 # (D0,D1,D2) normalized 9623 mov.l %d1,R_Hi(%a6) 9633 mov.l %d1,R_Hi(%a6) 9657 cmp.l %d1,%d4 9701 mov.b &FMUL_OP,%d1 # last inst is MUL 9712 mov.b &FMOV_OP,%d1 # last inst is MOVE 10033 mov.b %d0,%d1 # fetch rnd prec,mode 10034 andi.b &0xc0,%d1 # extract prec 10044 movm.l &0xc080,-(%sp) # save d0-d1/a0 10046 movm.l (%sp)+,&0x0103 # restore 
d0-d1/a0 10048 cmpi.b %d1,&0x40 # is precision sgl? 10057 mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of 10058 andi.l &0x7ff,%d1 # dbl mantissa set? 10662 mov.b DTAG(%a6),%d1 10664 cmpi.b %d1,&ZERO 10666 cmpi.b %d1,&INF 10668 cmpi.b %d1,&DENORM 10674 mov.b DTAG(%a6),%d1 10676 cmpi.b %d1,&ZERO 10678 cmpi.b %d1,&INF 10680 cmpi.b %d1,&DENORM 10686 mov.b DTAG(%a6),%d1 10688 cmpi.b %d1,&ZERO 10690 cmpi.b %d1,&INF 10692 cmpi.b %d1,&DENORM 10698 mov.b SRC_EX(%a0),%d1 # get src sign 10700 eor.b %d0,%d1 # get qbyte sign 10701 andi.b &0x80,%d1 10702 mov.b %d1,FPSR_QBYTE(%a6) 10711 mov.b SRC_EX(%a0),%d1 # get src sign 10713 eor.b %d0,%d1 # get qbyte sign 10714 andi.b &0x80,%d1 10715 mov.b %d1,FPSR_QBYTE(%a6) 10737 mov.b DTAG(%a6),%d1 10739 cmpi.b %d1,&ZERO 10741 cmpi.b %d1,&INF 10743 cmpi.b %d1,&DENORM 10749 mov.b DTAG(%a6),%d1 10751 cmpi.b %d1,&ZERO 10753 cmpi.b %d1,&INF 10755 cmpi.b %d1,&DENORM 10761 mov.b DTAG(%a6),%d1 10763 cmpi.b %d1,&ZERO 10765 cmpi.b %d1,&INF 10767 cmpi.b %d1,&DENORM 10777 mov.b DTAG(%a6),%d1 10779 cmpi.b %d1,&ZERO 10781 cmpi.b %d1,&INF 10783 cmpi.b %d1,&DENORM 10789 mov.b DTAG(%a6),%d1 10791 cmpi.b %d1,&ZERO 10793 cmpi.b %d1,&INF 10795 cmpi.b %d1,&DENORM 10801 mov.b DTAG(%a6),%d1 10803 cmpi.b %d1,&QNAN 10811 mov.b DTAG(%a6),%d1 10812 cmpi.b %d1,&QNAN 10841 mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa) 10848 bfextu %d1{&0:%d2}, %d3 # extract lo bits 10851 lsl.l %d2, %d1 # create lo(man) 10854 mov.l %d1, FTEMP_LO(%a0) # store new lo(man) 10864 bfffo %d1{&0:&32}, %d2 # how many places to shift? 10865 lsl.l %d2, %d1 # shift lo(man) 10868 mov.l %d1, FTEMP_HI(%a0) # store hi(man) 10916 clr.l %d1 # clear top word 10917 mov.w FTEMP_EX(%a0), %d1 # extract exponent 10918 and.w &0x7fff, %d1 # strip off sgn 10920 cmp.w %d0, %d1 # will denorm push exp < 0? 10926 sub.w %d0, %d1 # shift exponent value 10929 or.w %d0, %d1 # {sgn,new exp} 10930 mov.w %d1, FTEMP_EX(%a0) # insert new exponent 10941 cmp.b %d1, &32 # is exp <= 32? 10944 bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man) 10948 lsl.l %d1, %d0 # extract new lo(man) 10960 sub.w &32, %d1 # adjust shft amt by 32 10963 lsl.l %d1, %d0 # left shift lo(man)
|
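Note on the fplsp.S hits above: nearly every occurrence of %d1 there follows one pattern. The operand's type tag is copied into %d1 (mov.b %d0,%d1), optionally stored to STAG(%a6), then tested and compared against the ZERO, INF and QNAN tag constants to branch to the matching handler, with d0-d1/a0-a1 saved to and restored from EXC_DREGS(%a6) around the operation. The C sketch below is only an illustration of that tag-dispatch idea under assumed tag values and handler names; it is not the kernel's code, which uses the assembler constants and emulation routines shown in the listing.

    /* Illustrative sketch of the operand-tag dispatch repeated in the
     * fplsp.S snippets above. Tag values and handler names here are
     * assumptions for the example only. */
    #include <stdio.h>

    enum op_tag { TAG_NORM = 0, TAG_ZERO, TAG_INF, TAG_QNAN, TAG_OTHER };

    static void handle_norm(void)  { puts("normalized operand"); }
    static void handle_zero(void)  { puts("zero operand"); }
    static void handle_inf(void)   { puts("infinite operand"); }
    static void handle_qnan(void)  { puts("quiet-NaN operand"); }
    static void handle_other(void) { puts("denorm/snan/unnorm operand"); }

    static void dispatch(enum op_tag tag)
    {
        /* mirrors: tst.b %d1 / cmpi.b %d1,&ZERO / &INF / &QNAN ... */
        if (tag == TAG_NORM)
            handle_norm();
        else if (tag == TAG_ZERO)
            handle_zero();
        else if (tag == TAG_INF)
            handle_inf();
        else if (tag == TAG_QNAN)
            handle_qnan();
        else
            handle_other();
    }

    int main(void)
    {
        dispatch(TAG_ZERO);   /* -> "zero operand" */
        dispatch(TAG_QNAN);   /* -> "quiet-NaN operand" */
        return 0;
    }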
H A D | pfpsp.S | 644 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 699 mov.b 1+EXC_CMDREG(%a6),%d1 700 andi.w &0x007f,%d1 # extract extension 711 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 712 jsr (tbl_unsupp.l,%pc,%d1.l*1) 732 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 747 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 766 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 805 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 884 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 942 mov.b 1+EXC_CMDREG(%a6),%d1 943 andi.w &0x007f,%d1 # extract extension 954 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 955 jsr (tbl_unsupp.l,%pc,%d1.l*1) 982 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1009 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1040 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1079 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1209 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1300 bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension 1305 mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr 1306 jsr (tbl_unsupp.l,%pc,%d1.l*1) 1339 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1406 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1571 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1595 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1678 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1693 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1710 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1739 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1763 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1791 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1845 bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension 1850 mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr 1851 jsr (tbl_unsupp.l,%pc,%d1.l*1) 1891 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1909 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1990 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2021 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2122 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2140 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2190 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2230 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2270 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2317 mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man) 2318 lsr.l %d0,%d1 # shift it 2319 bset &31,%d1 # set j-bit 2320 mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man) 2338 mov.w &0x3c01,%d1 # pass denorm threshold 2358 clr.l %d1 2465 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2513 tst.l %d1 # did ifetch fail? 2525 tst.l %d1 # did ifetch fail? 2593 mov.b 1+EXC_CMDREG(%a6),%d1 2594 andi.w &0x007f,%d1 # extract extension 2602 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 2603 jsr (tbl_unsupp.l,%pc,%d1.l*1) 2638 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2698 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2780 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2797 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2803 # right now, d1 = size and d0 = the strg. 
2805 mov.b %d1,EXC_VOFF(%a6) # store strg 2810 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2814 mov.l %d1,-(%sp) # save d1 2840 clr.l %d1 2841 mov.b EXC_VOFF(%a6),%d1 # fetch strg 2843 tst.b %d1 2848 lsl.b &0x1,%d1 2853 lsl.b &0x1,%d1 2858 lsl.b &0x1,%d1 2863 lsl.b &0x1,%d1 2868 lsl.b &0x1,%d1 2873 lsl.b &0x1,%d1 2878 lsl.b &0x1,%d1 2882 mov.l 0x4(%sp),%d1 2899 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2954 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2974 bfextu %d0{&19:&3},%d1 2976 cmpi.b %d1,&0x7 # move all regs? 2992 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3021 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3043 movc %pcr,%d1 3044 btst &0x1,%d1 3058 movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1 3112 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3140 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3166 mov.w FP_SRC_EX(%a6),%d1 # fetch exponent 3167 andi.w &0x7fff,%d1 3168 cmpi.w %d1,&0x7fff 3173 mov.l FP_SRC_HI(%a6),%d1 3174 andi.l &0x7fffffff,%d1 3181 mov.l &0x7fffffff,%d1 3184 addq.l &0x1,%d1 3186 mov.l %d1,L_SCR1(%a6) 3190 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg 3206 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3211 tst.l %d1 # did dstore fail? 3216 andi.w &0x0007,%d1 3222 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3227 tst.l %d1 # did dstore fail? 3232 andi.w &0x0007,%d1 3238 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3243 tst.l %d1 # did dstore fail? 3248 andi.w &0x0007,%d1 3308 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3336 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3360 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg 3377 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3382 tst.l %d1 # did dstore fail? 3387 andi.w &0x0007,%d1 3394 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3399 tst.l %d1 # did dstore fail? 3404 andi.w &0x0007,%d1 3411 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3416 tst.l %d1 # did dstore fail? 3421 andi.w &0x0007,%d1 3426 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3431 mov.l FP_SRC_HI(%a6),%d1 # load mantissa 3432 lsr.l &0x8,%d1 # shift mantissa for sgl 3433 or.l %d1,%d0 # create sgl SNAN 3437 tst.l %d1 # did dstore fail? 3445 mov.l %d1,-(%sp) 3446 mov.l FP_SRC_HI(%a6),%d1 # load mantissa 3447 lsr.l &0x8,%d1 # shift mantissa for sgl 3448 or.l %d1,%d0 # create sgl SNAN 3449 mov.l (%sp)+,%d1 3450 andi.w &0x0007,%d1 3458 mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa 3461 lsr.l %d0,%d1 3462 or.l %d1,FP_SCR0_EX(%a6) # create dbl hi 3463 mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa 3464 andi.l &0x000007ff,%d1 3465 ror.l %d0,%d1 3466 mov.l %d1,FP_SCR0_HI(%a6) # store to temp space 3467 mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa 3468 lsr.l %d0,%d1 3469 or.l %d1,FP_SCR0_HI(%a6) # create dbl lo 3475 tst.l %d1 # did dstore fail? 3513 tst.l %d1 # did dstore fail? 3533 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3600 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3644 bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg 3645 cmpi.b %d1,&0x17 # is op an fmovecr? 
3676 mov.b 1+EXC_CMDREG(%a6),%d1 3677 andi.w &0x007f,%d1 # extract extension 3682 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 3683 jsr (tbl_unsupp.l,%pc,%d1.l*1) 3693 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3703 mov.b 1+EXC_CMDREG(%a6),%d1 3704 andi.l &0x0000007f,%d1 # pass rom offset 3774 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3798 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3880 mov.l %d0, %d1 # make a copy 3883 andi.l &0x7, %d1 # extract reg field 3891 or.w %d1,%d0 # concat mode,reg 3963 mov.l %d0,%d1 # make a copy 3966 andi.l &0x7,%d1 # extract reg field 3981 mov.w (tbl_ceaf_pi.b,%pc,%d1.w*2),%d1 3983 jmp (tbl_ceaf_pi.b,%pc,%d1.w*1) 4026 mov.w (tbl_ceaf_pd.b,%pc,%d1.w*2),%d1 4030 jmp (tbl_ceaf_pd.b,%pc,%d1.w*1) 4215 # d1 = Dn # 4268 mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword 4269 andi.w &0x70,%d1 # extract reg bits 4270 lsr.b &0x4,%d1 # shift into lo bits 4282 mov.l (%sp)+,%d1 # restore strg 4304 mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1 4325 tst.b %d1 # should FP0 be moved? 4333 lsl.b &0x1,%d1 # should FP1 be moved? 4341 lsl.b &0x1,%d1 # should FP2 be moved? 4348 lsl.b &0x1,%d1 # should FP3 be moved? 4355 lsl.b &0x1,%d1 # should FP4 be moved? 4362 lsl.b &0x1,%d1 # should FP5 be moved? 4369 lsl.b &0x1,%d1 # should FP6 be moved? 4376 lsl.b &0x1,%d1 # should FP7 be moved? 4392 tst.l %d1 # did dstore err? 4406 mov.l %d1,-(%sp) # save bit string for later 4413 tst.l %d1 # did dfetch fail? 4416 mov.l (%sp)+,%d1 # load bit string 4420 tst.b %d1 # should FP0 be moved? 4428 lsl.b &0x1,%d1 # should FP1 be moved? 4436 lsl.b &0x1,%d1 # should FP2 be moved? 4442 lsl.b &0x1,%d1 # should FP3 be moved? 4448 lsl.b &0x1,%d1 # should FP4 be moved? 4454 lsl.b &0x1,%d1 # should FP5 be moved? 4460 lsl.b &0x1,%d1 # should FP6 be moved? 4466 lsl.b &0x1,%d1 # should FP7 be moved? 4578 mov.w %d0,%d1 # make a copy 4581 andi.l &0x7,%d1 # extract reg field 4701 mov.l %d0,%d1 4702 add.l %a0,%d1 # Increment 4703 mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value 4709 mov.l %d0,%d1 4710 add.l %a0,%d1 # Increment 4711 mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value 4717 mov.l %d0,%d1 4718 add.l %a0,%d1 # Increment 4719 mov.l %d1,%a2 # Save incr value 4725 mov.l %d0,%d1 4726 add.l %a0,%d1 # Increment 4727 mov.l %d1,%a3 # Save incr value 4733 mov.l %d0,%d1 4734 add.l %a0,%d1 # Increment 4735 mov.l %d1,%a4 # Save incr value 4741 mov.l %d0,%d1 4742 add.l %a0,%d1 # Increment 4743 mov.l %d1,%a5 # Save incr value 4749 mov.l %d0,%d1 4750 add.l %a0,%d1 # Increment 4751 mov.l %d1,(%a6) # Save incr value 4759 mov.l %d0,%d1 4760 add.l %a0,%d1 # Increment 4761 mov.l %d1,EXC_A7(%a6) # Save incr value 4834 tst.l %d1 # did ifetch fail? 4847 tst.l %d1 # did ifetch fail? 4860 tst.l %d1 # did ifetch fail? 4873 tst.l %d1 # did ifetch fail? 4886 tst.l %d1 # did ifetch fail? 4899 tst.l %d1 # did ifetch fail? 4912 tst.l %d1 # did ifetch fail? 4925 tst.l %d1 # did ifetch fail? 4940 addq.l &0x8,%d1 4948 tst.l %d1 # did ifetch fail? 4958 mov.l %d0,%d1 4959 rol.w &0x4,%d1 4960 andi.w &0xf,%d1 # extract index regno 4972 mov.l %d2,%d1 4973 rol.w &0x7,%d1 4974 andi.l &0x3,%d1 # extract scale value 4976 lsl.l %d1,%d0 # shift index by scale 4993 tst.l %d1 # did ifetch fail? 5007 tst.l %d1 # did ifetch fail? 5021 tst.l %d1 # did ifetch fail? 5043 tst.l %d1 # did ifetch fail? 
5054 mov.l %d0,%d1 # make extword copy 5055 rol.w &0x4,%d1 # rotate reg num into place 5056 andi.w &0xf,%d1 # extract register number 5068 mov.l %d2,%d1 5069 rol.w &0x7,%d1 # rotate scale value into place 5070 andi.l &0x3,%d1 # extract scale value 5072 lsl.l %d1,%d0 # shift index by scale 5100 bfextu %d0{&16:&4},%d1 # fetch dreg index 5136 tst.l %d1 # did ifetch fail? 5146 tst.l %d1 # did ifetch fail? 5167 tst.l %d1 # did ifetch fail? 5177 tst.l %d1 # did ifetch fail? 5195 tst.l %d1 # did dfetch fail? 5207 tst.l %d1 # did dfetch fail? 5300 tst.l %d1 # did ifetch fail? 5308 tst.l %d1 # did ifetch fail? 5320 tst.l %d1 # did ifetch fail? 5328 tst.l %d1 # did ifetch fail? 5340 tst.l %d1 # did ifetch fail? 5348 tst.l %d1 # did ifetch fail? 5360 tst.l %d1 # did ifetch fail? 5368 tst.l %d1 # did ifetch fail? 5376 tst.l %d1 # did ifetch fail? 5419 mov.w DST_EX(%a1),%d1 5421 mov.w %d1,FP_SCR1_EX(%a6) 5424 andi.w &0x7fff,%d1 5426 mov.w %d1,2+L_SCR1(%a6) # store dst exponent 5428 cmp.w %d0, %d1 # is src exp >= dst exp? 5453 mov.w FP_SCR0_EX(%a6),%d1 5454 and.w &0x8000,%d1 5455 or.w %d1,%d0 # concat {sgn,new exp} 5489 mov.w FP_SCR1_EX(%a6),%d1 5490 andi.w &0x8000,%d1 5491 or.w %d1,%d0 # concat {sgn,new exp} 5531 mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp} 5532 mov.w %d1,%d0 # make a copy 5534 andi.l &0x7fff,%d1 # extract operand's exponent 5546 sub.l %d1,%d0 # scale = BIAS + (-exp) 5554 mov.l %d0,%d1 # prepare for op_norm call 5588 mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp} 5589 andi.l &0x7fff,%d1 # extract operand's exponent 5593 btst &0x0,%d1 # is exp even or odd? 5599 sub.l %d1,%d0 # scale = BIAS + (-exp) 5607 sub.l %d1,%d0 # scale = BIAS + (-exp) 5658 mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp} 5659 mov.w %d1,%d0 # make a copy 5661 andi.l &0x7fff,%d1 # extract operand's exponent 5673 sub.l %d1,%d0 # scale = BIAS + (-exp) 5680 mov.l %d0,%d1 # prepare for op_norm call 5841 mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold 5842 mov.w %d1, %d0 # copy d1 into d0 5862 mov.w %d1, FTEMP_EX(%a0) # load exp with threshold 5863 clr.l FTEMP_HI(%a0) # set d1 = 0 (ms mantissa) 5873 # %d1{15:0} : denormalization threshold # 5897 mov.l %d1, %d0 # copy the denorm threshold 5898 sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent 5899 ble.b dnrm_no_lp # d1 <= 0 5900 cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ? 5902 cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ? 5904 bra.w case_3 # (d1 >= 64) 5914 # case (0<d1<32) 5917 # %d1 = "n" = amt to shift 5941 sub.w %d1, %d0 # %d0 = 32 - %d1 5943 cmpi.w %d1, &29 # is shft amt >= 29 5950 bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO 5954 mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO 5966 # case (32<=d1<64) 5969 # %d1 = "n" = amt to shift 5992 subi.w &0x20, %d1 # %d1 now between 0 and 32 5994 sub.w %d1, %d0 # %d0 = 32 - %d1 6003 bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S 6005 bftst %d1{&2:&30} # were any bits shifted off? 6010 mov.l %d1, %d0 # move new G,R,S to %d0 6014 mov.l %d1, %d0 # move new G,R,S to %d0 6026 # case (d1>=64) 6029 # %d1 = amt to shift 6034 cmpi.w %d1, &65 # is shift amt > 65? 6039 # case (d1>65) 6050 # case (d1 == 64) 6071 mov.l %d0, %d1 # make a copy 6073 and.l &0x3fffffff, %d1 # extract other bits 6078 # case (d1 == 65) 6101 and.l &0x7fffffff, %d1 # extract other bits 6140 # d1(hi) = contains rounding precision: # 6144 # d1(lo) = contains rounding mode: # 6184 mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset 6203 swap %d1 # set up d1 for round prec. 
6205 cmpi.b %d1, &s_mode # is prec = sgl? 6220 swap %d1 # set up d1 for round prec. 6222 cmpi.b %d1, &s_mode # is prec = sgl? 6237 swap %d1 # set up d1 for round prec. 6239 cmpi.b %d1, &s_mode # is prec = sgl? 6313 swap %d1 # select rnd prec 6315 cmpi.b %d1, &s_mode # is prec sgl? 6327 # d1 = {PREC,ROUND} 6336 # Notes: the ext_grs uses the round PREC, and therefore has to swap d1 6337 # prior to usage, and needs to restore d1 to original. this 6343 swap %d1 # have d1.w point to round precision 6344 tst.b %d1 # is rnd prec = extended? 6352 swap %d1 # yes; return to correct positions 6358 cmpi.b %d1, &s_mode # is rnd prec = sgl? 6416 swap %d1 # restore d1 to original 6444 mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa) 6451 bfextu %d1{&0:%d2}, %d3 # extract lo bits 6454 lsl.l %d2, %d1 # create lo(man) 6457 mov.l %d1, FTEMP_LO(%a0) # store new lo(man) 6467 bfffo %d1{&0:&32}, %d2 # how many places to shift? 6468 lsl.l %d2, %d1 # shift lo(man) 6471 mov.l %d1, FTEMP_HI(%a0) # store hi(man) 6519 clr.l %d1 # clear top word 6520 mov.w FTEMP_EX(%a0), %d1 # extract exponent 6521 and.w &0x7fff, %d1 # strip off sgn 6523 cmp.w %d0, %d1 # will denorm push exp < 0? 6529 sub.w %d0, %d1 # shift exponent value 6532 or.w %d0, %d1 # {sgn,new exp} 6533 mov.w %d1, FTEMP_EX(%a0) # insert new exponent 6544 cmp.b %d1, &32 # is exp <= 32? 6547 bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man) 6551 lsl.l %d1, %d0 # extract new lo(man) 6563 sub.w &32, %d1 # adjust shft amt by 32 6566 lsl.l %d1, %d0 # left shift lo(man) 6688 mov.l %d0, %d1 6700 and.l &0x000fffff, %d1 6711 and.l &0x000fffff, %d1 6719 btst &19, %d1 6751 mov.l %d0, %d1 6763 and.l &0x007fffff, %d1 6772 and.l &0x007fffff, %d1 6778 btst &22, %d1 6802 # d1 = rounding precision/mode # 6823 mov.l %d1, -(%sp) # save rnd prec,mode on stack 6828 mov.w FTEMP_EX(%a0), %d1 # extract exponent 6829 and.w &0x7fff, %d1 6830 sub.w %d0, %d1 6831 mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent 6841 mov.w 0x6(%sp),%d1 # load prec:mode into %d1 6842 andi.w &0xc0,%d1 # extract rnd prec 6843 lsr.w &0x4,%d1 6844 swap %d1 6845 mov.w 0x6(%sp),%d1 6846 andi.w &0x30,%d1 6847 lsr.w &0x4,%d1 6887 mov.l %d1,-(%sp) # save rnd prec,mode on stack 6892 mov.w FTEMP_EX(%a0),%d1 # extract exponent 6893 and.w &0x7fff,%d1 6894 sub.w %d0,%d1 6895 mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent 6903 mov.w &s_mode,%d1 # force rnd prec = sgl 6904 swap %d1 6905 mov.w 0x6(%sp),%d1 # load rnd mode 6906 andi.w &0x30,%d1 # extract rnd prec 6907 lsr.w &0x4,%d1 6955 # d1.b = '-1' => (-); '0' => (+) # 6978 andi.w &0x10,%d1 # keep result sign 6980 or.b %d0,%d1 # concat the two 6981 mov.w %d1,%d0 # make a copy 6982 lsl.b &0x1,%d1 # multiply d1 by 2 6987 and.w &0x10, %d1 # keep result sign 6988 or.b %d0, %d1 # insert rnd mode 6990 or.b %d0, %d1 # insert rnd prec 6991 mov.w %d1, %d0 # make a copy 6992 lsl.b &0x1, %d1 # shift left by 1 7000 lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr 7102 bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt 7103 mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index 7135 fmov.l %fpsr,%d1 # fetch FPSR 7136 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits 7138 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 7139 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 7145 tst.l %d1 # did dstore fail? 
7151 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 7152 andi.w &0x7,%d1 7157 mov.l SRC_EX(%a0),%d1 7158 andi.l &0x80000000,%d1 # keep DENORM sign 7159 ori.l &0x00800000,%d1 # make smallest sgl 7160 fmov.s %d1,%fp0 7181 fmov.l %fpsr,%d1 # fetch FPSR 7182 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits 7184 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 7185 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 7191 tst.l %d1 # did dstore fail? 7197 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 7198 andi.w &0x7,%d1 7203 mov.l SRC_EX(%a0),%d1 7204 andi.l &0x80000000,%d1 # keep DENORM sign 7205 ori.l &0x00800000,%d1 # make smallest sgl 7206 fmov.s %d1,%fp0 7227 fmov.l %fpsr,%d1 # fetch FPSR 7228 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits 7231 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 7232 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 7238 tst.l %d1 # did dstore fail? 7244 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 7245 andi.w &0x7,%d1 7250 mov.l SRC_EX(%a0),%d1 7251 andi.l &0x80000000,%d1 # keep DENORM sign 7252 ori.l &0x00800000,%d1 # make smallest sgl 7253 fmov.s %d1,%fp0 7289 tst.l %d1 # did dstore fail? 7310 tst.l %d1 # did dstore fail? 7367 fmov.l %fpsr,%d1 # save FPSR 7369 or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex 7372 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 7373 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 7379 tst.l %d1 # did dstore fail? 7385 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 7386 andi.w &0x7,%d1 7413 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 7419 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 7420 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 7426 tst.l %d1 # did dstore fail? 7432 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 7433 andi.w &0x7,%d1 7437 mov.b FPCR_ENABLE(%a6),%d1 7438 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 7463 smi %d1 # set if so 7469 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 7470 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 7476 tst.l %d1 # did dstore fail? 7482 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 7483 andi.w &0x7,%d1 7487 mov.b FPCR_ENABLE(%a6),%d1 7488 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 7502 mov.w SRC_EX(%a0),%d1 # fetch current sign 7503 andi.w &0x8000,%d1 # keep it,clear exp 7504 ori.w &0x3fff,%d1 # insert exp = 0 7505 mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp 7551 mov.b 3+L_SCR3(%a6),%d1 7552 lsr.b &0x4,%d1 7553 andi.w &0x0c,%d1 7554 swap %d1 7555 mov.b 3+L_SCR3(%a6),%d1 7556 lsr.b &0x4,%d1 7557 andi.w &0x03,%d1 7614 tst.l %d1 # did dstore fail? 7642 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 7648 mov.l %d1,L_SCR2(%a6) 7655 tst.l %d1 # did dstore fail? 7658 mov.b FPCR_ENABLE(%a6),%d1 7659 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 7684 smi %d1 # set if so 7695 tst.l %d1 # did dstore fail? 7698 mov.b FPCR_ENABLE(%a6),%d1 7699 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 
7713 mov.w SRC_EX(%a0),%d1 # fetch current sign 7714 andi.w &0x8000,%d1 # keep it,clear exp 7715 ori.w &0x3fff,%d1 # insert exp = 0 7716 mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp 7742 # d1 = lo(double precision result) # 7783 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa 7784 bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms 7785 or.l %d1,%d0 # put these bits in ms word of double 7787 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa 7789 lsl.l %d0,%d1 # put lower 11 bits in upper bits 7790 mov.l %d1,L_SCR2(%a6) # build lower lword in memory 7791 mov.l FTEMP_LO(%a0),%d1 # get ls mantissa 7792 bfextu %d1{&0:&21},%d0 # get ls 21 bits of double 7793 mov.l L_SCR2(%a6),%d1 7794 or.l %d0,%d1 # put them in double result 7848 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa 7849 andi.l &0x7fffff00,%d1 # get upper 23 bits of ms 7850 lsr.l &0x8,%d1 # and put them flush right 7851 or.l %d1,%d0 # put these bits in ms word of single 7867 mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg 7868 lsr.b &0x4,%d1 7869 andi.w &0x7,%d1 7923 tst.l %d1 # did dstore fail? 7933 tst.l %d1 # did dstore fail? 8012 clr.w %d1 8013 mov.b DTAG(%a6),%d1 8014 lsl.b &0x3,%d1 8015 or.b STAG(%a6),%d1 # combine src tags 8034 mov.w 2+L_SCR3(%a6),%d1 # fetch precision 8035 lsr.b &0x6,%d1 # shift to lo bits 8037 cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl? 8041 cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl? 8061 fmov.l %fpsr,%d1 # save status 8064 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8069 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 8070 mov.l %d1,%d2 # make a copy 8071 andi.l &0x7fff,%d1 # strip sign 8073 sub.l %d0,%d1 # add scale factor 8074 or.w %d2,%d1 # concat old sign,new exp 8075 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 8101 fmov.l %fpsr,%d1 # save status 8104 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8110 mov.b FPCR_ENABLE(%a6),%d1 8111 andi.b &0x13,%d1 # is OVFL or INEX enabled? 8117 sne %d1 # set sign param accordingly 8131 mov.l L_SCR3(%a6),%d1 8132 andi.b &0xc0,%d1 # test the rnd prec 8139 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 8140 mov.w %d1,%d2 # make a copy 8141 andi.l &0x7fff,%d1 # strip sign 8142 sub.l %d0,%d1 # add scale factor 8143 subi.l &0x6000,%d1 # subtract bias 8144 andi.w &0x7fff,%d1 # clear sign bit 8146 or.w %d2,%d1 # concat old sign,new exp 8147 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 8155 mov.l L_SCR3(%a6),%d1 8156 andi.b &0x30,%d1 # keep rnd mode only 8157 fmov.l %d1,%fpcr # set FPCR 8179 fmov.l %fpsr,%d1 # save status 8182 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8217 fmov.l %fpsr,%d1 # save status 8220 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8222 mov.b FPCR_ENABLE(%a6),%d1 8223 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 8230 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 8242 mov.l L_SCR3(%a6),%d1 8243 andi.b &0xc0,%d1 # is precision extended? 
8259 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 8260 mov.l %d1,%d2 # make a copy 8261 andi.l &0x7fff,%d1 # strip sign 8263 sub.l %d0,%d1 # add scale factor 8264 addi.l &0x6000,%d1 # add bias 8265 andi.w &0x7fff,%d1 8266 or.w %d2,%d1 # concat old sign,new exp 8267 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 8273 mov.l L_SCR3(%a6),%d1 8274 andi.b &0x30,%d1 # use only rnd mode 8275 fmov.l %d1,%fpcr # set FPCR 8290 fmov.l %fpsr,%d1 # save status 8293 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8309 mov.l L_SCR3(%a6),%d1 8310 andi.b &0xc0,%d1 # keep rnd prec 8311 ori.b &rz_mode*0x10,%d1 # insert RZ 8313 fmov.l %d1,%fpcr # set FPCR 8330 mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1 8331 jmp (tbl_fmul_op.b,%pc,%d1.w) 8402 mov.b DST_EX(%a1),%d1 8403 eor.b %d0,%d1 8426 mov.b DST_EX(%a1),%d1 8427 eor.b %d0,%d1 8443 mov.b DST_EX(%a1),%d1 8444 eor.b %d0,%d1 8497 mov.b STAG(%a6),%d1 # fetch src optype tag 8550 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp 8551 andi.w &0x8000,%d1 # keep old sign 8553 or.w %d1,%d0 # concat new exo,old sign 8589 fmov.l %fpsr,%d1 # save FPSR 8592 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8597 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 8598 mov.w %d1,%d2 # make a copy 8599 andi.l &0x7fff,%d1 # strip sign 8600 sub.l %d0,%d1 # add scale factor 8602 or.w %d1,%d2 # concat old sign,new exponent 8636 mov.b FPCR_ENABLE(%a6),%d1 8637 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 8642 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 8655 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 8658 mov.w %d1,%d2 # make a copy 8659 andi.l &0x7fff,%d1 # strip sign 8660 sub.l %d0,%d1 # subtract scale factor 8662 addi.l &0x6000,%d1 # add new bias 8663 andi.w &0x7fff,%d1 8664 or.w %d1,%d2 # concat old sign,new exp 8680 fmov.l %fpsr,%d1 # save FPSR 8682 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8687 mov.b FPCR_ENABLE(%a6),%d1 8688 andi.b &0x13,%d1 # is OVFL or INEX enabled? 8697 sne %d1 # set sign param accordingly 8711 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 8712 mov.l %d1,%d2 # make a copy 8713 andi.l &0x7fff,%d1 # strip sign 8715 sub.l %d0,%d1 # add scale factor 8716 sub.l &0x6000,%d1 # subtract bias 8717 andi.w &0x7fff,%d1 8718 or.w %d2,%d1 8719 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 8733 fmov.l %fpsr,%d1 # save status 8736 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8751 cmpi.b %d1,&DENORM # weed out DENORM 8753 cmpi.b %d1,&SNAN # weed out SNANs 8755 cmpi.b %d1,&QNAN # weed out QNANs 8830 clr.w %d1 8831 mov.b DTAG(%a6),%d1 8832 lsl.b &0x3,%d1 8833 or.b STAG(%a6),%d1 # combine src tags 8857 mov.w 2+L_SCR3(%a6),%d1 # fetch precision 8858 lsr.b &0x6,%d1 # shift to lo bits 8860 cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow? 8863 cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow? 8875 fmov.l %fpsr,%d1 # save FPSR 8878 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8883 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 8884 mov.l %d1,%d2 # make a copy 8885 andi.l &0x7fff,%d1 # strip sign 8887 sub.l %d0,%d1 # add scale factor 8888 or.w %d2,%d1 # concat old sign,new exp 8889 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 8923 cmp.l %d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4) 8930 mov.b FPCR_ENABLE(%a6),%d1 8931 andi.b &0x13,%d1 # is OVFL or INEX enabled? 8936 sne %d1 # set sign param accordingly 8944 mov.l L_SCR3(%a6),%d1 8945 andi.b &0xc0,%d1 # is precision extended? 
8952 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 8953 mov.w %d1,%d2 # make a copy 8954 andi.l &0x7fff,%d1 # strip sign 8955 sub.l %d0,%d1 # add scale factor 8956 subi.l &0x6000,%d1 # subtract bias 8957 andi.w &0x7fff,%d1 # clear sign bit 8959 or.w %d2,%d1 # concat old sign,new exp 8960 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 8968 mov.l L_SCR3(%a6),%d1 8969 andi.b &0x30,%d1 # keep rnd mode 8970 fmov.l %d1,%fpcr # set FPCR 8987 fmov.l %fpsr,%d1 # save status 8990 or.l %d1,USER_FPSR(%a6) # save INEX2,N 8992 mov.b FPCR_ENABLE(%a6),%d1 8993 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 9000 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 9012 mov.l L_SCR3(%a6),%d1 9013 andi.b &0xc0,%d1 # is precision extended? 9027 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 9028 mov.l %d1,%d2 # make a copy 9029 andi.l &0x7fff,%d1 # strip sign 9031 sub.l %d0,%d1 # add scale factoer 9032 addi.l &0x6000,%d1 # add bias 9033 andi.w &0x7fff,%d1 9034 or.w %d2,%d1 # concat old sign,new exp 9035 mov.w %d1,FP_SCR0_EX(%a6) # insert new exp 9041 mov.l L_SCR3(%a6),%d1 9042 andi.b &0x30,%d1 # use only rnd mode 9043 fmov.l %d1,%fpcr # set FPCR 9058 fmov.l %fpsr,%d1 # save status 9061 or.l %d1,USER_FPSR(%a6) # save INEX2,N 9077 mov.l L_SCR3(%a6),%d1 9078 andi.b &0xc0,%d1 # keep rnd prec 9079 ori.b &rz_mode*0x10,%d1 # insert RZ 9081 fmov.l %d1,%fpcr # set FPCR 9098 mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1 9099 jmp (tbl_fdiv_op.b,%pc,%d1.w*1) 9167 mov.b DST_EX(%a1),%d1 # or of input signs. 9168 eor.b %d0,%d1 9187 mov.b DST_EX(%a1),%d1 9188 eor.b %d0,%d1 9207 mov.b SRC_EX(%a0),%d1 9208 eor.b %d0,%d1 9270 mov.b STAG(%a6),%d1 9330 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp 9331 andi.w &0x8000,%d1 # keep old sign 9333 or.w %d1,%d0 # concat old sign, new exponent 9369 fmov.l %fpsr,%d1 # save FPSR 9372 or.l %d1,USER_FPSR(%a6) # save INEX2,N 9377 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp 9378 mov.w %d1,%d2 # make a copy 9379 andi.l &0x7fff,%d1 # strip sign 9380 sub.l %d0,%d1 # add scale factor 9382 or.w %d1,%d2 # concat old sign,new exp 9416 mov.b FPCR_ENABLE(%a6),%d1 9417 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 9422 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 9435 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 9438 mov.l %d1,%d2 # make a copy 9439 andi.l &0x7fff,%d1 # strip sign 9441 sub.l %d0,%d1 # subtract scale factor 9442 addi.l &0x6000,%d1 # add new bias 9443 andi.w &0x7fff,%d1 9444 or.w %d2,%d1 # concat new sign,new exp 9445 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp 9460 fmov.l %fpsr,%d1 # save FPSR 9462 or.l %d1,USER_FPSR(%a6) # save INEX2,N 9467 mov.b FPCR_ENABLE(%a6),%d1 9468 andi.b &0x13,%d1 # is OVFL or INEX enabled? 
9477 sne %d1 # set sign param accordingly 9491 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 9492 mov.l %d1,%d2 # make a copy 9493 andi.l &0x7fff,%d1 # strip sign 9495 sub.l %d0,%d1 # add scale factor 9496 subi.l &0x6000,%d1 # subtract bias 9497 andi.w &0x7fff,%d1 9498 or.w %d2,%d1 # concat sign,exp 9499 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 9513 fmov.l %fpsr,%d1 # save status 9516 or.l %d1,USER_FPSR(%a6) # save INEX2,N 9531 cmpi.b %d1,&DENORM # weed out DENORM 9533 cmpi.b %d1,&SNAN # weed out SNAN 9535 cmpi.b %d1,&QNAN # weed out QNAN 9570 mov.b STAG(%a6),%d1 9588 cmpi.b %d1,&ZERO # weed out ZERO 9590 cmpi.b %d1,&INF # weed out INF 9592 cmpi.b %d1,&SNAN # weed out SNAN 9594 cmpi.b %d1,&QNAN # weed out QNAN 9662 mov.b STAG(%a6),%d1 9686 cmpi.b %d1,&ZERO # weed out ZERO 9688 cmpi.b %d1,&INF # weed out INF 9690 cmpi.b %d1,&DENORM # weed out DENORM 9692 cmpi.b %d1,&SNAN # weed out SNAN 9768 mov.b STAG(%a6),%d1 9788 cmpi.b %d1,&ZERO # weed out ZERO 9790 cmpi.b %d1,&INF # weed out INF 9792 cmpi.b %d1,&DENORM # weed out DENORM 9794 cmpi.b %d1,&SNAN # weed out SNAN 9893 mov.b STAG(%a6),%d1 9910 mov.w SRC_EX(%a0),%d1 9911 bclr &15,%d1 # force absolute value 9912 mov.w %d1,FP_SCR0_EX(%a6) # insert exponent 9948 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp 9949 andi.w &0x8000,%d1 # keep old sign 9951 or.w %d1,%d0 # concat old sign, new exponent 9987 fmov.l %fpsr,%d1 # save FPSR 9990 or.l %d1,USER_FPSR(%a6) # save INEX2,N 9995 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp 9996 mov.l %d1,%d2 # make a copy 9997 andi.l &0x7fff,%d1 # strip sign 9998 sub.l %d0,%d1 # add scale factor 10000 or.w %d1,%d2 # concat old sign,new exp 10031 mov.b FPCR_ENABLE(%a6),%d1 10032 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 10037 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 10050 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 10053 mov.l %d1,%d2 # make a copy 10054 andi.l &0x7fff,%d1 # strip sign 10056 sub.l %d0,%d1 # subtract scale factor 10057 addi.l &0x6000,%d1 # add new bias 10058 andi.w &0x7fff,%d1 10059 or.w %d2,%d1 # concat new sign,new exp 10060 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp 10075 fmov.l %fpsr,%d1 # save FPSR 10077 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10082 mov.b FPCR_ENABLE(%a6),%d1 10083 andi.b &0x13,%d1 # is OVFL or INEX enabled? 
10092 sne %d1 # set sign param accordingly 10106 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 10107 mov.l %d1,%d2 # make a copy 10108 andi.l &0x7fff,%d1 # strip sign 10110 sub.l %d0,%d1 # add scale factor 10111 subi.l &0x6000,%d1 # subtract bias 10112 andi.w &0x7fff,%d1 10113 or.w %d2,%d1 # concat sign,exp 10114 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 10128 fmov.l %fpsr,%d1 # save status 10131 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10146 cmpi.b %d1,&DENORM # weed out DENORM 10148 cmpi.b %d1,&SNAN # weed out SNAN 10150 cmpi.b %d1,&QNAN # weed out QNAN 10155 cmpi.b %d1,&INF # weed out INF 10189 clr.w %d1 10190 mov.b DTAG(%a6),%d1 10191 lsl.b &0x3,%d1 10192 or.b STAG(%a6),%d1 10213 mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1 10214 jmp (tbl_fcmp_op.b,%pc,%d1.w*1) 10330 mov.b DST_EX(%a1),%d1 10331 eor.b %d0,%d1 10344 mov.b DST_EX(%a1),%d1 10345 eor.b %d0,%d1 10393 clr.w %d1 10394 mov.b DTAG(%a6),%d1 10395 lsl.b &0x3,%d1 10396 or.b STAG(%a6),%d1 10432 fmov.l %fpsr,%d1 # save status 10435 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10440 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 10441 mov.l %d1,%d2 # make a copy 10442 andi.l &0x7fff,%d1 # strip sign 10444 sub.l %d0,%d1 # add scale factor 10445 or.w %d2,%d1 # concat old sign,new exp 10446 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 10459 fmov.l %fpsr,%d1 # save status 10462 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10469 mov.b FPCR_ENABLE(%a6),%d1 10470 andi.b &0x13,%d1 # is OVFL or INEX enabled? 10475 sne %d1 # set sign param accordingly 10487 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 10488 mov.l %d1,%d2 # make a copy 10489 andi.l &0x7fff,%d1 # strip sign 10490 sub.l %d0,%d1 # add scale factor 10491 subi.l &0x6000,%d1 # subtract bias 10492 andi.w &0x7fff,%d1 10494 or.w %d2,%d1 # concat old sign,new exp 10495 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 10508 fmov.l %fpsr,%d1 # save status 10511 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10530 fmov.l %fpsr,%d1 # save status 10533 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10535 mov.b FPCR_ENABLE(%a6),%d1 10536 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 10543 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 10564 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 10565 mov.l %d1,%d2 # make a copy 10566 andi.l &0x7fff,%d1 # strip sign 10568 sub.l %d0,%d1 # add scale factor 10569 addi.l &0x6000,%d1 # add bias 10570 andi.w &0x7fff,%d1 10571 or.w %d2,%d1 # concat old sign,new exp 10572 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 10585 fmov.l %fpsr,%d1 # save status 10588 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10604 mov.l L_SCR3(%a6),%d1 10605 andi.b &0xc0,%d1 # keep rnd prec 10606 ori.b &rz_mode*0x10,%d1 # insert RZ 10608 fmov.l %d1,%fpcr # set FPCR 10625 mov.w (tbl_fsglmul_op.b,%pc,%d1.w*2),%d1 10626 jmp (tbl_fsglmul_op.b,%pc,%d1.w*1) 10734 clr.w %d1 10735 mov.b DTAG(%a6),%d1 10736 lsl.b &0x3,%d1 10737 or.b STAG(%a6),%d1 # combine src tags 10761 mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode 10762 lsr.b &0x6,%d1 10779 fmov.l %fpsr,%d1 # save FPSR 10782 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10787 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 10788 mov.l %d1,%d2 # make a copy 10789 andi.l &0x7fff,%d1 # strip sign 10791 sub.l %d0,%d1 # add scale factor 10792 or.w %d2,%d1 # concat old sign,new exp 10793 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 10806 fmov.l %fpsr,%d1 10809 or.l %d1,USER_FPSR(%a6) # save INEX,N 10812 mov.w (%sp),%d1 # fetch new exponent 10814 andi.l &0x7fff,%d1 # strip sign 10815 sub.l %d0,%d1 # add scale factor 10816 cmp.l %d1,&0x7fff # did divide overflow? 
10822 mov.b FPCR_ENABLE(%a6),%d1 10823 andi.b &0x13,%d1 # is OVFL or INEX enabled? 10828 sne %d1 # set sign param accordingly 10840 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 10841 mov.l %d1,%d2 # make a copy 10842 andi.l &0x7fff,%d1 # strip sign 10844 sub.l %d0,%d1 # add scale factor 10845 subi.l &0x6000,%d1 # subtract new bias 10846 andi.w &0x7fff,%d1 # clear ms bit 10847 or.w %d2,%d1 # concat old sign,new exp 10848 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 10863 fmov.l %fpsr,%d1 # save status 10866 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10868 mov.b FPCR_ENABLE(%a6),%d1 10869 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 10876 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 10897 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 10898 mov.l %d1,%d2 # make a copy 10899 andi.l &0x7fff,%d1 # strip sign 10901 sub.l %d0,%d1 # add scale factor 10902 addi.l &0x6000,%d1 # add bias 10903 andi.w &0x7fff,%d1 # clear top bit 10904 or.w %d2,%d1 # concat old sign, new exp 10905 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 10921 fmov.l %fpsr,%d1 # save status 10924 or.l %d1,USER_FPSR(%a6) # save INEX2,N 10940 clr.l %d1 # clear scratch register 10941 ori.b &rz_mode*0x10,%d1 # force RZ rnd mode 10943 fmov.l %d1,%fpcr # set FPCR 10960 mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1 10961 jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1) 11081 clr.w %d1 11082 mov.b DTAG(%a6),%d1 11083 lsl.b &0x3,%d1 11084 or.b STAG(%a6),%d1 # combine src tags 11103 fmov.l %fpsr,%d1 # fetch INEX2,N,Z 11105 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits 11113 mov.w 2+L_SCR3(%a6),%d1 11114 lsr.b &0x6,%d1 11120 cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow? 11123 cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow? 11128 mov.w (%sp),%d1 11129 andi.w &0x8000,%d1 # keep sign 11130 or.w %d2,%d1 # concat sign,new exp 11131 mov.w %d1,(%sp) # insert new exponent 11155 mov.b FPCR_ENABLE(%a6),%d1 11156 andi.b &0x13,%d1 # is OVFL or INEX enabled? 11162 sne %d1 # set sign param accordingly 11171 mov.b L_SCR3(%a6),%d1 11172 andi.b &0xc0,%d1 # is precision extended? 11176 mov.w (%sp),%d1 11177 andi.w &0x8000,%d1 # keep sign 11180 or.w %d2,%d1 # concat sign,new exp 11181 mov.w %d1,(%sp) # insert new exponent 11189 mov.l L_SCR3(%a6),%d1 11190 andi.b &0x30,%d1 # keep rnd mode 11191 fmov.l %d1,%fpcr # set FPCR 11214 fmov.l %fpsr,%d1 # save status 11216 or.l %d1,USER_FPSR(%a6) # save INEX,N 11218 mov.b FPCR_ENABLE(%a6),%d1 11219 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 11226 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 11236 mov.l L_SCR3(%a6),%d1 11237 andi.b &0xc0,%d1 # is precision extended? 11250 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 11251 mov.l %d1,%d2 # make a copy 11252 andi.l &0x7fff,%d1 # strip sign 11254 sub.l %d0,%d1 # add scale factor 11255 addi.l &0x6000,%d1 # add new bias 11256 andi.w &0x7fff,%d1 # clear top bit 11257 or.w %d2,%d1 # concat sign,new exp 11258 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 11263 mov.l L_SCR3(%a6),%d1 11264 andi.b &0x30,%d1 # use only rnd mode 11265 fmov.l %d1,%fpcr # set FPCR 11275 mov.l L_SCR3(%a6),%d1 11276 andi.b &0xc0,%d1 11279 mov.l 0x4(%sp),%d1 # extract hi(man) 11280 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000? 
11301 mov.l L_SCR3(%a6),%d1 11302 andi.b &0xc0,%d1 # keep rnd prec 11303 ori.b &rz_mode*0x10,%d1 # insert rnd mode 11304 fmov.l %d1,%fpcr # set FPCR 11324 mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1 11325 jmp (tbl_fadd_op.b,%pc,%d1.w*1) 11393 mov.b DST_EX(%a1),%d1 11394 eor.b %d0,%d1 11411 mov.b 3+L_SCR3(%a6),%d1 11412 andi.b &0x30,%d1 # extract rnd mode 11413 cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM? 11454 mov.b DST_EX(%a1),%d1 11455 eor.b %d1,%d0 11534 clr.w %d1 11535 mov.b DTAG(%a6),%d1 11536 lsl.b &0x3,%d1 11537 or.b STAG(%a6),%d1 # combine src tags 11556 fmov.l %fpsr,%d1 # fetch INEX2, N, Z 11558 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits 11566 mov.w 2+L_SCR3(%a6),%d1 11567 lsr.b &0x6,%d1 11573 cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow? 11576 cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow? 11581 mov.w (%sp),%d1 11582 andi.w &0x8000,%d1 # keep sign 11583 or.w %d2,%d1 # insert new exponent 11584 mov.w %d1,(%sp) # insert new exponent 11608 mov.b FPCR_ENABLE(%a6),%d1 11609 andi.b &0x13,%d1 # is OVFL or INEX enabled? 11615 sne %d1 # set sign param accordingly 11624 mov.b L_SCR3(%a6),%d1 11625 andi.b &0xc0,%d1 # is precision extended? 11629 mov.w (%sp),%d1 # fetch {sgn,exp} 11630 andi.w &0x8000,%d1 # keep sign 11633 or.w %d2,%d1 # concat sign,exp 11634 mov.w %d1,(%sp) # insert new exponent 11642 mov.l L_SCR3(%a6),%d1 11643 andi.b &0x30,%d1 # clear rnd prec 11644 fmov.l %d1,%fpcr # set FPCR 11667 fmov.l %fpsr,%d1 # save status 11669 or.l %d1,USER_FPSR(%a6) 11671 mov.b FPCR_ENABLE(%a6),%d1 11672 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 11679 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 11689 mov.l L_SCR3(%a6),%d1 11690 andi.b &0xc0,%d1 # is precision extended? 11703 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 11704 mov.l %d1,%d2 # make a copy 11705 andi.l &0x7fff,%d1 # strip sign 11707 sub.l %d0,%d1 # add scale factor 11708 addi.l &0x6000,%d1 # subtract new bias 11709 andi.w &0x7fff,%d1 # clear top bit 11710 or.w %d2,%d1 # concat sgn,exp 11711 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 11716 mov.l L_SCR3(%a6),%d1 11717 andi.b &0x30,%d1 # clear rnd prec 11718 fmov.l %d1,%fpcr # set FPCR 11728 mov.l L_SCR3(%a6),%d1 11729 andi.b &0xc0,%d1 # fetch rnd prec 11732 mov.l 0x4(%sp),%d1 11733 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000? 11754 mov.l L_SCR3(%a6),%d1 11755 andi.b &0xc0,%d1 # keep rnd prec 11756 ori.b &rz_mode*0x10,%d1 # insert rnd mode 11757 fmov.l %d1,%fpcr # set FPCR 11777 mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1 11778 jmp (tbl_fsub_op.b,%pc,%d1.w*1) 11846 mov.b DST_EX(%a1),%d1 11847 eor.b %d1,%d0 11863 mov.b 3+L_SCR3(%a6),%d1 11864 andi.b &0x30,%d1 # extract rnd mode 11865 cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM? 11906 mov.b DST_EX(%a1),%d1 11907 eor.b %d1,%d0 11978 clr.w %d1 11979 mov.b STAG(%a6),%d1 11997 fmov.l %fpsr,%d1 11998 or.l %d1,USER_FPSR(%a6) # set N,INEX 12050 fmov.l %fpsr,%d1 # save FPSR 12053 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12058 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp 12059 mov.l %d1,%d2 # make a copy 12060 andi.l &0x7fff,%d1 # strip sign 12061 sub.l %d0,%d1 # add scale factor 12063 or.w %d1,%d2 # concat old sign,new exp 12105 fmov.l %fpsr,%d1 # save status 12108 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12111 mov.b FPCR_ENABLE(%a6),%d1 12112 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 
12119 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 12132 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 12135 mov.l %d1,%d2 # make a copy 12136 andi.l &0x7fff,%d1 # strip sign 12138 sub.l %d0,%d1 # subtract scale factor 12139 addi.l &0x6000,%d1 # add new bias 12140 andi.w &0x7fff,%d1 12141 or.w %d2,%d1 # concat new sign,new exp 12142 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp 12157 fmov.l %fpsr,%d1 # save FPSR 12159 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12164 mov.b FPCR_ENABLE(%a6),%d1 12165 andi.b &0x13,%d1 # is OVFL or INEX enabled? 12174 sne %d1 # set sign param accordingly 12188 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 12189 mov.l %d1,%d2 # make a copy 12190 andi.l &0x7fff,%d1 # strip sign 12192 sub.l %d0,%d1 # add scale factor 12193 subi.l &0x6000,%d1 # subtract bias 12194 andi.w &0x7fff,%d1 12195 or.w %d2,%d1 # concat sign,exp 12196 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 12213 fmov.l %fpsr,%d1 # save status 12216 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12231 cmpi.b %d1,&DENORM # weed out DENORM 12233 cmpi.b %d1,&ZERO # weed out ZERO 12235 cmpi.b %d1,&INF # weed out INF 12237 cmpi.b %d1,&SNAN # weed out SNAN 12269 # fetch_dreg(): fetch register according to index in d1 # 12275 # d1 = index of register to fetch from # 12281 # According to the index value in d1 which can range from zero # 12288 # this routine leaves d1 intact for subsequent store_dreg calls. 12291 mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0 12363 # store_dreg_l(): store longword to data register specified by d1 # 12370 # d1 = index of register to fetch from # 12376 # According to the index value in d1, store the longword value # 12384 mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1 12385 jmp (tbl_sdregl.b,%pc,%d1.w*1) 12424 # store_dreg_w(): store word to data register specified by d1 # 12431 # d1 = index of register to fetch from # 12437 # According to the index value in d1, store the word value # 12445 mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1 12446 jmp (tbl_sdregw.b,%pc,%d1.w*1) 12485 # store_dreg_b(): store byte to data register specified by d1 # 12492 # d1 = index of register to fetch from # 12498 # According to the index value in d1, store the byte value # 12506 mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1 12507 jmp (tbl_sdregb.b,%pc,%d1.w*1) 12553 # d1 = index of address register to increment # 12561 # specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside # 12572 mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1 12573 jmp (tbl_iareg.b,%pc,%d1.w*1) 12617 # d1 = index of address register to decrement # 12625 # specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside # 12636 mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1 12637 jmp (tbl_dareg.b,%pc,%d1.w*1) 12927 tst.l %d1 # did dfetch fail? 13043 # 2. Calculate absolute value of exponent in d1 by mul and add. 
13052 # (*) d1: accumulator for binary exponent 13065 clr.l %d1 # zero d1 for accumulator 13067 mulu.l &0xa,%d1 # mul partial product by one digit place 13069 add.l %d0,%d1 # d1 = d1 + d0 13074 neg.l %d1 # negate before subtracting 13076 sub.l &16,%d1 # sub to compensate for shift of mant 13078 neg.l %d1 # now negative, make pos and set SE 13082 mov.l %d1,-(%sp) # save exp on stack 13094 # (*) d1: lword counter 13105 mov.l &1,%d1 # word counter, init to 1 13120 mov.l (%a0,%d1.L*4),%d4 # load mantissa lonqword into d4 13130 # then inc d1 (=2) to point to the next long word and reset d3 to 0 13137 addq.l &1,%d1 # inc lw pointer in mantissa 13138 cmp.l %d1,&2 # test for last lw 13182 # (*) d1: zero count 13198 mov.l (%sp),%d1 # load expA for range test 13199 cmp.l %d1,&27 # test is with 27 13203 clr.l %d1 # zero count reg 13207 addq.l &1,%d1 # inc zero count 13211 addq.l &8,%d1 # and inc count by 8 13221 addq.l &1,%d1 # inc digit counter 13224 mov.l %d1,%d0 # copy counter to d2 13225 mov.l (%sp),%d1 # get adjusted exp from memory 13226 sub.l %d0,%d1 # subtract count from exp 13228 neg.l %d1 # now its neg; get abs 13255 clr.l %d1 # clr counter 13260 addq.l &8,%d1 # inc counter by 8 13269 addq.l &1,%d1 # inc digit counter 13272 mov.l %d1,%d0 # copy counter to d0 13273 mov.l (%sp),%d1 # get adjusted exp from memory 13274 sub.l %d0,%d1 # subtract count from exp 13276 neg.l %d1 # take abs of exp and clr SE 13306 # ( ) d1: exponent 13313 # ( ) d1: exponent 13366 mov.l %d1,%d0 # copy exp to d0;use d0 13552 # d1: scratch 13596 mov.l 4(%a0),%d1 13601 roxl.l &1,%d1 13602 tst.l %d1 13613 mov.l %d1,4(%a0) 13808 bfextu USER_FPCR(%a6){&26:&2},%d1 # get initial rmode bits 13809 lsl.w &1,%d1 # put them in bits 2:1 13810 add.w %d5,%d1 # add in LAMBDA 13811 lsl.w &1,%d1 # put them in bits 3:1 13814 addq.l &1,%d1 # if neg, set bit 0 13817 mov.b (%a2,%d1),%d3 # load d3 with new rmode 14018 movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1} 14048 movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1} 14174 # d1: x/0 14219 clr.l %d1 # put zero in d1 for addx 14221 addx.l %d1,%d2 # continue inc 14247 # d1: x/scratch (0);shift count for final exponent packing 14304 clr.l %d1 # put zero in d1 for addx 14306 addx.l %d1,%d2 # continue inc 14312 mov.l &12,%d1 # use d1 for shift count 14313 lsr.l %d1,%d0 # shift d0 right by 12 14315 lsr.l %d1,%d0 # shift d0 right by 12 14441 # extracts and shifts. The three msbs from d2 will go into d1. # 14463 # d1: temp used to form the digit 14489 # A3. Multiply d2:d3 by 8; extract msbs into d1. 14491 bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1 14497 # A4. Multiply d4:d5 by 2; add carry out to d1. 14502 addx.w %d6,%d1 # add in extend from mul by 2 14510 addx.w %d6,%d1 # add in extend from add to d1 14520 add.w %d1,%d7 # add in ls digit to d7b 14528 mov.w %d1,%d7 # put new digit in d7b 14653 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 14679 mov.b EXC_OPWORD+0x1(%a6),%d1 14680 andi.b &0x38,%d1 # extract opmode 14681 cmpi.b %d1,&0x18 # postinc? 14683 cmpi.b %d1,&0x20 # predec? 14688 mov.b EXC_OPWORD+0x1(%a6),%d1 14689 andi.w &0x0007,%d1 # fetch An 14691 mov.w (tbl_rest_inc.b,%pc,%d1.w*2),%d1 14692 jmp (tbl_rest_inc.b,%pc,%d1.w*1)
|
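A recurring sequence in the excerpts above (and in the fpsp.S entry that follows) adjusts the extended-precision exponent word after an overflow/underflow: strip the sign, apply the scale factor, add or subtract the 0x6000 bias, mask back to 15 bits, and re-attach the old sign. Below is a minimal C sketch of that arithmetic, assuming a 16-bit {sign,exponent} word as stored in FP_SCR0_EX; the function name and parameters are illustrative only, not identifiers from the sources.

    #include <stdint.h>

    /*
     * Hypothetical model of the {sgn,exp} rebias sequence seen in the
     * listings: strip sign, subtract the scale factor, add (or subtract)
     * the 0x6000 bias, clear the sign position, concat old sign,new exp.
     */
    static uint16_t rebias_exponent(uint16_t sgn_exp, int32_t scale, int32_t bias)
    {
            uint16_t sign = sgn_exp & 0x8000;   /* keep old sign            */
            int32_t  exp  = sgn_exp & 0x7fff;   /* strip sign               */

            exp -= scale;                       /* apply scale factor       */
            exp += bias;                        /* e.g. +0x6000 or -0x6000  */
            exp &= 0x7fff;                      /* clear the sign position  */

            return sign | (uint16_t)exp;        /* concat old sign,new exp  */
    }

The same masking idiom appears for rounding control in these excerpts: andi.b &0x30 keeps only the rounding mode bits and andi.b &0xc0 keeps only the rounding precision bits of the FPCR image.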
H A D | fpsp.S | 645 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 700 mov.b 1+EXC_CMDREG(%a6),%d1 701 andi.w &0x007f,%d1 # extract extension 712 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 713 jsr (tbl_unsupp.l,%pc,%d1.l*1) 733 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 748 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 767 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 806 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 885 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 943 mov.b 1+EXC_CMDREG(%a6),%d1 944 andi.w &0x007f,%d1 # extract extension 955 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 956 jsr (tbl_unsupp.l,%pc,%d1.l*1) 983 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1010 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1041 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1080 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1210 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 1301 bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension 1306 mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr 1307 jsr (tbl_unsupp.l,%pc,%d1.l*1) 1340 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1407 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1572 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1596 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1679 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1694 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1711 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1740 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1764 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1792 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1846 bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension 1851 mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr 1852 jsr (tbl_unsupp.l,%pc,%d1.l*1) 1892 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1910 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 1991 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2123 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2141 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2191 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2231 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2271 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2318 mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man) 2319 lsr.l %d0,%d1 # shift it 2320 bset &31,%d1 # set j-bit 2321 mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man) 2339 mov.w &0x3c01,%d1 # pass denorm threshold 2359 clr.l %d1 2466 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2514 tst.l %d1 # did ifetch fail? 2526 tst.l %d1 # did ifetch fail? 2594 mov.b 1+EXC_CMDREG(%a6),%d1 2595 andi.w &0x007f,%d1 # extract extension 2603 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 2604 jsr (tbl_unsupp.l,%pc,%d1.l*1) 2639 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2699 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2781 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2798 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2804 # right now, d1 = size and d0 = the strg. 
2806 mov.b %d1,EXC_VOFF(%a6) # store strg 2811 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2815 mov.l %d1,-(%sp) # save d1 2841 clr.l %d1 2842 mov.b EXC_VOFF(%a6),%d1 # fetch strg 2844 tst.b %d1 2849 lsl.b &0x1,%d1 2854 lsl.b &0x1,%d1 2859 lsl.b &0x1,%d1 2864 lsl.b &0x1,%d1 2869 lsl.b &0x1,%d1 2874 lsl.b &0x1,%d1 2879 lsl.b &0x1,%d1 2883 mov.l 0x4(%sp),%d1 2900 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 2955 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 2975 bfextu %d0{&19:&3},%d1 2977 cmpi.b %d1,&0x7 # move all regs? 2993 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3022 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3044 movc %pcr,%d1 3045 btst &0x1,%d1 3059 movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1 3113 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3141 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3167 mov.w FP_SRC_EX(%a6),%d1 # fetch exponent 3168 andi.w &0x7fff,%d1 3169 cmpi.w %d1,&0x7fff 3174 mov.l FP_SRC_HI(%a6),%d1 3175 andi.l &0x7fffffff,%d1 3182 mov.l &0x7fffffff,%d1 3185 addq.l &0x1,%d1 3187 mov.l %d1,L_SCR1(%a6) 3191 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg 3207 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3212 tst.l %d1 # did dstore fail? 3217 andi.w &0x0007,%d1 3223 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3228 tst.l %d1 # did dstore fail? 3233 andi.w &0x0007,%d1 3239 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3244 tst.l %d1 # did dstore fail? 3249 andi.w &0x0007,%d1 3309 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3337 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3361 mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg 3378 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3383 tst.l %d1 # did dstore fail? 3388 andi.w &0x0007,%d1 3395 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3400 tst.l %d1 # did dstore fail? 3405 andi.w &0x0007,%d1 3412 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3417 tst.l %d1 # did dstore fail? 3422 andi.w &0x0007,%d1 3427 cmpi.b %d1,&0x7 # is <ea> mode a data reg? 3432 mov.l FP_SRC_HI(%a6),%d1 # load mantissa 3433 lsr.l &0x8,%d1 # shift mantissa for sgl 3434 or.l %d1,%d0 # create sgl SNAN 3438 tst.l %d1 # did dstore fail? 3446 mov.l %d1,-(%sp) 3447 mov.l FP_SRC_HI(%a6),%d1 # load mantissa 3448 lsr.l &0x8,%d1 # shift mantissa for sgl 3449 or.l %d1,%d0 # create sgl SNAN 3450 mov.l (%sp)+,%d1 3451 andi.w &0x0007,%d1 3459 mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa 3462 lsr.l %d0,%d1 3463 or.l %d1,FP_SCR0_EX(%a6) # create dbl hi 3464 mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa 3465 andi.l &0x000007ff,%d1 3466 ror.l %d0,%d1 3467 mov.l %d1,FP_SCR0_HI(%a6) # store to temp space 3468 mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa 3469 lsr.l %d0,%d1 3470 or.l %d1,FP_SCR0_HI(%a6) # create dbl lo 3476 tst.l %d1 # did dstore fail? 3514 tst.l %d1 # did dstore fail? 3534 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3601 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3645 bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg 3646 cmpi.b %d1,&0x17 # is op an fmovecr? 
3677 mov.b 1+EXC_CMDREG(%a6),%d1 3678 andi.w &0x007f,%d1 # extract extension 3683 mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr 3684 jsr (tbl_unsupp.l,%pc,%d1.l*1) 3694 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3704 mov.b 1+EXC_CMDREG(%a6),%d1 3705 andi.l &0x0000007f,%d1 # pass rom offset 3775 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3799 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3863 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 3870 bfextu %d0{&0:&10},%d1 # is it an fmovecr? 3871 cmpi.w %d1,&0x03c8 3874 bfextu %d0{&16:&6},%d1 # is it an fmovecr? 3875 cmpi.b %d1,&0x17 3890 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3904 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 3918 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4021 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1 4075 bfextu %d0{&16:&6},%d1 # extract upper 6 of cmdreg 4076 cmpi.b %d1,&0x17 # is op an fmovecr? 4085 mov.b 1+EXC_CMDREG(%a6),%d1 4086 andi.w &0x003f,%d1 # extract extension bits 4087 lsl.w &0x3,%d1 # shift right 3 bits 4088 or.b STAG(%a6),%d1 # insert src optag bits 4093 mov.w (tbl_trans.w,%pc,%d1.w*2),%d1 4094 jsr (tbl_trans.w,%pc,%d1.w*1) # emulate 4107 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4167 mov.b 1+EXC_CMDREG(%a6),%d1 4168 andi.l &0x0000007f,%d1 # pass rom offset in d1 4245 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4260 bfextu %d0{&10:&3},%d1 # extract mode field 4261 cmpi.b %d1,&0x1 # is it an fdb<cc>? 4263 cmpi.b %d1,&0x7 # is it an fs<cc>? 4265 bfextu %d0{&13:&3},%d1 4266 cmpi.b %d1,&0x2 # is it an fs<cc>? 4305 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4319 tst.l %d1 # did ifetch fail? 4364 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4412 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4433 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 4978 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 5138 mov.l (%a0),%d1 # put exp in hi word 5139 mov.w 4(%a0),%d1 # fetch hi(man) 5140 and.l &0x7FFFFFFF,%d1 # strip sign 5142 cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)? 5147 cmp.l %d1,&0x4004BC7E # is |X| < 15 PI? 5161 mov.l INT(%a6),%d1 # make a copy of N 5162 asl.l &4,%d1 # N *= 16 5163 add.l %d1,%a1 # tbl_addr = a1 + (N*16) 5174 mov.l INT(%a6),%d1 5175 add.l ADJN(%a6),%d1 # SEE IF D0 IS ODD OR EVEN 5176 ror.l &1,%d1 # D0 WAS ODD IFF D0 IS NEGATIVE 5177 cmp.l %d1,&0 5200 ror.l &1,%d1 5201 and.l &0x80000000,%d1 5203 eor.l %d1,X(%a6) # X IS NOW R'= SGN*R 5254 ror.l &1,%d1 5255 and.l &0x80000000,%d1 5260 eor.l %d1,X(%a6) # X IS NOW S'= SGN*S 5261 and.l &0x80000000,%d1 5265 or.l &0x3F800000,%d1 # D0 IS SGN IN SINGLE 5266 mov.l %d1,POSNEG1(%a6) 5301 cmp.l %d1,&0x3FFF8000 5305 mov.l ADJN(%a6),%d1 5306 cmp.l %d1,&0 5315 mov.b &FMOV_OP,%d1 # last inst is MOVE 5348 mov.l (%a0),%d1 5349 mov.w 4(%a0),%d1 5350 and.l &0x7FFFFFFF,%d1 # COMPACTIFY X 5352 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)? 5357 cmp.l %d1,&0x4004BC7E # |X| < 15 PI? 5373 mov.l INT(%a6),%d1 5374 asl.l &4,%d1 5375 add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2 5383 mov.l INT(%a6),%d1 5384 ror.l &1,%d1 5385 cmp.l %d1,&0 # D0 < 0 IFF N IS ODD 5400 mov.l %d1,%d2 5403 eor.l %d1,%d2 5413 ror.l &1,%d1 5414 and.l &0x80000000,%d1 5416 eor.l %d1,POSNEG1(%a6) 5426 eor.l %d1,SPRIME(%a6) 5476 ror.l &1,%d1 5477 and.l &0x80000000,%d1 5482 eor.l %d1,RPRIME(%a6) 5483 eor.l %d1,SPRIME(%a6) 5487 or.l &0x3F800000,%d1 5488 mov.l %d1,POSNEG1(%a6) 5538 cmp.l %d1,&0x3FFF8000 5551 mov.b &FMOV_OP,%d1 # last inst is MOVE 5580 cmp.l %d1,&0x7ffeffff # is arg dangerously large? 
5610 mov.w INARG(%a6),%d1 5611 mov.l %d1,%a1 # save a copy of D0 5612 and.l &0x00007FFF,%d1 5613 sub.l &0x00003FFF,%d1 # d0 = K 5614 cmp.l %d1,&28 5617 sub.l &27,%d1 # d0 = L := K-27 5621 clr.l %d1 # d0 = L := 0 5632 sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI) 5656 mov.l %d1,%d2 # d2 = L 5663 add.l &0x00003FDD,%d1 5664 mov.w %d1,FP_SCR1_EX(%a6) 5668 mov.b ENDFLAG(%a6),%d1 5693 cmp.b %d1,&0 5706 mov.l ADJN(%a6),%d1 5707 cmp.l %d1,&4 5868 mov.l (%a0),%d1 5869 mov.w 4(%a0),%d1 5870 and.l &0x7FFFFFFF,%d1 5872 cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)? 5876 cmp.l %d1,&0x4004BC7E # |X| < 15 PI? 5888 fmov.l %fp1,%d1 # CONVERT TO INTEGER 5890 asl.l &4,%d1 5891 add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2 5897 ror.l &5,%d1 5898 and.l &0x80000000,%d1 # D0 WAS ODD IFF D0 < 0 5903 cmp.l %d1,&0 5984 cmp.l %d1,&0x3FFF8000 5990 mov.b &FMOV_OP,%d1 # last inst is MOVE 6011 cmp.l %d1,&0x7ffeffff # is arg dangerously large? 6041 mov.w INARG(%a6),%d1 6042 mov.l %d1,%a1 # save a copy of D0 6043 and.l &0x00007FFF,%d1 6044 sub.l &0x00003FFF,%d1 # d0 = K 6045 cmp.l %d1,&28 6048 sub.l &27,%d1 # d0 = L := K-27 6052 clr.l %d1 # d0 = L := 0 6063 sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI) 6087 mov.l %d1,%d2 # d2 = L 6094 add.l &0x00003FDD,%d1 6095 mov.w %d1,FP_SCR1_EX(%a6) 6099 mov.b ENDFLAG(%a6),%d1 6124 cmp.b %d1,&0 6137 mov.l INT(%a6),%d1 6138 ror.l &1,%d1 6350 mov.l (%a0),%d1 6351 mov.w 4(%a0),%d1 6353 and.l &0x7FFFFFFF,%d1 6355 cmp.l %d1,&0x3FFB8000 # |X| >= 1/16? 6360 cmp.l %d1,&0x4002FFFF # |X| < 16 ? 6402 mov.l %d1,%d2 # THE EXP AND 16 BITS OF X 6403 and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION 6407 add.l %d2,%d1 # THE 7 BITS IDENTIFYING F 6408 asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|) 6410 add.l %d1,%a1 # ADDRESS OF ATAN(|F|) 6414 mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. AGAIN 6415 and.l &0x80000000,%d1 # SIGN(F) 6416 or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|) 6452 cmp.l %d1,&0x3FFF8000 6462 cmp.l %d1,&0x3FD78000 6505 mov.b &FMOV_OP,%d1 # last inst is MOVE 6513 cmp.l %d1,&0x40638000 6632 mov.l (%a0),%d1 6633 mov.w 4(%a0),%d1 6634 and.l &0x7FFFFFFF,%d1 6635 cmp.l %d1,&0x3FFF8000 6643 cmp.l %d1,&0x3FD78000 6673 mov.l (%a0),%d1 6674 and.l &0x80000000,%d1 # SIGN BIT OF X 6675 or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT 6676 mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT 6684 mov.b &FMOV_OP,%d1 # last inst is MOVE 6733 mov.l (%a0),%d1 # pack exp w/ upper 16 fraction 6734 mov.w 4(%a0),%d1 6735 and.l &0x7FFFFFFF,%d1 6736 cmp.l %d1,&0x3FFF8000 7211 mov.l (%a0),%d1 # load part of input X 7212 and.l &0x7FFF0000,%d1 # biased expo. of X 7213 cmp.l %d1,&0x3FBE0000 # 2^(-65) 7219 mov.w 4(%a0),%d1 # expo. and partial sig. of |X| 7220 cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits 7233 fmov.l %fp0,%d1 # N = int( X * 64/log2 ) 7235 fmov.l %d1,%fp0 # convert to floating-format 7237 mov.l %d1,L_SCR1(%a6) # save N temporarily 7238 and.l &0x3F,%d1 # D0 is J = N mod 64 7239 lsl.l &4,%d1 7240 add.l %d1,%a1 # address of 2^(J/64) 7241 mov.l L_SCR1(%a6),%d1 7242 asr.l &6,%d1 # D0 is M 7243 add.w &0x3FFF,%d1 # biased expo. 
of 2^(M) 7275 mov.w %d1,SCALE(%a6) # SCALE is 2^(M) in extended 7299 mov.l ADJFLAG(%a6),%d1 7302 tst.l %d1 7308 mov.b &FMUL_OP,%d1 # last inst is MUL 7321 cmp.l %d1,&0x400CB27C # 16480 log2 7330 fmov.l %fp0,%d1 # N = int( X * 64/log2 ) 7332 fmov.l %d1,%fp0 # convert to floating-format 7333 mov.l %d1,L_SCR1(%a6) # save N temporarily 7334 and.l &0x3F,%d1 # D0 is J = N mod 64 7335 lsl.l &4,%d1 7336 add.l %d1,%a1 # address of 2^(J/64) 7337 mov.l L_SCR1(%a6),%d1 7338 asr.l &6,%d1 # D0 is K 7339 mov.l %d1,L_SCR1(%a6) # save K temporarily 7340 asr.l &1,%d1 # D0 is M1 7341 sub.l %d1,L_SCR1(%a6) # a1 is M 7342 add.w &0x3FFF,%d1 # biased expo. of 2^(M1) 7343 mov.w %d1,ADJSCALE(%a6) # ADJSCALE := 2^(M1) 7346 mov.l L_SCR1(%a6),%d1 # D0 is M 7347 add.w &0x3FFF,%d1 # biased expo. of 2^(M) 7375 mov.l (%a0),%d1 # load part of input X 7376 and.l &0x7FFF0000,%d1 # biased expo. of X 7377 cmp.l %d1,&0x3FFD0000 # 1/4 7384 mov.w 4(%a0),%d1 # expo. and partial sig. of |X| 7385 cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits 7397 fmov.l %fp0,%d1 # N = int( X * 64/log2 ) 7399 fmov.l %d1,%fp0 # convert to floating-format 7401 mov.l %d1,L_SCR1(%a6) # save N temporarily 7402 and.l &0x3F,%d1 # D0 is J = N mod 64 7403 lsl.l &4,%d1 7404 add.l %d1,%a1 # address of 2^(J/64) 7405 mov.l L_SCR1(%a6),%d1 7406 asr.l &6,%d1 # D0 is M 7407 mov.l %d1,L_SCR1(%a6) # save a copy of M 7417 add.w &0x3FFF,%d1 # D0 is biased expo. of 2^M 7436 mov.w %d1,SC(%a6) # SC is 2^(M) in extended 7441 mov.l L_SCR1(%a6),%d1 # D0 is M 7442 neg.w %d1 # D0 is -M 7444 add.w &0x3FFF,%d1 # biased expo. of 2^(-M) 7449 or.w &0x8000,%d1 # signed/expo. of -2^(-M) 7450 mov.w %d1,ONEBYSC(%a6) # OnebySc is -2^(-M) 7469 mov.l L_SCR1(%a6),%d1 # retrieve M 7470 cmp.l %d1,&63 7480 cmp.l %d1,&-3 7503 cmp.l %d1,&0x3FBE0000 # 2^(-65) 7508 cmp.l %d1,&0x00330000 # 2^(-16312) 7516 mov.b &FADD_OP,%d1 # last inst is ADD 7529 mov.b &FMUL_OP,%d1 # last inst is MUL 7586 mov.l (%a0),%d1 7587 cmp.l %d1,&0 7723 mov.l (%a0),%d1 7724 mov.w 4(%a0),%d1 7725 and.l &0x7FFFFFFF,%d1 7726 cmp.l %d1,&0x400CB167 7747 mov.b &FADD_OP,%d1 # last inst is ADD 7752 cmp.l %d1,&0x400CB2B3 7768 mov.b &FMUL_OP,%d1 # last inst is MUL 7835 mov.l (%a0),%d1 7836 mov.w 4(%a0),%d1 7837 mov.l %d1,%a1 # save (compacted) operand 7838 and.l &0x7FFFFFFF,%d1 7839 cmp.l %d1,&0x400CB167 7860 mov.l %a1,%d1 7861 and.l &0x80000000,%d1 7862 or.l &0x3F000000,%d1 7864 mov.l %d1,-(%sp) 7867 mov.b &FMUL_OP,%d1 # last inst is MUL 7872 cmp.l %d1,&0x400CB2B3 7878 mov.l %a1,%d1 7879 and.l &0x80000000,%d1 7880 or.l &0x7FFB0000,%d1 7881 mov.l %d1,-(%sp) # EXTENDED FMT 7893 mov.b &FMUL_OP,%d1 # last inst is MUL 7961 mov.l (%a0),%d1 7962 mov.w 4(%a0),%d1 7963 mov.l %d1,X(%a6) 7964 and.l &0x7FFFFFFF,%d1 7965 cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)? 7967 cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2? 
7973 mov.l X(%a6),%d1 7974 mov.l %d1,SGN(%a6) 7975 and.l &0x7FFF0000,%d1 7976 add.l &0x00010000,%d1 # EXPONENT OF 2|X| 7977 mov.l %d1,X(%a6) 7991 mov.l SGN(%a6),%d1 7993 eor.l %d1,V(%a6) 8000 cmp.l %d1,&0x3FFF8000 8003 cmp.l %d1,&0x40048AA1 8010 mov.l X(%a6),%d1 8011 mov.l %d1,SGN(%a6) 8012 and.l &0x7FFF0000,%d1 8013 add.l &0x00010000,%d1 # EXPO OF 2|X| 8014 mov.l %d1,X(%a6) # Y = 2|X| 8016 mov.l SGN(%a6),%d1 8026 mov.l SGN(%a6),%d1 8029 eor.l &0xC0000000,%d1 # -SIGN(X)*2 8030 fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT 8033 mov.l SGN(%a6),%d1 8034 or.l &0x3F800000,%d1 # SGN 8035 fmov.s %d1,%fp0 # SGN IN SGL FMT 8038 mov.b &FADD_OP,%d1 # last inst is ADD 8044 mov.b &FMOV_OP,%d1 # last inst is MOVE 8050 mov.l X(%a6),%d1 8051 and.l &0x80000000,%d1 8052 or.l &0x3F800000,%d1 8053 fmov.s %d1,%fp0 8054 and.l &0x80000000,%d1 8055 eor.l &0x80800000,%d1 # -SIGN(X)*EPS 8058 fadd.s %d1,%fp0 8327 mov.l (%a0),%d1 8328 mov.w 4(%a0),%d1 8334 cmp.l %d1,&0 # CHECK IF X IS NEGATIVE 8337 cmp.l %d1,&0x3ffef07d # IS X < 15/16? 8339 cmp.l %d1,&0x3fff8841 # IS X > 17/16? 8355 asr.l &8,%d1 8356 asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X 8357 sub.l &0x3FFF,%d1 # THIS IS K 8358 add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM. 8360 fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT 8367 mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F 8368 and.l &0x7E000000,%d1 8369 asr.l &8,%d1 8370 asr.l &8,%d1 8371 asr.l &4,%d1 # SHIFTED 20, D0 IS THE DISPLACEMENT 8372 add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F 8553 mov.b &FMOV_OP,%d1 # last inst is MOVE 8564 mov.l X(%a6),%d1 8565 cmp.l %d1,&0 8567 cmp.l %d1,&0x3ffe8000 # IS BOUNDS [1/2,3/2]? 8569 cmp.l %d1,&0x3fffc000 8577 cmp.l %d1,&0x3ffef07d 8579 cmp.l %d1,&0x3fff8841 8603 cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1 8611 mov.l FFRAC(%a6),%d1 8612 and.l &0x7E000000,%d1 8613 asr.l &8,%d1 8614 asr.l &8,%d1 8615 asr.l &4,%d1 # D0 CONTAINS DISPLACEMENT FOR 1/F 8620 add.l %d1,%a0 8629 mov.l FFRAC(%a6),%d1 8630 and.l &0x7E000000,%d1 8631 asr.l &8,%d1 8632 asr.l &8,%d1 8633 asr.l &4,%d1 8637 add.l %d1,%a0 # A0 IS ADDRESS OF 1/F 8643 cmp.l %d1,&0 8707 mov.l (%a0),%d1 8708 mov.w 4(%a0),%d1 8709 and.l &0x7FFFFFFF,%d1 8710 cmp.l %d1,&0x3FFF8000 8722 mov.l (%a0),%d1 8723 and.l &0x80000000,%d1 8724 or.l &0x3F000000,%d1 # SIGN(X)*HALF 8725 mov.l %d1,-(%sp) 8736 mov.b &FMUL_OP,%d1 # last inst is MUL 8849 mov.l (%a0),%d1 8861 mov.l (%a0),%d1 8873 mov.l (%a0),%d1 8876 mov.l 8(%a0),%d1 8879 mov.l 4(%a0),%d1 8880 and.l &0x7FFFFFFF,%d1 8884 mov.w (%a0),%d1 8885 and.l &0x00007FFF,%d1 8886 sub.l &0x3FFF,%d1 8889 fmov.l %d1,%fp0 8906 mov.l (%a0),%d1 9094 mov.l (%a0),%d1 9095 mov.w 4(%a0),%d1 9097 and.l &0x7FFFFFFF,%d1 9099 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)? 9104 cmp.l %d1,&0x400D80C0 # |X| > 16480? 9117 mov.l INT(%a6),%d1 9118 mov.l %d1,%d2 9119 and.l &0x3F,%d1 # D0 IS J 9120 asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64) 9121 add.l %d1,%a1 # ADDRESS FOR 2^(J/64) 9123 mov.l %d2,%d1 9124 asr.l &1,%d1 # D0 IS M 9125 sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J 9146 add.w %d1,FACT1(%a6) 9148 add.w %d1,FACT2(%a6) 9154 cmp.l %d1,&0x3FFF8000 9166 mov.l X(%a6),%d1 9167 cmp.l %d1,&0 9181 mov.l (%a0),%d1 9182 or.l &0x00800001,%d1 9183 fadd.s %d1,%fp0 9191 mov.l (%a0),%d1 9192 mov.w 4(%a0),%d1 9194 and.l &0x7FFFFFFF,%d1 9196 cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)? 9201 cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ? 
9214 mov.l INT(%a6),%d1 9215 mov.l %d1,%d2 9216 and.l &0x3F,%d1 # D0 IS J 9217 asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64) 9218 add.l %d1,%a1 # ADDRESS FOR 2^(J/64) 9220 mov.l %d2,%d1 9221 asr.l &1,%d1 # D0 IS M 9222 sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J 9250 add.w %d1,FACT1(%a6) 9251 add.w %d1,FACT2(%a6) 9295 mov.b &FMUL_OP,%d1 # last inst is MUL 9305 mov.l (%a0),%d1 9306 or.l &0x00800001,%d1 9307 fadd.s %d1,%fp0 9311 # smovcr(): returns the ROM constant at the offset specified in d1 # 9316 # d1 = ROM offset # 9325 mov.l %d1,-(%sp) # save rom offset for a sec 9328 mov.l %d0,%d1 # make a copy 9329 andi.w &0x3,%d1 # extract rnd mode 9332 mov.w %d1,%d0 # put rnd mode in lo 9334 mov.l (%sp)+,%d1 # get rom offset 9339 tst.b %d1 # if zero, offset is to pi 9341 cmpi.b %d1,&0x0a # check range $01 - $0a 9343 cmpi.b %d1,&0x0e # check range $0b - $0e 9345 cmpi.b %d1,&0x2f # check range $10 - $2f 9347 cmpi.b %d1,&0x3f # check range $30 - $3f 9386 subi.b &0xb,%d1 # make offset in 0-4 range 9392 cmpi.b %d1,&0x2 # is result log10(e)? 9428 subi.b &0x30,%d1 # make offset in 0-f range 9434 cmpi.b %d1,&0x1 # is offset <= $31? 9436 cmpi.b %d1,&0x7 # is $32 <= offset <= $37? 9453 mulu.w &0xc,%d1 # offset points into tables 9460 fmovm.x (%a0,%d1.w),&0x80 # return result in fp0 9470 mov.w 0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word 9471 mov.l 0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word 9472 mov.l 0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word 9473 mov.l %d0,%d1 9583 mov.w DST_EX(%a1),%d1 # get dst exponent 9585 andi.l &0x00007fff,%d1 # strip sign from dst exp 9633 mov.l &0x80000000,%d1 # load normalized mantissa 9638 lsr.l %d0,%d1 # no; bit stays in upper lw 9640 mov.l %d1,-(%sp) # insert new high mantissa 9645 lsr.l %d0,%d1 # make low mantissa longword 9646 mov.l %d1,-(%sp) # insert new low mantissa 9666 mov.b &FMUL_OP,%d1 # last inst is MUL 9691 mov.b &FMOV_OP,%d1 # last inst is MOVE 9695 mov.l (%sp)+,%d0 # load control bits into d1 9840 mov.w SignY(%a6),%d1 9841 eor.l %d0,%d1 9842 and.l &0x00008000,%d1 9843 mov.w %d1,SignQ(%a6) # sign(Q) obtained 9845 mov.l DST_HI(%a1),%d1 9850 tst.l %d1 9854 mov.l %d2,%d1 9858 bfffo %d1{&0:&32},%d6 9859 lsl.l %d6,%d1 9866 bfffo %d1{&0:&32},%d6 9868 lsl.l %d6,%d1 9874 or.l %d7,%d1 # (D0,D1,D2) normalized 9909 cmp.l %d1,%d4 # compare hi(R) and hi(Y) 9925 subx.l %d4,%d1 # hi(R) - hi(Y) 9936 roxl.l &1,%d1 # hi(R) = 2hi(R) + carry 9949 tst.l %d1 9953 mov.l %d2,%d1 9957 bfffo %d1{&0:&32},%d6 9958 lsl.l %d6,%d1 9965 bfffo %d1{&0:&32},%d6 9968 lsl.l %d6,%d1 9974 or.l %d7,%d1 # (D0,D1,D2) normalized 9982 mov.l %d1,R_Hi(%a6) 9992 mov.l %d1,R_Hi(%a6) 10016 cmp.l %d1,%d4 10060 mov.b &FMUL_OP,%d1 # last inst is MUL 10071 mov.b &FMOV_OP,%d1 # last inst is MOVE 10179 mov.l %d0,%d1 # make copy of rnd prec,mode 10180 andi.b &0xc0,%d1 # extended precision? 10194 smi.b %d1 # set d0 accodingly 10241 smi.b %d1 # set d1 accordingly 10253 sf.b %d1 # set d0 to represent positive 10278 mov.b %d0,%d1 # fetch rnd mode/prec 10279 andi.b &0xc0,%d1 # extract rnd prec 10292 movm.l &0xc080,-(%sp) # save d0-d1/a0 10294 movm.l (%sp)+,&0x0103 # restore d0-d1/a0 10297 cmpi.b %d1,&0x40 # is prec dbl? 10306 mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of 10307 andi.l &0x7ff,%d1 # dbl mantissa set? 
10319 smi.b %d1 # set d1 accordingly 10332 sf.b %d1 # clear sign flag for positive 10411 # d1.b : sign bit of result ('11111111 = (-) ; '00000000 = (+)) # 10418 andi.w &0x10,%d1 # keep sign bit in 4th spot 10422 or.b %d1,%d0 # concat {sgn,mode,prec} 10424 mov.l %d0,%d1 # make a copy 10425 lsl.b &0x1,%d1 # mult index 2 by 2 10428 lea (tbl_unf_result.b,%pc,%d1.w*8),%a0 # grab result ptr 10745 mov.b DTAG(%a6),%d1 10747 cmpi.b %d1,&ZERO 10749 cmpi.b %d1,&INF 10751 cmpi.b %d1,&DENORM 10753 cmpi.b %d1,&SNAN 10759 mov.b DTAG(%a6),%d1 10761 cmpi.b %d1,&ZERO 10763 cmpi.b %d1,&INF 10765 cmpi.b %d1,&DENORM 10767 cmpi.b %d1,&QNAN 10773 mov.b DTAG(%a6),%d1 10775 cmpi.b %d1,&ZERO 10777 cmpi.b %d1,&INF 10779 cmpi.b %d1,&DENORM 10781 cmpi.b %d1,&QNAN 10787 mov.b SRC_EX(%a0),%d1 # get src sign 10789 eor.b %d0,%d1 # get qbyte sign 10790 andi.b &0x80,%d1 10791 mov.b %d1,FPSR_QBYTE(%a6) 10800 mov.b SRC_EX(%a0),%d1 # get src sign 10802 eor.b %d0,%d1 # get qbyte sign 10803 andi.b &0x80,%d1 10804 mov.b %d1,FPSR_QBYTE(%a6) 10826 mov.b DTAG(%a6),%d1 10828 cmpi.b %d1,&ZERO 10830 cmpi.b %d1,&INF 10832 cmpi.b %d1,&DENORM 10834 cmpi.b %d1,&QNAN 10840 mov.b DTAG(%a6),%d1 10842 cmpi.b %d1,&ZERO 10844 cmpi.b %d1,&INF 10846 cmpi.b %d1,&DENORM 10848 cmpi.b %d1,&QNAN 10854 mov.b DTAG(%a6),%d1 10856 cmpi.b %d1,&ZERO 10858 cmpi.b %d1,&INF 10860 cmpi.b %d1,&DENORM 10862 cmpi.b %d1,&QNAN 10871 mov.b DTAG(%a6),%d1 10873 cmpi.b %d1,&ZERO 10875 cmpi.b %d1,&INF 10877 cmpi.b %d1,&DENORM 10879 cmpi.b %d1,&QNAN 10885 mov.b DTAG(%a6),%d1 10887 cmpi.b %d1,&ZERO 10889 cmpi.b %d1,&INF 10891 cmpi.b %d1,&DENORM 10893 cmpi.b %d1,&QNAN 10899 mov.b DTAG(%a6),%d1 10901 cmpi.b %d1,&QNAN 10903 cmpi.b %d1,&SNAN 10914 mov.b DTAG(%a6),%d1 10915 cmpi.b %d1,&QNAN 10917 cmpi.b %d1,&SNAN 10926 mov.b DTAG(%a6),%d1 10927 cmpi.b %d1,&QNAN 10929 cmpi.b %d1,&SNAN 10998 mov.b STAG(%a6),%d1 11000 cmpi.b %d1,&ZERO 11002 cmpi.b %d1,&INF 11004 cmpi.b %d1,&DENORM 11006 cmpi.b %d1,&QNAN 11012 mov.b STAG(%a6),%d1 11014 cmpi.b %d1,&ZERO 11016 cmpi.b %d1,&INF 11018 cmpi.b %d1,&DENORM 11020 cmpi.b %d1,&QNAN 11026 mov.b STAG(%a6),%d1 11028 cmpi.b %d1,&ZERO 11030 cmpi.b %d1,&INF 11032 cmpi.b %d1,&DENORM 11034 cmpi.b %d1,&QNAN 11040 mov.b STAG(%a6),%d1 11042 cmpi.b %d1,&ZERO 11044 cmpi.b %d1,&INF 11046 cmpi.b %d1,&DENORM 11048 cmpi.b %d1,&QNAN 11054 mov.b STAG(%a6),%d1 11056 cmpi.b %d1,&ZERO 11058 cmpi.b %d1,&INF 11060 cmpi.b %d1,&DENORM 11062 cmpi.b %d1,&QNAN 11068 mov.b STAG(%a6),%d1 11070 cmpi.b %d1,&ZERO 11072 cmpi.b %d1,&INF 11074 cmpi.b %d1,&DENORM 11076 cmpi.b %d1,&QNAN 11082 mov.b STAG(%a6),%d1 11084 cmpi.b %d1,&ZERO 11086 cmpi.b %d1,&INF 11088 cmpi.b %d1,&DENORM 11090 cmpi.b %d1,&QNAN 11096 mov.b STAG(%a6),%d1 11098 cmpi.b %d1,&ZERO 11100 cmpi.b %d1,&INF 11102 cmpi.b %d1,&DENORM 11104 cmpi.b %d1,&QNAN 11110 mov.b STAG(%a6),%d1 11112 cmpi.b %d1,&ZERO 11114 cmpi.b %d1,&INF 11116 cmpi.b %d1,&DENORM 11118 cmpi.b %d1,&QNAN 11124 mov.b STAG(%a6),%d1 11126 cmpi.b %d1,&ZERO 11128 cmpi.b %d1,&INF 11130 cmpi.b %d1,&DENORM 11132 cmpi.b %d1,&QNAN 11138 mov.b STAG(%a6),%d1 11140 cmpi.b %d1,&ZERO 11142 cmpi.b %d1,&INF 11144 cmpi.b %d1,&DENORM 11146 cmpi.b %d1,&QNAN 11152 mov.b STAG(%a6),%d1 11154 cmpi.b %d1,&ZERO 11156 cmpi.b %d1,&INF 11158 cmpi.b %d1,&DENORM 11160 cmpi.b %d1,&QNAN 11166 mov.b STAG(%a6),%d1 11168 cmpi.b %d1,&ZERO 11170 cmpi.b %d1,&INF 11172 cmpi.b %d1,&DENORM 11174 cmpi.b %d1,&QNAN 11180 mov.b STAG(%a6),%d1 11182 cmpi.b %d1,&ZERO 11184 cmpi.b %d1,&INF 11186 cmpi.b %d1,&DENORM 11188 cmpi.b %d1,&QNAN 11194 mov.b STAG(%a6),%d1 11196 cmpi.b %d1,&ZERO 11198 cmpi.b 
%d1,&INF 11200 cmpi.b %d1,&DENORM 11202 cmpi.b %d1,&QNAN 11208 mov.b STAG(%a6),%d1 11210 cmpi.b %d1,&ZERO 11212 cmpi.b %d1,&INF 11214 cmpi.b %d1,&DENORM 11216 cmpi.b %d1,&QNAN 11222 mov.b STAG(%a6),%d1 11224 cmpi.b %d1,&ZERO 11226 cmpi.b %d1,&INF 11228 cmpi.b %d1,&DENORM 11230 cmpi.b %d1,&QNAN 11236 mov.b STAG(%a6),%d1 11238 cmpi.b %d1,&ZERO 11240 cmpi.b %d1,&INF 11242 cmpi.b %d1,&DENORM 11244 cmpi.b %d1,&QNAN 11250 mov.b STAG(%a6),%d1 11252 cmpi.b %d1,&ZERO 11254 cmpi.b %d1,&INF 11256 cmpi.b %d1,&DENORM 11258 cmpi.b %d1,&QNAN 11264 mov.b STAG(%a6),%d1 11266 cmpi.b %d1,&ZERO 11268 cmpi.b %d1,&INF 11270 cmpi.b %d1,&DENORM 11272 cmpi.b %d1,&QNAN 11278 mov.b STAG(%a6),%d1 11280 cmpi.b %d1,&ZERO 11282 cmpi.b %d1,&INF 11284 cmpi.b %d1,&DENORM 11286 cmpi.b %d1,&QNAN 11292 mov.b STAG(%a6),%d1 11294 cmpi.b %d1,&ZERO 11296 cmpi.b %d1,&INF 11298 cmpi.b %d1,&DENORM 11300 cmpi.b %d1,&QNAN 11306 mov.b STAG(%a6),%d1 11308 cmpi.b %d1,&ZERO 11310 cmpi.b %d1,&INF 11312 cmpi.b %d1,&DENORM 11314 cmpi.b %d1,&QNAN 11320 mov.b STAG(%a6),%d1 11322 cmpi.b %d1,&ZERO 11324 cmpi.b %d1,&INF 11326 cmpi.b %d1,&DENORM 11328 cmpi.b %d1,&QNAN 11383 cmpi.b %d1,&FMOV_OP 11385 cmpi.b %d1,&FADD_OP 11581 clr.w %d1 11582 mov.b DTAG(%a6),%d1 11583 lsl.b &0x3,%d1 11584 or.b STAG(%a6),%d1 # combine src tags 11603 mov.w 2+L_SCR3(%a6),%d1 # fetch precision 11604 lsr.b &0x6,%d1 # shift to lo bits 11606 cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl? 11610 cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl? 11630 fmov.l %fpsr,%d1 # save status 11633 or.l %d1,USER_FPSR(%a6) # save INEX2,N 11638 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 11639 mov.l %d1,%d2 # make a copy 11640 andi.l &0x7fff,%d1 # strip sign 11642 sub.l %d0,%d1 # add scale factor 11643 or.w %d2,%d1 # concat old sign,new exp 11644 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 11670 fmov.l %fpsr,%d1 # save status 11673 or.l %d1,USER_FPSR(%a6) # save INEX2,N 11679 mov.b FPCR_ENABLE(%a6),%d1 11680 andi.b &0x13,%d1 # is OVFL or INEX enabled? 11686 sne %d1 # set sign param accordingly 11700 mov.l L_SCR3(%a6),%d1 11701 andi.b &0xc0,%d1 # test the rnd prec 11708 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 11709 mov.w %d1,%d2 # make a copy 11710 andi.l &0x7fff,%d1 # strip sign 11711 sub.l %d0,%d1 # add scale factor 11712 subi.l &0x6000,%d1 # subtract bias 11713 andi.w &0x7fff,%d1 # clear sign bit 11715 or.w %d2,%d1 # concat old sign,new exp 11716 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 11724 mov.l L_SCR3(%a6),%d1 11725 andi.b &0x30,%d1 # keep rnd mode only 11726 fmov.l %d1,%fpcr # set FPCR 11748 fmov.l %fpsr,%d1 # save status 11751 or.l %d1,USER_FPSR(%a6) # save INEX2,N 11786 fmov.l %fpsr,%d1 # save status 11789 or.l %d1,USER_FPSR(%a6) # save INEX2,N 11791 mov.b FPCR_ENABLE(%a6),%d1 11792 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 11799 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 11811 mov.l L_SCR3(%a6),%d1 11812 andi.b &0xc0,%d1 # is precision extended? 
11828 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 11829 mov.l %d1,%d2 # make a copy 11830 andi.l &0x7fff,%d1 # strip sign 11832 sub.l %d0,%d1 # add scale factor 11833 addi.l &0x6000,%d1 # add bias 11834 andi.w &0x7fff,%d1 11835 or.w %d2,%d1 # concat old sign,new exp 11836 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 11842 mov.l L_SCR3(%a6),%d1 11843 andi.b &0x30,%d1 # use only rnd mode 11844 fmov.l %d1,%fpcr # set FPCR 11859 fmov.l %fpsr,%d1 # save status 11862 or.l %d1,USER_FPSR(%a6) # save INEX2,N 11878 mov.l L_SCR3(%a6),%d1 11879 andi.b &0xc0,%d1 # keep rnd prec 11880 ori.b &rz_mode*0x10,%d1 # insert RZ 11882 fmov.l %d1,%fpcr # set FPCR 11899 mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1 11900 jmp (tbl_fmul_op.b,%pc,%d1.w) 11971 mov.b DST_EX(%a1),%d1 11972 eor.b %d0,%d1 11995 mov.b DST_EX(%a1),%d1 11996 eor.b %d0,%d1 12012 mov.b DST_EX(%a1),%d1 12013 eor.b %d0,%d1 12066 mov.b STAG(%a6),%d1 # fetch src optype tag 12119 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp 12120 andi.w &0x8000,%d1 # keep old sign 12122 or.w %d1,%d0 # concat new exo,old sign 12158 fmov.l %fpsr,%d1 # save FPSR 12161 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12166 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 12167 mov.w %d1,%d2 # make a copy 12168 andi.l &0x7fff,%d1 # strip sign 12169 sub.l %d0,%d1 # add scale factor 12171 or.w %d1,%d2 # concat old sign,new exponent 12205 mov.b FPCR_ENABLE(%a6),%d1 12206 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 12211 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 12224 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 12227 mov.w %d1,%d2 # make a copy 12228 andi.l &0x7fff,%d1 # strip sign 12229 sub.l %d0,%d1 # subtract scale factor 12231 addi.l &0x6000,%d1 # add new bias 12232 andi.w &0x7fff,%d1 12233 or.w %d1,%d2 # concat old sign,new exp 12249 fmov.l %fpsr,%d1 # save FPSR 12251 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12256 mov.b FPCR_ENABLE(%a6),%d1 12257 andi.b &0x13,%d1 # is OVFL or INEX enabled? 12266 sne %d1 # set sign param accordingly 12280 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 12281 mov.l %d1,%d2 # make a copy 12282 andi.l &0x7fff,%d1 # strip sign 12284 sub.l %d0,%d1 # add scale factor 12285 sub.l &0x6000,%d1 # subtract bias 12286 andi.w &0x7fff,%d1 12287 or.w %d2,%d1 12288 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 12302 fmov.l %fpsr,%d1 # save status 12305 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12320 cmpi.b %d1,&DENORM # weed out DENORM 12322 cmpi.b %d1,&SNAN # weed out SNANs 12324 cmpi.b %d1,&QNAN # weed out QNANs 12399 clr.w %d1 12400 mov.b DTAG(%a6),%d1 12401 lsl.b &0x3,%d1 12402 or.b STAG(%a6),%d1 # combine src tags 12426 mov.w 2+L_SCR3(%a6),%d1 # fetch precision 12427 lsr.b &0x6,%d1 # shift to lo bits 12429 cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow? 12432 cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow? 12444 fmov.l %fpsr,%d1 # save FPSR 12447 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12452 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 12453 mov.l %d1,%d2 # make a copy 12454 andi.l &0x7fff,%d1 # strip sign 12456 sub.l %d0,%d1 # add scale factor 12457 or.w %d2,%d1 # concat old sign,new exp 12458 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 12492 cmp.l %d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4) 12499 mov.b FPCR_ENABLE(%a6),%d1 12500 andi.b &0x13,%d1 # is OVFL or INEX enabled? 12505 sne %d1 # set sign param accordingly 12513 mov.l L_SCR3(%a6),%d1 12514 andi.b &0xc0,%d1 # is precision extended? 
12521 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 12522 mov.w %d1,%d2 # make a copy 12523 andi.l &0x7fff,%d1 # strip sign 12524 sub.l %d0,%d1 # add scale factor 12525 subi.l &0x6000,%d1 # subtract bias 12526 andi.w &0x7fff,%d1 # clear sign bit 12528 or.w %d2,%d1 # concat old sign,new exp 12529 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 12537 mov.l L_SCR3(%a6),%d1 12538 andi.b &0x30,%d1 # keep rnd mode 12539 fmov.l %d1,%fpcr # set FPCR 12556 fmov.l %fpsr,%d1 # save status 12559 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12561 mov.b FPCR_ENABLE(%a6),%d1 12562 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 12569 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 12581 mov.l L_SCR3(%a6),%d1 12582 andi.b &0xc0,%d1 # is precision extended? 12596 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 12597 mov.l %d1,%d2 # make a copy 12598 andi.l &0x7fff,%d1 # strip sign 12600 sub.l %d0,%d1 # add scale factoer 12601 addi.l &0x6000,%d1 # add bias 12602 andi.w &0x7fff,%d1 12603 or.w %d2,%d1 # concat old sign,new exp 12604 mov.w %d1,FP_SCR0_EX(%a6) # insert new exp 12610 mov.l L_SCR3(%a6),%d1 12611 andi.b &0x30,%d1 # use only rnd mode 12612 fmov.l %d1,%fpcr # set FPCR 12627 fmov.l %fpsr,%d1 # save status 12630 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12646 mov.l L_SCR3(%a6),%d1 12647 andi.b &0xc0,%d1 # keep rnd prec 12648 ori.b &rz_mode*0x10,%d1 # insert RZ 12650 fmov.l %d1,%fpcr # set FPCR 12667 mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1 12668 jmp (tbl_fdiv_op.b,%pc,%d1.w*1) 12736 mov.b DST_EX(%a1),%d1 # or of input signs. 12737 eor.b %d0,%d1 12756 mov.b DST_EX(%a1),%d1 12757 eor.b %d0,%d1 12776 mov.b SRC_EX(%a0),%d1 12777 eor.b %d0,%d1 12839 mov.b STAG(%a6),%d1 12899 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp 12900 andi.w &0x8000,%d1 # keep old sign 12902 or.w %d1,%d0 # concat old sign, new exponent 12938 fmov.l %fpsr,%d1 # save FPSR 12941 or.l %d1,USER_FPSR(%a6) # save INEX2,N 12946 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp 12947 mov.w %d1,%d2 # make a copy 12948 andi.l &0x7fff,%d1 # strip sign 12949 sub.l %d0,%d1 # add scale factor 12951 or.w %d1,%d2 # concat old sign,new exp 12985 mov.b FPCR_ENABLE(%a6),%d1 12986 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 12991 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 13004 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 13007 mov.l %d1,%d2 # make a copy 13008 andi.l &0x7fff,%d1 # strip sign 13010 sub.l %d0,%d1 # subtract scale factor 13011 addi.l &0x6000,%d1 # add new bias 13012 andi.w &0x7fff,%d1 13013 or.w %d2,%d1 # concat new sign,new exp 13014 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp 13029 fmov.l %fpsr,%d1 # save FPSR 13031 or.l %d1,USER_FPSR(%a6) # save INEX2,N 13036 mov.b FPCR_ENABLE(%a6),%d1 13037 andi.b &0x13,%d1 # is OVFL or INEX enabled? 
13046 sne %d1 # set sign param accordingly 13060 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 13061 mov.l %d1,%d2 # make a copy 13062 andi.l &0x7fff,%d1 # strip sign 13064 sub.l %d0,%d1 # add scale factor 13065 subi.l &0x6000,%d1 # subtract bias 13066 andi.w &0x7fff,%d1 13067 or.w %d2,%d1 # concat sign,exp 13068 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 13082 fmov.l %fpsr,%d1 # save status 13085 or.l %d1,USER_FPSR(%a6) # save INEX2,N 13100 cmpi.b %d1,&DENORM # weed out DENORM 13102 cmpi.b %d1,&SNAN # weed out SNAN 13104 cmpi.b %d1,&QNAN # weed out QNAN 13139 mov.b STAG(%a6),%d1 13157 cmpi.b %d1,&ZERO # weed out ZERO 13159 cmpi.b %d1,&INF # weed out INF 13161 cmpi.b %d1,&SNAN # weed out SNAN 13163 cmpi.b %d1,&QNAN # weed out QNAN 13231 mov.b STAG(%a6),%d1 13255 cmpi.b %d1,&ZERO # weed out ZERO 13257 cmpi.b %d1,&INF # weed out INF 13259 cmpi.b %d1,&DENORM # weed out DENORM 13261 cmpi.b %d1,&SNAN # weed out SNAN 13337 mov.b STAG(%a6),%d1 13357 cmpi.b %d1,&ZERO # weed out ZERO 13359 cmpi.b %d1,&INF # weed out INF 13361 cmpi.b %d1,&DENORM # weed out DENORM 13363 cmpi.b %d1,&SNAN # weed out SNAN 13462 mov.b STAG(%a6),%d1 13479 mov.w SRC_EX(%a0),%d1 13480 bclr &15,%d1 # force absolute value 13481 mov.w %d1,FP_SCR0_EX(%a6) # insert exponent 13517 mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp 13518 andi.w &0x8000,%d1 # keep old sign 13520 or.w %d1,%d0 # concat old sign, new exponent 13556 fmov.l %fpsr,%d1 # save FPSR 13559 or.l %d1,USER_FPSR(%a6) # save INEX2,N 13564 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp 13565 mov.l %d1,%d2 # make a copy 13566 andi.l &0x7fff,%d1 # strip sign 13567 sub.l %d0,%d1 # add scale factor 13569 or.w %d1,%d2 # concat old sign,new exp 13600 mov.b FPCR_ENABLE(%a6),%d1 13601 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 13606 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 13619 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 13622 mov.l %d1,%d2 # make a copy 13623 andi.l &0x7fff,%d1 # strip sign 13625 sub.l %d0,%d1 # subtract scale factor 13626 addi.l &0x6000,%d1 # add new bias 13627 andi.w &0x7fff,%d1 13628 or.w %d2,%d1 # concat new sign,new exp 13629 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp 13644 fmov.l %fpsr,%d1 # save FPSR 13646 or.l %d1,USER_FPSR(%a6) # save INEX2,N 13651 mov.b FPCR_ENABLE(%a6),%d1 13652 andi.b &0x13,%d1 # is OVFL or INEX enabled? 
13661 sne %d1 # set sign param accordingly 13675 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 13676 mov.l %d1,%d2 # make a copy 13677 andi.l &0x7fff,%d1 # strip sign 13679 sub.l %d0,%d1 # add scale factor 13680 subi.l &0x6000,%d1 # subtract bias 13681 andi.w &0x7fff,%d1 13682 or.w %d2,%d1 # concat sign,exp 13683 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 13697 fmov.l %fpsr,%d1 # save status 13700 or.l %d1,USER_FPSR(%a6) # save INEX2,N 13715 cmpi.b %d1,&DENORM # weed out DENORM 13717 cmpi.b %d1,&SNAN # weed out SNAN 13719 cmpi.b %d1,&QNAN # weed out QNAN 13724 cmpi.b %d1,&INF # weed out INF 13758 clr.w %d1 13759 mov.b DTAG(%a6),%d1 13760 lsl.b &0x3,%d1 13761 or.b STAG(%a6),%d1 13782 mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1 13783 jmp (tbl_fcmp_op.b,%pc,%d1.w*1) 13899 mov.b DST_EX(%a1),%d1 13900 eor.b %d0,%d1 13913 mov.b DST_EX(%a1),%d1 13914 eor.b %d0,%d1 13962 clr.w %d1 13963 mov.b DTAG(%a6),%d1 13964 lsl.b &0x3,%d1 13965 or.b STAG(%a6),%d1 14001 fmov.l %fpsr,%d1 # save status 14004 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14009 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 14010 mov.l %d1,%d2 # make a copy 14011 andi.l &0x7fff,%d1 # strip sign 14013 sub.l %d0,%d1 # add scale factor 14014 or.w %d2,%d1 # concat old sign,new exp 14015 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 14028 fmov.l %fpsr,%d1 # save status 14031 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14038 mov.b FPCR_ENABLE(%a6),%d1 14039 andi.b &0x13,%d1 # is OVFL or INEX enabled? 14044 sne %d1 # set sign param accordingly 14056 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 14057 mov.l %d1,%d2 # make a copy 14058 andi.l &0x7fff,%d1 # strip sign 14059 sub.l %d0,%d1 # add scale factor 14060 subi.l &0x6000,%d1 # subtract bias 14061 andi.w &0x7fff,%d1 14063 or.w %d2,%d1 # concat old sign,new exp 14064 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 14077 fmov.l %fpsr,%d1 # save status 14080 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14099 fmov.l %fpsr,%d1 # save status 14102 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14104 mov.b FPCR_ENABLE(%a6),%d1 14105 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 14112 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 14133 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 14134 mov.l %d1,%d2 # make a copy 14135 andi.l &0x7fff,%d1 # strip sign 14137 sub.l %d0,%d1 # add scale factor 14138 addi.l &0x6000,%d1 # add bias 14139 andi.w &0x7fff,%d1 14140 or.w %d2,%d1 # concat old sign,new exp 14141 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 14154 fmov.l %fpsr,%d1 # save status 14157 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14173 mov.l L_SCR3(%a6),%d1 14174 andi.b &0xc0,%d1 # keep rnd prec 14175 ori.b &rz_mode*0x10,%d1 # insert RZ 14177 fmov.l %d1,%fpcr # set FPCR 14194 mov.w (tbl_fsglmul_op.b,%pc,%d1.w*2),%d1 14195 jmp (tbl_fsglmul_op.b,%pc,%d1.w*1) 14303 clr.w %d1 14304 mov.b DTAG(%a6),%d1 14305 lsl.b &0x3,%d1 14306 or.b STAG(%a6),%d1 # combine src tags 14330 mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode 14331 lsr.b &0x6,%d1 14348 fmov.l %fpsr,%d1 # save FPSR 14351 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14356 mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp} 14357 mov.l %d1,%d2 # make a copy 14358 andi.l &0x7fff,%d1 # strip sign 14360 sub.l %d0,%d1 # add scale factor 14361 or.w %d2,%d1 # concat old sign,new exp 14362 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 14375 fmov.l %fpsr,%d1 14378 or.l %d1,USER_FPSR(%a6) # save INEX,N 14381 mov.w (%sp),%d1 # fetch new exponent 14383 andi.l &0x7fff,%d1 # strip sign 14384 sub.l %d0,%d1 # add scale factor 14385 cmp.l %d1,&0x7fff # did divide overflow? 
14391 mov.b FPCR_ENABLE(%a6),%d1 14392 andi.b &0x13,%d1 # is OVFL or INEX enabled? 14397 sne %d1 # set sign param accordingly 14409 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 14410 mov.l %d1,%d2 # make a copy 14411 andi.l &0x7fff,%d1 # strip sign 14413 sub.l %d0,%d1 # add scale factor 14414 subi.l &0x6000,%d1 # subtract new bias 14415 andi.w &0x7fff,%d1 # clear ms bit 14416 or.w %d2,%d1 # concat old sign,new exp 14417 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 14432 fmov.l %fpsr,%d1 # save status 14435 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14437 mov.b FPCR_ENABLE(%a6),%d1 14438 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 14445 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 14466 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 14467 mov.l %d1,%d2 # make a copy 14468 andi.l &0x7fff,%d1 # strip sign 14470 sub.l %d0,%d1 # add scale factor 14471 addi.l &0x6000,%d1 # add bias 14472 andi.w &0x7fff,%d1 # clear top bit 14473 or.w %d2,%d1 # concat old sign, new exp 14474 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 14490 fmov.l %fpsr,%d1 # save status 14493 or.l %d1,USER_FPSR(%a6) # save INEX2,N 14509 clr.l %d1 # clear scratch register 14510 ori.b &rz_mode*0x10,%d1 # force RZ rnd mode 14512 fmov.l %d1,%fpcr # set FPCR 14529 mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1 14530 jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1) 14650 clr.w %d1 14651 mov.b DTAG(%a6),%d1 14652 lsl.b &0x3,%d1 14653 or.b STAG(%a6),%d1 # combine src tags 14672 fmov.l %fpsr,%d1 # fetch INEX2,N,Z 14674 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits 14682 mov.w 2+L_SCR3(%a6),%d1 14683 lsr.b &0x6,%d1 14689 cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow? 14692 cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow? 14697 mov.w (%sp),%d1 14698 andi.w &0x8000,%d1 # keep sign 14699 or.w %d2,%d1 # concat sign,new exp 14700 mov.w %d1,(%sp) # insert new exponent 14724 mov.b FPCR_ENABLE(%a6),%d1 14725 andi.b &0x13,%d1 # is OVFL or INEX enabled? 14731 sne %d1 # set sign param accordingly 14740 mov.b L_SCR3(%a6),%d1 14741 andi.b &0xc0,%d1 # is precision extended? 14745 mov.w (%sp),%d1 14746 andi.w &0x8000,%d1 # keep sign 14749 or.w %d2,%d1 # concat sign,new exp 14750 mov.w %d1,(%sp) # insert new exponent 14758 mov.l L_SCR3(%a6),%d1 14759 andi.b &0x30,%d1 # keep rnd mode 14760 fmov.l %d1,%fpcr # set FPCR 14783 fmov.l %fpsr,%d1 # save status 14785 or.l %d1,USER_FPSR(%a6) # save INEX,N 14787 mov.b FPCR_ENABLE(%a6),%d1 14788 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 14795 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 14805 mov.l L_SCR3(%a6),%d1 14806 andi.b &0xc0,%d1 # is precision extended? 14819 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 14820 mov.l %d1,%d2 # make a copy 14821 andi.l &0x7fff,%d1 # strip sign 14823 sub.l %d0,%d1 # add scale factor 14824 addi.l &0x6000,%d1 # add new bias 14825 andi.w &0x7fff,%d1 # clear top bit 14826 or.w %d2,%d1 # concat sign,new exp 14827 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 14832 mov.l L_SCR3(%a6),%d1 14833 andi.b &0x30,%d1 # use only rnd mode 14834 fmov.l %d1,%fpcr # set FPCR 14844 mov.l L_SCR3(%a6),%d1 14845 andi.b &0xc0,%d1 14848 mov.l 0x4(%sp),%d1 # extract hi(man) 14849 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000? 
14870 mov.l L_SCR3(%a6),%d1 14871 andi.b &0xc0,%d1 # keep rnd prec 14872 ori.b &rz_mode*0x10,%d1 # insert rnd mode 14873 fmov.l %d1,%fpcr # set FPCR 14893 mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1 14894 jmp (tbl_fadd_op.b,%pc,%d1.w*1) 14962 mov.b DST_EX(%a1),%d1 14963 eor.b %d0,%d1 14980 mov.b 3+L_SCR3(%a6),%d1 14981 andi.b &0x30,%d1 # extract rnd mode 14982 cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM? 15023 mov.b DST_EX(%a1),%d1 15024 eor.b %d1,%d0 15103 clr.w %d1 15104 mov.b DTAG(%a6),%d1 15105 lsl.b &0x3,%d1 15106 or.b STAG(%a6),%d1 # combine src tags 15125 fmov.l %fpsr,%d1 # fetch INEX2, N, Z 15127 or.l %d1,USER_FPSR(%a6) # save exc and ccode bits 15135 mov.w 2+L_SCR3(%a6),%d1 15136 lsr.b &0x6,%d1 15142 cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow? 15145 cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow? 15150 mov.w (%sp),%d1 15151 andi.w &0x8000,%d1 # keep sign 15152 or.w %d2,%d1 # insert new exponent 15153 mov.w %d1,(%sp) # insert new exponent 15177 mov.b FPCR_ENABLE(%a6),%d1 15178 andi.b &0x13,%d1 # is OVFL or INEX enabled? 15184 sne %d1 # set sign param accordingly 15193 mov.b L_SCR3(%a6),%d1 15194 andi.b &0xc0,%d1 # is precision extended? 15198 mov.w (%sp),%d1 # fetch {sgn,exp} 15199 andi.w &0x8000,%d1 # keep sign 15202 or.w %d2,%d1 # concat sign,exp 15203 mov.w %d1,(%sp) # insert new exponent 15211 mov.l L_SCR3(%a6),%d1 15212 andi.b &0x30,%d1 # clear rnd prec 15213 fmov.l %d1,%fpcr # set FPCR 15236 fmov.l %fpsr,%d1 # save status 15238 or.l %d1,USER_FPSR(%a6) 15240 mov.b FPCR_ENABLE(%a6),%d1 15241 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 15248 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 15258 mov.l L_SCR3(%a6),%d1 15259 andi.b &0xc0,%d1 # is precision extended? 15272 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 15273 mov.l %d1,%d2 # make a copy 15274 andi.l &0x7fff,%d1 # strip sign 15276 sub.l %d0,%d1 # add scale factor 15277 addi.l &0x6000,%d1 # subtract new bias 15278 andi.w &0x7fff,%d1 # clear top bit 15279 or.w %d2,%d1 # concat sgn,exp 15280 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 15285 mov.l L_SCR3(%a6),%d1 15286 andi.b &0x30,%d1 # clear rnd prec 15287 fmov.l %d1,%fpcr # set FPCR 15297 mov.l L_SCR3(%a6),%d1 15298 andi.b &0xc0,%d1 # fetch rnd prec 15301 mov.l 0x4(%sp),%d1 15302 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000? 15323 mov.l L_SCR3(%a6),%d1 15324 andi.b &0xc0,%d1 # keep rnd prec 15325 ori.b &rz_mode*0x10,%d1 # insert rnd mode 15326 fmov.l %d1,%fpcr # set FPCR 15346 mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1 15347 jmp (tbl_fsub_op.b,%pc,%d1.w*1) 15415 mov.b DST_EX(%a1),%d1 15416 eor.b %d1,%d0 15432 mov.b 3+L_SCR3(%a6),%d1 15433 andi.b &0x30,%d1 # extract rnd mode 15434 cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM? 15475 mov.b DST_EX(%a1),%d1 15476 eor.b %d1,%d0 15547 clr.w %d1 15548 mov.b STAG(%a6),%d1 15566 fmov.l %fpsr,%d1 15567 or.l %d1,USER_FPSR(%a6) # set N,INEX 15619 fmov.l %fpsr,%d1 # save FPSR 15622 or.l %d1,USER_FPSR(%a6) # save INEX2,N 15627 mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp 15628 mov.l %d1,%d2 # make a copy 15629 andi.l &0x7fff,%d1 # strip sign 15630 sub.l %d0,%d1 # add scale factor 15632 or.w %d1,%d2 # concat old sign,new exp 15674 fmov.l %fpsr,%d1 # save status 15677 or.l %d1,USER_FPSR(%a6) # save INEX2,N 15680 mov.b FPCR_ENABLE(%a6),%d1 15681 andi.b &0x0b,%d1 # is UNFL or INEX enabled? 
15688 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 15701 mov.w FP_SCR0_EX(%a6),%d1 # load current exponent 15704 mov.l %d1,%d2 # make a copy 15705 andi.l &0x7fff,%d1 # strip sign 15707 sub.l %d0,%d1 # subtract scale factor 15708 addi.l &0x6000,%d1 # add new bias 15709 andi.w &0x7fff,%d1 15710 or.w %d2,%d1 # concat new sign,new exp 15711 mov.w %d1,FP_SCR1_EX(%a6) # insert new exp 15726 fmov.l %fpsr,%d1 # save FPSR 15728 or.l %d1,USER_FPSR(%a6) # save INEX2,N 15733 mov.b FPCR_ENABLE(%a6),%d1 15734 andi.b &0x13,%d1 # is OVFL or INEX enabled? 15743 sne %d1 # set sign param accordingly 15757 mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp} 15758 mov.l %d1,%d2 # make a copy 15759 andi.l &0x7fff,%d1 # strip sign 15761 sub.l %d0,%d1 # add scale factor 15762 subi.l &0x6000,%d1 # subtract bias 15763 andi.w &0x7fff,%d1 15764 or.w %d2,%d1 # concat sign,exp 15765 mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent 15782 fmov.l %fpsr,%d1 # save status 15785 or.l %d1,USER_FPSR(%a6) # save INEX2,N 15800 cmpi.b %d1,&DENORM # weed out DENORM 15802 cmpi.b %d1,&ZERO # weed out ZERO 15804 cmpi.b %d1,&INF # weed out INF 15806 cmpi.b %d1,&SNAN # weed out SNAN 15873 mov.w DST_EX(%a1),%d1 15875 mov.w %d1,FP_SCR1_EX(%a6) 15878 andi.w &0x7fff,%d1 15880 mov.w %d1,2+L_SCR1(%a6) # store dst exponent 15882 cmp.w %d0, %d1 # is src exp >= dst exp? 15907 mov.w FP_SCR0_EX(%a6),%d1 15908 and.w &0x8000,%d1 15909 or.w %d1,%d0 # concat {sgn,new exp} 15943 mov.w FP_SCR1_EX(%a6),%d1 15944 andi.w &0x8000,%d1 15945 or.w %d1,%d0 # concat {sgn,new exp} 15985 mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp} 15986 mov.w %d1,%d0 # make a copy 15988 andi.l &0x7fff,%d1 # extract operand's exponent 16000 sub.l %d1,%d0 # scale = BIAS + (-exp) 16008 mov.l %d0,%d1 # prepare for op_norm call 16042 mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp} 16043 andi.l &0x7fff,%d1 # extract operand's exponent 16047 btst &0x0,%d1 # is exp even or odd? 16053 sub.l %d1,%d0 # scale = BIAS + (-exp) 16061 sub.l %d1,%d0 # scale = BIAS + (-exp) 16112 mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp} 16113 mov.w %d1,%d0 # make a copy 16115 andi.l &0x7fff,%d1 # extract operand's exponent 16127 sub.l %d1,%d0 # scale = BIAS + (-exp) 16134 mov.l %d0,%d1 # prepare for op_norm call 16284 clr.l %d1 # clear scratch reg 16285 mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes 16286 ror.l &0x8,%d1 # rotate to top byte 16287 fmov.l %d1,%fpsr # insert into FPSR 16289 mov.w (tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table 16290 jmp (tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine 16848 mov.b 1+EXC_OPWORD(%a6), %d1 # fetch lo opword 16849 andi.w &0x7, %d1 # extract count register 16912 clr.l %d1 # clear scratch reg 16913 mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes 16914 ror.l &0x8,%d1 # rotate to top byte 16915 fmov.l %d1,%fpsr # insert into FPSR 16917 mov.w (tbl_ftrapcc.b,%pc,%d0.w*2), %d1 # load table 16918 jmp (tbl_ftrapcc.b,%pc,%d1.w) # jump to ftrapcc routine 17482 clr.l %d1 # clear scratch reg 17483 mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes 17484 ror.l &0x8,%d1 # rotate to top byte 17485 fmov.l %d1,%fpsr # insert into FPSR 17487 mov.w (tbl_fscc.b,%pc,%d0.w*2),%d1 # load table 17488 jmp (tbl_fscc.b,%pc,%d1.w) # jump to fscc routine 18064 mov.b 1+EXC_OPWORD(%a6),%d1 # fetch lo opword 18065 mov.l %d1,%d0 # make a copy 18066 andi.b &0x38,%d1 # extract src mode 18070 mov.l %d0,%d1 18071 andi.w &0x7,%d1 # pass index in d1 18084 cmpi.b %d1,&0x18 # is <ea> (An)+ ? 18086 cmpi.b %d1,&0x20 # is <ea> -(An) ? 18093 tst.l %d1 # did dstore fail? 18106 tst.l %d1 # did dstore fail? 
18109 mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword 18110 andi.w &0x7,%d1 # pass index in d1 18124 tst.l %d1 # did dstore fail? 18127 mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword 18128 andi.w &0x7,%d1 # pass index in d1 18166 # d1 = Dn # 18219 mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword 18220 andi.w &0x70,%d1 # extract reg bits 18221 lsr.b &0x4,%d1 # shift into lo bits 18233 mov.l (%sp)+,%d1 # restore strg 18255 mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1 18276 tst.b %d1 # should FP0 be moved? 18284 lsl.b &0x1,%d1 # should FP1 be moved? 18292 lsl.b &0x1,%d1 # should FP2 be moved? 18299 lsl.b &0x1,%d1 # should FP3 be moved? 18306 lsl.b &0x1,%d1 # should FP4 be moved? 18313 lsl.b &0x1,%d1 # should FP5 be moved? 18320 lsl.b &0x1,%d1 # should FP6 be moved? 18327 lsl.b &0x1,%d1 # should FP7 be moved? 18343 tst.l %d1 # did dstore err? 18357 mov.l %d1,-(%sp) # save bit string for later 18364 tst.l %d1 # did dfetch fail? 18367 mov.l (%sp)+,%d1 # load bit string 18371 tst.b %d1 # should FP0 be moved? 18379 lsl.b &0x1,%d1 # should FP1 be moved? 18387 lsl.b &0x1,%d1 # should FP2 be moved? 18393 lsl.b &0x1,%d1 # should FP3 be moved? 18399 lsl.b &0x1,%d1 # should FP4 be moved? 18405 lsl.b &0x1,%d1 # should FP5 be moved? 18411 lsl.b &0x1,%d1 # should FP6 be moved? 18417 lsl.b &0x1,%d1 # should FP7 be moved? 18529 mov.w %d0,%d1 # make a copy 18532 andi.l &0x7,%d1 # extract reg field 18652 mov.l %d0,%d1 18653 add.l %a0,%d1 # Increment 18654 mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value 18660 mov.l %d0,%d1 18661 add.l %a0,%d1 # Increment 18662 mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value 18668 mov.l %d0,%d1 18669 add.l %a0,%d1 # Increment 18670 mov.l %d1,%a2 # Save incr value 18676 mov.l %d0,%d1 18677 add.l %a0,%d1 # Increment 18678 mov.l %d1,%a3 # Save incr value 18684 mov.l %d0,%d1 18685 add.l %a0,%d1 # Increment 18686 mov.l %d1,%a4 # Save incr value 18692 mov.l %d0,%d1 18693 add.l %a0,%d1 # Increment 18694 mov.l %d1,%a5 # Save incr value 18700 mov.l %d0,%d1 18701 add.l %a0,%d1 # Increment 18702 mov.l %d1,(%a6) # Save incr value 18710 mov.l %d0,%d1 18711 add.l %a0,%d1 # Increment 18712 mov.l %d1,EXC_A7(%a6) # Save incr value 18785 tst.l %d1 # did ifetch fail? 18798 tst.l %d1 # did ifetch fail? 18811 tst.l %d1 # did ifetch fail? 18824 tst.l %d1 # did ifetch fail? 18837 tst.l %d1 # did ifetch fail? 18850 tst.l %d1 # did ifetch fail? 18863 tst.l %d1 # did ifetch fail? 18876 tst.l %d1 # did ifetch fail? 18891 addq.l &0x8,%d1 18899 tst.l %d1 # did ifetch fail? 18909 mov.l %d0,%d1 18910 rol.w &0x4,%d1 18911 andi.w &0xf,%d1 # extract index regno 18923 mov.l %d2,%d1 18924 rol.w &0x7,%d1 18925 andi.l &0x3,%d1 # extract scale value 18927 lsl.l %d1,%d0 # shift index by scale 18944 tst.l %d1 # did ifetch fail? 18958 tst.l %d1 # did ifetch fail? 18972 tst.l %d1 # did ifetch fail? 18994 tst.l %d1 # did ifetch fail? 19005 mov.l %d0,%d1 # make extword copy 19006 rol.w &0x4,%d1 # rotate reg num into place 19007 andi.w &0xf,%d1 # extract register number 19019 mov.l %d2,%d1 19020 rol.w &0x7,%d1 # rotate scale value into place 19021 andi.l &0x3,%d1 # extract scale value 19023 lsl.l %d1,%d0 # shift index by scale 19051 bfextu %d0{&16:&4},%d1 # fetch dreg index 19087 tst.l %d1 # did ifetch fail? 19097 tst.l %d1 # did ifetch fail? 19118 tst.l %d1 # did ifetch fail? 19128 tst.l %d1 # did ifetch fail? 19146 tst.l %d1 # did dfetch fail? 19158 tst.l %d1 # did dfetch fail? 19251 tst.l %d1 # did ifetch fail? 19259 tst.l %d1 # did ifetch fail? 19271 tst.l %d1 # did ifetch fail? 19279 tst.l %d1 # did ifetch fail? 
19291 tst.l %d1 # did ifetch fail? 19299 tst.l %d1 # did ifetch fail? 19311 tst.l %d1 # did ifetch fail? 19319 tst.l %d1 # did ifetch fail? 19327 tst.l %d1 # did ifetch fail? 19367 mov.l %d0, %d1 # make a copy 19370 andi.l &0x7, %d1 # extract reg field 19378 or.w %d1,%d0 # concat mode,reg 19450 mov.l %d0,%d1 # make a copy 19453 andi.l &0x7,%d1 # extract reg field 19468 mov.w (tbl_ceaf_pi.b,%pc,%d1.w*2),%d1 19470 jmp (tbl_ceaf_pi.b,%pc,%d1.w*1) 19513 mov.w (tbl_ceaf_pd.b,%pc,%d1.w*2),%d1 19517 jmp (tbl_ceaf_pd.b,%pc,%d1.w*1) 19695 bfextu EXC_OPWORD(%a6){&10:&3}, %d1 # extract <ea> mode field 19700 bfextu EXC_OPWORD(%a6){&13:&3}, %d1 # extract src reg field 19830 tst.l %d1 # did dfetch fail? 19846 tst.l %d1 # did ifetch fail? 19866 tst.l %d1 # did dfetch fail? 19882 tst.l %d1 # did ifetch fail? 19902 tst.l %d1 # did dfetch fail? 19918 tst.l %d1 # did ifetch fail? 19939 tst.l %d1 # did dfetch fail? 19960 tst.l %d1 # did ifetch fail? 19982 mov.w &0x3f81, %d1 # xprec exp = 0x3f81 19983 sub.w %d0, %d1 # exp = 0x3f81 - shft amt. 19984 or.w %d1, FP_SRC_EX(%a6) # {sgn,exp} 20022 tst.l %d1 # did dfetch fail? 20045 tst.l %d1 # did ifetch fail? 20057 mov.l &0xb, %d1 20058 lsl.l %d1, %d0 20068 mov.w &0x3c01, %d1 # xprec exp = 0x3c01 20069 sub.w %d0, %d1 # exp = 0x3c01 - shft amt. 20070 or.w %d1, FP_SRC_EX(%a6) # {sgn,exp} 20083 mov.l &0xb, %d1 20084 lsl.l %d1, %d0 20108 tst.l %d1 # did dfetch fail? 20201 bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt 20202 mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index 20234 fmov.l %fpsr,%d1 # fetch FPSR 20235 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits 20237 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 20238 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 20244 tst.l %d1 # did dstore fail? 20250 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 20251 andi.w &0x7,%d1 20256 mov.l SRC_EX(%a0),%d1 20257 andi.l &0x80000000,%d1 # keep DENORM sign 20258 ori.l &0x00800000,%d1 # make smallest sgl 20259 fmov.s %d1,%fp0 20280 fmov.l %fpsr,%d1 # fetch FPSR 20281 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits 20283 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 20284 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 20290 tst.l %d1 # did dstore fail? 20296 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 20297 andi.w &0x7,%d1 20302 mov.l SRC_EX(%a0),%d1 20303 andi.l &0x80000000,%d1 # keep DENORM sign 20304 ori.l &0x00800000,%d1 # make smallest sgl 20305 fmov.s %d1,%fp0 20326 fmov.l %fpsr,%d1 # fetch FPSR 20327 or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits 20330 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 20331 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 20337 tst.l %d1 # did dstore fail? 20343 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 20344 andi.w &0x7,%d1 20349 mov.l SRC_EX(%a0),%d1 20350 andi.l &0x80000000,%d1 # keep DENORM sign 20351 ori.l &0x00800000,%d1 # make smallest sgl 20352 fmov.s %d1,%fp0 20388 tst.l %d1 # did dstore fail? 20409 tst.l %d1 # did dstore fail? 20466 fmov.l %fpsr,%d1 # save FPSR 20468 or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex 20471 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 20472 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 20478 tst.l %d1 # did dstore fail? 20484 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 20485 andi.w &0x7,%d1 20512 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 20518 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 20519 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 20525 tst.l %d1 # did dstore fail? 
20531 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 20532 andi.w &0x7,%d1 20536 mov.b FPCR_ENABLE(%a6),%d1 20537 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 20562 smi %d1 # set if so 20568 mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode 20569 andi.b &0x38,%d1 # is mode == 0? (Dreg dst) 20575 tst.l %d1 # did dstore fail? 20581 mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn 20582 andi.w &0x7,%d1 20586 mov.b FPCR_ENABLE(%a6),%d1 20587 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 20601 mov.w SRC_EX(%a0),%d1 # fetch current sign 20602 andi.w &0x8000,%d1 # keep it,clear exp 20603 ori.w &0x3fff,%d1 # insert exp = 0 20604 mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp 20650 mov.b 3+L_SCR3(%a6),%d1 20651 lsr.b &0x4,%d1 20652 andi.w &0x0c,%d1 20653 swap %d1 20654 mov.b 3+L_SCR3(%a6),%d1 20655 lsr.b &0x4,%d1 20656 andi.w &0x03,%d1 20713 tst.l %d1 # did dstore fail? 20741 mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode 20747 mov.l %d1,L_SCR2(%a6) 20754 tst.l %d1 # did dstore fail? 20757 mov.b FPCR_ENABLE(%a6),%d1 20758 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 20783 smi %d1 # set if so 20794 tst.l %d1 # did dstore fail? 20797 mov.b FPCR_ENABLE(%a6),%d1 20798 andi.b &0x0a,%d1 # is UNFL or INEX enabled? 20812 mov.w SRC_EX(%a0),%d1 # fetch current sign 20813 andi.w &0x8000,%d1 # keep it,clear exp 20814 ori.w &0x3fff,%d1 # insert exp = 0 20815 mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp 20841 # d1 = lo(double precision result) # 20882 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa 20883 bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms 20884 or.l %d1,%d0 # put these bits in ms word of double 20886 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa 20888 lsl.l %d0,%d1 # put lower 11 bits in upper bits 20889 mov.l %d1,L_SCR2(%a6) # build lower lword in memory 20890 mov.l FTEMP_LO(%a0),%d1 # get ls mantissa 20891 bfextu %d1{&0:&21},%d0 # get ls 21 bits of double 20892 mov.l L_SCR2(%a6),%d1 20893 or.l %d0,%d1 # put them in double result 20947 mov.l FTEMP_HI(%a0),%d1 # get ms mantissa 20948 andi.l &0x7fffff00,%d1 # get upper 23 bits of ms 20949 lsr.l &0x8,%d1 # and put them flush right 20950 or.l %d1,%d0 # put these bits in ms word of single 20966 mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg 20967 lsr.b &0x4,%d1 20968 andi.w &0x7,%d1 21022 tst.l %d1 # did dstore fail? 21032 tst.l %d1 # did dstore fail? 21053 # fetch_dreg(): fetch register according to index in d1 # 21059 # d1 = index of register to fetch from # 21065 # According to the index value in d1 which can range from zero # 21072 # this routine leaves d1 intact for subsequent store_dreg calls. 21075 mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0 21147 # store_dreg_l(): store longword to data register specified by d1 # 21154 # d1 = index of register to fetch from # 21160 # According to the index value in d1, store the longword value # 21168 mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1 21169 jmp (tbl_sdregl.b,%pc,%d1.w*1) 21208 # store_dreg_w(): store word to data register specified by d1 # 21215 # d1 = index of register to fetch from # 21221 # According to the index value in d1, store the word value # 21229 mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1 21230 jmp (tbl_sdregw.b,%pc,%d1.w*1) 21269 # store_dreg_b(): store byte to data register specified by d1 # 21276 # d1 = index of register to fetch from # 21282 # According to the index value in d1, store the byte value # 21290 mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1 21291 jmp (tbl_sdregb.b,%pc,%d1.w*1) 21337 # d1 = index of address register to increment # 21345 # specified by d1. A0/A1/A6/A7 reside on the stack. 
The rest reside # 21356 mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1 21357 jmp (tbl_iareg.b,%pc,%d1.w*1) 21401 # d1 = index of address register to decrement # 21409 # specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside # 21420 mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1 21421 jmp (tbl_dareg.b,%pc,%d1.w*1) 21719 mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold 21720 mov.w %d1, %d0 # copy d1 into d0 21740 mov.w %d1, FTEMP_EX(%a0) # load exp with threshold 21741 clr.l FTEMP_HI(%a0) # set d1 = 0 (ms mantissa) 21751 # %d1{15:0} : denormalization threshold # 21775 mov.l %d1, %d0 # copy the denorm threshold 21776 sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent 21777 ble.b dnrm_no_lp # d1 <= 0 21778 cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ? 21780 cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ? 21782 bra.w case_3 # (d1 >= 64) 21792 # case (0<d1<32) 21795 # %d1 = "n" = amt to shift 21819 sub.w %d1, %d0 # %d0 = 32 - %d1 21821 cmpi.w %d1, &29 # is shft amt >= 29 21828 bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO 21832 mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO 21844 # case (32<=d1<64) 21847 # %d1 = "n" = amt to shift 21870 subi.w &0x20, %d1 # %d1 now between 0 and 32 21872 sub.w %d1, %d0 # %d0 = 32 - %d1 21881 bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S 21883 bftst %d1{&2:&30} # were any bits shifted off? 21888 mov.l %d1, %d0 # move new G,R,S to %d0 21892 mov.l %d1, %d0 # move new G,R,S to %d0 21904 # case (d1>=64) 21907 # %d1 = amt to shift 21912 cmpi.w %d1, &65 # is shift amt > 65? 21917 # case (d1>65) 21928 # case (d1 == 64) 21949 mov.l %d0, %d1 # make a copy 21951 and.l &0x3fffffff, %d1 # extract other bits 21956 # case (d1 == 65) 21979 and.l &0x7fffffff, %d1 # extract other bits 22018 # d1(hi) = contains rounding precision: # 22022 # d1(lo) = contains rounding mode: # 22062 mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset 22081 swap %d1 # set up d1 for round prec. 22083 cmpi.b %d1, &s_mode # is prec = sgl? 22098 swap %d1 # set up d1 for round prec. 22100 cmpi.b %d1, &s_mode # is prec = sgl? 22115 swap %d1 # set up d1 for round prec. 22117 cmpi.b %d1, &s_mode # is prec = sgl? 22191 swap %d1 # select rnd prec 22193 cmpi.b %d1, &s_mode # is prec sgl? 22205 # d1 = {PREC,ROUND} 22214 # Notes: the ext_grs uses the round PREC, and therefore has to swap d1 22215 # prior to usage, and needs to restore d1 to original. this 22221 swap %d1 # have d1.w point to round precision 22222 tst.b %d1 # is rnd prec = extended? 22230 swap %d1 # yes; return to correct positions 22236 cmpi.b %d1, &s_mode # is rnd prec = sgl? 22294 swap %d1 # restore d1 to original 22322 mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa) 22329 bfextu %d1{&0:%d2}, %d3 # extract lo bits 22332 lsl.l %d2, %d1 # create lo(man) 22335 mov.l %d1, FTEMP_LO(%a0) # store new lo(man) 22345 bfffo %d1{&0:&32}, %d2 # how many places to shift? 22346 lsl.l %d2, %d1 # shift lo(man) 22349 mov.l %d1, FTEMP_HI(%a0) # store hi(man) 22397 clr.l %d1 # clear top word 22398 mov.w FTEMP_EX(%a0), %d1 # extract exponent 22399 and.w &0x7fff, %d1 # strip off sgn 22401 cmp.w %d0, %d1 # will denorm push exp < 0? 22407 sub.w %d0, %d1 # shift exponent value 22410 or.w %d0, %d1 # {sgn,new exp} 22411 mov.w %d1, FTEMP_EX(%a0) # insert new exponent 22422 cmp.b %d1, &32 # is exp <= 32? 
22425 bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man) 22429 lsl.l %d1, %d0 # extract new lo(man) 22441 sub.w &32, %d1 # adjust shft amt by 32 22444 lsl.l %d1, %d0 # left shift lo(man) 22566 mov.l %d0, %d1 22578 and.l &0x000fffff, %d1 22589 and.l &0x000fffff, %d1 22597 btst &19, %d1 22629 mov.l %d0, %d1 22641 and.l &0x007fffff, %d1 22650 and.l &0x007fffff, %d1 22656 btst &22, %d1 22680 # d1 = rounding precision/mode # 22701 mov.l %d1, -(%sp) # save rnd prec,mode on stack 22706 mov.w FTEMP_EX(%a0), %d1 # extract exponent 22707 and.w &0x7fff, %d1 22708 sub.w %d0, %d1 22709 mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent 22719 mov.w 0x6(%sp),%d1 # load prec:mode into %d1 22720 andi.w &0xc0,%d1 # extract rnd prec 22721 lsr.w &0x4,%d1 22722 swap %d1 22723 mov.w 0x6(%sp),%d1 22724 andi.w &0x30,%d1 22725 lsr.w &0x4,%d1 22765 mov.l %d1,-(%sp) # save rnd prec,mode on stack 22770 mov.w FTEMP_EX(%a0),%d1 # extract exponent 22771 and.w &0x7fff,%d1 22772 sub.w %d0,%d1 22773 mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent 22781 mov.w &s_mode,%d1 # force rnd prec = sgl 22782 swap %d1 22783 mov.w 0x6(%sp),%d1 # load rnd mode 22784 andi.w &0x30,%d1 # extract rnd prec 22785 lsr.w &0x4,%d1 22833 # d1.b = '-1' => (-); '0' => (+) # 22856 andi.w &0x10,%d1 # keep result sign 22858 or.b %d0,%d1 # concat the two 22859 mov.w %d1,%d0 # make a copy 22860 lsl.b &0x1,%d1 # multiply d1 by 2 22865 and.w &0x10, %d1 # keep result sign 22866 or.b %d0, %d1 # insert rnd mode 22868 or.b %d0, %d1 # insert rnd prec 22869 mov.w %d1, %d0 # make a copy 22870 lsl.b &0x1, %d1 # shift left by 1 22878 lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr 22967 tst.l %d1 # did dfetch fail? 23083 # 2. Calculate absolute value of exponent in d1 by mul and add. 23092 # (*) d1: accumulator for binary exponent 23105 clr.l %d1 # zero d1 for accumulator 23107 mulu.l &0xa,%d1 # mul partial product by one digit place 23109 add.l %d0,%d1 # d1 = d1 + d0 23114 neg.l %d1 # negate before subtracting 23116 sub.l &16,%d1 # sub to compensate for shift of mant 23118 neg.l %d1 # now negative, make pos and set SE 23122 mov.l %d1,-(%sp) # save exp on stack 23134 # (*) d1: lword counter 23145 mov.l &1,%d1 # word counter, init to 1 23160 mov.l (%a0,%d1.L*4),%d4 # load mantissa lonqword into d4 23170 # then inc d1 (=2) to point to the next long word and reset d3 to 0 23177 addq.l &1,%d1 # inc lw pointer in mantissa 23178 cmp.l %d1,&2 # test for last lw 23222 # (*) d1: zero count 23238 mov.l (%sp),%d1 # load expA for range test 23239 cmp.l %d1,&27 # test is with 27 23243 clr.l %d1 # zero count reg 23247 addq.l &1,%d1 # inc zero count 23251 addq.l &8,%d1 # and inc count by 8 23261 addq.l &1,%d1 # inc digit counter 23264 mov.l %d1,%d0 # copy counter to d2 23265 mov.l (%sp),%d1 # get adjusted exp from memory 23266 sub.l %d0,%d1 # subtract count from exp 23268 neg.l %d1 # now its neg; get abs 23295 clr.l %d1 # clr counter 23300 addq.l &8,%d1 # inc counter by 8 23309 addq.l &1,%d1 # inc digit counter 23312 mov.l %d1,%d0 # copy counter to d0 23313 mov.l (%sp),%d1 # get adjusted exp from memory 23314 sub.l %d0,%d1 # subtract count from exp 23316 neg.l %d1 # take abs of exp and clr SE 23346 # ( ) d1: exponent 23353 # ( ) d1: exponent 23406 mov.l %d1,%d0 # copy exp to d0;use d0 23592 # d1: scratch 23636 mov.l 4(%a0),%d1 23641 roxl.l &1,%d1 23642 tst.l %d1 23653 mov.l %d1,4(%a0) 23848 bfextu USER_FPCR(%a6){&26:&2},%d1 # get initial rmode bits 23849 lsl.w &1,%d1 # put them in bits 2:1 23850 add.w %d5,%d1 # add in LAMBDA 23851 lsl.w &1,%d1 # put them in 
bits 3:1 23854 addq.l &1,%d1 # if neg, set bit 0 23857 mov.b (%a2,%d1),%d3 # load d3 with new rmode 24058 movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1} 24088 movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1} 24214 # d1: x/0 24259 clr.l %d1 # put zero in d1 for addx 24261 addx.l %d1,%d2 # continue inc 24287 # d1: x/scratch (0);shift count for final exponent packing 24344 clr.l %d1 # put zero in d1 for addx 24346 addx.l %d1,%d2 # continue inc 24352 mov.l &12,%d1 # use d1 for shift count 24353 lsr.l %d1,%d0 # shift d0 right by 12 24355 lsr.l %d1,%d0 # shift d0 right by 12 24481 # extracts and shifts. The three msbs from d2 will go into d1. # 24503 # d1: temp used to form the digit 24529 # A3. Multiply d2:d3 by 8; extract msbs into d1. 24531 bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1 24537 # A4. Multiply d4:d5 by 2; add carry out to d1. 24542 addx.w %d6,%d1 # add in extend from mul by 2 24550 addx.w %d6,%d1 # add in extend from add to d1 24560 add.w %d1,%d7 # add in ls digit to d7b 24568 mov.w %d1,%d7 # put new digit in d7b 24693 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1 24719 mov.b EXC_OPWORD+0x1(%a6),%d1 24720 andi.b &0x38,%d1 # extract opmode 24721 cmpi.b %d1,&0x18 # postinc? 24723 cmpi.b %d1,&0x20 # predec? 24728 mov.b EXC_OPWORD+0x1(%a6),%d1 24729 andi.w &0x0007,%d1 # fetch An 24731 mov.w (tbl_rest_inc.b,%pc,%d1.w*2),%d1 24732 jmp (tbl_rest_inc.b,%pc,%d1.w*1)
|
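A pattern that recurs throughout the fpsp excerpt above is rebiasing the 16-bit {sign,exponent} word held in %d1: strip the sign, apply the scale factor from %d0, add or subtract the 0x6000 bias (overflow paths subtract it, underflow paths add it), mask back to 15 bits and re-attach the sign. A minimal C sketch of that sequence, assuming the copy in %d2 is masked down to the sign bit in one of the lines this d1-only listing omits:

    #include <stdint.h>

    /* sgn_exp: the {sign,exponent} word from FP_SCR0_EX; scale: the scale
     * factor held in %d0.  This follows the overflow direction (subtract
     * 0x6000); the underflow paths add 0x6000 instead. */
    static uint16_t rebias_exponent(uint16_t sgn_exp, int32_t scale)
    {
        uint16_t sign = sgn_exp & 0x8000;   /* keep old sign      */
        int32_t  exp  = sgn_exp & 0x7fff;   /* strip sign         */

        exp -= scale;                       /* apply scale factor */
        exp -= 0x6000;                      /* subtract bias      */
        exp &= 0x7fff;                      /* clear the top bit  */

        return sign | (uint16_t)exp;        /* concat sign,exp    */
    }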
H A D | ilsp.S | 227 # here, the result is in d1 and d0. the current strategy is to save 298 clr.l %d1 305 mov.w %d5, %d1 # first quotient word 311 swap %d1 312 mov.w %d5, %d1 # 2nd quotient 'digit' 315 mov.l %d1, %d6 # and quotient 329 clr.l %d1 # %d1 will hold trial quotient 348 mov.w &0xffff, %d1 # use max trial quotient word 351 mov.l %d5, %d1 353 divu.w %d3, %d1 # use quotient of mslw/msw 355 andi.l &0x0000ffff, %d1 # zero any remainder 365 mov.l %d1, %d2 368 mulu.w %d1, %d3 # V1q 384 subq.l &0x1, %d1 # yes, decrement and recheck 390 mov.l %d1, %d6 401 subq.l &0x1, %d1 # q is one too large 420 mov.w %d1, DDQUOTIENT(%a6) 421 clr.l %d1 430 mov.w %d1, DDQUOTIENT+2(%a6) 522 mov.l 0xc(%a6),%d1 # get multiplicand in d1 548 mov.l %d1,%d4 # md in d4 553 mulu.w %d1,%d0 # [1] lo(mr) * lo(md) 554 mulu.w %d3,%d1 # [2] hi(mr) * lo(md) 563 add.w %d1,%d0 # hi([1]) + lo([2]) 571 clr.w %d1 # clear lo([2]) 573 swap %d1 # hi([2]) in lo d1 575 add.l %d2,%d1 # [4] + hi([2]) 576 add.l %d3,%d1 # [4] + hi([3]) 582 tst.l %d1 # may set 'N' bit 588 # here, the result is in d1 and d0. the current strategy is to save 592 exg %d1,%d0 607 clr.l %d1 633 mov.l 0xc(%a6),%d1 # get multiplicand in d1 645 tst.l %d1 # is multiplicand negative? 647 neg.l %d1 # make multiplicand positive 674 mov.l %d1,%d4 # md in d4 679 mulu.w %d1,%d0 # [1] lo(mr) * lo(md) 680 mulu.w %d3,%d1 # [2] hi(mr) * lo(md) 689 add.w %d1,%d0 # hi([1]) + lo([2]) 697 clr.w %d1 # clear lo([2]) 699 swap %d1 # hi([2]) in lo d1 701 add.l %d2,%d1 # [4] + hi([2]) 702 add.l %d3,%d1 # [4] + hi([3]) 712 not.l %d1 # negate hi(result) bits 714 addx.l %d4,%d1 # add carry to hi(result) 719 tst.l %d1 # may set 'N' bit 725 # here, the result is in d1 and d0. the current strategy is to save 729 exg %d1,%d0 744 clr.l %d1 801 mov.b ([0xc,%a6],0x1),%d1 804 extb.l %d1 # sign extend hi bnd 820 mov.w ([0xc,%a6],0x2),%d1 823 ext.l %d1 # sign extend hi bnd 839 mov.l ([0xc,%a6],0x4),%d1 855 mov.b ([0xc,%a6],0x1),%d1 858 extb.l %d1 # sign extend hi bnd 878 mov.w ([0xc,%a6],0x2),%d1 881 ext.l %d1 # sign extend hi bnd 901 mov.l ([0xc,%a6],0x4),%d1 914 sub.l %d0, %d1 # (hi - lo) 915 cmp.l %d1,%d2 # ((hi - lo) - (Rn - hi))
|
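The 64-bit multiply in the ilsp excerpt builds a 32x32->64 product from the four 16x16 partial products its comments label [1]..[4]. A sketch of the same scheme in C (illustrative only; the routine keeps everything in d0-d7, and the signed variant negates the operands first and fixes the sign of the result with the not.l/addx.l sequence shown):

    #include <stdint.h>

    static void mulu_64(uint32_t mr, uint32_t md, uint32_t *hi, uint32_t *lo)
    {
        uint32_t p1 = (mr & 0xffff) * (md & 0xffff);  /* [1] lo(mr)*lo(md) */
        uint32_t p2 = (mr >> 16)    * (md & 0xffff);  /* [2] hi(mr)*lo(md) */
        uint32_t p3 = (mr & 0xffff) * (md >> 16);     /* [3] lo(mr)*hi(md) */
        uint32_t p4 = (mr >> 16)    * (md >> 16);     /* [4] hi(mr)*hi(md) */

        /* middle column: hi([1]) plus the low halves of [2] and [3] */
        uint32_t mid = (p1 >> 16) + (p2 & 0xffff) + (p3 & 0xffff);

        *lo = (p1 & 0xffff) | (mid << 16);
        *hi = p4 + (p2 >> 16) + (p3 >> 16) + (mid >> 16);
    }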
H A D | itest.S | 61 mov.l %d1,-(%sp) 169 clr.l %d1 177 mulu.l %d1,%d2:%d3 193 mov.l &0x77777777,%d1 201 mulu.l %d1,%d2:%d3 217 mov.l &0x00000010,%d1 224 mulu.l %d1,%d2:%d2 239 mov.l &0x55555555,%d1 247 mulu.l %d1,%d2:%d3 263 mov.l &0x40000000,%d1 271 mulu.l %d1,%d2:%d3 287 mov.l &0xffffffff,%d1 295 mulu.l %d1,%d2:%d3 311 mov.l &0x80000000,%d1 319 muls.l %d1,%d2:%d3 335 mov.l &0x80000000,%d1 343 muls.l %d1,%d2:%d3 359 mov.l &0x00000001,%d1 367 muls.l %d1,%d2:%d3 378 mov.l TESTCTR(%a6),%d1 410 mov.b 0x2(%a0),%d1 411 lsl.w &0x8,%d1 412 mov.b 0x0(%a0),%d1 414 cmp.w %d0,%d1 478 mov.b 0x2(%a0),%d1 479 lsl.w &0x8,%d1 480 mov.b 0x0(%a0),%d1 482 cmp.w %d0,%d1 511 mov.w &0xaaaa,%d1 513 cmp.w %d0,%d1 544 mov.b 0x6(%a0),%d1 545 lsl.l &0x8,%d1 546 mov.b 0x4(%a0),%d1 547 lsl.l &0x8,%d1 548 mov.b 0x2(%a0),%d1 549 lsl.l &0x8,%d1 550 mov.b 0x0(%a0),%d1 552 cmp.l %d0,%d1 620 mov.l &0xaaaaaaaa,%d1 622 cmp.l %d0,%d1 651 mov.b 0x2(%a0),%d1 652 lsl.w &0x8,%d1 653 mov.b 0x0(%a0),%d1 655 cmp.w %d7,%d1 684 mov.w &0xaaaa,%d1 686 cmp.w %d7,%d1 715 mov.b 0x2(%a0),%d1 716 lsl.w &0x8,%d1 717 mov.b 0x0(%a0),%d1 719 cmp.w %d0,%d1 748 mov.b 0x2+0x8(%a0),%d1 749 lsl.w &0x8,%d1 750 mov.b 0x0+0x8(%a0),%d1 752 cmp.w %d0,%d1 781 mov.w &0xaaaa,%d1 783 cmp.w %d0,%d1 814 mov.b 0x6+0x8(%a0),%d1 815 lsl.l &0x8,%d1 816 mov.b 0x4+0x8(%a0),%d1 817 lsl.l &0x8,%d1 818 mov.b 0x2+0x8(%a0),%d1 819 lsl.l &0x8,%d1 820 mov.b 0x0+0x8(%a0),%d1 822 cmp.l %d0,%d1 853 mov.l &0xaaaaaaaa,%d1 855 cmp.l %d0,%d1 884 mov.b 0x2-0x8(%a0),%d1 885 lsl.w &0x8,%d1 886 mov.b 0x0-0x8(%a0),%d1 888 cmp.w %d0,%d1 917 mov.w &0xaaaa,%d1 919 cmp.w %d0,%d1 950 mov.b 0x6-0x8(%a0),%d1 951 lsl.l &0x8,%d1 952 mov.b 0x4-0x8(%a0),%d1 953 lsl.l &0x8,%d1 954 mov.b 0x2-0x8(%a0),%d1 955 lsl.l &0x8,%d1 956 mov.b 0x0-0x8(%a0),%d1 958 cmp.l %d0,%d1 989 mov.l &0xaaaaaaaa,%d1 991 cmp.l %d0,%d1 998 mov.l TESTCTR(%a6),%d1 1013 # clr.l %d1 1021 # divu.l %d1,%d2:%d3 1035 mov.l &0x00000001,%d1 1043 divu.l %d1,%d2:%d3 1057 mov.l &0x44444444,%d1 1065 divu.l %d1,%d2:%d3 1081 mov.l &0x55555555,%d1 1089 divu.l %d1,%d2:%d3 1105 mov.l &0x11111111,%d1 1113 divu.l %d1,%d2:%d3 1127 mov.l &0xfffffffe,%d1 1135 divs.l %d1,%d2:%d3 1149 mov.l &0xfffffffe,%d1 1157 divs.l %d1,%d2:%d3 1173 mov.l &0x00000002,%d1 1181 divs.l %d1,%d2:%d3 1195 mov.l &0xffffffff,%d1 1203 divu.l %d1,%d2:%d3 1217 mov.l &0xffffffff,%d1 1240 mov.l &0x0000ffff,%d1 1248 divu.l %d1,%d2:%d3 1259 mov.l TESTCTR(%a6),%d1 1278 mov.w &0xaaaa,%d1 1285 cas.w %d1,%d2,(%a0) # Dc,Du,<ea> 1305 mov.w &0x0000aaaa,%d1 1312 cas.w %d1,%d2,(%a0) # Dc,Du,<ea> 1333 mov.l &0xaaaaaaaa,%d1 1340 cas.l %d1,%d2,(%a0) # Dc,Du,<ea> 1360 mov.l &0xaaaaaaaa,%d1 1367 cas.l %d1,%d2,(%a0) # Dc,Du,<ea> 1388 mov.l &0xaaaaaaaa,%d1 1395 cas.l %d1,%d2,(%a0) # Dc,Du,<ea> 1415 mov.l &0x80000000,%d1 1422 cas.l %d1,%d2,(%a0) # Dc,Du,<ea> 1434 mov.l TESTCTR(%a6),%d1 1455 mov.l &0xaaaaaaaa,%d1 1464 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1488 mov.l &0xaaaaaaaa,%d1 1497 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1521 mov.l &0xaaaaaaaa,%d1 1530 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1554 mov.l &0xaaaaaaaa,%d1 1563 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1589 mov.l &0xaaaaaaaa,%d1 1598 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1624 mov.l &0xaaaaaaaa,%d1 1633 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1659 mov.l &0xaaaaaaaa,%d1 1668 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1694 mov.l &0xaaaaaaaa,%d1 1703 cas2.l 
%d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1729 mov.l &0xaaaaaaaa,%d1 1738 cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1765 mov.w &0xaaaa,%d1 1774 cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1798 mov.w &0xaaaa,%d1 1807 cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1831 mov.w &0xaaaa,%d1 1840 cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1866 mov.w &0xaaaa,%d1 1875 cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1901 mov.w &0xaaaa,%d1 1910 cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1936 mov.w &0xaaaa,%d1 1945 cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) 1960 mov.l TESTCTR(%a6),%d1 1977 mov.l &0x11111120,%d1 1983 cmp2.b %d1,DATA(%a6) 2019 mov.l &0x11111130,%d1 2025 chk2.b DATA(%a6),%d1 2061 mov.l &0x11111150,%d1 2067 cmp2.b %d1,DATA(%a6) 2104 mov.l &0x11112000,%d1 2110 cmp2.w %d1,DATA(%a6) 2146 mov.l &0x11113000,%d1 2152 chk2.w DATA(%a6),%d1 2188 mov.l &0x11111000,%d1 2194 cmp2.w %d1,DATA(%a6) 2232 mov.l &0xa0000000,%d1 2238 cmp2.l %d1,DATA(%a6) 2276 mov.l &0xb0000000,%d1 2282 chk2.l DATA(%a6),%d1 2320 mov.l &0x90000000,%d1 2326 cmp2.l %d1,DATA(%a6) 2364 mov.l &0x111111a0,%d1 2370 cmp2.b %d1,DATA(%a6) 2406 mov.l &0x111111b0,%d1 2412 cmp2.b %d1,DATA(%a6) 2448 mov.l &0x11111190,%d1 2454 cmp2.b %d1,DATA(%a6) 2491 mov.l &0x111111a0,%d1 2497 cmp2.b %d1,DATA(%a6) 2533 mov.l &0x111111b0,%d1 2539 chk2.b DATA(%a6),%d1 2575 mov.l &0x111111d0,%d1 2581 cmp2.b %d1,DATA(%a6) 2611 mov.l TESTCTR(%a6),%d1 5614 mov.l TESTCTR(%a6),%d1 6347 mov.w SCCR(%a6),%d1 6348 cmp.w %d0,%d1 6359 mov.l TESTCTR(%a6),%d1
|
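The cas.w/cas.l/cas2 tests above exercise the 680x0 compare-and-swap forms with %d1 as the compare operand and %d2 as the update value. As a reference for what the tests expect, a sketch of the usual CAS semantics (taken from the architecture definition, not from this file; the real instruction performs the read-modify-write atomically):

    #include <stdint.h>
    #include <stdbool.h>

    /* cas.l %d1,%d2,(%a0): dc is the compare register, du the update. */
    static bool cas_long(uint32_t *ea, uint32_t *dc, uint32_t du)
    {
        if (*ea == *dc) {
            *ea = du;       /* equal: destination takes the update value */
            return true;    /* Z set                                     */
        }
        *dc = *ea;          /* not equal: destination is loaded into Dc  */
        return false;       /* Z clear                                   */
    }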
H A D | isp.S | 289 set EXC_D1, EXC_DREGS+(1*4) # offset of d1 553 tst.l %d1 # ifetch error? 918 mov.w %d0,%d1 # make a copy 921 andi.l &0x7,%d1 # extract reg field 1222 tst.l %d1 # ifetch error? 1234 tst.l %d1 # ifetch error? 1246 tst.l %d1 # ifetch error? 1258 tst.l %d1 # ifetch error? 1270 tst.l %d1 # ifetch error? 1282 tst.l %d1 # ifetch error? 1294 tst.l %d1 # ifetch error? 1306 tst.l %d1 # ifetch error? 1320 mov.l %d1,-(%sp) 1326 tst.l %d1 # ifetch error? 1329 mov.l (%sp)+,%d1 1331 mov.l (EXC_AREGS,%a6,%d1.w*4),%a0 # put base in a0 1346 mov.l %d0,%d1 1347 rol.w &0x4,%d1 1348 andi.w &0xf,%d1 # extract index regno 1350 mov.l (EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value 1354 ext.l %d1 # sign extend word index 1360 lsl.l %d2,%d1 # shift index by scale 1363 add.l %d1,%d0 # index + disp 1390 tst.l %d1 # ifetch error? 1404 tst.l %d1 # ifetch error? 1418 tst.l %d1 # ifetch error? 1441 tst.l %d1 # ifetch error? 1462 mov.l %d0,%d1 # make extword copy 1463 rol.w &0x4,%d1 # rotate reg num into place 1464 andi.w &0xf,%d1 # extract register number 1466 mov.l (EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value 1470 ext.l %d1 # sign extend word index 1476 lsl.l %d2,%d1 # shift index by scale 1479 add.l %d1,%d0 # index + disp 1523 tst.l %d1 # ifetch error? 1532 tst.l %d1 # ifetch error? 1550 tst.l %d1 # ifetch error? 1560 tst.l %d1 # ifetch error? 1576 tst.l %d1 # dfetch error? 1588 tst.l %d1 # ifetch error? 1603 # if dmem_read_long() returns a fail message in d1, the package 1652 mov.w EXC_OPWORD(%a6),%d1 # fetch the opcode word 1654 mov.b %d1,%d0 1661 btst &0x7,%d1 # (reg 2 mem) or (mem 2 reg) 1666 mov.w %d1,%d0 1672 btst &0x6,%d1 # word or long operation? 1685 tst.l %d1 # dfetch error? 1695 tst.l %d1 # dfetch error? 1705 tst.l %d1 # dfetch error? 1715 tst.l %d1 # dfetch error? 1729 tst.l %d1 # dfetch error? 1738 tst.l %d1 # dfetch error? 1746 btst &0x6,%d1 # word or long operation? 1755 tst.l %d1 # dfetch error? 1765 tst.l %d1 # dfetch error? 1776 tst.l %d1 # dfetch error? 1787 tst.l %d1 # dfetch error? 1793 mov.b EXC_OPWORD(%a6),%d1 1794 lsr.b &0x1,%d1 1795 and.w &0x7,%d1 # extract Dx from opcode word 1797 mov.l %d2,(EXC_DREGS,%a6,%d1.w*4) # store dx 1807 tst.l %d1 # dfetch error? 1817 tst.l %d1 # dfetch error? 1823 mov.b EXC_OPWORD(%a6),%d1 1824 lsr.b &0x1,%d1 1825 and.w &0x7,%d1 # extract Dx from opcode word 1827 mov.w %d2,(EXC_DREGS+2,%a6,%d1.w*4) # store dx 1831 # if dmem_{read,write}_byte() returns a fail message in d1, the package 1908 # bound into d0 and the higher bound into d1. 1913 tst.l %d1 # dfetch error? 1921 tst.l %d1 # dfetch error? 1924 mov.l %d0,%d1 # long upper bound in d1 1935 tst.l %d1 # dfetch error? 1938 mov.w %d0, %d1 # place hi in %d1 1942 ext.l %d1 # sign extend hi bnd 1959 tst.l %d1 # dfetch error? 
1962 mov.b %d0, %d1 # place hi in %d1 1966 extb.l %d1 # sign extend hi bnd 1986 sub.l %d0, %d1 # (hi - lo) 1987 cmp.l %d1,%d2 # ((hi - lo) - (Rn - hi)) 2014 # if dmem_read_{long,word}() returns a fail message in d1, the package 2100 mov.b EXC_EXTWORD(%a6), %d1 # extract Dq from extword 2102 lsr.b &0x4, %d1 2103 and.w &0x7, %d1 2105 mov.w %d1, NDQSAVE(%a6) # save Dq for later 2109 mov.l (EXC_DREGS,%a6,%d1.w*4), %d6 # get dividend lo 2197 mov.w NDQSAVE(%a6), %d1 # get Dq off stack 2202 mov.l %d6, (EXC_DREGS,%a6,%d1.w*4) # save quotient 2248 clr.l %d1 2255 mov.w %d5, %d1 # first quotient word 2261 swap %d1 2262 mov.w %d5, %d1 # 2nd quotient 'digit' 2265 mov.l %d1, %d6 # and quotient 2279 clr.l %d1 # %d1 will hold trial quotient 2298 mov.w &0xffff, %d1 # use max trial quotient word 2301 mov.l %d5, %d1 2303 divu.w %d3, %d1 # use quotient of mslw/msw 2305 andi.l &0x0000ffff, %d1 # zero any remainder 2315 mov.l %d1, %d2 2318 mulu.w %d1, %d3 # V1q 2334 subq.l &0x1, %d1 # yes, decrement and recheck 2340 mov.l %d1, %d6 2351 subq.l &0x1, %d1 # q is one too large 2370 mov.w %d1, DDQUOTIENT(%a6) 2371 clr.l %d1 2380 mov.w %d1, DDQUOTIENT+2(%a6) 2439 tst.l %d1 # dfetch error? 2452 tst.l %d1 # ifetch error? 2460 # if dmem_read_long() returns a fail message in d1, the package 2529 clr.w %d1 # clear Dh reg 2530 mov.b %d2, %d1 # grab Dh 2634 mov.l %d4, (EXC_DREGS,%a6,%d1.w*4) # save hi(result) 2654 clr.l (EXC_DREGS,%a6,%d1.w*4) # save hi(result) 2673 tst.l %d1 # dfetch error? 2687 tst.l %d1 # ifetch error? 2695 # if dmem_read_long() returns a fail message in d1, the package 2766 mov.l %d0,%d1 # extension word in d0 2773 mov.l %d1,%d0 2775 lsr.w &0x6,%d1 2776 andi.w &0x7,%d1 # extract Du2 2777 mov.l (EXC_DREGS,%a6,%d1.w*4),%d5 # fetch Update2 Op 2784 mov.l %d0,%d1 2791 mov.l %d1,%d0 2793 lsr.w &0x6,%d1 2794 andi.w &0x7,%d1 # extract Du1 2795 mov.l (EXC_DREGS,%a6,%d1.w*4),%d4 # fetch Update1 Op 2810 mov.l %d7,%d1 # pass size 2817 mov.l %d7,%d1 # pass size 2834 mov.l %d7,%d1 # pass size 2852 cmp.w %d1,%d3 2860 mov.w %d1,(2+EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op 2869 sf %d1 # pass size 2874 sf %d1 # pass size 2883 cmp.l %d1,%d3 2891 mov.l %d1,(EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op 2900 st %d1 # pass size 2905 st %d1 # pass size 3023 mov.l %d0,%d1 # make a copy 3029 andi.w &0x7,%d1 # extract Dc 3030 mov.l (EXC_DREGS,%a6,%d1.w*4),%d4 # get compare operand 3031 mov.w %d1,DC(%a6) # save Dc 3037 mov.l %d7,%d1 # pass size 3059 tst.b %d1 # update compare reg? 3067 sf %d1 # pass size 3080 tst.b %d1 # update compare reg? 3088 st %d1 # pass size 3128 mov.l &26,%d1 # want to move 51 longwords 3133 dbra.w %d1,cas_term_cont # keep going 3277 mov.l %a2,%d1 # ADDR1 3301 mov.l %d1,%a2 # ADDR1 3302 addq.l &0x3,%d1 3303 mov.l %d1,%a4 # ADDR1+3 3345 movs.l (%a1),%d1 # fetch Dest2[31:0] 3354 cmp.l %d1,%d3 # Dest2 - Compare2 3436 movs.l (%a1),%d1 # fetch Dest2[31:0] 3445 cmp.l %d1,%d3 # Dest2 - Compare2 3496 movs.l (%a1),%d1 # fetch Dest2[31:0] 3505 cmp.l %d1,%d3 # Dest2 - Compare2 3579 mov.l %a2,%d1 # ADDR1 3603 mov.l %d1,%a2 # ADDR1 3604 addq.l &0x3,%d1 3605 mov.l %d1,%a4 # ADDR1+3 3645 movs.w (%a1),%d1 # fetch Dest2[15:0] 3654 cmp.w %d1,%d3 # Dest2 - Compare2 3736 movs.w (%a1),%d1 # fetch Dest2[15:0] 3745 cmp.w %d1,%d3 # Dest2 - Compare2 4016 sf %d1 # indicate no update was done 4028 st %d1 # indicate update was done 4041 mov.l %a0,%d1 # byte or word misaligned? 4042 btst &0x0,%d1 4172 sf %d1 # indicate no update was done 4184 st %d1 # indicate update was done
|
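The cmp2/chk2 emulation above ends with the classic single-compare range test: once both bounds are fetched and sign-extended, it forms (hi - lo) in %d1 and folds the bounds check into one compare against a difference involving Rn held in %d2. A sketch of the usual identity behind this kind of single-compare check, assuming lo <= hi (an illustration of the technique, not a line-for-line translation of the handler):

    #include <stdint.h>
    #include <stdbool.h>

    /* rn lies within [lo, hi] iff its offset from lo, taken unsigned,
     * does not exceed the width of the range. */
    static bool in_bounds(int32_t rn, int32_t lo, int32_t hi)
    {
        return (uint32_t)(rn - lo) <= (uint32_t)(hi - lo);
    }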
/linux-4.1.27/arch/m68k/68360/ |
H A D | entry.S | 46 movel %sp@(PT_OFF_ORIG_D0),%d1 48 cmpl #NR_syscalls,%d1 50 lsl #2,%d1 52 jbsr %a0@(%d1) 74 movel %sp,%d1 /* get thread_info pointer */ 75 andl #-THREAD_SIZE,%d1 76 movel %d1,%a2 100 movel %sp,%d1 /* get thread_info pointer */ 101 andl #-THREAD_SIZE,%d1 102 movel %d1,%a2 104 move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */ 109 movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */ 110 btst #TIF_NEED_RESCHED,%d1 151 movel %a0,%d1 /* save prev thread in d1 */
|
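Two d1 idioms show up in the entry.S excerpt above: the syscall number is bounds-checked against NR_syscalls and shifted left by 2 so it indexes the 32-bit pointers of the syscall table before the jbsr, and the thread_info pointer is recovered by masking the stack pointer with -THREAD_SIZE. A sketch of the masking idiom, assuming THREAD_SIZE is a power of two (names illustrative):

    struct thread_info;

    static struct thread_info *thread_info_of(unsigned long sp,
                                              unsigned long thread_size)
    {
        /* andl #-THREAD_SIZE,%d1: round the stack pointer down to the
         * base of the THREAD_SIZE-aligned kernel stack, where
         * thread_info sits. */
        return (struct thread_info *)(sp & ~(thread_size - 1));
    }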
H A D | head-ram.S | 109 moveq.l #0x07, %d1 /* Setup MBAR */ 110 movec %d1, %dfc 118 moveq.l #0x05, %d1 119 movec.l %d1, %dfc
|
H A D | head-rom.S | 121 moveq.l #0x07, %d1 /* Setup MBAR */ 122 movec %d1, %dfc 130 moveq.l #0x05, %d1 131 movec.l %d1, %dfc
|
/linux-4.1.27/drivers/net/wan/ |
H A D | wanxlfw.S | 303 movel SICR, %d1 // D1 = clock settings in SICR 304 andl clocking_mask(%d0), %d1 307 orl clocking_txfromrx(%d0), %d1 311 orl clocking_ext(%d0), %d1 313 movel %d1, SICR // update clock settings in SICR 319 movel first_buffer(%d0), %d1 // D1 = starting buffer address 328 movel %d1, (%a1)+ // buffer address 329 addl #BUFFER_LENGTH, %d1 334 movel %d1, (%a1)+ // buffer address 340 movel %d1, (%a1)+ // buffer address 341 addl #BUFFER_LENGTH, %d1 345 movel %d1, (%a1)+ // buffer address 354 movel tx_first_bd(%d0), %d1 355 movew %d1, SCC_TBASE(%a1) // D1 = offset of first TxBD 356 addl #TX_BUFFERS * 8, %d1 357 movew %d1, SCC_RBASE(%a1) // D1 = offset of first RxBD 419 movel %d0, %d1 420 lsll #4, %d1 // D1 bits 7 and 6 = port 421 orl #1, %d1 422 movew %d1, CR // Init SCC RX and TX params 442 movel ch_status_addr(%d0), %d1 443 clrl STATUS_OPEN(%d1) // confirm the port is closed 453 movel tx_out(%d0), %d1 454 movel %d1, %d2 // D1 = D2 = tx_out BD# = desc# 463 lsll #3, %d1 // BD is 8-bytes long 464 addl tx_first_bd(%d0), %d1 // D1 = current tx_out BD addr 466 movel 4(%d1), %a1 // A1 = dest address 468 movew %d2, 2(%d1) // length into BD 470 bsetl #31, (%d1) // CP go ahead 473 movel tx_out(%d0), %d1 474 addl #1, %d1 475 cmpl #TX_BUFFERS, %d1 477 clrl %d1 478 tx_1: movel %d1, tx_out(%d0) 489 rx: movel rx_in(%d0), %d1 // D1 = rx_in BD# 490 lsll #3, %d1 // BD is 8-bytes long 491 addl rx_first_bd(%d0), %d1 // D1 = current rx_in BD address 492 movew (%d1), %d2 // D2 = RX BD flags 507 movew 2(%d1), %d3 520 movel 4(%d1), %a0 // A0 = source address 538 andw #0xF000, (%d1) // clear CM and error bits 539 bsetl #31, (%d1) // free BD 541 movel rx_in(%d0), %d1 542 addl #1, %d1 543 cmpl #RX_BUFFERS, %d1 545 clrl %d1 546 rx_2: movel %d1, rx_in(%d0) 568 movel tx_in(%d0), %d1 569 movel %d1, %d2 // D1 = D2 = tx_in BD# = desc# 570 lsll #3, %d1 // BD is 8-bytes long 571 addl tx_first_bd(%d0), %d1 // D1 = current tx_in BD address 572 movew (%d1), %d3 // D3 = TX BD flags 579 movel tx_in(%d0), %d1 580 addl #1, %d1 581 cmpl #TX_BUFFERS, %d1 583 clrl %d1 585 movel %d1, tx_in(%d0) 688 movel %d1, -(%sp) 697 movew (%a0), %d1 // D1 = CSR input bits 698 andl #0xE7, %d1 // PM and cable sense bits (no DCE bit) 699 cmpw #STATUS_CABLE_V35 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 701 movew #0x0E08, %d1 705 cmpw #STATUS_CABLE_X21 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 707 movew #0x0408, %d1 711 cmpw #STATUS_CABLE_V24 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 713 movew #0x0208, %d1 717 cmpw #STATUS_CABLE_EIA530 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1 719 movew #0x0D08, %d1 723 movew #0x0008, %d1 // D1 = disable everything 730 orw %d2, %d1 // D1 = all requested output bits 734 cmpw old_csr_output(%d0), %d1 736 movew %d1, old_csr_output(%d0) 737 movew %d1, (%a0) // Write CSR output bits 740 movew (PCDAT), %d1 741 andw dcd_mask(%d0), %d1 743 movew (%a0), %d1 // D1 = CSR input bits 744 andw #~STATUS_CABLE_DCD, %d1 // DCD off 748 movew (%a0), %d1 // D1 = CSR input bits 749 orw #STATUS_CABLE_DCD, %d1 // DCD on 751 andw %d2, %d1 // input mask 753 cmpl STATUS_CABLE(%a1), %d1 // check for change 755 movel %d1, STATUS_CABLE(%a1) // update status 767 movel (%sp)+, %d1 782 movel #0x12345678, %d1 // D1 = test value 783 movel %d1, (128 * 1024 - 4) 790 cmpl (%a0), %d1 797 eorl #0xFFFFFFFF, %d1 798 movel %d1, (128 * 1024 - 4) 799 cmpl (%a0), %d1 806 movel %d0, %d1 // D1 = DBf counter 809 dbfw %d1, ram_test_fill 810 subl #0x10000, %d1 811 cmpl #0xFFFFFFFF, %d1
|
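The firmware excerpt above advances its tx_out/rx_in/tx_in indices with the same wrap-at-N pattern in %d1, and turns an index into a buffer-descriptor address by scaling it by 8 (lsll #3) and adding the ring base. A C sketch of the two helpers, with placeholder names and an illustrative ring size:

    #include <stdint.h>

    #define N_BUFFERS 10                 /* stand-in for TX_BUFFERS/RX_BUFFERS */

    static unsigned int ring_advance(unsigned int idx)
    {
        idx += 1;
        return (idx == N_BUFFERS) ? 0 : idx;   /* wrap back to descriptor 0 */
    }

    static uintptr_t bd_address(uintptr_t first_bd, unsigned int idx)
    {
        return first_bd + idx * 8;             /* each BD is 8 bytes long */
    }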
/linux-4.1.27/arch/m68k/ifpsp060/ |
H A D | os.S | 84 | d1 - 0 = success, !0 = failure 94 clr.l %d1 | return success 97 move.b (%a0)+,%d1 | copy 1 byte 99 movs.b %d1,(%a1)+ 101 clr.l %d1 | return success 115 | d1 - 0 = success, !0 = failure 127 clr.l %d1 | return success 131 movs.b (%a0)+,%d1 132 move.b %d1,(%a1)+ | copy 1 byte 134 clr.l %d1 | return success 147 | d1 - 0 = success, !0 = failure 152 clr.l %d1 | assume success 170 | d1 - 0 = success, !0 = failure 181 | d1 - 0 = success, !0 = failure 187 clr.l %d1 | assume success 206 | d1 - 0 = success, !0 = failure 217 | d1 - 0 = success, !0 = failure 223 clr.l %d1 | assume success 241 | d1 - 0 = success, !0 = failure 245 clr.l %d1 | assume success 263 | d1 - 0 = success, !0 = failure 267 clr.l %d1 | assume success 274 dmwwr: clr.l %d1 | return success 287 | d1 - 0 = success, !0 = failure 291 clr.l %d1 | assume success 323 move.b (%a0)+,%d1 | fetch supervisor byte 325 movs.b %d1,(%a1)+ | store user byte 341 movs.b (%a0)+,%d1 | fetch user byte 342 move.b %d1,(%a1)+ | write supervisor byte 384 1: moveq #-1,%d1
|
H A D | iskeleton.S | 193 | d1 = `xxxxxxff -> longword; `xxxxxx00 -> word 220 tst.b %d1 259 | d1 = `xxxxxxff -> longword; `xxxxxx00 -> word
|
/linux-4.1.27/arch/nios2/kernel/ |
H A D | misaligned.c | 73 u8 a, b, d0, d1, d2, d3; handle_unaligned_c() local 103 fault |= __get_user(d1, (u8 *)(addr+1)); handle_unaligned_c() 104 val = (d1 << 8) | d0; handle_unaligned_c() 110 d1 = val >> 8; handle_unaligned_c() 120 *(u8 *)(addr+1) = d1; handle_unaligned_c() 123 fault |= __put_user(d1, (u8 *)(addr+1)); handle_unaligned_c() 129 fault |= __get_user(d1, (u8 *)(addr+1)); handle_unaligned_c() 130 val = (short)((d1 << 8) | d0); handle_unaligned_c() 138 d1 = val >> 8; handle_unaligned_c() 142 *(u8 *)(addr+1) = d1; handle_unaligned_c() 147 fault |= __put_user(d1, (u8 *)(addr+1)); handle_unaligned_c() 155 fault |= __get_user(d1, (u8 *)(addr+1)); handle_unaligned_c() 158 val = (d3 << 24) | (d2 << 16) | (d1 << 8) | d0; handle_unaligned_c()
|
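The nios2 handler above reassembles halfwords and words one byte at a time, with d0..d3 holding the individual bytes in little-endian order. A plain-pointer sketch of the same assembly/split (the real code goes through __get_user/__put_user and accumulates a fault flag):

    #include <stdint.h>

    static uint32_t load32_bytewise(const uint8_t *addr)
    {
        uint8_t d0 = addr[0], d1 = addr[1], d2 = addr[2], d3 = addr[3];

        return ((uint32_t)d3 << 24) | ((uint32_t)d2 << 16) |
               ((uint32_t)d1 << 8)  | d0;
    }

    static void store16_bytewise(uint8_t *addr, uint16_t val)
    {
        addr[0] = val & 0xff;    /* d0 */
        addr[1] = val >> 8;      /* d1 = val >> 8, as in the handler */
    }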
/linux-4.1.27/arch/m68k/coldfire/ |
H A D | entry.S | 112 movel %sp,%d1 /* get thread_info pointer */ 113 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ 114 movel %d1,%a0 115 movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */ 116 andl #(1<<TIF_NEED_RESCHED),%d1 119 movel %a0@(TINFO_PREEMPT),%d1 120 cmpl #0,%d1 128 moveml %sp@,%d1-%d5/%a0-%a2 136 movel %sp,%d1 /* get thread_info pointer */ 137 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ 138 movel %d1,%a0 139 moveb %a0@(TINFO_FLAGS+3),%d1 /* thread_info->flags (low 8 bits) */ 146 movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */ 148 btst #TIF_NEED_RESCHED,%d1 186 movew %sr,%d1 /* save current status */ 187 movew %d1,%a0@(TASK_THREAD+THREAD_SR) 188 movel %a0,%d1 /* get prev thread in d1 */
|
H A D | head.S | 51 movel MCFSIM_DMR1,%d1 /* get mask for 2nd bank */ 52 btst #0,%d1 /* check if region enabled */ 54 andl #0xfffc0000,%d1 56 addl #0x00040000,%d1 57 addl %d1,%d0 /* total mem size in d0 */ 82 moveql #1, %d1 83 lsll %d2, %d1 /* 2 ^ exponent */ 84 addl %d1, %d0 /* Total size of SDRAM in d0 */
|
/linux-4.1.27/include/uapi/linux/ |
H A D | uuid.h | 35 #define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ 40 (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) 42 #define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ 47 (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
|
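The UUID_LE/UUID_BE macros above take the three leading fields plus eight trailing bytes d0..d7 and expand to a 16-byte initializer (little-endian or big-endian layout of the leading fields, respectively). A usage example with arbitrary placeholder values, not a real GUID:

    #include <linux/uuid.h>

    static const uuid_le example_uuid =
            UUID_LE(0x12345678, 0x9abc, 0xdef0,
                    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);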
/linux-4.1.27/lib/mpi/ |
H A D | mpih-div.c | 103 mpi_limb_t d1, d0; mpihelp_divrem() local 106 d1 = dp[1]; mpihelp_divrem() 111 if (n1 >= d1 && (n1 > d1 || n0 >= d0)) { mpihelp_divrem() 112 sub_ddmmss(n1, n0, n1, n0, d1, d0); mpihelp_divrem() 125 if (n1 == d1) { mpihelp_divrem() 131 r = n0 + d1; mpihelp_divrem() 132 if (r < d1) { /* Carry in the addition? */ mpihelp_divrem() 141 udiv_qrnnd(q, r, n1, n0, d1); mpihelp_divrem() 151 r += d1; mpihelp_divrem() 152 if (r >= d1) /* If not carry, test Q again. */ mpihelp_divrem() 167 mpi_limb_t dX, d1, n0; mpihelp_divrem() local 171 d1 = dp[dsize - 2]; mpihelp_divrem() 205 umul_ppmm(n1, n0, d1, q); mpihelp_divrem() 214 n1 -= n0 < d1; mpihelp_divrem() 215 n0 -= d1; mpihelp_divrem()
|
/linux-4.1.27/arch/m68k/68000/ |
H A D | entry.S | 50 movel %sp@(PT_OFF_ORIG_D0),%d1 52 cmpl #NR_syscalls,%d1 54 lsl #2,%d1 56 jbsr %a0@(%d1) 78 movel %sp,%d1 /* get thread_info pointer */ 79 andl #-THREAD_SIZE,%d1 80 movel %d1,%a2 104 movel %sp,%d1 /* get thread_info pointer */ 105 andl #-THREAD_SIZE,%d1 106 movel %d1,%a2 108 move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */ 113 movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */ 114 btst #TIF_NEED_RESCHED,%d1 231 movel %a0,%d1 /* save prev thread in d1 */
|
/linux-4.1.27/drivers/block/ |
H A D | swim_asm.S | 52 moveml %d1-%d5/%a0-%a4,%sp@- 55 moveml %sp@+, %d1-%d5/%a0-%a4 80 moveq #3, %d1 90 dbne %d1, wait_addr_mark_byte 155 moveml %d1-%d5/%a0-%a5,%sp@- 158 moveml %sp@+, %d1-%d5/%a0-%a5 178 moveq #3, %d1 190 dbne %d1, wait_data_mark_byte
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | entry.h | 13 * This allows access to the syscall arguments in registers d1-d5 15 * 0(sp) - d1 75 moveml %d1-%d5/%a0-%a2,%sp@ 85 moveml %d1-%d5/%a0-%a2,%sp@ 100 moveml %sp@,%d1-%d5/%a0-%a2 131 moveml %d1-%d5/%a0-%a2,%sp@ 140 moveml %d1-%d5/%a0-%a2,%sp@ 144 moveml %sp@,%d1-%d5/%a0-%a2 189 moveml %d1-%d5/%a0-%a2,%sp@- 196 moveml %d1-%d5/%a0-%a2,%sp@- 200 moveml %sp@+,%a0-%a2/%d1-%d5 250 "moveml %%d1-%%d5/%%a0-%%a2,%%sp@-"
|
H A D | bootstd.h | 63 register long __a __asm__ ("%d1") = (long)a; \ 75 register long __a __asm__ ("%d1") = (long)a; \ 88 register long __a __asm__ ("%d1") = (long)a; \ 103 register long __a __asm__ ("%d1") = (long)a; \ 119 register long __a __asm__ ("%d1") = (long)a; \
|
H A D | switch_to.h | 20 * Beware that resume now expects *next to be in d1 and the offset of 33 register void *_last __asm__ ("d1"); \
|
H A D | a.out-core.h | 42 dump->regs.d1 = regs->d1; aout_dump_thread()
|
H A D | elf.h | 81 pr_reg[0] = regs->d1; \
|
H A D | math-emu.h | 259 movem.l %d0/%d1/%a0/%a1,-(%sp) 275 movem.l (%sp)+,%d0/%d1/%a0/%a1
|
H A D | user.h | 40 long d1,d2,d3,d4,d5,d6,d7; member in struct:user_regs_struct
|
/linux-4.1.27/drivers/iio/adc/ |
H A D | twl6030-gpadc.c | 567 * The difference(d1, d2) between ideal and measured codes stored in trim 571 * gain: k = 1 + ((d2 - d1) / (x2 - x1)) 572 * offset: b = d1 + (k - 1) * x1 575 int channel, int d1, int d2) twl6030_calibrate_channel() 590 k = 1000 + (((d2 - d1) * 1000) / (x2 - x1)); twl6030_calibrate_channel() 593 b = (d1 * 1000) - (k - 1000) * x1; twl6030_calibrate_channel() 599 dev_dbg(gpadc->dev, "GPADC d1 for Chn: %d = %d\n", channel, d1); twl6030_calibrate_channel() 627 s8 d1, d2; twl6030_calibration() local 647 d1 = trim_regs[0]; twl6030_calibration() 655 d1 = trim_regs[4]; twl6030_calibration() 659 d1 = trim_regs[12]; twl6030_calibration() 663 d1 = trim_regs[6]; twl6030_calibration() 667 d1 = trim_regs[2]; twl6030_calibration() 671 d1 = trim_regs[8]; twl6030_calibration() 675 d1 = trim_regs[10]; twl6030_calibration() 679 d1 = trim_regs[14]; twl6030_calibration() 686 d1 = twl6030_gpadc_get_trim_offset(d1); twl6030_calibration() 689 twl6030_calibrate_channel(gpadc, chn, d1, d2); twl6030_calibration() 711 int chn, d1 = 0, d2 = 0, temp; twl6032_calibration() local 740 d1 = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f, twl6032_calibration() 748 d1 = temp + twl6032_get_trim_value(trim_regs, 7, 6, twl6032_calibration() 759 d1 = temp + twl6032_get_trim_value(trim_regs, 13, 11, twl6032_calibration() 768 d1 = twl6032_get_trim_value(trim_regs, 10, 8, 0x0f, twl6032_calibration() 778 d1 = (trim_regs[4] & 0x7E) >> 1; twl6032_calibration() 780 d1 = -d1; twl6032_calibration() 781 d1 += temp; twl6032_calibration() 797 twl6030_calibrate_channel(gpadc, chn, d1, d2); twl6032_calibration() 574 twl6030_calibrate_channel(struct twl6030_gpadc_data *gpadc, int channel, int d1, int d2) twl6030_calibrate_channel() argument
|
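The calibration math in the excerpt above derives a per-channel gain and offset from the trim deltas d1/d2 measured at the ideal codes x1/x2, keeping both in thousandths to stay in integer arithmetic. A sketch that mirrors those two lines (parameter names illustrative):

    /* k is the gain and b the offset, both scaled by 1000. */
    static void channel_calib(int d1, int d2, int x1, int x2,
                              int *gain_x1000, int *offset_x1000)
    {
        int k = 1000 + ((d2 - d1) * 1000) / (x2 - x1);
        int b = d1 * 1000 - (k - 1000) * x1;

        *gain_x1000 = k;
        *offset_x1000 = b;
    }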
/linux-4.1.27/fs/ntfs/ |
H A D | collate.c | 49 u32 d1, d2; ntfs_collate_ntofs_ulong() local 55 d1 = le32_to_cpup(data1); ntfs_collate_ntofs_ulong() 57 if (d1 < d2) ntfs_collate_ntofs_ulong() 60 if (d1 == d2) ntfs_collate_ntofs_ulong()
|
/linux-4.1.27/arch/mn10300/boot/compressed/ |
H A D | head.S | 42 mov d1,(4,a0) 98 mov (4,a0),d1 118 mov L1_CACHE_NENTRIES,d1 128 add -1,d1
|
/linux-4.1.27/arch/c6x/include/asm/ |
H A D | checksum.h | 18 asm ("add .d1 %1,%5,%1\n" csum_tcpudp_nofold()
|
/linux-4.1.27/drivers/staging/comedi/drivers/ |
H A D | comedi_8254.c | 366 unsigned int d1 = i8254->next_div1 ? i8254->next_div1 : I8254_MAX_COUNT; comedi_8254_cascade_ns_to_timer() local 368 unsigned int div = d1 * d2; comedi_8254_cascade_ns_to_timer() 382 d1 > 1 && d1 <= I8254_MAX_COUNT && comedi_8254_cascade_ns_to_timer() 385 div > d1 && div > d2 && comedi_8254_cascade_ns_to_timer() 395 for (d1 = start; d1 <= div / d1 + 1 && d1 <= I8254_MAX_COUNT; d1++) { comedi_8254_cascade_ns_to_timer() 396 for (d2 = div / d1; comedi_8254_cascade_ns_to_timer() 397 d1 * d2 <= div + d1 + 1 && d2 <= I8254_MAX_COUNT; d2++) { comedi_8254_cascade_ns_to_timer() 398 ns = i8254->osc_base * d1 * d2; comedi_8254_cascade_ns_to_timer() 401 d1_glb = d1; comedi_8254_cascade_ns_to_timer() 406 d1_lub = d1; comedi_8254_cascade_ns_to_timer() 418 d1 = d1_lub; comedi_8254_cascade_ns_to_timer() 421 d1 = d1_glb; comedi_8254_cascade_ns_to_timer() 426 d1 = d1_lub; comedi_8254_cascade_ns_to_timer() 430 d1 = d1_glb; comedi_8254_cascade_ns_to_timer() 435 *nanosec = d1 * d2 * i8254->osc_base; comedi_8254_cascade_ns_to_timer() 436 i8254->next_div1 = d1; comedi_8254_cascade_ns_to_timer()
|
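comedi_8254_cascade_ns_to_timer() above searches for a pair of 16-bit divisors d1, d2 whose product, times the oscillator period, brackets the requested period, tracking both the closest value below (glb) and above (lub) the target. A simplified sketch that only keeps the closest period not above the request (the I8254_MAX_COUNT value and the scan order here are assumptions, not taken from the driver):

    #include <stdint.h>

    #define I8254_MAX_COUNT 0x10000     /* assumed full 16-bit count */

    static uint64_t best_cascade(uint64_t osc_base, uint64_t target_ns,
                                 unsigned int *div1, unsigned int *div2)
    {
        uint64_t best = 0;
        unsigned int d1;

        for (d1 = 2; d1 <= I8254_MAX_COUNT; d1++) {
            /* largest second divisor whose product still fits the target */
            uint64_t d2 = target_ns / (osc_base * d1);

            if (d2 > I8254_MAX_COUNT)
                d2 = I8254_MAX_COUNT;
            if (d2 < 2)
                continue;

            if (osc_base * d1 * d2 > best) {
                best = osc_base * d1 * d2;
                *div1 = d1;
                *div2 = (unsigned int)d2;
            }
        }
        return best;    /* closest achievable period not above target_ns */
    }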
/linux-4.1.27/arch/x86/include/asm/ |
H A D | string_32.h | 34 int d0, d1, d2; __memcpy() local 41 : "=&c" (d0), "=&D" (d1), "=&S" (d2) __memcpy() 209 int d0, d1; __memset_generic() local 212 : "=&c" (d0), "=&D" (d1) __memset_generic() 229 int d0, d1; __constant_c_memset() local 238 : "=&c" (d0), "=&D" (d1) __constant_c_memset() 281 : "=&c" (d0), "=&D" (d1) \ __constant_c_and_count_memset() 286 int d0, d1; __constant_c_and_count_memset() local
|
H A D | string_64.h | 11 unsigned long d0, d1, d2; __inline_memcpy() local 20 : "=&c" (d0), "=&D" (d1), "=&S" (d2) __inline_memcpy()
|
/linux-4.1.27/fs/hpfs/ |
H A D | dnode.c | 545 struct dnode *d1; delete_empty_dnode() local 555 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { delete_empty_dnode() 556 d1->up = cpu_to_le32(up); delete_empty_dnode() 557 d1->root_dnode = 1; delete_empty_dnode() 586 struct dnode *d1; delete_empty_dnode() local 589 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { delete_empty_dnode() 590 d1->up = cpu_to_le32(up); delete_empty_dnode() 603 struct dnode *d1; delete_empty_dnode() local 617 if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { delete_empty_dnode() 618 d1->up = cpu_to_le32(ndown); delete_empty_dnode() 631 struct dnode *d1; delete_empty_dnode() local 643 if ((d1 = hpfs_map_dnode(i->i_sb, ndown, &qbh1))) { delete_empty_dnode() 644 struct hpfs_dirent *del = dnode_last_de(d1); delete_empty_dnode() 647 if (le32_to_cpu(d1->first_free) > 2044) { delete_empty_dnode() 661 le32_add_cpu(&d1->first_free, 4); delete_empty_dnode() 666 le32_add_cpu(&d1->first_free, -4); delete_empty_dnode() 689 if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { delete_empty_dnode() 690 d1->up = cpu_to_le32(ndown); delete_empty_dnode() 749 int d1, d2 = 0; hpfs_count_dnodes() local 789 if (hpfs_stop_cycles(s, ptr, &d1, &d2, "hpfs_count_dnodes #2")) return; hpfs_count_dnodes() 953 dnode_secno d1, d2, rdno = dno; hpfs_remove_dtree() local 958 if (de->down) d1 = de_down_pointer(de); hpfs_remove_dtree() 962 dno = d1; hpfs_remove_dtree() 966 d1 = de->down ? de_down_pointer(de) : 0; hpfs_remove_dtree() 973 while (d1) { hpfs_remove_dtree() 974 if (!(dnode = hpfs_map_dnode(s, dno = d1, &qbh))) return; hpfs_remove_dtree() 977 d1 = de->down ? de_down_pointer(de) : 0; hpfs_remove_dtree() 981 d1 = d2; hpfs_remove_dtree() 983 } while (d1); hpfs_remove_dtree() 1009 int d1, d2 = 0; map_fnode_dirent() local 1083 if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { map_fnode_dirent()
|
H A D | anode.c | 283 int d1, d2; hpfs_remove_btree() local 290 if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) hpfs_remove_btree()
|
/linux-4.1.27/net/ipv4/ |
H A D | tcp_illinois.c | 140 u32 d1 = dm / 100; /* Low threshold */ alpha() local 142 if (da <= d1) { alpha() 163 * (dm - d1) amin amax alpha() 167 * (dm - d1) amin alpha() 168 * k2 = ---------------- - d1 alpha() 176 dm -= d1; alpha() 177 da -= d1; alpha()
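In the alpha() fragment, d1 = dm/100 is the low-delay threshold: while the averaged queueing delay da stays at or below it the sender keeps its maximum additive-increase factor, and above it alpha falls off as k1/(k2 + da), with k1 and k2 chosen (per the partially quoted comment) so the curve equals amax at da = d1 and amin at da = dm. A small sketch of that interpolation; the ALPHA_* constants below are illustrative stand-ins, not the kernel's values:

#include <stdio.h>

#define ALPHA_SCALE     8U              /* illustrative fixed-point scale */
#define ALPHA_MIN       (3U * ALPHA_SCALE / 10U)
#define ALPHA_MAX       (10U * ALPHA_SCALE)

/* Full alpha inside the low-delay zone, then a 1/x fall-off to ALPHA_MIN. */
static unsigned int alpha(unsigned int da, unsigned int dm)
{
    unsigned int d1 = dm / 100;

    if (da <= d1)
        return ALPHA_MAX;

    dm -= d1;
    da -= d1;
    return (dm * ALPHA_MAX * ALPHA_MIN) /
           (dm * ALPHA_MIN + da * (ALPHA_MAX - ALPHA_MIN));
}

int main(void)
{
    unsigned int dm = 100000;           /* max observed delay, us */

    for (unsigned int da = 0; da <= dm; da += dm / 4)
        printf("da=%6u alpha=%2u/%u\n", da, alpha(da, dm), ALPHA_SCALE);
    return 0;
}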
|
/linux-4.1.27/net/netfilter/ |
H A D | nft_bitwise.c | 54 struct nft_data_desc d1, d2; nft_bitwise_init() local 76 err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &d1, nft_bitwise_init() 80 if (d1.len != priv->len) nft_bitwise_init()
|
/linux-4.1.27/drivers/video/fbdev/ |
H A D | amifb.c | 2605 unsigned long d0, d1; bitcpy() local 2672 d1 = *src; bitcpy() 2673 *dst = comp(d0 << left | d1 >> right, *dst, bitcpy() 2687 d1 = *src++; bitcpy() 2688 *dst = comp(d0 << left | d1 >> right, *dst, bitcpy() 2690 d0 = d1; bitcpy() 2699 d1 = *src++; bitcpy() 2700 *dst++ = d0 << left | d1 >> right; bitcpy() 2701 d0 = d1; bitcpy() 2702 d1 = *src++; bitcpy() 2703 *dst++ = d0 << left | d1 >> right; bitcpy() 2704 d0 = d1; bitcpy() 2705 d1 = *src++; bitcpy() 2706 *dst++ = d0 << left | d1 >> right; bitcpy() 2707 d0 = d1; bitcpy() 2708 d1 = *src++; bitcpy() 2709 *dst++ = d0 << left | d1 >> right; bitcpy() 2710 d0 = d1; bitcpy() 2714 d1 = *src++; bitcpy() 2715 *dst++ = d0 << left | d1 >> right; bitcpy() 2716 d0 = d1; bitcpy() 2726 d1 = *src; bitcpy() 2727 *dst = comp(d0 << left | d1 >> right, bitcpy() 2745 unsigned long d0, d1; bitcpy_rev() local 2823 d1 = *src; bitcpy_rev() 2824 *dst = comp(d0 >> right | d1 << left, *dst, bitcpy_rev() 2838 d1 = *src--; bitcpy_rev() 2839 *dst = comp(d0 >> right | d1 << left, *dst, bitcpy_rev() 2841 d0 = d1; bitcpy_rev() 2850 d1 = *src--; bitcpy_rev() 2851 *dst-- = d0 >> right | d1 << left; bitcpy_rev() 2852 d0 = d1; bitcpy_rev() 2853 d1 = *src--; bitcpy_rev() 2854 *dst-- = d0 >> right | d1 << left; bitcpy_rev() 2855 d0 = d1; bitcpy_rev() 2856 d1 = *src--; bitcpy_rev() 2857 *dst-- = d0 >> right | d1 << left; bitcpy_rev() 2858 d0 = d1; bitcpy_rev() 2859 d1 = *src--; bitcpy_rev() 2860 *dst-- = d0 >> right | d1 << left; bitcpy_rev() 2861 d0 = d1; bitcpy_rev() 2865 d1 = *src--; bitcpy_rev() 2866 *dst-- = d0 >> right | d1 << left; bitcpy_rev() 2867 d0 = d1; bitcpy_rev() 2877 d1 = *src; bitcpy_rev() 2878 *dst = comp(d0 >> right | d1 << left, bitcpy_rev() 2897 unsigned long d0, d1; bitcpy_not() local 2964 d1 = ~*src; bitcpy_not() 2965 *dst = comp(d0 << left | d1 >> right, *dst, bitcpy_not() 2979 d1 = ~*src++; bitcpy_not() 2980 *dst = comp(d0 << left | d1 >> right, *dst, bitcpy_not() 2982 d0 = d1; bitcpy_not() 2991 d1 = ~*src++; bitcpy_not() 2992 *dst++ = d0 << left | d1 >> right; bitcpy_not() 2993 d0 = d1; bitcpy_not() 2994 d1 = ~*src++; bitcpy_not() 2995 *dst++ = d0 << left | d1 >> right; bitcpy_not() 2996 d0 = d1; bitcpy_not() 2997 d1 = ~*src++; bitcpy_not() 2998 *dst++ = d0 << left | d1 >> right; bitcpy_not() 2999 d0 = d1; bitcpy_not() 3000 d1 = ~*src++; bitcpy_not() 3001 *dst++ = d0 << left | d1 >> right; bitcpy_not() 3002 d0 = d1; bitcpy_not() 3006 d1 = ~*src++; bitcpy_not() 3007 *dst++ = d0 << left | d1 >> right; bitcpy_not() 3008 d0 = d1; bitcpy_not() 3018 d1 = ~*src; bitcpy_not() 3019 *dst = comp(d0 << left | d1 >> right, bitcpy_not()
|
H A D | atafb_utils.h | 225 "1: movem.l (%0)+,%%d0/%%d1/%%a0/%%a1\n" fast_memmove() 226 " movem.l %%d0/%%d1/%%a0/%%a1,%1@\n" fast_memmove() 233 : "d0", "d1", "a0", "a1", "memory"); fast_memmove() 237 " movem.l %0@,%%d0/%%d1/%%a0/%%a1\n" fast_memmove() 238 " movem.l %%d0/%%d1/%%a0/%%a1,-(%1)\n" fast_memmove() 244 : "d0", "d1", "a0", "a1", "memory"); fast_memmove()
|
H A D | tridentfb.c | 238 u32 d1 = point(x2, y2); blade_copy_rect() local 249 writemmr(par, DST1, direction ? d2 : d1); blade_copy_rect() 250 writemmr(par, DST2, direction ? d1 : d2); blade_copy_rect() 400 u32 d1 = point(x2, y2); image_copy_rect() local 411 writemmr(par, DST1, direction ? d2 : d1); image_copy_rect() 412 writemmr(par, DST2, direction ? d1 : d2); image_copy_rect()
|
H A D | platinumfb.h | 28 unsigned char d1; member in struct:cmap_regs
|
/linux-4.1.27/arch/arm/plat-samsung/include/plat/ |
H A D | adc.h | 30 unsigned d0, unsigned d1,
|
/linux-4.1.27/scripts/dtc/ |
H A D | data.c | 154 struct data data_merge(struct data d1, struct data d2) data_merge() argument 159 d = data_append_markers(data_append_data(d1, d2.val, d2.len), m2); data_merge() 161 /* Adjust for the length of d1 */ data_merge() 163 m2->offset += d1.len; data_merge()
|
/linux-4.1.27/arch/xtensa/platforms/iss/include/platform/ |
H A D | simcall.h | 68 register int d1 asm("a5") = d; __simc() 74 : "r"(c1), "r"(d1) __simc()
|
/linux-4.1.27/arch/mn10300/include/asm/ |
H A D | syscall.h | 64 *args++ = regs->d1; syscall_get_arguments() 96 regs->d1 = *args++; syscall_set_arguments()
|
H A D | elf.h | 81 _ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \ 125 pr_reg[22] = regs->d1; \
|
H A D | gdb-stub.h | 80 u32 d0, d1, d2, d3, a0, a1, a2, a3; member in struct:gdb_regs
|
/linux-4.1.27/fs/fat/ |
H A D | dir.c | 706 struct dirent_type __user *d1 = buf->dirent; \ 707 struct dirent_type __user *d2 = d1 + 1; \ 715 if (name_len >= sizeof(d1->d_name)) \ 716 name_len = sizeof(d1->d_name) - 1; \ 720 copy_to_user(d1->d_name, name, name_len) || \ 721 put_user(0, d1->d_name + name_len) || \ 722 put_user(name_len, &d1->d_reclen)) \ 731 if (long_len >= sizeof(d1->d_name)) \ 732 long_len = sizeof(d1->d_name) - 1; \ 733 if (short_len >= sizeof(d1->d_name)) \ 734 short_len = sizeof(d1->d_name) - 1; \ 741 copy_to_user(d1->d_name, shortname, short_len) || \ 742 put_user(0, d1->d_name + short_len) || \ 743 put_user(short_len, &d1->d_reclen)) \ 784 struct __fat_dirent __user *d1 = (struct __fat_dirent __user *)arg; fat_dir_ioctl() local 800 if (!access_ok(VERIFY_WRITE, d1, sizeof(struct __fat_dirent[2]))) fat_dir_ioctl() 807 if (put_user(0, &d1->d_reclen)) fat_dir_ioctl() 810 return fat_ioctl_readdir(inode, filp, d1, fat_ioctl_filldir, fat_dir_ioctl() 824 struct compat_dirent __user *d1 = compat_ptr(arg); fat_compat_dir_ioctl() local 840 if (!access_ok(VERIFY_WRITE, d1, sizeof(struct compat_dirent[2]))) fat_compat_dir_ioctl() 847 if (put_user(0, &d1->d_reclen)) fat_compat_dir_ioctl() 850 return fat_ioctl_readdir(inode, filp, d1, fat_compat_ioctl_filldir, fat_compat_dir_ioctl()
|
/linux-4.1.27/drivers/ata/ |
H A D | pata_at32.c | 243 const int d1 = 0xff; pata_at32_debug_bus() local 249 iowrite8(d1, info->alt_addr + (0x06 << 1)); pata_at32_debug_bus() 253 iowrite8(d1, info->ide_addr + (i << 1)); pata_at32_debug_bus() 258 iowrite16(d1, info->ide_addr); pata_at32_debug_bus() 259 iowrite16(d1 << 8, info->ide_addr); pata_at32_debug_bus() 261 iowrite16(d1, info->ide_addr); pata_at32_debug_bus() 262 iowrite16(d1 << 8, info->ide_addr); pata_at32_debug_bus()
|
/linux-4.1.27/drivers/media/rc/ |
H A D | rc-core-priv.h | 121 static inline bool geq_margin(unsigned d1, unsigned d2, unsigned margin) geq_margin() argument 123 return d1 > (d2 - margin); geq_margin() 126 static inline bool eq_margin(unsigned d1, unsigned d2, unsigned margin) eq_margin() argument 128 return ((d1 > (d2 - margin)) && (d1 < (d2 + margin))); eq_margin()
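geq_margin() and eq_margin() are the duration tests the IR protocol decoders use: a measured duration d1 matches an expected one d2 when it lies within d2 plus or minus the allowed margin. A tiny usage sketch; the 560 us unit and 150 us margin below are made-up numbers, not taken from any real protocol table:

#include <stdbool.h>
#include <stdio.h>

/* Same helpers as in the header above, reproduced for a user-space demo. */
static inline bool geq_margin(unsigned d1, unsigned d2, unsigned margin)
{
    return d1 > (d2 - margin);
}

static inline bool eq_margin(unsigned d1, unsigned d2, unsigned margin)
{
    return (d1 > (d2 - margin)) && (d1 < (d2 + margin));
}

int main(void)
{
    unsigned unit = 560, margin = 150;      /* hypothetical, microseconds */
    unsigned samples[] = { 540, 610, 1120, 1700 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%4u us: short=%d long=%d\n", samples[i],
               eq_margin(samples[i], unit, margin),
               eq_margin(samples[i], 3 * unit, margin));
    return 0;
}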
|
/linux-4.1.27/drivers/clk/ |
H A D | clk-vt8500.c | 332 #define WM8650_BITS_TO_FREQ(r, m, d1, d2) \ 333 (r * m / (d1 * (1 << d2))) 335 #define WM8650_BITS_TO_VAL(m, d1, d2) \ 336 ((d2 << 13) | (d1 << 10) | (m & 0x3FF)) 342 #define WM8750_BITS_TO_FREQ(r, m, d1, d2) \ 343 (r * (m+1) / ((d1+1) * (1 << d2))) 345 #define WM8750_BITS_TO_VAL(f, m, d1, d2) \ 346 ((f << 24) | ((m - 1) << 16) | ((d1 - 1) << 8) | d2) 352 #define WM8850_BITS_TO_FREQ(r, m, d1, d2) \ 353 (r * ((m + 1) * 2) / ((d1+1) * (1 << d2))) 355 #define WM8850_BITS_TO_VAL(m, d1, d2) \ 356 ((((m / 2) - 1) << 16) | ((d1 - 1) << 8) | d2)
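The WM8650/WM8750/WM8850 macros pack the PLL multiplier m and the dividers d1, d2 into one register value and recover the output rate from the reference clock, e.g. ref * m / (d1 * 2^d2) on the WM8650. A quick sketch of the WM8650 pair of conversions; the 25 MHz reference and the m/d1/d2 field values are assumptions for illustration, not board data:

#include <stdio.h>

/* WM8650 PLL bitfield helpers, modelled on the macros quoted above. */
#define WM8650_BITS_TO_FREQ(r, m, d1, d2) \
    ((r) * (m) / ((d1) * (1UL << (d2))))

#define WM8650_BITS_TO_VAL(m, d1, d2) \
    (((d2) << 13) | ((d1) << 10) | ((m) & 0x3FF))

int main(void)
{
    unsigned long ref = 25000000;           /* assumed 25 MHz reference */
    unsigned int m = 96, d1 = 2, d2 = 1;    /* assumed field values */

    printf("rate = %lu Hz, reg = 0x%04x\n",
           WM8650_BITS_TO_FREQ(ref, m, d1, d2),
           WM8650_BITS_TO_VAL(m, d1, d2));
    return 0;
}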
|
/linux-4.1.27/arch/mips/alchemy/devboards/ |
H A D | db1000.c | 498 int c0, c1, d0, d1, s0, s1, flashsize = 32, twosocks = 1; db1000_dev_setup() local 506 d1 = AU1500_GPIO3_INT; db1000_dev_setup() 513 d1 = AU1100_GPIO3_INT; db1000_dev_setup() 548 d1 = AU1000_GPIO3_INT; db1000_dev_setup() 599 irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH); db1000_dev_setup() 610 c1, d1, /*s1*/0, 0, 1); db1000_dev_setup()
|
/linux-4.1.27/arch/sh/lib/ |
H A D | udiv_qrnnd.S | 41 /* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */ 42 /* n1 < d, but n1 might be larger than d1. */
|
/linux-4.1.27/arch/powerpc/crypto/ |
H A D | aes-tab-4k.S | 59 .long R(d1, e5, e5, 34), R(f9, f1, f1, 08) 78 .long R(a6, 53, 53, f5), R(b9, d1, d1, 68) 111 .long R(9e, 4f, 4f, d1), R(a3, dc, dc, 7f) 204 .long R(d1, 34, 62, 1f), R(c4, a6, fe, 8a) 224 .long R(9b, 5b, 54, d1), R(24, 36, 2e, 3a) 249 .long R(87, 49, 4e, c7), R(d9, 38, d1, c1) 275 .long R(9e, d1, b5, e3), R(4c, 6a, 88, 1b)
|
H A D | aes-spe-modes.S | 138 #define GF128_MUL(d0, d1, d2, d3, t0) \ 144 rlwimi d2,d1,0,0,0; \ 146 rlwimi d1,d0,0,0,0; \ 148 rotlwi d1,d1,1; \ 151 #define START_KEY(d0, d1, d2, d3) \ 158 xor rD1,d1,rW1; \
|
/linux-4.1.27/drivers/ipack/devices/ |
H A D | scc2698.h | 27 u8 d1, sr; /* Status register */ member in struct:scc2698_channel::__anon5134 34 u8 d1, csr; /* Clock select register */ member in struct:scc2698_channel::__anon5135 52 u8 d1, sra; /* Status register (a) */ member in struct:scc2698_block::__anon5136 70 u8 d1, csra; /* Clock select register (a) */ member in struct:scc2698_block::__anon5137
|
/linux-4.1.27/arch/x86/math-emu/ |
H A D | reg_add_sub.c | 249 FPU_REG const *d1, *d2; FPU_sub() local 251 d1 = b; FPU_sub() 254 d1 = a; FPU_sub() 258 return real_2op_NaN(b, tagb, deststnr, d1); FPU_sub()
|
/linux-4.1.27/arch/mn10300/include/uapi/asm/ |
H A D | sigcontext.h | 22 unsigned long d1; member in struct:sigcontext
|
H A D | ptrace.h | 70 unsigned long d1; /* syscall arg 2 */ member in struct:pt_regs
|
/linux-4.1.27/arch/m68k/include/uapi/asm/ |
H A D | ptrace.h | 30 long d1; member in struct:pt_regs
|
/linux-4.1.27/drivers/parisc/ |
H A D | iosapic.c | 621 u32 d0, d1; iosapic_mask_irq() local 624 iosapic_rd_irt_entry(vi, &d0, &d1); iosapic_mask_irq() 626 iosapic_wr_irt_entry(vi, d0, d1); iosapic_mask_irq() 633 u32 d0, d1; iosapic_unmask_irq() local 638 iosapic_set_irt_data(vi, &d0, &d1); iosapic_unmask_irq() 639 iosapic_wr_irt_entry(vi, d0, d1); iosapic_unmask_irq() 655 d1 = iosapic_read(isp->addr, d0); iosapic_unmask_irq() 656 printk(" %x", d1); iosapic_unmask_irq() 686 u32 d0, d1, dummy_d0; iosapic_set_affinity_irq() local 698 /* d1 contains the destination CPU, so only want to set that iosapic_set_affinity_irq() 700 iosapic_rd_irt_entry(vi, &d0, &d1); iosapic_set_affinity_irq() 701 iosapic_set_irt_data(vi, &dummy_d0, &d1); iosapic_set_affinity_irq() 702 iosapic_wr_irt_entry(vi, d0, d1); iosapic_set_affinity_irq()
|
/linux-4.1.27/drivers/mtd/maps/ |
H A D | bfin-async-flash.c | 97 static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs) bfin_flash_write() argument 102 d = d1.x[0]; bfin_flash_write()
|
H A D | gpio-addr-flash.c | 131 static void gf_write(struct map_info *map, map_word d1, unsigned long ofs) gf_write() argument 138 d = d1.x[0]; gf_write()
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | process_32.c | 72 unsigned long d0, d1, d2, d3, d6, d7; __show_regs() local 109 get_debugreg(d1, 1); __show_regs() 116 if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && __show_regs() 121 d0, d1, d2, d3); __show_regs()
|
H A D | process_64.c | 62 unsigned long d0, d1, d2, d3, d6, d7; __show_regs() local 107 get_debugreg(d1, 1); __show_regs() 114 if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && __show_regs() 118 printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); __show_regs()
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
H A D | ar9003_aic.c | 116 (0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31, 00 dB: 4'd0, 5'd31 */ ar9003_aic_gain_table() 122 (0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */ ar9003_aic_gain_table() 128 (0xf & 0x1f); /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */ ar9003_aic_gain_table() 134 (0x7 & 0x1f); /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */ ar9003_aic_gain_table() 146 (0x1 & 0x1f); /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */ ar9003_aic_gain_table()
|
/linux-4.1.27/drivers/platform/x86/ |
H A D | samsung-laptop.c | 60 u32 d1; member in struct:sabi_data::__anon8306::__anon8307 298 * d0, d1, d2, d3 - data fields 307 * echo 0 > d1 413 command, in->d0, in->d1, in->d2, in->d3); sabi_command() 427 writel(in->d1, samsung->sabi_iface + SABI_IFACE_DATA + 4); sabi_command() 456 out->d1 = readl(samsung->sabi_iface + SABI_IFACE_DATA + 4); sabi_command() 463 out->d0, out->d1, out->d2, out->d3); sabi_command() 475 struct sabi_data in = { { { .d0 = 0, .d1 = 0, .d2 = 0, .d3 = 0 } } }; sabi_set_commandb() 1263 sdata->d0, sdata->d1, sdata->d2, sdata->d3); show_call() 1274 sdata->d0, sdata->d1, sdata->d2, sdata->d3); show_call() 1325 dent = debugfs_create_u32("d1", S_IRUGO | S_IWUSR, samsung->debug.root, samsung_debugfs_init() 1326 &samsung->debug.data.d1); samsung_debugfs_init()
|
/linux-4.1.27/drivers/hwmon/ |
H A D | sht15.c | 69 * @d1: see data sheet 73 int d1; member in struct:sht15_temppair 630 int d1 = temppoints[0].d1; sht15_calc_temp() local 637 d1 = (data->supply_uv - temppoints[i - 1].vdd) sht15_calc_temp() 638 * (temppoints[i].d1 - temppoints[i - 1].d1) sht15_calc_temp() 640 + temppoints[i - 1].d1; sht15_calc_temp() 644 return data->val_temp * d2 + d1; sht15_calc_temp()
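sht15_calc_temp() linearly interpolates the datasheet constant d1 between table entries keyed by the measured supply voltage, then converts the raw reading as val_temp * d2 + d1. A stand-alone sketch of that interpolation; the table entries, the d2 slope and the raw reading below are placeholders, not the SHT15 datasheet values:

#include <stdio.h>

struct temppair {
    int vdd;    /* supply voltage, microvolts */
    int d1;     /* temperature offset at that supply, milli-degC */
};

/* Placeholder table: the real values come from the SHT15 datasheet. */
static const struct temppair temppoints[] = {
    { 2500000, -39400 },
    { 3500000, -39700 },
    { 5000000, -40100 },
};

/* Interpolate d1 for the measured supply voltage, as the driver does. */
static int interpolate_d1(int supply_uv)
{
    int d1 = temppoints[0].d1;

    for (unsigned i = 1; i < sizeof(temppoints) / sizeof(temppoints[0]); i++) {
        if (supply_uv <= temppoints[i].vdd) {
            d1 = (supply_uv - temppoints[i - 1].vdd)
                    * (temppoints[i].d1 - temppoints[i - 1].d1)
                    / (temppoints[i].vdd - temppoints[i - 1].vdd)
                    + temppoints[i - 1].d1;
            break;
        }
    }
    return d1;
}

int main(void)
{
    int supply_uv = 3300000;    /* measured supply */
    int d2 = 10;                /* placeholder slope, milli-degC per count */
    int val_temp = 6400;        /* placeholder raw reading */
    int d1 = interpolate_d1(supply_uv);

    printf("d1 = %d, temperature = %d milli-degC\n", d1, val_temp * d2 + d1);
    return 0;
}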
|
/linux-4.1.27/drivers/media/pci/mantis/ |
H A D | mantis_ioc.c | 52 dprintk(MANTIS_ERROR, 1, "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >", read_eeprom_bytes()
|
H A D | mantis_core.c | 50 "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >", read_eeprom_byte()
|
/linux-4.1.27/drivers/net/wireless/ath/wil6210/ |
H A D | txrx.h | 339 u32 d1; member in struct:vring_rx_mac 463 return WIL_GET_BITS(d->mac.d1, 8, 9); wil_rxdesc_ds_bits() 468 return WIL_GET_BITS(d->mac.d1, 21, 24); wil_rxdesc_mcs() 473 return WIL_GET_BITS(d->mac.d1, 13, 14); wil_rxdesc_mcast()
|
/linux-4.1.27/arch/x86/crypto/sha-mb/ |
H A D | sha1_x8_avx2.S | 75 # r3 = {d7 d6 d5 d4 d3 d2 d1 d0} 83 # r1 = {h1 g1 f1 e1 d1 c1 b1 a1} 96 vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} 98 vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
|
/linux-4.1.27/arch/c6x/lib/ |
H A D | strasgi.S | 29 || sub .d1 A6, 24, A6
|
/linux-4.1.27/arch/arm/mach-sa1100/ |
H A D | cerf.c | 56 .name = "cerf:d1",
|
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/ |
H A D | cl_io.c | 246 const struct cl_lock_descr *d1) cl_lock_descr_sort() 248 return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?: cl_lock_descr_sort() 249 __diff_normalize(d0->cld_start, d1->cld_start); cl_lock_descr_sort() 253 const struct cl_lock_descr *d1) cl_lock_descr_cmp() 257 ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)); cl_lock_descr_cmp() 260 if (d0->cld_end < d1->cld_start) cl_lock_descr_cmp() 268 const struct cl_lock_descr *d1) cl_lock_descr_merge() 270 d0->cld_start = min(d0->cld_start, d1->cld_start); cl_lock_descr_merge() 271 d0->cld_end = max(d0->cld_end, d1->cld_end); cl_lock_descr_merge() 273 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE) cl_lock_descr_merge() 276 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP) cl_lock_descr_merge() 245 cl_lock_descr_sort(const struct cl_lock_descr *d0, const struct cl_lock_descr *d1) cl_lock_descr_sort() argument 252 cl_lock_descr_cmp(const struct cl_lock_descr *d0, const struct cl_lock_descr *d1) cl_lock_descr_cmp() argument 267 cl_lock_descr_merge(struct cl_lock_descr *d0, const struct cl_lock_descr *d1) cl_lock_descr_merge() argument
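cl_lock_descr_merge() widens d0 so it also covers d1: the merged extent runs from the smaller cld_start to the larger cld_end, and the lock mode is promoted when the other descriptor holds a write or group lock. A stand-alone sketch of the same merge on a simplified descriptor (struct and field names abbreviated here, not the Lustre ones):

#include <stdio.h>

enum lock_mode { LM_READ, LM_WRITE, LM_GROUP };

struct lock_descr {
    unsigned long start;    /* first page of the extent */
    unsigned long end;      /* last page of the extent  */
    enum lock_mode mode;
};

/* Widen d0 so it also covers d1, promoting the mode if d1 is stronger. */
static void descr_merge(struct lock_descr *d0, const struct lock_descr *d1)
{
    if (d1->start < d0->start)
        d0->start = d1->start;
    if (d1->end > d0->end)
        d0->end = d1->end;

    if (d1->mode == LM_WRITE && d0->mode != LM_WRITE)
        d0->mode = LM_WRITE;
    if (d1->mode == LM_GROUP && d0->mode != LM_GROUP)
        d0->mode = LM_GROUP;
}

int main(void)
{
    struct lock_descr a = { 0, 15, LM_READ };
    struct lock_descr b = { 8, 63, LM_WRITE };

    descr_merge(&a, &b);
    printf("merged extent [%lu, %lu], mode %d\n", a.start, a.end, a.mode);
    return 0;
}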
|
/linux-4.1.27/fs/xfs/ |
H A D | xfs_dquot.c | 1057 xfs_dqlock2( xfs_dquot_t *d1, xfs_dquot_t *d2) xfs_dqlock2() argument 1058 xfs_dquot_t *d1, xfs_dqlock2() 1061 if (d1 && d2) { xfs_dqlock2() 1062 ASSERT(d1 != d2); xfs_dqlock2() 1063 if (be32_to_cpu(d1->q_core.d_id) > xfs_dqlock2() 1066 mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); xfs_dqlock2() 1068 mutex_lock(&d1->q_qlock); xfs_dqlock2() 1071 } else if (d1) { xfs_dqlock2() 1072 mutex_lock(&d1->q_qlock); xfs_dqlock2()
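xfs_dqlock2() takes the two dquot mutexes in a fixed order based on d_id, with the second acquisition annotated XFS_QLOCK_NESTED for lockdep, so two tasks locking the same pair cannot deadlock against each other. A generic pthread sketch of the same lock-ordering idea, independent of XFS (lower id first is assumed here):

#include <pthread.h>
#include <stdio.h>

struct dquot {
    unsigned int id;
    pthread_mutex_t lock;
};

/* Lock two records in ascending id order; either pointer may be NULL. */
static void dqlock2(struct dquot *d1, struct dquot *d2)
{
    if (d1 && d2) {
        if (d1->id > d2->id) {
            pthread_mutex_lock(&d2->lock);
            pthread_mutex_lock(&d1->lock);
        } else {
            pthread_mutex_lock(&d1->lock);
            pthread_mutex_lock(&d2->lock);
        }
    } else if (d1) {
        pthread_mutex_lock(&d1->lock);
    } else if (d2) {
        pthread_mutex_lock(&d2->lock);
    }
}

int main(void)
{
    struct dquot a = { 1, PTHREAD_MUTEX_INITIALIZER };
    struct dquot b = { 2, PTHREAD_MUTEX_INITIALIZER };

    dqlock2(&a, &b);            /* locks a (id 1) first, then b */
    printf("both locked in id order\n");
    pthread_mutex_unlock(&b.lock);
    pthread_mutex_unlock(&a.lock);
    return 0;
}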
|
/linux-4.1.27/arch/frv/include/asm/ |
H A D | math-emu.h | 244 movem.l %d0/%d1/%a0/%a1,-(%sp) 260 movem.l (%sp)+,%d0/%d1/%a0/%a1
|
/linux-4.1.27/arch/m68k/mac/ |
H A D | macints.c | 323 printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", mac_nmi_handler() 324 fp->d0, fp->d1, fp->d2, fp->d3); mac_nmi_handler()
|
/linux-4.1.27/drivers/usb/misc/sisusbvga/ |
H A D | sisusb_init.c | 741 unsigned short d1, d2, d3; SiS_WriteDAC() local 745 d1 = dh; SiS_WriteDAC() 750 d1 = ah; SiS_WriteDAC() 755 d1 = al; SiS_WriteDAC() 759 SiS_SetRegByte(SiS_Pr, DACData, (d1 << shiftflag)); SiS_WriteDAC()
|
/linux-4.1.27/drivers/net/fddi/skfp/ |
H A D | hwmtm.c | 329 union s_fp_descr volatile *d1 ; init_descr_ring() local 334 for (i=count-1, d1=start; i ; i--) { init_descr_ring() 335 d2 = d1 ; init_descr_ring() 336 d1++ ; /* descr is owned by the host */ init_descr_ring() 338 d2->r.rxd_next = &d1->r ; init_descr_ring() 339 phys = mac_drv_virt2phys(smc,(void *)d1) ; init_descr_ring() 342 DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ; init_descr_ring() 343 d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; init_descr_ring() 344 d1->r.rxd_next = &start->r ; init_descr_ring() 346 d1->r.rxd_nrdadr = cpu_to_le32(phys) ; init_descr_ring() 348 for (i=count, d1=start; i ; i--) { init_descr_ring() 349 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; init_descr_ring() 350 d1++; init_descr_ring()
|
/linux-4.1.27/drivers/media/platform/ti-vpe/ |
H A D | csc.c | 25 * a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
|
/linux-4.1.27/arch/metag/lib/ |
H A D | checksum.c | 23 * specify d0 and d1 as scratch registers. Letting gcc
|
/linux-4.1.27/arch/frv/lib/ |
H A D | checksum.c | 22 * specify d0 and d1 as scratch registers. Letting gcc choose these
|
/linux-4.1.27/arch/x86/crypto/ |
H A D | camellia-aesni-avx-asm_64.S | 431 #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \ 443 transpose_4x4(d0, d1, d2, d3, a0, a1); \ 459 vpshufb a0, d1, d1; \ 468 transpose_4x4(a1, b1, c1, d1, d2, d3); \
|
/linux-4.1.27/arch/powerpc/perf/ |
H A D | hv-24x7.c | 502 static int memord(const void *d1, size_t s1, const void *d2, size_t s2) memord() argument 509 return memcmp(d1, d2, s1); memord() 512 static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2, ev_uniq_ord() argument 519 if (d1 > d2) ev_uniq_ord() 521 if (d2 > d1) ev_uniq_ord()
|
/linux-4.1.27/drivers/i2c/busses/ |
H A D | i2c-diolan-u2c.c | 187 static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1, diolan_usb_cmd_data2() argument 191 dev->obuffer[dev->olen++] = d1; diolan_usb_cmd_data2()
|
/linux-4.1.27/drivers/pinctrl/mvebu/ |
H A D | pinctrl-armada-370.c | 110 MPP_FUNCTION(0x3, "sd0", "d1"), 285 MPP_FUNCTION(0x3, "sd0", "d1"),
|
H A D | pinctrl-armada-38x.c | 254 MPP_VAR_FUNCTION(4, "sd0", "d1", V_88F6810_PLUS), 372 MPP_VAR_FUNCTION(5, "sd0", "d1", V_88F6810_PLUS)),
|
H A D | pinctrl-armada-39x.c | 228 MPP_VAR_FUNCTION(4, "sd", "d1", V_88F6920_PLUS), 353 MPP_VAR_FUNCTION(5, "sd", "d1", V_88F6920_PLUS),
|
H A D | pinctrl-armada-xp.c | 62 MPP_VAR_FUNCTION(0x4, "lcd", "d1", V_MV78230_PLUS)), 212 MPP_VAR_FUNCTION(0x1, "sd0", "d1", V_MV78230_PLUS),
|
H A D | pinctrl-kirkwood.c | 144 MPP_VAR_FUNCTION(0x1, "sdio", "d1", V(1, 1, 1, 1, 1, 0)), 187 MPP_VAR_FUNCTION(0xb, "lcd", "d1", V(0, 0, 0, 0, 1, 0))),
|
/linux-4.1.27/drivers/acpi/acpica/ |
H A D | acmacros.h | 411 #define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ 415 (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)
|
/linux-4.1.27/drivers/pinctrl/sh-pfc/ |
H A D | pfc-r8a7778.c | 1449 #define MMC_PFC_DAT4(name, d0, d1, d2, d3) SH_PFC_MUX4(name, d0, d1, d2, d3) 1450 #define MMC_PFC_DAT8(name, d0, d1, d2, d3, d4, d5, d6, d7) \ 1451 SH_PFC_MUX8(name, d0, d1, d2, d3, d4, d5, d6, d7) 1552 #define SDHI_PFC_DAT4(name, d0, d1, d2, d3) SH_PFC_MUX4(name, d0, d1, d2, d3) 1690 #define VIN_PFC_DAT8(name, d0, d1, d2, d3, d4, d5, d6, d7) \ 1691 SH_PFC_MUX8(name, d0, d1, d2, d3, d4, d5, d6, d7)
|
/linux-4.1.27/arch/arm/crypto/ |
H A D | aes-ce-core.S | 338 vld1.8 {d1}, [r1, :64] 339 veor d0, d0, d1
|
/linux-4.1.27/drivers/isdn/hardware/eicon/ |
H A D | kst_ifc.h | 171 diva_prot_statistics_t d1; member in struct:_diva_ifc_statistics
|
/linux-4.1.27/drivers/input/joystick/ |
H A D | interact.c | 242 printk(KERN_WARNING "interact.c: Unknown joystick on %s. [len %d d0 %08x d1 %08x i2 %08x]\n", interact_connect()
|
/linux-4.1.27/drivers/media/dvb-frontends/drx39xyj/ |
H A D | drx_dap_fasi.h | 148 * <S> <devW> d0 d1 [d2 d3] <P>
|
/linux-4.1.27/arch/cris/include/arch-v32/mach-a3/mach/hwregs/ |
H A D | pinmux_defs.h | 270 unsigned int d1 : 1; member in struct:__anon1173
|