/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12-byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12-byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (number of bits).             |
 |                                                                           |
 +---------------------------------------------------------------------------*/
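
/*
 * Illustrative sketch only (not part of the build): a C-level view of the
 * 96-bit quantity these routines manipulate.  The field names and layout
 * below are an assumption that mirrors the offsets used in the code
 * ((%esi), 4(%esi) and 8(%esi)); the authoritative definition of Xsig is
 * the one in fpu_emu.h.
 *
 *	struct xsig_sketch {		// hypothetical mirror of Xsig
 *		unsigned long lsw;	// least significant 32 bits
 *		unsigned long midw;	// middle 32 bits
 *		unsigned long msw;	// most significant 32 bits
 *	};
 *
 * round_Xsig() shifts the value left until bit 31 of msw is set, rounds the
 * upper 64 bits up by one when bit 31 of lsw is set, and returns the
 * accumulated shift; norm_Xsig() performs only the normalization step.
 */
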
	.file	"round_Xsig.S"

#include "fpu_emu.h"


.text
ENTRY(round_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve space for the shift accumulator at -4(%ebp) */
	pushl	%ebx		/* Save %ebx */
	pushl	%esi		/* Save %esi */

	movl	PARAM1,%esi	/* %esi -> the Xsig argument */

	movl	8(%esi),%edx	/* most significant word */
	movl	4(%esi),%ebx	/* middle word */
	movl	(%esi),%eax	/* least significant word */

	movl	$0,-4(%ebp)	/* accumulated shift, in bits */

	orl	%edx,%edx	/* ms bits */
	js	L_round		/* Already normalized */
	jnz	L_shift_1	/* Shift left 1 - 31 bits */

	movl	%ebx,%edx	/* ms word is zero: shift left by one */
	movl	%eax,%ebx	/* whole word                         */
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)	/* and record the 32 bit shift */

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - (index of top set bit) */
	subl	%ecx,-4(%ebp)
	shld	%cl,%ebx,%edx	/* 96 bit left shift of %edx:%ebx:%eax */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

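/*
 * Rounding step: if the top bit of the low word is set, the upper 64 bits
 * (%edx:%ebx) are rounded up by one.  If that carry propagates all the way
 * out of %edx, the value has effectively become 2^64, so %edx is reloaded
 * with 0x80000000 and the shift count is adjusted by one.
 */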
L_round:
	testl	$0x80000000,%eax
	jz	L_exit

	addl	$1,%ebx
	adcl	$0,%edx
	jnz	L_exit		/* No overflow of the ms word */

	movl	$0x80000000,%edx
	incl	-4(%ebp)

L_exit:
	movl	%edx,8(%esi)	/* Write the result back */
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* Return the shift */

	popl	%esi
	popl	%ebx
	leave
	ret



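/*
 * norm_Xsig: normalization only, no rounding.  The quantity is shifted left
 * until bit 31 of the most significant word is set, but never by more than
 * two whole words (64 bits); if it is still not normalized after that, it
 * is written back as-is.
 */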
ENTRY(norm_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve space for the shift accumulator at -4(%ebp) */
	pushl	%ebx		/* Save %ebx */
	pushl	%esi		/* Save %esi */

	movl	PARAM1,%esi	/* %esi -> the Xsig argument */

	movl	8(%esi),%edx	/* most significant word */
	movl	4(%esi),%ebx	/* middle word */
	movl	(%esi),%eax	/* least significant word */

	movl	$0,-4(%ebp)	/* accumulated shift, in bits */

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Already normalized */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	movl	%ebx,%edx	/* ms word is zero: shift left by one */
	movl	%eax,%ebx	/* whole word                         */
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Normalized now */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	movl	%ebx,%edx	/* Still zero: shift by a second word */
	movl	%eax,%ebx
	xorl	%eax,%eax
	addl	$-32,-4(%ebp)
	jmp	L_n_exit	/* Might not be normalized,
				   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - (index of top set bit) */
	subl	%ecx,-4(%ebp)
	shld	%cl,%ebx,%edx	/* 96 bit left shift of %edx:%ebx:%eax */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_n_exit:
	movl	%edx,8(%esi)	/* Write the result back */
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* Return the shift */

	popl	%esi
	popl	%ebx
	leave
	ret
