1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
/*
 * Basic limb configuration for the soft-fp library on this port:
 * fraction words ("limbs") are 32-bit unsigned ints.
 */
#define _FP_W_TYPE_SIZE 32
#define _FP_W_TYPE unsigned int
#define _FP_WS_TYPE signed int
#define _FP_I_TYPE int
35
/*
 * Half-word helpers for the long-division macro (udiv_qrnnd) below:
 * __ll_B is 2^(W_TYPE_SIZE/2), i.e. the base in which the schoolbook
 * division operates; __ll_lowpart/__ll_highpart split a word into its
 * low and high halves.
 *
 * NOTE(review): UWtype and W_TYPE_SIZE are not defined in this file —
 * presumably supplied by the soft-fp headers that include this one;
 * confirm they match _FP_W_TYPE/_FP_W_TYPE_SIZE above.
 */
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
/*
 * Select the soft-fp multiply/divide back-ends for 32-bit limbs:
 * single precision fits one limb (1_wide / 1_udiv_norm), double
 * precision needs two limbs (2_wide / 2_udiv).  The multiply meat
 * is built on the umul_ppmm primitive defined later in this file.
 */
#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)

#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
87
88
89
90
/*
 * Default (generated) NaN: all fraction bits set, sign bit clear.
 * Multi-limb formats list one initializer per limb (D: 2 limbs,
 * Q: 4 limbs); the trailing -1 entries are all-ones lower limbs.
 */
#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
#define _FP_NANSIGN_S 0
#define _FP_NANSIGN_D 0
#define _FP_NANSIGN_Q 0

/* Propagate the payload of an input NaN instead of always
   substituting the default NaN (see _FP_CHOOSENAN below). */
#define _FP_KEEPNANFRACP 1
99
/*
 * Map the soft-fp exception flags onto the hardware status register.
 *
 * On BookE e500 cores with SPE, the flags live in SPEFSCR (layout given
 * by the bit numbers below, counted from bit 0 = LSB); everywhere else
 * they live in the classic FPSCR, whose bits are traditionally numbered
 * from the MSB (hence the "31 - n" form).
 */
#ifdef FP_EX_BOOKE_E500_SPE
#define FP_EX_INEXACT (1 << 21)
#define FP_EX_INVALID (1 << 20)
#define FP_EX_DIVZERO (1 << 19)
#define FP_EX_UNDERFLOW (1 << 18)
#define FP_EX_OVERFLOW (1 << 17)
/* SPE: always compute and store a result, even when trapping. */
#define FP_INHIBIT_RESULTS 0

/* Status register for the current task (SPE variant). */
#define __FPU_FPSCR (current->thread.spefscr)
/* 5-bit mask of exceptions the task has enabled; SPEFSCR keeps the
   enable bits starting at bit 2. */
#define __FPU_ENABLED_EXC \
({ \
(__FPU_FPSCR >> 2) & 0x1f; \
})
#else



#define FP_EX_INVALID (1 << (31 - 2))
/* Sub-reasons for the invalid-operation exception, mapped to the
   corresponding FPSCR VX* bits defined at the bottom of this file. */
#define FP_EX_INVALID_SNAN EFLAG_VXSNAN
#define FP_EX_INVALID_ISI EFLAG_VXISI
#define FP_EX_INVALID_IDI EFLAG_VXIDI
#define FP_EX_INVALID_ZDZ EFLAG_VXZDZ
#define FP_EX_INVALID_IMZ EFLAG_VXIMZ
#define FP_EX_OVERFLOW (1 << (31 - 3))
#define FP_EX_UNDERFLOW (1 << (31 - 4))
#define FP_EX_DIVZERO (1 << (31 - 5))
#define FP_EX_INEXACT (1 << (31 - 6))

/* Status register for the current task (classic FPU variant). */
#define __FPU_FPSCR (current->thread.fp_state.fpscr)



/* 5-bit mask of enabled exceptions; classic FPSCR keeps the enable
   bits (VE..XE) starting at bit 3. */
#define __FPU_ENABLED_EXC \
({ \
(__FPU_FPSCR >> 3) & 0x1f; \
})

#endif
139
140
141
142
143
/*
 * _FP_CHOOSENAN: pick which input NaN's payload to propagate when an
 * operation has NaN operands.  Prefer Y's payload unless Y is a
 * signalling NaN (quiet bit clear) while X is quiet — then take X.
 * The result is always classified as a NaN; soft-fp quiets it later.
 */
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
{ \
R##_s = X##_s; \
_FP_FRAC_COPY_##wc(R,X); \
} \
else \
{ \
R##_s = Y##_s; \
_FP_FRAC_COPY_##wc(R,Y); \
} \
R##_c = FP_CLS_NAN; \
} while (0)
159
160
161 #include <linux/kernel.h>
162 #include <linux/sched.h>
163
/* True if any of the raised exception bits in 'bits' is also enabled
   (i.e. should trap) for the current task. */
#define __FPU_TRAP_P(bits) \
((__FPU_ENABLED_EXC & (bits)) != 0)
166
/*
 * Pack a canonicalized single-precision value X into 'val'.
 * The store is skipped when an exception was raised AND that exception
 * is enabled (would trap); the raised-exception mask is the macro's
 * result value either way.
 */
#define __FP_PACK_S(val,X) \
({ int __exc = _FP_PACK_CANONICAL(S,1,X); \
if(!__exc || !__FPU_TRAP_P(__exc)) \
_FP_PACK_RAW_1_P(S,val,X); \
__exc; \
})
173
/*
 * Pack a canonicalized double-precision value X into 'val'.
 * Unlike __FP_PACK_S this is a statement, not an expression: raised
 * exceptions are read from FP_CUR_EXCEPTIONS rather than returned.
 * The store is skipped when a raised exception is enabled (would trap).
 */
#define __FP_PACK_D(val,X) \
do { \
_FP_PACK_CANONICAL(D, 2, X); \
if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \
_FP_PACK_RAW_2_P(D, val, X); \
} while (0)
180
/*
 * Pack X into 'val' as a double that is first rounded to single
 * precision (used by the single-precision arithmetic instructions,
 * which operate on double-format registers).  The value makes a
 * round trip D -> S -> D so it picks up single-precision rounding
 * and range, then is stored only if no enabled exception was raised
 * at either step.
 */
#define __FP_PACK_DS(val,X) \
do { \
FP_DECL_S(__X); \
FP_CONV(S, D, 1, 2, __X, X); \
_FP_PACK_CANONICAL(S, 1, __X); \
if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) { \
_FP_UNPACK_CANONICAL(S, 1, __X); \
FP_CONV(D, S, 2, 1, X, __X); \
_FP_PACK_CANONICAL(D, 2, X); \
if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \
_FP_PACK_RAW_2_P(D, val, X); \
} \
} while (0)
194
195
/* Current rounding mode: the low two bits (RN field) of the status
   register, matching the FP_RND_* encoding soft-fp expects. */
#define FP_ROUNDMODE \
({ \
__FPU_FPSCR & 0x3; \
})
200
201
202
203
204
205 #include <linux/types.h>
206 #include <asm/byteorder.h>
207
208
209
210
211
212
213
214
215
/*
 * add_ssaaaa: 64-bit add of (ah,al) + (bh,bl) -> (sh,sl) built from
 * 32-bit PowerPC instructions: add-with-carry-out on the low words,
 * then fold the carry into the high words.  Special-cases a constant
 * high operand of 0 (addze: add zero extended with carry) or all-ones
 * (addme: add minus one extended with carry) to save a register.
 * "%I4"/"%I5" lets the assembler pick the immediate form (addic) when
 * bl is a constant matching the "I" constraint.
 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (bh) && (bh) == 0) \
__asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
__asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
else \
__asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \
: "=r" (sh), "=&r" (sl) \
: "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
} while (0)
229
230
231
232
233
234
235
236
237
238
/*
 * sub_ddmmss: 64-bit subtract (ah,al) - (bh,bl) -> (sh,sl) using
 * subtract-from with borrow on the low words, then propagating the
 * borrow through the high words (subfe, or the addze/addme/subfze/
 * subfme shortcuts when either high word is a compile-time 0 or
 * all-ones).  Note subf computes rB - rA, hence the swapped operand
 * order in the templates.
 */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (ah) && (ah) == 0) \
__asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
__asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (bh) && (bh) == 0) \
__asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
__asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
else \
__asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \
: "=r" (sh), "=&r" (sl) \
: "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
} while (0)
258
259
260
261
262
263
264
/*
 * umul_ppmm: 32x32 -> 64-bit unsigned multiply.  High word via the
 * PowerPC mulhwu instruction, low word via an ordinary C multiply of
 * the same operands.
 *
 * Fix: the asm previously re-read the raw macro arguments (m0)/(m1)
 * even though they had already been captured in __m0/__m1, so an
 * argument expression with side effects was evaluated twice.  Use the
 * cached locals in the asm inputs as well.
 */
#define umul_ppmm(ph, pl, m0, m1) \
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
(pl) = __m0 * __m1; \
} while (0)
271
272
273
274
275
276
277
278
279
/*
 * udiv_qrnnd: divide the two-word number (n1,n0) by d, producing a
 * one-word quotient q and remainder r.  This is the generic C
 * "schoolbook" algorithm working in base __ll_B (half words): two
 * estimate/correct steps, one per quotient half.  Each estimate
 * __q1/__q0 can be at most 2 too high; the nested ifs apply up to two
 * corrections, with the __r1 >= (d) test detecting wrap-around
 * (carry) of the unsigned addition.
 *
 * Precondition (see UDIV_NEEDS_NORMALIZATION below): d must be
 * normalized, i.e. its most significant bit set, so __d1 is nonzero
 * and the half-word division estimates stay in range.
 */
#define udiv_qrnnd(q, r, n1, n0, d) \
do { \
UWtype __d1, __d0, __q1, __q0; \
UWtype __r1, __r0, __m; \
__d1 = __ll_highpart (d); \
__d0 = __ll_lowpart (d); \
\
__r1 = (n1) % __d1; \
__q1 = (n1) / __d1; \
__m = (UWtype) __q1 * __d0; \
__r1 = __r1 * __ll_B | __ll_highpart (n0); \
if (__r1 < __m) \
{ \
__q1--, __r1 += (d); \
if (__r1 >= (d)) \
if (__r1 < __m) \
__q1--, __r1 += (d); \
} \
__r1 -= __m; \
\
__r0 = __r1 % __d1; \
__q0 = __r1 / __d1; \
__m = (UWtype) __q0 * __d0; \
__r0 = __r0 * __ll_B | __ll_lowpart (n0); \
if (__r0 < __m) \
{ \
__q0--, __r0 += (d); \
if (__r0 >= (d)) \
if (__r0 < __m) \
__q0--, __r0 += (d); \
} \
__r0 -= __m; \
\
(q) = (UWtype) __q1 * __ll_B | __q0; \
(r) = __r0; \
} while (0)

/* Callers of udiv_qrnnd must pre-shift the divisor so its top bit is
   set (and shift the result back), as required by the macro above. */
#define UDIV_NEEDS_NORMALIZATION 1
318
/* soft-fp calls abort() on can't-happen conditions; in the kernel
   there is no abort, so bail out of the emulation function instead.
   NOTE(review): only valid inside functions returning int. */
#define abort() \
return 0
321
/* soft-fp tests __BYTE_ORDER; derive it from the kernel's endianness
   macros (asm/byteorder.h defines exactly one of __BIG_ENDIAN /
   __LITTLE_ENDIAN). */
#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif
327
328
/*
 * FPSCR exception status bits, using the PowerPC big-endian bit
 * numbering convention (bit 0 = MSB, hence "31 - n").  The first
 * group are the summary exception flags; the VX* group are the
 * individual invalid-operation causes.
 */
#define EFLAG_INVALID (1 << (31 - 2))
#define EFLAG_OVERFLOW (1 << (31 - 3))
#define EFLAG_UNDERFLOW (1 << (31 - 4))
#define EFLAG_DIVZERO (1 << (31 - 5))
#define EFLAG_INEXACT (1 << (31 - 6))

#define EFLAG_VXSNAN (1 << (31 - 7))   /* signalling NaN operand */
#define EFLAG_VXISI (1 << (31 - 8))    /* inf - inf */
#define EFLAG_VXIDI (1 << (31 - 9))    /* inf / inf */
#define EFLAG_VXZDZ (1 << (31 - 10))   /* 0 / 0 */
#define EFLAG_VXIMZ (1 << (31 - 11))   /* inf * 0 */
#define EFLAG_VXVC (1 << (31 - 12))    /* invalid compare */
#define EFLAG_VXSOFT (1 << (31 - 21))  /* software request */
#define EFLAG_VXSQRT (1 << (31 - 22))  /* sqrt of negative */
#define EFLAG_VXCVI (1 << (31 - 23))   /* invalid integer convert */