This source file includes the following definitions:
- align_mod
- mult_sh_align_mod
- check_mult_sh
- do_daddi_ov
- check_daddi
- check_daddiu
- check_bugs64_early
- check_bugs64
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>

#include <asm/bugs.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>

static char bug64hit[] __initdata =
	"reliable operation impossible!\n%s";
static char nowar[] __initdata =
	"Please report to <linux-mips@linux-mips.org>.";
static char r4kwar[] __initdata =
	"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
	"Enable CPU_DADDI_WORKAROUNDS to rectify.";

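/*
 * Align to an "align"-byte boundary and then emit "mod" nops, so that
 * the code the compiler places after this inlined call starts at a
 * chosen word offset from that boundary.
 */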
static __always_inline __init
void align_mod(const int align, const int mod)
{
	asm volatile(
		".set push\n\t"
		".set noreorder\n\t"
		".balign %0\n\t"
		".rept %1\n\t"
		"nop\n\t"
		".endr\n\t"
		".set pop"
		:
		: "n"(align), "n"(mod));
}

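/*
 * Run a multiply immediately followed by a doubleword shift at the
 * requested code alignment.  *v1 receives the shift result taken
 * straight after the mult, *v2 the same value computed by ordinary
 * compiler-generated code, and *w the value the shift is expected to
 * produce.
 */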
static __always_inline __init
void mult_sh_align_mod(long *v1, long *v2, long *w,
		       const int align, const int mod)
{
	unsigned long flags;
	int m1, m2;
	long p, s, lv1, lv2, lw;

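	/*
	 * Keep the multiply and the shifts isolated from the rest of
	 * the code: the empty asm statements below make gcc treat the
	 * values as unknown, so it cannot constant-fold them or move
	 * the instructions relative to the alignment padding.
	 */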
	local_irq_save(flags);
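	/*
	 * The sequence below triggers the erratum: on affected R4000
	 * CPUs the first dsll32 issued right after the mult may return
	 * a wrong result, depending on the alignment of the code.
	 */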
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (5), "1" (8), "2" (5));
	align_mod(align, mod);
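	/*
	 * The trailing nop satisfies the hazard between reading hi/lo
	 * and starting another mult, so gas does not insert a nop of
	 * its own and upset the alignment of the following code.
	 */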
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"mult %2, %3\n\t"
		"dsll32 %0, %4, %5\n\t"
		"mflo $0\n\t"
		"dsll32 %1, %4, %5\n\t"
		"nop\n\t"
		".set pop"
		: "=&r" (lv1), "=r" (lw)
		: "r" (m1), "r" (m2), "r" (s), "I" (0)
		: "hi", "lo", "$0");
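	/*
	 * Compute the same product and shift with ordinary
	 * compiler-generated code as a reference; the empty asm
	 * statements keep gcc from folding the result into a constant.
	 */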
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (m1), "1" (m2), "2" (s));
	align_mod(align, mod);
	p = m1 * m2;
	lv2 = s << 32;
	asm volatile(
		""
		: "=r" (lv2)
		: "0" (lv2), "r" (p));
	local_irq_restore(flags);

	*v1 = lv1;
	*v2 = lv2;
	*w = lw;
}

static __always_inline __init void check_mult_sh(void)
{
	long v1[8], v2[8], w[8];
	int bug, fix, i;

	printk("Checking for the multiply/shift bug... ");
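	/*
	 * Whether the bug shows up depends on where the mult falls
	 * within an aligned block, so try every word offset within a
	 * 32-byte window.  A loop cannot be used here because the
	 * alignment arguments must be compile-time constants.
	 */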
	mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
	mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
	mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
	mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
	mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
	mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
	mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
	mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);

	bug = 0;
	for (i = 0; i < 8; i++)
		if (v1[i] != w[i])
			bug = 1;

	if (bug == 0) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

	fix = 1;
	for (i = 0; i < 8; i++)
		if (v2[i] != w[i])
			fix = 0;

	if (fix == 1) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}

static volatile int daddi_ov;

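/*
 * Overflow exception handler installed while probing for the daddi
 * bug: record that the exception fired and skip the faulting
 * instruction.
 */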
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	daddi_ov = 1;
	regs->cp0_epc += 4;
	exception_exit(prev_state);
}

static __init void check_daddi(void)
{
	extern asmlinkage void handle_daddi_ov(void);
	unsigned long flags;
	void *handler;
	long v, tmp;

	printk("Checking for the daddi bug... ");

	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
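	/*
	 * The operands below are chosen so that the daddi overflows a
	 * signed 64-bit result: a correct CPU raises an overflow
	 * exception (caught by handle_daddi_ov above), while a CPU
	 * with the daddi erratum completes the instruction without
	 * trapping.
	 */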
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"addiu %1, $0, %2\n\t"
		"dsrl %1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set daddi\n\t"
#endif
		"daddi %0, %1, %3\n\t"
		".set pop"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

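	/*
	 * Try again without the .set directives that force a bare
	 * daddi instruction, so any configured assembler workaround
	 * can take effect; the overflow exception must fire this time
	 * for the workaround to be usable.
	 */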
	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
	asm volatile(
		"addiu %1, $0, %2\n\t"
		"dsrl %1, %1, 1\n\t"
		"daddi %0, %1, %3"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

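/*
 * Cached result of the daddiu probe: -1 until the check has run.
 * MIPSr6 kernels assume the bug is absent.
 */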
int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;

static __init void check_daddiu(void)
{
	long v, w, tmp;

	printk("Checking for the daddiu bug... ");
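	/*
	 * Compute the same sum twice, once with daddiu and once with
	 * daddu on a register operand.  On CPUs with the daddiu
	 * erratum the immediate form returns a different, wrong
	 * result.
	 */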
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"addiu %2, $0, %3\n\t"
		"dsrl %2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set daddi\n\t"
#endif
		"daddiu %0, %2, %4\n\t"
		"addiu %1, $0, %4\n\t"
		"daddu %1, %2\n\t"
		".set pop"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	daddiu_bug = v != w;

	if (!daddiu_bug) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

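	/*
	 * Repeat the comparison without the .set directives that force
	 * a bare daddiu, so any configured assembler workaround can
	 * take effect; the two results must now match.
	 */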
	asm volatile(
		"addiu %2, $0, %3\n\t"
		"dsrl %2, %2, 1\n\t"
		"daddiu %0, %2, %4\n\t"
		"addiu %1, $0, %4\n\t"
		"daddu %1, %2"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	if (v == w) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

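/*
 * Early checks: neither the multiply/shift probe nor the daddiu probe
 * needs exception handling, so they can run early.  MIPSr6 kernels
 * skip the checks entirely.
 */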
void __init check_bugs64_early(void)
{
	if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
		check_mult_sh();
		check_daddiu();
	}
}

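/*
 * The daddi probe relies on catching the overflow exception, so it is
 * deferred to this later hook, once trap handling is available.
 */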
void __init check_bugs64(void)
{
	if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
		check_daddi();
}