This source file includes the following definitions:
- vpe_run
- cleanup_tc
- vpe_alloc
- vpe_start
- vpe_stop
- vpe_free
- store_kill
- ntcs_show
- ntcs_store
- vpe_device_release
- vpe_module_init
- vpe_module_exit

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>

static int major;

static int hw_tcs, hw_vpes;

int vpe_run(struct vpe *v)
{
	unsigned long flags, val, dmt_flag;
	struct vpe_notifications *notifier;
	unsigned int vpeflags;
	struct tc *t;

	/* check that we are running on the Master VPE */
	local_irq_save(flags);
	val = read_c0_vpeconf0();
	if (!(val & VPECONF0_MVP)) {
		pr_warn("VPE loader: only Master VPEs are able to config MT\n");
		local_irq_restore(flags);

		return -1;
	}

	dmt_flag = dmt();
	vpeflags = dvpe();

	if (list_empty(&v->tc)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);

		pr_warn("VPE loader: No TCs associated with VPE %d\n",
			v->minor);

		return -ENOEXEC;
	}

	t = list_first_entry(&v->tc, struct tc, tc);

	/* put the core into configuration state */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(t->index);

	/* the TC must be halted and not yet activated */
	if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
	    !(read_tc_c0_tchalt() & TCHALT_H)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);

		pr_warn("VPE loader: TC %d is already active!\n",
			t->index);

		return -ENOEXEC;
	}

	/* write the address the TC will start executing from */
	write_tc_c0_tcrestart((unsigned long)v->__start);
	write_tc_c0_tccontext((unsigned long)0);

	/* activate the TC, clearing dynamic allocation and interrupt
	 * exemption
	 */
	val = read_tc_c0_tcstatus();
	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
	write_tc_c0_tcstatus(val);

	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);

	/* pass the TC count and the platform memory size to the loaded
	 * program in $a2 and $a3
	 */
	mttgpr(6, v->ntcs);
	mttgpr(7, physical_memsize);

	/* bind the TC to VPE 1 */
	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);

	/* deactivate the VPE while it is being configured */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));

	back_to_back_c0_hazard();

	/* point the VPE's XTC field at our TC */
	write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
			      | (t->index << VPECONF0_XTC_SHIFT));

	back_to_back_c0_hazard();

	/* enable the VPE */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

	/* clear out any leftovers from a previous program */
	write_vpe_c0_status(0);
	write_vpe_c0_cause(0);

	/* take the core out of configuration state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/* re-enable multi-VPE operation (restore the previous state on SMP) */
#ifdef CONFIG_SMP
	evpe(vpeflags);
#else
	evpe(EVPE_ENABLE);
#endif
	emt(dmt_flag);
	local_irq_restore(flags);

	list_for_each_entry(notifier, &v->notify, list)
		notifier->start(VPE_MODULE_MINOR);

	return 0;
}

/* Halt a TC and mark it not activated and interrupt exempt so it can be
 * reused.
 */
void cleanup_tc(struct tc *tc)
{
	unsigned long flags;
	unsigned int mtflags, vpflags;
	int tmp;

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(tc->index);
	tmp = read_tc_c0_tcstatus();

	/* mark not activated, not dynamically allocatable and interrupt
	 * exempt
	 */
	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
	tmp |= TCSTATUS_IXMT;
	write_tc_c0_tcstatus(tmp);

	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();

	clear_c0_mvpcontrol(MVPCONTROL_VPC);
	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);
}

/* Allocate a VPE for AP/SP use (from VPE 1 upward) and mark it as in use. */
void *vpe_alloc(void)
{
	int i;
	struct vpe *v;

	for (i = 1; i < MAX_VPES; i++) {
		v = get_vpe(i);
		if (v != NULL) {
			v->state = VPE_STATE_INUSE;
			return v;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(vpe_alloc);

/* Start executing the loaded program from the supplied entry address. */
int vpe_start(void *vpe, unsigned long start)
{
	struct vpe *v = vpe;

	v->__start = start;
	return vpe_run(v);
}
EXPORT_SYMBOL(vpe_start);

/* Stop the running program by deactivating its VPE. */
int vpe_stop(void *vpe)
{
	struct vpe *v = vpe;
	struct tc *t;
	unsigned int evpe_flags;

	evpe_flags = dvpe();

	t = list_entry(v->tc.next, struct tc, tc);
	if (t != NULL) {
		settc(t->index);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
	}

	evpe(evpe_flags);

	return 0;
}
EXPORT_SYMBOL(vpe_stop);

/* Release the VPE: deactivate it, halt its TC and mark the VPE unused. */
int vpe_free(void *vpe)
{
	struct vpe *v = vpe;
	struct tc *t;
	unsigned int evpe_flags;

	t = list_entry(v->tc.next, struct tc, tc);
	if (t == NULL)
		return -ENOEXEC;

	evpe_flags = dvpe();

	/* enter configuration state */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(t->index);
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);

	/* halt the TC */
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();

	/* deactivate the TC */
	write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);

	v->state = VPE_STATE_UNUSED;

	clear_c0_mvpcontrol(MVPCONTROL_VPC);
	evpe(evpe_flags);

	return 0;
}
EXPORT_SYMBOL(vpe_free);
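
/*
 * Usage sketch (illustrative only, not built as part of this driver):
 * a minimal example of how a kernel-side AP/SP client could drive the
 * four exported calls above.  The function name example_run_firmware,
 * the guard macro and the way the entry address is obtained are
 * assumptions made for this sketch; a real client such as the APRP
 * firmware loader takes the entry point from the program image it has
 * loaded into memory.
 */
#ifdef VPE_MT_USAGE_SKETCH
static int example_run_firmware(unsigned long entry)
{
	void *vpe = vpe_alloc();	/* claim a free VPE, mark it in use */
	int err;

	if (!vpe)
		return -ENODEV;

	err = vpe_start(vpe, entry);	/* point the TC at 'entry' and run it */
	if (err) {
		vpe_free(vpe);
		return err;
	}

	/* ... later, to tear the program down: */
	vpe_stop(vpe);			/* deactivate the VPE */
	vpe_free(vpe);			/* halt the TC and mark the VPE unused */
	return 0;
}
#endif /* VPE_MT_USAGE_SKETCH */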

/* Writing anything to the "kill" attribute stops the running AP/SP program
 * and releases its TC and VPE.
 */
static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());
	struct vpe_notifications *notifier;

	list_for_each_entry(notifier, &vpe->notify, list)
		notifier->stop(aprp_cpu_index());

	release_progmem(vpe->load_addr);
	cleanup_tc(get_tc(aprp_cpu_index()));
	vpe_stop(vpe);
	vpe_free(vpe);

	return len;
}
static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);

/* The "ntcs" attribute reports and sets the number of TCs the loaded
 * program may use.
 */
static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
			 char *buf)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());

	return sprintf(buf, "%d\n", vpe->ntcs);
}

static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());
	unsigned long new;
	int ret;

	ret = kstrtoul(buf, 0, &new);
	if (ret < 0)
		return ret;

	/* the program can use at most the TCs not reserved for Linux */
	if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
		return -EINVAL;

	vpe->ntcs = new;

	return len;
}
static DEVICE_ATTR_RW(ntcs);

static struct attribute *vpe_attrs[] = {
	&dev_attr_kill.attr,
	&dev_attr_ntcs.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vpe);
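
/*
 * User-space usage sketch (illustrative only): once vpe_module_init()
 * below has added the "vpe1" device to the "vpe" class, the two
 * attributes above are normally reachable through sysfs.  The path used
 * here is an assumption derived from those names; any write to "kill"
 * stops the running program, and "ntcs" reads back or limits the number
 * of TCs the program may use.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f;
 *
 *		f = fopen("/sys/class/vpe/vpe1/ntcs", "w");
 *		if (f) {
 *			fprintf(f, "1\n");	// allow the program one TC
 *			fclose(f);
 *		}
 *
 *		f = fopen("/sys/class/vpe/vpe1/kill", "w");
 *		if (f) {
 *			fprintf(f, "1\n");	// any write triggers store_kill()
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 */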

static void vpe_device_release(struct device *cd)
{
	kfree(cd);
}

static struct class vpe_class = {
	.name = "vpe",
	.owner = THIS_MODULE,
	.dev_release = vpe_device_release,
	.dev_groups = vpe_groups,
};

static struct device vpe_device;

int __init vpe_module_init(void)
{
	unsigned int mtflags, vpflags;
	unsigned long flags, val;
	struct vpe *v = NULL;
	struct tc *t;
	int tc, err;

	if (!cpu_has_mipsmt) {
		pr_warn("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}

	if (vpelimit == 0) {
		pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
			"Pass maxvpes=<n> as a kernel argument\n");

		return -ENODEV;
	}

	if (aprp_cpu_index() == 0) {
		pr_warn("No TCs reserved for AP/SP, not initializing VPE loader\n"
			"Pass maxtcs=<n> as a kernel argument\n");

		return -ENODEV;
	}

	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
	if (major < 0) {
		pr_warn("VPE loader: unable to register character device\n");
		return major;
	}

	err = class_register(&vpe_class);
	if (err) {
		pr_err("vpe_class registration failed\n");
		goto out_chrdev;
	}

	device_initialize(&vpe_device);
	vpe_device.class = &vpe_class;
	vpe_device.parent = NULL;
	dev_set_name(&vpe_device, "vpe1");
	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
	err = device_add(&vpe_device);
	if (err) {
		pr_err("Adding vpe_device failed\n");
		goto out_class;
	}

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	/* put the core into configuration state */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	val = read_c0_mvpconf0();
	hw_tcs = (val & MVPCONF0_PTC) + 1;
	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

	for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
		/*
		 * alloc_tc() may sleep, so drop out of configuration state
		 * and re-enable MT and interrupts around it.
		 */
		clear_c0_mvpcontrol(MVPCONTROL_VPC);
		evpe(vpflags);
		emt(mtflags);
		local_irq_restore(flags);
		t = alloc_tc(tc);
		if (!t) {
			err = -ENOMEM;
			goto out_dev;
		}

		local_irq_save(flags);
		mtflags = dmt();
		vpflags = dvpe();
		set_c0_mvpcontrol(MVPCONTROL_VPC);

		/* VPE setup */
		if (tc < hw_tcs) {
			settc(tc);

			v = alloc_vpe(tc);
			if (v == NULL) {
				pr_warn("VPE: unable to allocate VPE\n");
				goto out_reenable;
			}

			v->ntcs = hw_tcs - aprp_cpu_index();

			/* add the TC to this VPE's list of TCs */
			list_add(&t->tc, &v->tc);

			/* deactivate all but VPE 0 */
			if (tc >= aprp_cpu_index()) {
				unsigned long tmp = read_vpe_c0_vpeconf0();

				tmp &= ~VPECONF0_VPA;

				/* master VPE */
				tmp |= VPECONF0_MVP;
				write_vpe_c0_vpeconf0(tmp);
			}

			/* disable multi-threading with TCs */
			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
						~VPECONTROL_TE);

			if (tc >= vpelimit) {
				/*
				 * Set Config to be the same as VPE 0,
				 * particularly the kseg0 coherency algorithm.
				 */
				write_vpe_c0_config(read_c0_config());
			}
		}

		/* TC setup */
		t->pvpe = v;	/* set the parent VPE */

		if (tc >= aprp_cpu_index()) {
			unsigned long tmp;

			settc(tc);

			/*
			 * A TC that is bound to another VPE is rebound to
			 * VPE 0 here; the bound state is visible to an EJTAG
			 * probe.
			 */
			tmp = read_tc_c0_tcbind();
			if (tmp & TCBIND_CURVPE) {
				/* TC is bound above VPE 0 */
				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

				t->pvpe = get_vpe(0);	/* set the parent VPE */
			}

			/* halt the TC */
			write_tc_c0_tchalt(TCHALT_H);
			mips_ihb();

			tmp = read_tc_c0_tcstatus();

			/* mark not activated, not dynamically allocatable
			 * and interrupt exempt
			 */
			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
			tmp |= TCSTATUS_IXMT;
			write_tc_c0_tcstatus(tmp);
		}
	}

out_reenable:
	/* release configuration state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);

	return 0;

out_dev:
	device_del(&vpe_device);

out_class:
	class_unregister(&vpe_class);

out_chrdev:
	unregister_chrdev(major, VPE_MODULE_NAME);

	return err;
}
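
/*
 * The loader above only initialises when some VPEs and TCs have been left
 * unused by Linux (the vpelimit and aprp_cpu_index() checks).  On a
 * typical two-VPE, two-TC MT core this is normally arranged by booting
 * with kernel arguments such as "maxvpes=1 maxtcs=1", leaving VPE 1 and
 * TC 1 free for the AP/SP program; the exact values depend on the counts
 * reported in MVPConf0 (read in vpe_module_init() above).
 */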

void __exit vpe_module_exit(void)
{
	struct vpe *v, *n;

	device_del(&vpe_device);
	class_unregister(&vpe_class);
	unregister_chrdev(major, VPE_MODULE_NAME);

	/* release any VPEs still marked as in use */
	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
		if (v->state != VPE_STATE_UNUSED)
			release_vpe(v);
	}
}