/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition, spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}

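/*
 * Associate an mm with an SPE. The assignment is done under
 * spu_full_list_lock so that spu_flush_all_slbs() sees a consistent
 * spu->mm, and a non-NULL mm is marked as needing global TLB
 * invalidations.
 */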
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

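/*
 * Restart the MFC DMA queue after a fault has been resolved. If a
 * context switch is pending, record SPU_CONTEXT_FAULT_PENDING instead
 * so the restart can be issued once the switch has completed.
 */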
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}

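/*
 * Handle an SPE segment fault: compute an SLB entry for the faulting
 * effective address, load it into the slot selected by the simple
 * round-robin replacement pointer (eight slots), and restart DMA.
 */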
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct copro_slb slb;
	int ret;

	ret = copro_calculate_slb(spu->mm, ea, &slb);
	if (ret)
		return ret;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access,
		     unsigned long trap, unsigned long dsisr); //XXX
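/*
 * Handle a class 1 data storage fault. Kernel-region hash faults are
 * resolved directly via hash_page() and DMA is restarted; anything
 * else is recorded and handed to the stop callback so it can be
 * resolved from process context.
 */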
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (REGION_ID(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea, _PAGE_PRESENT, 0x300, dsisr);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}

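/*
 * Build a kernel-space SLB entry for the given address, using the
 * linear-mapping page size for the kernel region and the base page
 * size otherwise.
 */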
static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Set up the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct copro_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already an SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

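/*
 * Class 0 interrupt handler: record the pending interrupt status and
 * fault address, notify the owning context via the stop callback, and
 * acknowledge the interrupt under the register lock.
 */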
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}

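/*
 * Class 1 interrupt handler: translation faults. The status, DAR and
 * DSISR are read and cleared atomically under the register lock, then
 * segment and storage faults are dispatched to the handlers above.
 */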
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
		; /* no action required; already acknowledged above */

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
		; /* no action required; already acknowledged above */

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

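/*
 * Class 2 interrupt handler: mailbox, SPU stop/halt and MFC tag-group
 * completion events, dispatched to the owning context's callbacks.
 */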
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

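/*
 * Request the three class interrupt lines for an SPE, unwinding any
 * already-requested lines on failure.
 */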
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  0, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  0, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  0, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

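/*
 * Bring the SPE channels into a known state: the entries in zero_list
 * have their channel data written as zero, and the entries in
 * count_list have their channel counts set to sensible initial values.
 */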
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static struct bus_type spu_subsys = {
	.name = "spu",
	.dev_name = "spu",
};

int spu_add_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_create_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);

int spu_add_dev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->dev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->dev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);

void spu_remove_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_remove_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);

void spu_remove_dev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->dev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);

static int spu_create_dev(struct spu *spu)
{
	int ret;

	spu->dev.id = spu->number;
	spu->dev.bus = &spu_subsys;
	ret = device_register(&spu->dev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->dev, spu->node);

	return 0;
}

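/*
 * Create and register one SPE: allocate the spu structure, let the
 * platform code probe it, set up interrupts and the sysfs device, and
 * add it to the per-node and global lists.
 */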
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_dev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	spu->stats.tstamp = ktime_get_ns();

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

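/*
 * Return the accumulated time the SPE has spent in the given
 * utilization state, in milliseconds, including the time since the
 * last state change if that state is still current.
 */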
static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state)
		time += ktime_get_ns() - spu->stats.tstamp;

	return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct spu *spu = container_of(dev, struct spu, dev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC

struct crash_spu_info {
	struct spu *spu;
	u32 saved_spu_runcntl_RW;
	u32 saved_spu_status_R;
	u32 saved_spu_npc_RW;
	u64 saved_mfc_sr1_RW;
	u64 saved_mfc_dar;
	u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

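/*
 * kexec crash handler: save the problem-state and MFC registers of
 * each known SPE for the crash dump, then stop the SPEs by clearing
 * the master run control bit in MFC_SR1.
 */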
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		__delay(200);
	}
}

static void crash_register_spus(struct list_head *list)
{
	struct spu *spu;
	int ret;

	list_for_each_entry(spu, list, full_list) {
		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
			continue;

		crash_spu_info[spu->number].spu = spu;
	}

	ret = crash_shutdown_register(&crash_kexec_stop_spus);
	if (ret)
		printk(KERN_ERR "Could not register SPU crash handler\n");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

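/*
 * Syscore shutdown hook: release the interrupts and let the platform
 * code tear down each SPE before the system shuts down.
 */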
static void spu_shutdown(void)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		spu_free_irqs(spu);
		spu_destroy_spu(spu);
	}
	mutex_unlock(&spu_full_list_mutex);
}

static struct syscore_ops spu_syscore_ops = {
	.shutdown = spu_shutdown,
};

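/*
 * Initialise SPU base support: register the spu subsystem, enumerate
 * and create all SPEs via the platform management ops, and hook up
 * xmon, kexec, the sysfs statistics attribute and the syscore
 * shutdown handler.
 */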
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create system subsystem for spus */
	ret = subsys_system_register(&spu_subsys, NULL);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_subsys;
	}

	if (ret > 0)
		fb_append_extra_logo(&logo_spe_clut224, ret);

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_dev_attr(&dev_attr_stat);
	register_syscore_ops(&spu_syscore_ops);

	spu_init_affinity();

	return 0;

 out_unregister_subsys:
	bus_unregister(&spu_subsys);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
