/*
 *  linux/arch/alpha/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */

/*
 * Bootup setup stuff.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/eisa.h>
#include <linux/pfn.h>
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
#include <linux/reboot.h>
#endif
#include <linux/notifier.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <linux/log2.h>
#include <linux/export.h>

extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
	alpha_panic_event,
        NULL,
        INT_MAX /* try to do it first */
};

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>

#include "proto.h"
#include "pci_impl.h"


struct hwrpb_struct *hwrpb;
EXPORT_SYMBOL(hwrpb);
unsigned long srm_hae;

int alpha_l1i_cacheshape;
int alpha_l1d_cacheshape;
int alpha_l2_cacheshape;
int alpha_l3_cacheshape;

#ifdef CONFIG_VERBOSE_MCHECK
/* 0=minimum, 1=verbose, 2=all */
/* These can be overridden via the command line, e.g. "verbose_mcheck=2" */
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif

#ifdef CONFIG_NUMA
struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_to_cpumask_map);
#endif

/* Which processor we booted from.  */
int boot_cpuid;

/*
 * Using SRM callbacks for initial console output. This works from
 * setup_arch() time through the end of time_init(), as those places
 * are under our (Alpha) control.
 *
 * "srmcons" specified in the boot command arguments allows us to
 * see kernel messages during the period of time before the true
 * console device is "registered" during console_init().
 * As of this version (2.5.59), console_init() will call
 * disable_early_printk() as the last action before initializing
 * the console drivers. That's the last possible time srmcons can be
 * unregistered without interfering with console behavior.
 *
 * By default, OFF; set it with a bootcommand arg of "srmcons" or
 * "console=srm". The meaning of these two args is:
 *     "srmcons"     - early callback prints
 *     "console=srm" - full callback based console, including early prints
 */
int srmcons_output = 0;

/* Enforce a memory size limit; useful for testing. By default, none. */
unsigned long mem_size_limit = 0;

/* Set AGP GART window size (0 means disabled). */
unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;

#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
#endif

#ifndef alpha_using_srm
int alpha_using_srm;
EXPORT_SYMBOL(alpha_using_srm);
#endif

#ifndef alpha_using_qemu
int alpha_using_qemu;
#endif

static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
					       unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
static void get_sysnames(unsigned long, unsigned long, unsigned long,
			 char **, char **);
static void determine_cpu_caches (unsigned int);

static char __initdata command_line[COMMAND_LINE_SIZE];

/*
 * The format of "screen_info" is strange, and due to early
 * i386-setup code. This is just enough to make the console
 * code think we're on a VGA color display.
 */

struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};

EXPORT_SYMBOL(screen_info);

/*
 * The direct map I/O window, if any.  This should be the same
 * for all busses, since it's used by virt_to_bus.
 */

unsigned long __direct_map_base;
unsigned long __direct_map_size;
EXPORT_SYMBOL(__direct_map_base);
EXPORT_SYMBOL(__direct_map_size);

/*
 * Declare all of the machine vectors.
 */

/* GCC 2.7.2 (on alpha at least) is lame.  It does not support either
   __attribute__((weak)) or #pragma weak.  Bypass it and talk directly
   to the assembler.  */

#define WEAK(X) \
	extern struct alpha_machine_vector X; \
	asm(".weak "#X)
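
/* An undefined weak symbol resolves to address 0 at link time, so for any
   platform that is not compiled in, "&foo_mv" in the tables below is simply
   NULL; get_sysvec() then returns NULL and setup_arch() panics with
   "Unsupported system type".  */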

WEAK(alcor_mv);
WEAK(alphabook1_mv);
WEAK(avanti_mv);
WEAK(cabriolet_mv);
WEAK(clipper_mv);
WEAK(dp264_mv);
WEAK(eb164_mv);
WEAK(eb64p_mv);
WEAK(eb66_mv);
WEAK(eb66p_mv);
WEAK(eiger_mv);
WEAK(jensen_mv);
WEAK(lx164_mv);
WEAK(lynx_mv);
WEAK(marvel_ev7_mv);
WEAK(miata_mv);
WEAK(mikasa_mv);
WEAK(mikasa_primo_mv);
WEAK(monet_mv);
WEAK(nautilus_mv);
WEAK(noname_mv);
WEAK(noritake_mv);
WEAK(noritake_primo_mv);
WEAK(p2k_mv);
WEAK(pc164_mv);
WEAK(privateer_mv);
WEAK(rawhide_mv);
WEAK(ruffian_mv);
WEAK(rx164_mv);
WEAK(sable_mv);
WEAK(sable_gamma_mv);
WEAK(shark_mv);
WEAK(sx164_mv);
WEAK(takara_mv);
WEAK(titan_mv);
WEAK(webbrick_mv);
WEAK(wildfire_mv);
WEAK(xl_mv);
WEAK(xlt_mv);

#undef WEAK

/*
 * I/O resources inherited from PeeCees.  Except for perhaps the
 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
 *
 * ??? If this becomes less standard, move the struct out into the
 * machine vector.
 */

static void __init
reserve_std_resources(void)
{
	static struct resource standard_io_resources[] = {
		{ .name = "rtc", .start = -1, .end = -1 },
        	{ .name = "dma1", .start = 0x00, .end = 0x1f },
        	{ .name = "pic1", .start = 0x20, .end = 0x3f },
        	{ .name = "timer", .start = 0x40, .end = 0x5f },
        	{ .name = "keyboard", .start = 0x60, .end = 0x6f },
        	{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
        	{ .name = "pic2", .start = 0xa0, .end = 0xbf },
        	{ .name = "dma2", .start = 0xc0, .end = 0xdf },
	};

	struct resource *io = &ioport_resource;
	size_t i;

	if (hose_head) {
		struct pci_controller *hose;
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == 0) {
				io = hose->io_space;
				break;
			}
	}

	/* Fix up for the Jensen's queer RTC placement.  */
	standard_io_resources[0].start = RTC_PORT(0);
	standard_io_resources[0].end = RTC_PORT(0) + 0x10;

	for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
		request_resource(io, standard_io_resources+i);
}

#define PFN_MAX		PFN_DOWN(0x80000000)
#define for_each_mem_cluster(memdesc, _cluster, i)		\
	for ((_cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)

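/*
 * Parse a size argument such as "mem=512M" or "mem=2G" and return it as a
 * PFN count.  For example, with Alpha's 8KB pages (PAGE_SHIFT == 13),
 * "mem=512M" gives (512 << 20) >> 13 == 65536 page frames.
 */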
static unsigned long __init
get_mem_size_limit(char *s)
{
        unsigned long end = 0;
        char *from = s;

        end = simple_strtoul(from, &from, 0);
        if ( *from == 'K' || *from == 'k' ) {
                end = end << 10;
                from++;
        } else if ( *from == 'M' || *from == 'm' ) {
                end = end << 20;
                from++;
        } else if ( *from == 'G' || *from == 'g' ) {
                end = end << 30;
                from++;
        }
        return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
}

#ifdef CONFIG_BLK_DEV_INITRD
void * __init
move_initrd(unsigned long mem_limit)
{
	void *start;
	unsigned long size;

	size = initrd_end - initrd_start;
	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
	if (!start || __pa(start) + size > mem_limit) {
		initrd_start = initrd_end = 0;
		return NULL;
	}
	memmove(start, (void *)initrd_start, size);
	initrd_start = (unsigned long)start;
	initrd_end = initrd_start + size;
	printk("initrd moved to %p\n", start);
	return start;
}
#endif

#ifndef CONFIG_DISCONTIGMEM
static void __init
setup_memory(void *kernel_end)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long bootmap_size, bootmap_pages, bootmap_start;
	unsigned long start, end;
	unsigned long i;

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	for_each_mem_cluster(memdesc, cluster, i) {
		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			continue;

		end = cluster->start_pfn + cluster->numpages;
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	/*
	 * Except for the NUMA systems (wildfire, marvel) all of the
	 * Alpha systems we run on support 32GB of memory or less.
	 * Since the NUMA systems introduce large holes in memory addressing,
	 * we can get into a situation where there is not enough contiguous
	 * memory for the memory map.
	 *
	 * Limit memory to the first 32GB to limit the NUMA systems to
	 * memory on their first node (wildfire) or 2 (marvel) to avoid
	 * not being able to produce the memory map. In order to access
	 * all of the memory on the NUMA systems, build with discontiguous
	 * memory support.
	 *
	 * If the user specified a memory limit, let that memory limit stand.
	 */
	if (!mem_size_limit)
		mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;

	if (mem_size_limit && max_low_pfn >= mem_size_limit)
	{
		printk("setup: forcing memory size to %ldK (from %ldK).\n",
		       mem_size_limit << (PAGE_SHIFT - 10),
		       max_low_pfn    << (PAGE_SHIFT - 10));
		max_low_pfn = mem_size_limit;
	}

	/* Find the bounds of kernel memory.  */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
	bootmap_start = -1;

 try_again:
	if (max_low_pfn <= end_kernel_pfn)
		panic("not enough memory to boot");

	/* We need to know how many physically contiguous pages
	   we'll need for the bootmap.  */
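	/* The bootmem bitmap tracks one bit per page frame, so this is
	   roughly max_low_pfn / (8 * PAGE_SIZE) pages, rounded up.  */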
	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);

	/* Now find a good region where to allocate the bootmap.  */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn
			    && end - end_kernel_pfn >= bootmap_pages) {
				bootmap_start = end_kernel_pfn;
				break;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (end - start >= bootmap_pages) {
			bootmap_start = start;
			break;
		}
	}

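	/* No cluster below max_low_pfn had room for the bootmap.  Halve
	   the memory ceiling and retry, trading away high memory so that
	   the boot can still proceed.  */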
	if (bootmap_start == ~0UL) {
		max_low_pfn >>= 1;
		goto try_again;
	}

	/* Allocate the bootmap and mark the whole MM as reserved.  */
	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);

	/* Mark the free regions.  */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = cluster->start_pfn + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn) {
				free_bootmem(PFN_PHYS(start),
					     (PFN_PHYS(start_kernel_pfn)
					      - PFN_PHYS(start)));
				printk("freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
				start = end_kernel_pfn;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (start >= end)
			continue;

		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
		printk("freeing pages %ld:%ld\n", start, end);
	}

	/* Reserve the bootmap memory.  */
	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
			BOOTMEM_DEFAULT);
	printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		initrd_end = initrd_start+INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			reserve_bootmem(virt_to_phys((void *)initrd_start),
					INITRD_SIZE, BOOTMEM_DEFAULT);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
#else
extern void setup_memory(void *);
#endif /* !CONFIG_DISCONTIGMEM */

int __init
page_is_ram(unsigned long pfn)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long i;

	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);
	for_each_mem_cluster(memdesc, cluster, i)
	{
		if (pfn >= cluster->start_pfn  &&
		    pfn < cluster->start_pfn + cluster->numpages) {
			return (cluster->usage & 3) ? 0 : 1;
		}
	}

	return 0;
}

static int __init
register_cpus(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		register_cpu(p, i);
	}
	return 0;
}

arch_initcall(register_cpus);

void __init
setup_arch(char **cmdline_p)
{
	extern char _end[];

	struct alpha_machine_vector *vec = NULL;
	struct percpu_struct *cpu;
	char *type_name, *var_name, *p;
	void *kernel_end = _end; /* end of kernel */
	char *args = command_line;

	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
	boot_cpuid = hard_smp_processor_id();

        /*
	 * Pre-process the system type to make sure it will be valid.
	 *
	 * This may restore real CABRIO and EB66+ family names, ie
	 * EB64+ and EB66.
	 *
	 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
	 * and AS1200 (DIGITAL Server 5000 series) have the type as
	 * the negative of the real one.
	 */
        if ((long)hwrpb->sys_type < 0) {
		hwrpb->sys_type = -((long)hwrpb->sys_type);
		hwrpb_update_checksum(hwrpb);
	}

	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
			&alpha_panic_block);

#ifndef alpha_using_srm
	/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the latter by looking for "MILO" in the system serial nr.  */
	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
#endif
#ifndef alpha_using_qemu
	/* Similarly, look for QEMU.  */
	alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
#endif

	/* If we are using SRM, we want to allow callbacks
	   as early as possible, so do this NOW, and then
	   they should work immediately thereafter.
	*/
	kernel_end = callback_init(kernel_end);

	/*
	 * Locate the command line.
	 */
	/* Hack for Jensen... since we're restricted to 8 or 16 chars for
	   boot flags depending on the boot mode, we need some shorthand.
	   This should do for installation.  */
	if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
		strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
	} else {
		strlcpy(command_line, COMMAND_LINE, sizeof command_line);
	}
	strcpy(boot_command_line, command_line);
	*cmdline_p = command_line;

	/*
	 * Process command-line arguments.
	 */
	while ((p = strsep(&args, " \t")) != NULL) {
		if (!*p) continue;
		if (strncmp(p, "alpha_mv=", 9) == 0) {
			vec = get_sysvec_byname(p+9);
			continue;
		}
		if (strncmp(p, "cycle=", 6) == 0) {
			est_cycle_freq = simple_strtol(p+6, NULL, 0);
			continue;
		}
		if (strncmp(p, "mem=", 4) == 0) {
			mem_size_limit = get_mem_size_limit(p+4);
			continue;
		}
		if (strncmp(p, "srmcons", 7) == 0) {
			srmcons_output |= 1;
			continue;
		}
		if (strncmp(p, "console=srm", 11) == 0) {
			srmcons_output |= 2;
			continue;
		}
		if (strncmp(p, "gartsize=", 9) == 0) {
			alpha_agpgart_size =
				get_mem_size_limit(p+9) << PAGE_SHIFT;
			continue;
		}
#ifdef CONFIG_VERBOSE_MCHECK
		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
			continue;
		}
#endif
	}

	/* Replace the command line, now that we've killed it with strsep.  */
	strcpy(command_line, boot_command_line);

	/* If we want SRM console printk echoing early, do it now. */
	if (alpha_using_srm && srmcons_output) {
		register_srm_console();

		/*
		 * If "console=srm" was specified, clear the srmcons_output
		 * flag now so that time.c won't unregister_srm_console
		 */
		if (srmcons_output & 2)
			srmcons_output = 0;
	}

#ifdef CONFIG_MAGIC_SYSRQ
	/* If we're using SRM, make sysrq-b halt back to the prom,
	   not auto-reboot.  */
	if (alpha_using_srm) {
		struct sysrq_key_op *op = __sysrq_get_key_op('b');
		op->handler = (void *) machine_halt;
	}
#endif

	/*
	 * Identify and reconfigure for the current system.
	 */
	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &type_name, &var_name);
	if (*var_name == '0')
		var_name = "";

	if (!vec) {
		vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
				 cpu->type);
	}

	if (!vec) {
		panic("Unsupported system type: %s%s%s (%ld %ld)\n",
		      type_name, (*var_name ? " variation " : ""), var_name,
		      hwrpb->sys_type, hwrpb->sys_variation);
	}
	if (vec != &alpha_mv) {
		alpha_mv = *vec;
	}

	printk("Booting "
#ifdef CONFIG_ALPHA_GENERIC
	       "GENERIC "
#endif
	       "on %s%s%s using machine vector %s from %s\n",
	       type_name, (*var_name ? " variation " : ""),
	       var_name, alpha_mv.vector_name,
	       (alpha_using_srm ? "SRM" : "MILO"));

	printk("Major Options: "
#ifdef CONFIG_SMP
	       "SMP "
#endif
#ifdef CONFIG_ALPHA_EV56
	       "EV56 "
#endif
#ifdef CONFIG_ALPHA_EV67
	       "EV67 "
#endif
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
	       "LEGACY_START "
#endif
#ifdef CONFIG_VERBOSE_MCHECK
	       "VERBOSE_MCHECK "
#endif

#ifdef CONFIG_DISCONTIGMEM
	       "DISCONTIGMEM "
#ifdef CONFIG_NUMA
	       "NUMA "
#endif
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
	       "DEBUG_SPINLOCK "
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	       "MAGIC_SYSRQ "
#endif
	       "\n");

	printk("Command line: %s\n", command_line);

	/*
	 * Sync up the HAE.
	 * Save the SRM's current value for restoration.
	 */
	srm_hae = *alpha_mv.hae_register;
	__set_hae(alpha_mv.hae_cache);

	/* Reset enable correctable error reports.  */
	wrmces(0x7);

	/* Find our memory.  */
	setup_memory(kernel_end);

	/* First guess at cpu cache sizes.  Do this before init_arch.  */
	determine_cpu_caches(cpu->type);

	/* Initialize the machine.  Usually has to do with setting up
	   DMA windows and the like.  */
	if (alpha_mv.init_arch)
		alpha_mv.init_arch();

	/* Reserve standard resources.  */
	reserve_std_resources();

	/*
	 * Give us a default console.  TGA users will see nothing until
	 * chr_dev_init is called, rather late in the boot sequence.
	 */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Default root filesystem to sda2.  */
	ROOT_DEV = Root_SDA2;

#ifdef CONFIG_EISA
	/* FIXME:  only set this when we actually have EISA in this box? */
	EISA_bus = 1;
#endif

 	/*
	 * Check ASN in HWRPB for validity, report if bad.
	 * FIXME: how was this failing?  Should we trust it instead,
	 * and copy the value into alpha_mv.max_asn?
 	 */

 	if (hwrpb->max_asn != MAX_ASN) {
		printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
 	}

	/*
	 * Identify the flock of penguins.
	 */

#ifdef CONFIG_SMP
	setup_smp();
#endif
	paging_init();
}

static char sys_unknown[] = "Unknown";
static char systype_names[][16] = {
	"0",
	"ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
	"Pelican", "Morgan", "Sable", "Medulla", "Noname",
	"Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
	"Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
	"Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
	"Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
	"Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
};

static char unofficial_names[][8] = {"100", "Ruffian"};

static char api_names[][16] = {"200", "Nautilus"};

static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};

static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};

static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
static int eb64p_indices[] = {0,0,1,2};

static char eb66_names[][8] = {"EB66", "EB66+"};
static int eb66_indices[] = {0,0,1};

static char marvel_names[][16] = {
	"Marvel/EV7"
};
static int marvel_indices[] = { 0 };

static char rawhide_names[][16] = {
	"Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
};
static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};

static char titan_names[][16] = {
	"DEFAULT", "Privateer", "Falcon", "Granite"
};
static int titan_indices[] = {0,1,2,2,3};

static char tsunami_names[][16] = {
	"0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
	"Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
	"Flying Clipper", "Shark"
};
static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};

static struct alpha_machine_vector * __init
get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
{
	static struct alpha_machine_vector *systype_vecs[] __initdata =
	{
		NULL,		/* 0 */
		NULL,		/* ADU */
		NULL,		/* Cobra */
		NULL,		/* Ruby */
		NULL,		/* Flamingo */
		NULL,		/* Mannequin */
		&jensen_mv,
		NULL, 		/* Pelican */
		NULL,		/* Morgan */
		NULL,		/* Sable -- see below.  */
		NULL,		/* Medulla */
		&noname_mv,
		NULL,		/* Turbolaser */
		&avanti_mv,
		NULL,		/* Mustang */
		NULL,		/* Alcor, Bret, Maverick. HWRPB inaccurate? */
		NULL,		/* Tradewind */
		NULL,		/* Mikasa -- see below.  */
		NULL,		/* EB64 */
		NULL,		/* EB66 -- see variation.  */
		NULL,		/* EB64+ -- see variation.  */
		&alphabook1_mv,
		&rawhide_mv,
		NULL,		/* K2 */
		&lynx_mv,	/* Lynx */
		&xl_mv,
		NULL,		/* EB164 -- see variation.  */
		NULL,		/* Noritake -- see below.  */
		NULL,		/* Cortex */
		NULL,		/* 29 */
		&miata_mv,
		NULL,		/* XXM */
		&takara_mv,
		NULL,		/* Yukon */
		NULL,		/* Tsunami -- see variation.  */
		&wildfire_mv,	/* Wildfire */
		NULL,		/* CUSCO */
		&eiger_mv,	/* Eiger */
		NULL,		/* Titan */
		NULL,		/* Marvel */
	};

	static struct alpha_machine_vector *unofficial_vecs[] __initdata =
	{
		NULL,		/* 100 */
		&ruffian_mv,
	};

	static struct alpha_machine_vector *api_vecs[] __initdata =
	{
		NULL,		/* 200 */
		&nautilus_mv,
	};

	static struct alpha_machine_vector *alcor_vecs[] __initdata =
	{
		&alcor_mv, &xlt_mv, &xlt_mv
	};

	static struct alpha_machine_vector *eb164_vecs[] __initdata =
	{
		&eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
	};

	static struct alpha_machine_vector *eb64p_vecs[] __initdata =
	{
		&eb64p_mv,
		&cabriolet_mv,
		&cabriolet_mv		/* AlphaPCI64 */
	};

	static struct alpha_machine_vector *eb66_vecs[] __initdata =
	{
		&eb66_mv,
		&eb66p_mv
	};

	static struct alpha_machine_vector *marvel_vecs[] __initdata =
	{
		&marvel_ev7_mv,
	};

	static struct alpha_machine_vector *titan_vecs[] __initdata =
	{
		&titan_mv,		/* default   */
		&privateer_mv,		/* privateer */
		&titan_mv,		/* falcon    */
		&privateer_mv,		/* granite   */
	};

	static struct alpha_machine_vector *tsunami_vecs[]  __initdata =
	{
		NULL,
		&dp264_mv,		/* dp264 */
		&dp264_mv,		/* warhol */
		&dp264_mv,		/* windjammer */
		&monet_mv,		/* monet */
		&clipper_mv,		/* clipper */
		&dp264_mv,		/* goldrush */
		&webbrick_mv,		/* webbrick */
		&dp264_mv,		/* catamaran */
		NULL,			/* brisbane? */
		NULL,			/* melbourne? */
		NULL,			/* flying clipper? */
		&shark_mv,		/* shark */
	};

	/* ??? Do we need to distinguish between Rawhides?  */

	struct alpha_machine_vector *vec;

	/* Search the system tables first... */
	vec = NULL;
	if (type < ARRAY_SIZE(systype_vecs)) {
		vec = systype_vecs[type];
	} else if ((type > ST_API_BIAS) &&
		   (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
		vec = api_vecs[type - ST_API_BIAS];
	} else if ((type > ST_UNOFFICIAL_BIAS) &&
		   (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
		vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
	}

	/* If we've not found one, try for a variation.  */

	if (!vec) {
		/* Member ID is a bit-field. */
		unsigned long member = (variation >> 10) & 0x3f;
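		/* Bits 10..15 of sys_variation carry the family member ID;
		   for example, a variation of 0x0800 gives member 2.  */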

		cpu &= 0xffffffff; /* make it usable */

		switch (type) {
		case ST_DEC_ALCOR:
			if (member < ARRAY_SIZE(alcor_indices))
				vec = alcor_vecs[alcor_indices[member]];
			break;
		case ST_DEC_EB164:
			if (member < ARRAY_SIZE(eb164_indices))
				vec = eb164_vecs[eb164_indices[member]];
			/* PC164 may show as EB164 variation with EV56 CPU,
			   but, since no true EB164 had anything but EV5... */
			if (vec == &eb164_mv && cpu == EV56_CPU)
				vec = &pc164_mv;
			break;
		case ST_DEC_EB64P:
			if (member < ARRAY_SIZE(eb64p_indices))
				vec = eb64p_vecs[eb64p_indices[member]];
			break;
		case ST_DEC_EB66:
			if (member < ARRAY_SIZE(eb66_indices))
				vec = eb66_vecs[eb66_indices[member]];
			break;
		case ST_DEC_MARVEL:
			if (member < ARRAY_SIZE(marvel_indices))
				vec = marvel_vecs[marvel_indices[member]];
			break;
		case ST_DEC_TITAN:
			vec = titan_vecs[0];	/* default */
			if (member < ARRAY_SIZE(titan_indices))
				vec = titan_vecs[titan_indices[member]];
			break;
		case ST_DEC_TSUNAMI:
			if (member < ARRAY_SIZE(tsunami_indices))
				vec = tsunami_vecs[tsunami_indices[member]];
			break;
		case ST_DEC_1000:
			if (cpu == EV5_CPU || cpu == EV56_CPU)
				vec = &mikasa_primo_mv;
			else
				vec = &mikasa_mv;
			break;
		case ST_DEC_NORITAKE:
			if (cpu == EV5_CPU || cpu == EV56_CPU)
				vec = &noritake_primo_mv;
			else
				vec = &noritake_mv;
			break;
		case ST_DEC_2100_A500:
			if (cpu == EV5_CPU || cpu == EV56_CPU)
				vec = &sable_gamma_mv;
			else
				vec = &sable_mv;
			break;
		}
	}
	return vec;
}

static struct alpha_machine_vector * __init
get_sysvec_byname(const char *name)
{
	static struct alpha_machine_vector *all_vecs[] __initdata =
	{
		&alcor_mv,
		&alphabook1_mv,
		&avanti_mv,
		&cabriolet_mv,
		&clipper_mv,
		&dp264_mv,
		&eb164_mv,
		&eb64p_mv,
		&eb66_mv,
		&eb66p_mv,
		&eiger_mv,
		&jensen_mv,
		&lx164_mv,
		&lynx_mv,
		&miata_mv,
		&mikasa_mv,
		&mikasa_primo_mv,
		&monet_mv,
		&nautilus_mv,
		&noname_mv,
		&noritake_mv,
		&noritake_primo_mv,
		&p2k_mv,
		&pc164_mv,
		&privateer_mv,
		&rawhide_mv,
		&ruffian_mv,
		&rx164_mv,
		&sable_mv,
		&sable_gamma_mv,
		&shark_mv,
		&sx164_mv,
		&takara_mv,
		&webbrick_mv,
		&wildfire_mv,
		&xl_mv,
		&xlt_mv
	};

	size_t i;

	for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
		struct alpha_machine_vector *mv = all_vecs[i];
		if (strcasecmp(mv->vector_name, name) == 0)
			return mv;
	}
	return NULL;
}

static void
get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
	     char **type_name, char **variation_name)
{
	unsigned long member;

	/* If not in the tables, make it UNKNOWN,
	   else set type name to family */
	if (type < ARRAY_SIZE(systype_names)) {
		*type_name = systype_names[type];
	} else if ((type > ST_API_BIAS) &&
		   (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
		*type_name = api_names[type - ST_API_BIAS];
	} else if ((type > ST_UNOFFICIAL_BIAS) &&
		   (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
		*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
	} else {
		*type_name = sys_unknown;
		*variation_name = sys_unknown;
		return;
	}

	/* Set variation to "0"; if variation is zero, done.  */
	*variation_name = systype_names[0];
	if (variation == 0) {
		return;
	}

	member = (variation >> 10) & 0x3f; /* member ID is a bit-field */

	cpu &= 0xffffffff; /* make it usable */

	switch (type) { /* select by family */
	default: /* default to variation "0" for now */
		break;
	case ST_DEC_EB164:
		if (member < ARRAY_SIZE(eb164_indices))
			*variation_name = eb164_names[eb164_indices[member]];
		/* PC164 may show as EB164 variation, but with EV56 CPU,
		   so, since no true EB164 had anything but EV5... */
		if (eb164_indices[member] == 0 && cpu == EV56_CPU)
			*variation_name = eb164_names[1]; /* make it PC164 */
		break;
	case ST_DEC_ALCOR:
		if (member < ARRAY_SIZE(alcor_indices))
			*variation_name = alcor_names[alcor_indices[member]];
		break;
	case ST_DEC_EB64P:
		if (member < ARRAY_SIZE(eb64p_indices))
			*variation_name = eb64p_names[eb64p_indices[member]];
		break;
	case ST_DEC_EB66:
		if (member < ARRAY_SIZE(eb66_indices))
			*variation_name = eb66_names[eb66_indices[member]];
		break;
	case ST_DEC_MARVEL:
		if (member < ARRAY_SIZE(marvel_indices))
			*variation_name = marvel_names[marvel_indices[member]];
		break;
	case ST_DEC_RAWHIDE:
		if (member < ARRAY_SIZE(rawhide_indices))
			*variation_name = rawhide_names[rawhide_indices[member]];
		break;
	case ST_DEC_TITAN:
		*variation_name = titan_names[0];	/* default */
		if (member < ARRAY_SIZE(titan_indices))
			*variation_name = titan_names[titan_indices[member]];
		break;
	case ST_DEC_TSUNAMI:
		if (member < ARRAY_SIZE(tsunami_indices))
			*variation_name = tsunami_names[tsunami_indices[member]];
		break;
	}
}

/*
 * A change was made to the HWRPB via an ECO and the following code
 * tracks a part of the ECO.  In HWRPB versions less than 5, the ECO
 * was not implemented in the console firmware.  If it's revision 5 or
 * greater we can get the name of the platform as an ASCII string from
 * the HWRPB.  That's what this function does.  It checks the revision
 * level and if the string is in the HWRPB it returns the address of
 * the string--a pointer to the name of the platform.
 *
 * Returns:
 *      - Pointer to an ASCII string if it's in the HWRPB
 *      - Pointer to a blank string if the data is not in the HWRPB.
 */

static char *
platform_string(void)
{
	struct dsr_struct *dsr;
	static char unk_system_string[] = "N/A";

	/* Go to the console for the string pointer.
	 * If the rpb_vers is not 5 or greater the rpb
	 * is old and does not have this data in it.
	 */
	if (hwrpb->revision < 5)
		return (unk_system_string);
	else {
		/* The Dynamic System Recognition struct
		 * has the system platform name starting
		 * after the character count of the string.
		 */
		dsr =  ((struct dsr_struct *)
			((char *)hwrpb + hwrpb->dsr_offset));
		return ((char *)dsr + (dsr->sysname_off +
				       sizeof(long)));
	}
}

static int
get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
{
	struct percpu_struct *cpu;
	unsigned long i;
	int count = 0;

	for (i = 0; i < num; i++) {
		cpu = (struct percpu_struct *)
			((char *)cpubase + i*hwrpb->processor_size);
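		/* 0x1cc selects the HWRPB per-CPU flag bits indicating that
		   the processor is present, available, and has valid PALcode;
		   only CPUs with all of those set are counted.  */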
		if ((cpu->flags & 0x1cc) == 0x1cc)
			count++;
	}
	return count;
}

static void
show_cache_size (struct seq_file *f, const char *which, int shape)
{
	if (shape == -1)
		seq_printf (f, "%s\t\t: n/a\n", which);
	else if (shape == 0)
		seq_printf (f, "%s\t\t: unknown\n", which);
	else
		seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
			    which, shape >> 10, shape & 15,
			    1 << ((shape >> 4) & 15));
}

static int
show_cpuinfo(struct seq_file *f, void *slot)
{
	extern struct unaligned_stat {
		unsigned long count, va, pc;
	} unaligned[2];

	static char cpu_names[][8] = {
		"EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
		"EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
		"EV68CX", "EV7", "EV79", "EV69"
	};

	struct percpu_struct *cpu = slot;
	unsigned int cpu_index;
	char *cpu_name;
	char *systype_name;
	char *sysvariation_name;
	int nr_processors;
	unsigned long timer_freq;

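	/* HWRPB processor types are 1-based (EV3 == 1), hence the -1 to
	   index the 0-based cpu_names[] table above.  */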
	cpu_index = (unsigned) (cpu->type - 1);
	cpu_name = "Unknown";
	if (cpu_index < ARRAY_SIZE(cpu_names))
		cpu_name = cpu_names[cpu_index];

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &systype_name, &sysvariation_name);

	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);

#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
	timer_freq = (100UL * hwrpb->intr_freq) / 4096;
#else
	timer_freq = 100UL * CONFIG_HZ;
#endif
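	/* timer_freq is kept as Hz * 100 so two decimal places survive the
	   integer math below; the HWRPB interrupt-frequency field is scaled
	   by 4096, so e.g. a reported value of 4194304 prints as "1024.00". */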

	seq_printf(f, "cpu\t\t\t: Alpha\n"
		      "cpu model\t\t: %s\n"
		      "cpu variation\t\t: %ld\n"
		      "cpu revision\t\t: %ld\n"
		      "cpu serial number\t: %s\n"
		      "system type\t\t: %s\n"
		      "system variation\t: %s\n"
		      "system revision\t\t: %ld\n"
		      "system serial number\t: %s\n"
		      "cycle frequency [Hz]\t: %lu %s\n"
		      "timer frequency [Hz]\t: %lu.%02lu\n"
		      "page size [bytes]\t: %ld\n"
		      "phys. address bits\t: %ld\n"
		      "max. addr. space #\t: %ld\n"
		      "BogoMIPS\t\t: %lu.%02lu\n"
		      "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "platform string\t\t: %s\n"
		      "cpus detected\t\t: %d\n",
		       cpu_name, cpu->variation, cpu->revision,
		       (char*)cpu->serial_no,
		       systype_name, sysvariation_name, hwrpb->sys_revision,
		       (char*)hwrpb->ssn,
		       est_cycle_freq ? : hwrpb->cycle_freq,
		       est_cycle_freq ? "est." : "",
		       timer_freq / 100, timer_freq % 100,
		       hwrpb->pagesize,
		       hwrpb->pa_bits,
		       hwrpb->max_asn,
		       loops_per_jiffy / (500000/HZ),
		       (loops_per_jiffy / (5000/HZ)) % 100,
		       unaligned[0].count, unaligned[0].pc, unaligned[0].va,
		       unaligned[1].count, unaligned[1].pc, unaligned[1].va,
		       platform_string(), nr_processors);

#ifdef CONFIG_SMP
	seq_printf(f, "cpus active\t\t: %u\n"
		      "cpu active mask\t\t: %016lx\n",
		       num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
#endif

	show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
	show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
	show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
	show_cache_size (f, "L3 cache", alpha_l3_cacheshape);

	return 0;
}

static int __init
read_mem_block(int *addr, int stride, int size)
{
	long nloads = size / stride, cnt, tmp;

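	/* RPCC reads the cycle counter (its low 32 bits count cycles); the
	   32-bit difference between the two reads is the total time for the
	   chain of dependent loads, and dividing by the number of loads
	   below gives the average per-load latency.  */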
	__asm__ __volatile__(
	"	rpcc    %0\n"
	"1:	ldl	%3,0(%2)\n"
	"	subq	%1,1,%1\n"
	/* Next two XORs introduce an explicit data dependency between
	   consecutive loads in the loop, which will give us true load
	   latency. */
	"	xor	%3,%2,%2\n"
	"	xor	%3,%2,%2\n"
	"	addq	%2,%4,%2\n"
	"	bne	%1,1b\n"
	"	rpcc	%3\n"
	"	subl	%3,%0,%0\n"
	: "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
	: "r" (stride), "1" (nloads), "2" (addr));

	return cnt / (size / stride);
}

#define CSHAPE(totalsize, linesize, assoc) \
  ((totalsize & ~0xff) | (linesize << 4) | assoc)
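
/* For example, CSHAPE(96*1024, 6, 3) encodes the EV5 Scache as 0x18063,
   which show_cache_size() above decodes as "96K, 3-way, 64b line":
   size = shape >> 10, associativity = shape & 15,
   line size = 1 << ((shape >> 4) & 15).  */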

/* ??? EV5 supports up to 64M, but did the systems with more than
   16M of BCACHE ever exist? */
#define MAX_BCACHE_SIZE	16*1024*1024

/* Note that the offchip caches are direct mapped on all Alphas. */
static int __init
external_cache_probe(int minsize, int width)
{
	int cycles, prev_cycles = 1000000;
	int stride = 1 << width;
	long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;

	if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
		maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT);

	/* Get the first block cached. */
	read_mem_block(__va(0), stride, size);

	while (size < maxsize) {
		/* Get an average load latency in cycles. */
		cycles = read_mem_block(__va(0), stride, size);
		if (cycles > prev_cycles * 2) {
			/* Fine, we exceed the cache. */
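			/* The previous, half-size pass still hit, so the
			   Bcache must be size/2 (cache sizes are powers of
			   two) -- hence the "size >> 11" (size/2 in KB) in
			   the printk and CSHAPE(size >> 1, ...) below.  */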
			printk("%ldK Bcache detected; load hit latency %d "
			       "cycles, load miss latency %d cycles\n",
			       size >> 11, prev_cycles, cycles);
			return CSHAPE(size >> 1, width, 1);
		}
		/* Try to get the next block cached. */
		read_mem_block(__va(size), stride, size);
		prev_cycles = cycles;
		size <<= 1;
	}
	return -1;	/* No BCACHE found. */
}

static void __init
determine_cpu_caches (unsigned int cpu_type)
{
	int L1I, L1D, L2, L3;

	switch (cpu_type) {
	case EV4_CPU:
	case EV45_CPU:
	  {
		if (cpu_type == EV4_CPU)
			L1I = CSHAPE(8*1024, 5, 1);
		else
			L1I = CSHAPE(16*1024, 5, 1);
		L1D = L1I;
		L3 = -1;

		/* BIU_CTL is a write-only Abox register.  PALcode has a
		   shadow copy, and may be available from some versions
		   of the CSERVE PALcall.  If we can get it, then

			unsigned long biu_ctl, size;
			size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
			L2 = CSHAPE (size, 5, 1);

		   Unfortunately, we can't rely on that.
		*/
		L2 = external_cache_probe(128*1024, 5);
		break;
	  }

	case LCA4_CPU:
	  {
		unsigned long car, size;

		L1I = L1D = CSHAPE(8*1024, 5, 1);
		L3 = -1;

		car = *(vuip) phys_to_virt (0x120000078UL);
		size = 64*1024 * (1 << ((car >> 5) & 7));
		/* No typo -- 8 byte cacheline size.  Whodathunk.  */
		L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
		break;
	  }

	case EV5_CPU:
	case EV56_CPU:
	  {
		unsigned long sc_ctl, width;

		L1I = L1D = CSHAPE(8*1024, 5, 1);

		/* Check the line size of the Scache.  */
		sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
		width = sc_ctl & 0x1000 ? 6 : 5;
		L2 = CSHAPE (96*1024, width, 3);

		/* BC_CONTROL and BC_CONFIG are write-only IPRs.  PALcode
		   has a shadow copy, and may be available from some versions
		   of the CSERVE PALcall.  If we can get it, then

			unsigned long bc_control, bc_config, size;
			size = 1024*1024 * (1 << ((bc_config & 7) - 1));
			L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);

		   Unfortunately, we can't rely on that.
		*/
		L3 = external_cache_probe(1024*1024, width);
		break;
	  }

	case PCA56_CPU:
	case PCA57_CPU:
	  {
		if (cpu_type == PCA56_CPU) {
			L1I = CSHAPE(16*1024, 6, 1);
			L1D = CSHAPE(8*1024, 5, 1);
		} else {
			L1I = CSHAPE(32*1024, 6, 2);
			L1D = CSHAPE(16*1024, 5, 1);
		}
		L3 = -1;

#if 0
		unsigned long cbox_config, size;

		cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
		size = 512*1024 * (1 << ((cbox_config >> 12) & 3));

		L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
#else
		L2 = external_cache_probe(512*1024, 6);
#endif
		break;
	  }

	case EV6_CPU:
	case EV67_CPU:
	case EV68CB_CPU:
	case EV68AL_CPU:
	case EV68CX_CPU:
	case EV69_CPU:
		L1I = L1D = CSHAPE(64*1024, 6, 2);
		L2 = external_cache_probe(1024*1024, 6);
		L3 = -1;
		break;

	case EV7_CPU:
	case EV79_CPU:
		L1I = L1D = CSHAPE(64*1024, 6, 2);
		L2 = CSHAPE(7*1024*1024/4, 6, 7);
		L3 = -1;
		break;

	default:
		/* Nothing known about this cpu type.  */
		L1I = L1D = L2 = L3 = 0;
		break;
	}

	alpha_l1i_cacheshape = L1I;
	alpha_l1d_cacheshape = L1D;
	alpha_l2_cacheshape = L2;
	alpha_l3_cacheshape = L3;
}

/*
 * We show only CPU #0 info.
 */
static void *
c_start(struct seq_file *f, loff_t *pos)
{
	return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
}

static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
	return NULL;
}

static void
c_stop(struct seq_file *f, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};


static int
alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
#if 1
	/* FIXME FIXME FIXME */
	/* If we are using SRM and serial console, just hard halt here. */
	if (alpha_using_srm && srmcons_output)
		__halt();
#endif
        return NOTIFY_DONE;
}

static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);