/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

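/*
 * Split memory into additional fake NUMA nodes at the boundaries given
 * via the "numa=fake=" command line option, e.g. "numa=fake=1G,3G"
 * places memory above 1G in fake node 1 and memory above 3G in fake
 * node 2.  Returns 1 and updates *nid when a new fake node is created
 * for the region ending at end_pfn, 0 otherwise.
 */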
static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify the node id only if we have already started creating
	 * NUMA nodes.  We want to continue from where we left off the
	 * last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

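/*
 * Return the relative distance between two nodes: LOCAL_DISTANCE for a
 * node and itself, doubled for every reference-point level (from
 * ibm,associativity-reference-points) at which the nodes differ.
 * Without form 1 affinity only local vs. remote is distinguished.
 */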
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);

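/*
 * Record this node's associativity domain ID at each reference level
 * so that __node_distance() can compare two nodes level by level.
 */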
static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 &&
	    of_read_number(associativity, 1) >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

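/*
 * Concatenate n 32-bit device tree cells, most significant first, into
 * a single value and advance the buffer past them.
 */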
static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
	const __be32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = of_read_number(cp, 1);
	drmem->reserved = of_read_number(&cp[1], 1);
	drmem->aa_index = of_read_number(&cp[2], 1);
	drmem->flags = of_read_number(&cp[3], 1);

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is an entry count N
 * followed by N memblock list entries.  Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
	const __be32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = of_read_number(prop++, 1);

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = of_read_number(&aa->arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = -1;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE, nid;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		nid = numa_setup_cpu(lcpu);
		verify_cpu_node_mapping((int)lcpu, nid);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const __be32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node((base + size) >> PAGE_SHIFT,
						  &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz,
						  &memblock.memory, nid);
		} while (--ranges);
	}
}

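/*
 * Build the NUMA topology from the device tree: find the associativity
 * depth, online a node for every cpu and memory region with usable
 * associativity information, and hand each memory range to memblock.
 * Returns 0 on success, a negative value if no usable NUMA
 * information is available.
 */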
static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

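/*
 * Fall back to a single node spanning all of memory when no usable
 * NUMA information was found (or NUMA was disabled); fake NUMA nodes
 * requested on the command line are still honoured.
 */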
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

static struct notifier_block ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	if (spanned_pages)
		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
			nid, start_pfn << PAGE_SHIFT,
			(end_pfn << PAGE_SHIFT) - 1);
	else
		pr_info("Initmem setup node %d\n", nid);

	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

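/*
 * Boot-time entry point for NUMA initialization: size memory, parse
 * the device tree (or fall back to a single node), allocate per-node
 * data and seed the cpu-to-node mapping for all present cpus.
 */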
void __init initmem_init(void)
{
	int nid, cpu;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	memblock_dump_all();

	/*
	 * Reduce the possible NUMA nodes to the online NUMA nodes,
	 * since we do not support node hotplug. This ensures that we
	 * lower the maximum NUMA node ID to what is actually present.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();
	register_cpu_notifier(&ppc64_numa_nb);
	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 */
	for_each_present_cpu(cpu) {
		numa_setup_cpu((unsigned long)cpu);
	}
}

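/*
 * Handle the "numa=off", "numa=debug" and "numa=fake=<boundaries>"
 * early command line options.
 */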
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "off")) {
		pr_info("Disabling topology updates\n");
		topology_updates_enabled = false;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const __be32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const __be32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Snapshot the hypervisor's current associativity change counters for
 * each cpu so that subsequent changes can be detected.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;

		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}

static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled)
		return 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, the updates list
		 * will be too short. Skip this update now and flag the
		 * siblings so the whole core is handled next time.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
			if (i < weight)
				ud->next = &updates[i];
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d "
					  "to %d\n", ud->cpu,
					  ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is the same as the old
	 * one), skip invoking update_cpu_topology() via stop-machine(). This
	 * is necessary (and not just a fast-path optimization) since
	 * stop-machine can end up electing a random CPU to run
	 * update_cpu_topology(), and thus trick us into setting up incorrect
	 * cpu-node mappings (since 'updates' is kzalloc()'ed).
	 *
	 * For the same reason, we also skip all the subsequent updating.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	stop_machine(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

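/*
 * Timer callback: schedule a topology update if any associativity
 * change is pending, and re-arm the timer while VPHN polling is on.
 */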
static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

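/*
 * Flag all threads in the given core for a topology update and arm
 * the timer that will process it.
 */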
static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;

			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Stop polling and listening for associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

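/*
 * /proc/powerpc/topology_updates reports whether topology-update
 * polling is active; writing "on" or "off" starts or stops it.
 */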
static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};

static int topology_update_init(void)
{
	/* Do not poll for changes if disabled at boot */
	if (topology_updates_enabled)
		start_topology_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
		return -ENOMEM;

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
