/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)
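
/*
 * Worked sizes, assuming 4 KiB pages: sizeof(struct profile_hit) is 8
 * bytes, so each per-cpu page holds NR_PROFILE_HIT = 4096/8 = 512
 * entries, organized as NR_PROFILE_GRP = 512/8 = 64 groups of
 * PROFILE_GRPSZ = 8 slots each.
 */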

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			/* only shifts within [0, BITS_PER_LONG) make sense */
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel sleep profiling enabled (shift: %lu)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel schedule profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel KVM profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %lu)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
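
/*
 * Illustrative boot parameters, matching the parser above (the number
 * sets prof_shift, i.e. each bucket covers 2^prof_shift bytes of text):
 *
 *	profile=2		classic cpu-time profiling
 *	profile=schedule,1	profile schedule() call points
 *	profile=sleep,1		profile sleep times (CONFIG_SCHEDSTATS only)
 *	profile=kvm,1		profile VM exits (KVM_PROFILING)
 */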

int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len * sizeof(atomic_t);
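
	/*
	 * Illustrative sizing: with 8 MiB of kernel text and profile=2
	 * (prof_shift == 2), prof_len is 2M slots, i.e. 8 MiB of
	 * buffer assuming a 4-byte atomic_t.
	 */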

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

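/*
 * Returns 1 when some handler on the task_free notifier chain answered
 * NOTIFY_OK, i.e. claimed the task; callers are then expected to leave
 * freeing of the task_struct to that handler.
 */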
int profile_handoff_task(struct task_struct *task)
{
	int ret;

	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPIs all cpus to request that they
 * flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * reason for having a second hashtable is to avoid the cacheline
 * contention that flushing pending profile hits (required for the
 * accuracy of reported hits) would otherwise cause, which would
 * resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot;
 * each entry holds the number of pending hits for that slot on one
 * cpu. When a hashtable overflows, all pending hits are accounted to
 * their corresponding profile buffer slots with atomic_add() and the
 * hashtable is emptied. As numerous pending hits may be accounted to
 * a single profile buffer slot in one hashtable entry, this amortizes
 * a number of atomic profile buffer increments likely to be far
 * larger than the number of entries in the hashtable, particularly
 * since the number of distinct profile buffer positions to which hits
 * are accounted during short intervals (e.g. several seconds) is
 * usually very small. Exclusion from buffer flipping is provided by
 * interrupt disablement (note that for SCHED_PROFILING or
 * SLEEP_PROFILING profile_hit() may be called from process context).
 *
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full table of finite collision
 * chains, not just pairs of them.
 *
 * -- nyc
 */
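
/*
 * A worked probe example (illustrative, assuming 4 KiB pages: 512
 * slots in 64 groups of 8): for slot index pc == 0x1234,
 * primary == (0x1234 & 63) << 3 == 416 and
 * secondary == (~(0x1234 << 1) & 63) << 3 == 184. Since pc << 1
 * always has bit 0 clear, ~(pc << 1) always has bit 0 set, so
 * secondary is an odd multiple of PROFILE_GRPSZ; NR_PROFILE_HIT being
 * a power of two, the stride i = (i + secondary) & (NR_PROFILE_HIT - 1)
 * used in do_profile_hits() therefore visits every group exactly once
 * before returning to primary.
 */
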
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];

		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];

		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * Stage hits bound for the global profile buffer in a per-CPU
	 * queue, reducing the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = __alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = __alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}
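
/*
 * The mask is parsed by cpumask_parse_user(), i.e. written as a hex
 * cpumask. Illustrative usage:
 *
 *	echo 3 > /proc/irq/prof_cpu_mask	# profile cpus 0-1 only
 */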

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
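/*
 * Concretely: the first sizeof(unsigned int) bytes of the file hold
 * sample_step (1 << prof_shift), followed by prof_len atomic_t-sized
 * counters (this code assumes sizeof(atomic_t) == sizeof(unsigned
 * int)), one per sample_step bytes of kernel text starting at _stext.
 * E.g. "readprofile -m System.map" consumes this format.
 */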
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
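/*
 * Illustrative usage:
 *
 *	echo > /proc/profile		# reset all counters
 *
 * whereas writing exactly sizeof(int) bytes is taken as a new
 * multiplier for setup_profiling_timer().
 */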
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = __alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = __alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;
	int err = 0;

	if (!prof_on)
		return 0;

	cpu_notifier_register_begin();

	if (create_hash_tables()) {
		err = -ENOMEM;
		goto out;
	}

	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		goto out;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
	__hotcpu_notifier(profile_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */