This source file includes the following definitions:
- pfm_put_task
- pfm_protect_ctx_ctxsw
- pfm_unprotect_ctx_ctxsw
- pfmfs_init_fs_context
- pfm_clear_psr_pp
- pfm_set_psr_pp
- pfm_clear_psr_up
- pfm_set_psr_up
- pfm_get_psr
- pfm_set_psr_l
- pfm_freeze_pmu
- pfm_unfreeze_pmu
- pfm_restore_ibrs
- pfm_restore_dbrs
- pfm_read_soft_counter
- pfm_write_soft_counter
- pfm_get_new_msg
- pfm_get_next_msg
- pfm_reset_msgq
- pfm_context_alloc
- pfm_context_free
- pfm_mask_monitoring
- pfm_restore_monitoring
- pfm_save_pmds
- pfm_restore_pmds
- pfm_copy_pmds
- pfm_copy_pmcs
- pfm_restore_pmcs
- pfm_uuid_cmp
- pfm_buf_fmt_exit
- pfm_buf_fmt_getsize
- pfm_buf_fmt_validate
- pfm_buf_fmt_init
- pfm_buf_fmt_restart
- pfm_buf_fmt_restart_active
- __pfm_find_buffer_fmt
- pfm_find_buffer_fmt
- pfm_register_buffer_fmt
- pfm_unregister_buffer_fmt
- pfm_reserve_session
- pfm_unreserve_session
- pfm_remove_smpl_mapping
- pfm_free_smpl_buffer
- pfm_exit_smpl_buffer
- init_pfm_fs
- pfm_read
- pfm_write
- pfm_poll
- pfm_ioctl
- pfm_do_fasync
- pfm_fasync
- pfm_syswide_force_stop
- pfm_syswide_cleanup_other_cpu
- pfm_flush
- pfm_close
- pfmfs_dname
- pfm_alloc_file
- pfm_remap_buffer
- pfm_smpl_buffer_alloc
- pfm_bad_permissions
- pfarg_is_sane
- pfm_setup_buffer_fmt
- pfm_reset_pmu_state
- pfm_ctx_getsize
- pfm_task_incompatible
- pfm_get_task
- pfm_context_create
- pfm_new_counter_value
- pfm_reset_regs_masked
- pfm_reset_regs
- pfm_write_pmcs
- pfm_write_pmds
- pfm_read_pmds
- pfm_mod_write_pmcs
- pfm_mod_read_pmds
- pfm_use_debug_registers
- pfm_release_debug_registers
- pfm_restart
- pfm_debug
- pfm_write_ibr_dbr
- pfm_write_ibrs
- pfm_write_dbrs
- pfm_mod_write_ibrs
- pfm_mod_write_dbrs
- pfm_get_features
- pfm_stop
- pfm_start
- pfm_get_pmc_reset
- pfm_check_task_exist
- pfm_context_load
- pfm_context_unload
- pfm_exit_thread
- pfm_check_task_state
- sys_perfmonctl
- pfm_resume_after_ovfl
- pfm_context_force_terminate
- pfm_handle_work
- pfm_notify_user
- pfm_ovfl_notify_user
- pfm_end_notify_user
- pfm_overflow_handler
- pfm_do_interrupt_handler
- pfm_interrupt_handler
- pfm_proc_start
- pfm_proc_next
- pfm_proc_stop
- pfm_proc_show_header
- pfm_proc_show
- pfm_syst_wide_update_task
- pfm_force_cleanup
- pfm_save_regs
- pfm_save_regs
- pfm_lazy_save_regs
- pfm_load_regs
- pfm_load_regs
- pfm_flush_pmds
- pfm_alt_save_pmu_state
- pfm_alt_restore_pmu_state
- pfm_install_alt_pmu_interrupt
- pfm_remove_alt_pmu_interrupt
- pfm_probe_pmu
- pfm_init
- pfm_init_percpu
- dump_pmu_state
- pfm_inherit
- sys_perfmonctl
1 /*
2  * This file implements the perfmon-2 subsystem which is used
3  * to program the IA-64 Performance Monitoring Unit (PMU).
4  *
5  * The initial version of perfmon.c was written by
6  * Ganesh Venkitachalam, IBM Corp.
7  *
8  * Then it was modified for perfmon-1.x by Stephane Eranian and
9  * David Mosberger, Hewlett Packard Co.
10  *
11  * Version Perfmon-2.x is a rewrite of perfmon-1.x
12  * by Stephane Eranian, Hewlett Packard Co.
13  *
14  * Copyright (C) 1999-2005  Hewlett Packard Co
15  *               Stephane Eranian <eranian@hpl.hp.com>
16  *               David Mosberger-Tang <davidm@hpl.hp.com>
17  *
18  * More information about perfmon available at:
19  *	http://perfmon.sourceforge.net
20  */
21
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/sched/task.h>
27 #include <linux/sched/task_stack.h>
28 #include <linux/interrupt.h>
29 #include <linux/proc_fs.h>
30 #include <linux/seq_file.h>
31 #include <linux/init.h>
32 #include <linux/vmalloc.h>
33 #include <linux/mm.h>
34 #include <linux/sysctl.h>
35 #include <linux/list.h>
36 #include <linux/file.h>
37 #include <linux/poll.h>
38 #include <linux/vfs.h>
39 #include <linux/smp.h>
40 #include <linux/pagemap.h>
41 #include <linux/mount.h>
42 #include <linux/pseudo_fs.h>
43 #include <linux/bitops.h>
44 #include <linux/capability.h>
45 #include <linux/rcupdate.h>
46 #include <linux/completion.h>
47 #include <linux/tracehook.h>
48 #include <linux/slab.h>
49 #include <linux/cpu.h>
50
51 #include <asm/errno.h>
52 #include <asm/intrinsics.h>
53 #include <asm/page.h>
54 #include <asm/perfmon.h>
55 #include <asm/processor.h>
56 #include <asm/signal.h>
57 #include <linux/uaccess.h>
58 #include <asm/delay.h>
59
60 #ifdef CONFIG_PERFMON
61 /*
62  * perfmon context state
63  */
64 #define PFM_CTX_UNLOADED 1
65 #define PFM_CTX_LOADED 2
66 #define PFM_CTX_MASKED 3
67 #define PFM_CTX_ZOMBIE 4
68
69 #define PFM_INVALID_ACTIVATION (~0UL)
70
71 #define PFM_NUM_PMC_REGS 64
72 #define PFM_NUM_PMD_REGS 64
73
74 /*
75  * depth of the message queue
76  */
77 #define PFM_MAX_MSGS 32
78 #define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
79
80 /*
81  * type of a PMU register (bitmask).
82  * bitmask structure:
83  *	bit0   : register implemented
84  *	bit1   : end marker
85  *	bit2-3 : reserved
86  *	bit4   : pmc has pmc.pm
87  *	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
88  *	bit6-7 : register type
89  *	bit8-31: reserved
90  */
91 #define PFM_REG_NOTIMPL 0x0
92 #define PFM_REG_IMPL 0x1
93 #define PFM_REG_END 0x2
94 #define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL)
95 #define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR)
96 #define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL)
97 #define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL)
98 #define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL)
99
100 #define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
101 #define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
102
103 #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
104
105
106 #define PMC_IS_IMPL(i) (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
107 #define PMD_IS_IMPL(i) (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
108
109 /* XXX: these tests assume that register i is implemented */
110 #define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
111 #define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
112 #define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
113 #define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
114
115 #define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
116 #define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
117 #define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
118 #define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
119
120 #define PFM_NUM_IBRS IA64_NUM_DBG_REGS
121 #define PFM_NUM_DBRS IA64_NUM_DBG_REGS
122
123 #define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
124 #define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
125 #define PFM_CTX_TASK(h) (h)->ctx_task
126
127 #define PMU_PMC_OI 5
128
129
130 #define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
131 #define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
132
133 #define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
134
135 #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
136 #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
137 #define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
138 #define PFM_CODE_RR 0
139 #define PFM_DATA_RR 1
140
141 #define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
142 #define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
143 #define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
144
145 #define RDEP(x) (1UL<<(x))
146 /*
147  * context protection macros
148  * in SMP:
149  *	- we need to protect against CPU concurrency (spin_lock)
150  *	- we need to protect against PMU overflow interrupts (local_irq_disable)
151  * in UP:
152  *	- we need to protect against PMU overflow interrupts (local_irq_disable)
153  *
154  * spin_lock_irqsave()/spin_unlock_irqrestore():
155  *	in SMP: local_irq_disable + spin_lock
156  *	in UP : local_irq_disable
157  *
158  * spin_lock()/spin_unlock():
159  *	in UP : removed automatically
160  *	in SMP: protect against context accesses from other CPUs; interrupts
161  *	        are not masked. This is useful for the PMU interrupt handler
162  *	        because we know we cannot get PMU concurrency by construction.
163  */
164
165 #define PROTECT_CTX(c, f) \
166 do { \
167 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
168 spin_lock_irqsave(&(c)->ctx_lock, f); \
169 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
170 } while(0)
171
172 #define UNPROTECT_CTX(c, f) \
173 do { \
174 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
175 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
176 } while(0)
177
178 #define PROTECT_CTX_NOPRINT(c, f) \
179 do { \
180 spin_lock_irqsave(&(c)->ctx_lock, f); \
181 } while(0)
182
183
184 #define UNPROTECT_CTX_NOPRINT(c, f) \
185 do { \
186 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
187 } while(0)
188
189
190 #define PROTECT_CTX_NOIRQ(c) \
191 do { \
192 spin_lock(&(c)->ctx_lock); \
193 } while(0)
194
195 #define UNPROTECT_CTX_NOIRQ(c) \
196 do { \
197 spin_unlock(&(c)->ctx_lock); \
198 } while(0)
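/*
 * Illustrative usage sketch: callers such as pfm_read() below bracket
 * every access to the context with the irq-safe pair:
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);
 *	... inspect or modify ctx ...
 *	UNPROTECT_CTX(ctx, flags);
 *
 * The _NOIRQ variants are reserved for paths where interrupts are
 * already disabled, e.g. the context-switch code.
 */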
199
200
201 #ifdef CONFIG_SMP
202
203 #define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
204 #define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
205 #define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
206
207 #else
208 #define SET_ACTIVATION(c) do {} while(0)
209 #define GET_ACTIVATION() do {} while(0)
210 #define INC_ACTIVATION() do {} while(0)
211 #endif
212
213 #define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
214 #define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
215 #define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
216
217 #define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
218 #define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
219
220 #define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
221
222 /*
223  * cmp0 must be the value of pmc0
224  */
225 #define PMC0_HAS_OVFL(cmp0) ((cmp0) & ~0x1UL)
226
227 #define PFMFS_MAGIC 0xa0b4d889
228
229
230
231
232 #define PFM_DEBUGGING 1
233 #ifdef PFM_DEBUGGING
234 #define DPRINT(a) \
235 do { \
236 if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
237 } while (0)
238
239 #define DPRINT_ovfl(a) \
240 do { \
241 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
242 } while (0)
243 #endif
244
245 /*
246  * 64-bit software-maintained counter: 'val' accumulates the bits that
247  * do not fit in the hardware PMD, 'lval' is the last reload value,
248  * and 'seed'/'mask' drive the optional randomized reset.
249  */
250 typedef struct {
251 unsigned long val;
252 unsigned long lval;
253 unsigned long long_reset;
254 unsigned long short_reset;
255 unsigned long reset_pmds[4];
256 unsigned long smpl_pmds[4];
257 unsigned long seed;
258 unsigned long mask;
259 unsigned int flags;
260 unsigned long eventid;
261 } pfm_counter_t;
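/*
 * Sketch of the soft/hard split implemented by pfm_read_soft_counter()
 * and pfm_write_soft_counter() further down (illustrative only):
 *
 *	full 64-bit count = ctx_pmds[i].val              // software high bits
 *	                  + (ia64_get_pmd(i) & ovfl_val) // hardware low bits
 *
 * where ovfl_val = 2^(hardware counter width) - 1 for the host PMU.
 */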
262
263 /*
264  * context flags
265  */
266 typedef struct {
267 unsigned int block:1;
268 unsigned int system:1;
269 unsigned int using_dbreg:1;
270 unsigned int is_sampling:1;
271 unsigned int excl_idle:1;
272 unsigned int going_zombie:1;
273 unsigned int trap_reason:2;
274 unsigned int no_msg:1;
275 unsigned int can_restart:1;
276 unsigned int reserved:22;
277 } pfm_context_flags_t;
278
279 #define PFM_TRAP_REASON_NONE 0x0
280 #define PFM_TRAP_REASON_BLOCK 0x1
281 #define PFM_TRAP_REASON_RESET 0x2
282
283
284 /*
285  * perfmon context: encapsulates all the state of one monitoring session
286  */
287
288 typedef struct pfm_context {
289 spinlock_t ctx_lock;
290
291 pfm_context_flags_t ctx_flags;
292 unsigned int ctx_state;
293
294 struct task_struct *ctx_task;
295
296 unsigned long ctx_ovfl_regs[4];
297
298 struct completion ctx_restart_done;
299
300 unsigned long ctx_used_pmds[4];
301 unsigned long ctx_all_pmds[4];
302 unsigned long ctx_reload_pmds[4];
303
304 unsigned long ctx_all_pmcs[4];
305 unsigned long ctx_reload_pmcs[4];
306 unsigned long ctx_used_monitors[4];
307
308 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS];
309
310 unsigned int ctx_used_ibrs[1];
311 unsigned int ctx_used_dbrs[1];
312 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS];
313 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS];
314
315 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS];
316
317 unsigned long th_pmcs[PFM_NUM_PMC_REGS];
318 unsigned long th_pmds[PFM_NUM_PMD_REGS];
319
320 unsigned long ctx_saved_psr_up;
321
322 unsigned long ctx_last_activation;
323 unsigned int ctx_last_cpu;
324 unsigned int ctx_cpu;
325
326 int ctx_fd;
327 pfm_ovfl_arg_t ctx_ovfl_arg;
328
329 pfm_buffer_fmt_t *ctx_buf_fmt;
330 void *ctx_smpl_hdr;
331 unsigned long ctx_smpl_size;
332 void *ctx_smpl_vaddr;
333
334 wait_queue_head_t ctx_msgq_wait;
335 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
336 int ctx_msgq_head;
337 int ctx_msgq_tail;
338 struct fasync_struct *ctx_async_queue;
339
340 wait_queue_head_t ctx_zombieq;
341 } pfm_context_t;
342
343
344
345
346
347 #define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
348
349 #define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
350
351 #ifdef CONFIG_SMP
352 #define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
353 #define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
354 #else
355 #define SET_LAST_CPU(ctx, v) do {} while(0)
356 #define GET_LAST_CPU(ctx) do {} while(0)
357 #endif
358
359
360 #define ctx_fl_block ctx_flags.block
361 #define ctx_fl_system ctx_flags.system
362 #define ctx_fl_using_dbreg ctx_flags.using_dbreg
363 #define ctx_fl_is_sampling ctx_flags.is_sampling
364 #define ctx_fl_excl_idle ctx_flags.excl_idle
365 #define ctx_fl_going_zombie ctx_flags.going_zombie
366 #define ctx_fl_trap_reason ctx_flags.trap_reason
367 #define ctx_fl_no_msg ctx_flags.no_msg
368 #define ctx_fl_can_restart ctx_flags.can_restart
369
370 #define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0)
371 #define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
372
373
374 /*
375  * global information about all sessions
376  */
377 typedef struct {
378 spinlock_t pfs_lock;
379
380 unsigned int pfs_task_sessions;
381 unsigned int pfs_sys_sessions;
382 unsigned int pfs_sys_use_dbregs;
383 unsigned int pfs_ptrace_use_dbregs;
384 struct task_struct *pfs_sys_session[NR_CPUS];
385 } pfm_session_t;
386
387
388 /*
389  * information about a PMC or PMD.
390  * dep_pmd[]/dep_pmc[]: bitmasks of dependent PMD/PMC registers
391  */
392 typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
393 typedef struct {
394 unsigned int type;
395 int pm_pos;
396 unsigned long default_value;
397 unsigned long reserved_mask;
398 pfm_reg_check_t read_check;
399 pfm_reg_check_t write_check;
400 unsigned long dep_pmd[4];
401 unsigned long dep_pmc[4];
402 } pfm_reg_desc_t;
403
404
405 #define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
406
407 /*
408  * This structure is initialized at boot time and contains
409  * a description of the PMU main characteristics.
410  *
411  * If the probe function is defined, detection is based
412  * on its return value:
413  *	- 0 means recognized PMU
414  *	- anything else means not supported
415  * When the probe function is not defined, the pmu_family field
416  * is used and it must match the host CPU family such that:
417  *	- cpu->family & config->pmu_family != 0
418  */
419 typedef struct {
420 unsigned long ovfl_val;
421
422 pfm_reg_desc_t *pmc_desc;
423 pfm_reg_desc_t *pmd_desc;
424
425 unsigned int num_pmcs;
426 unsigned int num_pmds;
427 unsigned long impl_pmcs[4];
428 unsigned long impl_pmds[4];
429
430 char *pmu_name;
431 unsigned int pmu_family;
432 unsigned int flags;
433 unsigned int num_ibrs;
434 unsigned int num_dbrs;
435 unsigned int num_counters;
436 int (*probe)(void);
437 unsigned int use_rr_dbregs:1;
438 } pmu_config_t;
439
440
441
442 #define PFM_PMU_IRQ_RESEND 1
443
444
445 /* debug register related type definitions */
446
447 typedef struct {
448 unsigned long ibr_mask:56;
449 unsigned long ibr_plm:4;
450 unsigned long ibr_ig:3;
451 unsigned long ibr_x:1;
452 } ibr_mask_reg_t;
453
454 typedef struct {
455 unsigned long dbr_mask:56;
456 unsigned long dbr_plm:4;
457 unsigned long dbr_ig:2;
458 unsigned long dbr_w:1;
459 unsigned long dbr_r:1;
460 } dbr_mask_reg_t;
461
462 typedef union {
463 unsigned long val;
464 ibr_mask_reg_t ibr;
465 dbr_mask_reg_t dbr;
466 } dbreg_t;
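/*
 * Illustrative sketch: dbreg_t lets code such as pfm_write_ibr_dbr()
 * manipulate a raw 64-bit debug register through named bitfields.
 * 'value' below stands for a hypothetical user-supplied quantity:
 *
 *	dbreg_t d;
 *	d.val       = value;	// raw 64-bit form
 *	d.ibr.ibr_x = 0;	// e.g. clear the execute-qualifier bit
 *	value       = d.val;	// write back the raw form
 */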
467
468
469 /*
470  * perfmon command descriptions
471  */
472 typedef struct {
473 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
474 char *cmd_name;
475 int cmd_flags;
476 unsigned int cmd_narg;
477 size_t cmd_argsize;
478 int (*cmd_getsize)(void *arg, size_t *sz);
479 } pfm_cmd_desc_t;
480
481 #define PFM_CMD_FD 0x01
482 #define PFM_CMD_ARG_READ 0x02
483 #define PFM_CMD_ARG_RW 0x04
484 #define PFM_CMD_STOP 0x08
485
486
487 #define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
488 #define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
489 #define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
490 #define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
491 #define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
492
493 #define PFM_CMD_ARG_MANY -1
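/*
 * Illustrative dispatch sketch (see sys_perfmonctl() below): the command
 * number indexes pfm_cmd_tab[] and is executed roughly as:
 *
 *	func = pfm_cmd_tab[cmd].cmd_func;
 *	if (PFM_CMD_READ_ARG(cmd)) copy_from_user(args_k, arg, sz);
 *	ret = (*func)(ctx, args_k, count, regs);
 *	if (PFM_CMD_RW_ARG(cmd))   copy_to_user(arg, args_k, sz);
 */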
494
495 typedef struct {
496 unsigned long pfm_spurious_ovfl_intr_count;
497 unsigned long pfm_replay_ovfl_intr_count;
498 unsigned long pfm_ovfl_intr_count;
499 unsigned long pfm_ovfl_intr_cycles;
500 unsigned long pfm_ovfl_intr_cycles_min;
501 unsigned long pfm_ovfl_intr_cycles_max;
502 unsigned long pfm_smpl_handler_calls;
503 unsigned long pfm_smpl_handler_cycles;
504 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
505 } pfm_stats_t;
506
507
508
509
510 static pfm_stats_t pfm_stats[NR_CPUS];
511 static pfm_session_t pfm_sessions;
512
513 static DEFINE_SPINLOCK(pfm_alt_install_check);
514 static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
515
516 static struct proc_dir_entry *perfmon_dir;
517 static pfm_uuid_t pfm_null_uuid = {0,};
518
519 static DEFINE_SPINLOCK(pfm_buffer_fmt_lock);
520 static LIST_HEAD(pfm_buffer_fmt_list);
521
522 static pmu_config_t *pmu_conf;
523
524
525 pfm_sysctl_t pfm_sysctl;
526 EXPORT_SYMBOL(pfm_sysctl);
527
528 static struct ctl_table pfm_ctl_table[] = {
529 {
530 .procname = "debug",
531 .data = &pfm_sysctl.debug,
532 .maxlen = sizeof(int),
533 .mode = 0666,
534 .proc_handler = proc_dointvec,
535 },
536 {
537 .procname = "debug_ovfl",
538 .data = &pfm_sysctl.debug_ovfl,
539 .maxlen = sizeof(int),
540 .mode = 0666,
541 .proc_handler = proc_dointvec,
542 },
543 {
544 .procname = "fastctxsw",
545 .data = &pfm_sysctl.fastctxsw,
546 .maxlen = sizeof(int),
547 .mode = 0600,
548 .proc_handler = proc_dointvec,
549 },
550 {
551 .procname = "expert_mode",
552 .data = &pfm_sysctl.expert_mode,
553 .maxlen = sizeof(int),
554 .mode = 0600,
555 .proc_handler = proc_dointvec,
556 },
557 {}
558 };
559 static struct ctl_table pfm_sysctl_dir[] = {
560 {
561 .procname = "perfmon",
562 .mode = 0555,
563 .child = pfm_ctl_table,
564 },
565 {}
566 };
567 static struct ctl_table pfm_sysctl_root[] = {
568 {
569 .procname = "kernel",
570 .mode = 0555,
571 .child = pfm_sysctl_dir,
572 },
573 {}
574 };
575 static struct ctl_table_header *pfm_sysctl_header;
576
577 static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
578
579 #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
580 #define pfm_get_cpu_data(a,b) per_cpu(a, b)
581
582 static inline void
583 pfm_put_task(struct task_struct *task)
584 {
585 if (task != current) put_task_struct(task);
586 }
587
588 static inline unsigned long
589 pfm_protect_ctx_ctxsw(pfm_context_t *x)
590 {
591 spin_lock(&(x)->ctx_lock);
592 return 0UL;
593 }
594
595 static inline void
596 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
597 {
598 spin_unlock(&(x)->ctx_lock);
599 }
600
601
602 static const struct dentry_operations pfmfs_dentry_operations;
603
604 static int pfmfs_init_fs_context(struct fs_context *fc)
605 {
606 struct pseudo_fs_context *ctx = init_pseudo(fc, PFMFS_MAGIC);
607 if (!ctx)
608 return -ENOMEM;
609 ctx->dops = &pfmfs_dentry_operations;
610 return 0;
611 }
612
613 static struct file_system_type pfm_fs_type = {
614 .name = "pfmfs",
615 .init_fs_context = pfmfs_init_fs_context,
616 .kill_sb = kill_anon_super,
617 };
618 MODULE_ALIAS_FS("pfmfs");
619
620 DEFINE_PER_CPU(unsigned long, pfm_syst_info);
621 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
622 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
623 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
624 EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
625
626
627
628 static const struct file_operations pfm_file_ops;
629
630
631
632
633 #ifndef CONFIG_SMP
634 static void pfm_lazy_save_regs (struct task_struct *ta);
635 #endif
636
637 void dump_pmu_state(const char *);
638 static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
639
640 #include "perfmon_itanium.h"
641 #include "perfmon_mckinley.h"
642 #include "perfmon_montecito.h"
643 #include "perfmon_generic.h"
644
645 static pmu_config_t *pmu_confs[]={
646 &pmu_conf_mont,
647 &pmu_conf_mck,
648 &pmu_conf_ita,
649 &pmu_conf_gen,
650 NULL
651 };
652
653
654 static int pfm_end_notify_user(pfm_context_t *ctx);
655
656 static inline void
657 pfm_clear_psr_pp(void)
658 {
659 ia64_rsm(IA64_PSR_PP);
660 ia64_srlz_i();
661 }
662
663 static inline void
664 pfm_set_psr_pp(void)
665 {
666 ia64_ssm(IA64_PSR_PP);
667 ia64_srlz_i();
668 }
669
670 static inline void
671 pfm_clear_psr_up(void)
672 {
673 ia64_rsm(IA64_PSR_UP);
674 ia64_srlz_i();
675 }
676
677 static inline void
678 pfm_set_psr_up(void)
679 {
680 ia64_ssm(IA64_PSR_UP);
681 ia64_srlz_i();
682 }
683
684 static inline unsigned long
685 pfm_get_psr(void)
686 {
687 unsigned long tmp;
688 tmp = ia64_getreg(_IA64_REG_PSR);
689 ia64_srlz_i();
690 return tmp;
691 }
692
693 static inline void
694 pfm_set_psr_l(unsigned long val)
695 {
696 ia64_setreg(_IA64_REG_PSR_L, val);
697 ia64_srlz_i();
698 }
699
700 static inline void
701 pfm_freeze_pmu(void)
702 {
703 ia64_set_pmc(0,1UL);
704 ia64_srlz_d();
705 }
706
707 static inline void
708 pfm_unfreeze_pmu(void)
709 {
710 ia64_set_pmc(0,0UL);
711 ia64_srlz_d();
712 }
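/*
 * pmc0 semantics (illustrative summary): bit 0 is the freeze bit, so
 * writing 1UL above freezes all counters and writing 0UL resumes them;
 * the remaining bits report per-counter overflow status, which is why
 * PMC0_HAS_OVFL() tests pmc0 & ~0x1UL.
 */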
713
714 static inline void
715 pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
716 {
717 int i;
718
719 for (i=0; i < nibrs; i++) {
720 ia64_set_ibr(i, ibrs[i]);
721 ia64_dv_serialize_instruction();
722 }
723 ia64_srlz_i();
724 }
725
726 static inline void
727 pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
728 {
729 int i;
730
731 for (i=0; i < ndbrs; i++) {
732 ia64_set_dbr(i, dbrs[i]);
733 ia64_dv_serialize_data();
734 }
735 ia64_srlz_d();
736 }
737
738
739 /* read a full 64-bit software counter: soft high bits + hardware low bits */
740
741 static inline unsigned long
742 pfm_read_soft_counter(pfm_context_t *ctx, int i)
743 {
744 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
745 }
746
747
748 /* split a 64-bit value between the software counter and the hardware PMD */
749
750 static inline void
751 pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
752 {
753 unsigned long ovfl_val = pmu_conf->ovfl_val;
754
755 ctx->ctx_pmds[i].val = val & ~ovfl_val;
756
757 /*
758  * only the low ovfl_val bits are written to the hardware register
759  */
760 ia64_set_pmd(i, val & ovfl_val);
761 }
762
763 static pfm_msg_t *
764 pfm_get_new_msg(pfm_context_t *ctx)
765 {
766 int idx, next;
767
768 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
769
770 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
771 if (next == ctx->ctx_msgq_head) return NULL;
772
773 idx = ctx->ctx_msgq_tail;
774 ctx->ctx_msgq_tail = next;
775
776 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
777
778 return ctx->ctx_msgq+idx;
779 }
780
781 static pfm_msg_t *
782 pfm_get_next_msg(pfm_context_t *ctx)
783 {
784 pfm_msg_t *msg;
785
786 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
787
788 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
789
790
791
792
793 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
794
795
796
797
798 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
799
800 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
801
802 return msg;
803 }
804
805 static void
806 pfm_reset_msgq(pfm_context_t *ctx)
807 {
808 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
809 DPRINT(("ctx=%p msgq reset\n", ctx));
810 }
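/*
 * Message queue invariants (illustrative): ctx_msgq is a fixed ring of
 * PFM_MAX_MSGS slots. pfm_get_new_msg() advances the tail (producer) and
 * pfm_get_next_msg() advances the head (consumer), so:
 *
 *	empty: head == tail                         (PFM_CTXQ_EMPTY)
 *	full : (tail + 1) % PFM_MAX_MSGS == head    (one slot kept unused)
 */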
811
812 static pfm_context_t *
813 pfm_context_alloc(int ctx_flags)
814 {
815 pfm_context_t *ctx;
816
817
818 /*
819  * allocate the context descriptor; it must be freeable with interrupts disabled
820  */
821 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
822 if (ctx) {
823 DPRINT(("alloc ctx @%p\n", ctx));
824
825
826
827
828 spin_lock_init(&ctx->ctx_lock);
829
830
831
832
833 ctx->ctx_state = PFM_CTX_UNLOADED;
834
835
836
837
838 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
839 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
840 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
841
842
843
844
845
846
847
848
849 init_completion(&ctx->ctx_restart_done);
850
851
852
853
854 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
855 SET_LAST_CPU(ctx, -1);
856
857
858
859
860 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
861 init_waitqueue_head(&ctx->ctx_msgq_wait);
862 init_waitqueue_head(&ctx->ctx_zombieq);
863
864 }
865 return ctx;
866 }
867
868 static void
869 pfm_context_free(pfm_context_t *ctx)
870 {
871 if (ctx) {
872 DPRINT(("free ctx @%p\n", ctx));
873 kfree(ctx);
874 }
875 }
876
877 static void
878 pfm_mask_monitoring(struct task_struct *task)
879 {
880 pfm_context_t *ctx = PFM_GET_CTX(task);
881 unsigned long mask, val, ovfl_mask;
882 int i;
883
884 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
885
886 ovfl_mask = pmu_conf->ovfl_val;
887
888 /*
889  * monitoring is masked after an overflow with notification:
890  * stop the monitors but keep the context loaded.
891  *
892  * for each used counting PMD, fold the low (hardware) bits into the
893  * 64-bit software value so that a later read sees the full count.
894  */
906 mask = ctx->ctx_used_pmds[0];
907 for (i = 0; mask; i++, mask>>=1) {
908
909 if ((mask & 0x1) == 0) continue;
910 val = ia64_get_pmd(i);
911
912 if (PMD_IS_COUNTING(i)) {
913
914
915
916 ctx->ctx_pmds[i].val += (val & ovfl_mask);
917 } else {
918 ctx->ctx_pmds[i].val = val;
919 }
920 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
921 i,
922 ctx->ctx_pmds[i].val,
923 val & ovfl_mask));
924 }
925
926 /*
927  * mask monitoring by clearing the privilege level field (pmc.plm)
928  * of each used monitor PMC: the PMU then ignores its events
929  */
933 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
934 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
935 if ((mask & 0x1) == 0UL) continue;
936 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
937 ctx->th_pmcs[i] &= ~0xfUL;
938 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
939 }
940
941
942
943 ia64_srlz_d();
944 }
945
946
947 /*
948  * must always be called with task == current and with the
949  * context in MASKED state
950  */
951 static void
952 pfm_restore_monitoring(struct task_struct *task)
953 {
954 pfm_context_t *ctx = PFM_GET_CTX(task);
955 unsigned long mask, ovfl_mask;
956 unsigned long psr, val;
957 int i, is_system;
958
959 is_system = ctx->ctx_fl_system;
960 ovfl_mask = pmu_conf->ovfl_val;
961
962 if (task != current) {
963 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
964 return;
965 }
966 if (ctx->ctx_state != PFM_CTX_MASKED) {
967 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
968 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
969 return;
970 }
971 psr = pfm_get_psr();
972
973 /*
974  * monitoring is stopped while the registers are reprogrammed:
975  * system-wide sessions clear dcr.pp and psr.pp, per-task sessions
976  * clear psr.up; the saved psr is restored at the end.
977  */
982 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
983
984 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
985 pfm_clear_psr_pp();
986 } else {
987 pfm_clear_psr_up();
988 }
989
990 /* first, restore the PMDs */
991
992 mask = ctx->ctx_used_pmds[0];
993 for (i = 0; mask; i++, mask>>=1) {
994
995 if ((mask & 0x1) == 0) continue;
996
997 if (PMD_IS_COUNTING(i)) {
998
999
1000
1001
1002 val = ctx->ctx_pmds[i].val & ovfl_mask;
1003 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1004 } else {
1005 val = ctx->ctx_pmds[i].val;
1006 }
1007 ia64_set_pmd(i, val);
1008
1009 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1010 i,
1011 ctx->ctx_pmds[i].val,
1012 val));
1013 }
1014
1015
1016 /* then restore the used monitor PMCs with their original plm */
1017 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1018 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1019 if ((mask & 0x1) == 0UL) continue;
1020 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1021 ia64_set_pmc(i, ctx->th_pmcs[i]);
1022 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1023 task_pid_nr(task), i, ctx->th_pmcs[i]));
1024 }
1025 ia64_srlz_d();
1026
1027 /*
1028  * also restore the debug registers if this context uses them
1029  */
1031 if (ctx->ctx_fl_using_dbreg) {
1032 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1033 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1034 }
1035
1036
1037 /* finally, re-enable dcr.pp for system-wide sessions and restore psr */
1039 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1040
1041 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1042 ia64_srlz_i();
1043 }
1044 pfm_set_psr_l(psr);
1045 }
1046
1047 static inline void
1048 pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1049 {
1050 int i;
1051
1052 ia64_srlz_d();
1053
1054 for (i=0; mask; i++, mask>>=1) {
1055 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1056 }
1057 }
1058
1059
1060 /* reload PMD registers from an array of saved values */
1061
1062 static inline void
1063 pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1064 {
1065 int i;
1066 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1067
1068 for (i=0; mask; i++, mask>>=1) {
1069 if ((mask & 0x1) == 0) continue;
1070 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1071 ia64_set_pmd(i, val);
1072 }
1073 ia64_srlz_d();
1074 }
1075
1076
1077 /* propagate the PMD values from the context to the thread state */
1078
1079 static inline void
1080 pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1081 {
1082 unsigned long ovfl_val = pmu_conf->ovfl_val;
1083 unsigned long mask = ctx->ctx_all_pmds[0];
1084 unsigned long val;
1085 int i;
1086
1087 DPRINT(("mask=0x%lx\n", mask));
1088
1089 for (i=0; mask; i++, mask>>=1) {
1090
1091 val = ctx->ctx_pmds[i].val;
1092
1093
1094
1095
1096
1097
1098
1099 if (PMD_IS_COUNTING(i)) {
1100 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1101 val &= ovfl_val;
1102 }
1103 ctx->th_pmds[i] = val;
1104
1105 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1106 i,
1107 ctx->th_pmds[i],
1108 ctx->ctx_pmds[i].val));
1109 }
1110 }
1111
1112
1113 /* propagate the PMC values from the context to the thread state */
1114
1115 static inline void
1116 pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1117 {
1118 unsigned long mask = ctx->ctx_all_pmcs[0];
1119 int i;
1120
1121 DPRINT(("mask=0x%lx\n", mask));
1122
1123 for (i=0; mask; i++, mask>>=1) {
1124
1125 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1126 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1127 }
1128 }
1129
1130
1131
1132 static inline void
1133 pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1134 {
1135 int i;
1136
1137 for (i=0; mask; i++, mask>>=1) {
1138 if ((mask & 0x1) == 0) continue;
1139 ia64_set_pmc(i, pmcs[i]);
1140 }
1141 ia64_srlz_d();
1142 }
1143
1144 static inline int
1145 pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1146 {
1147 return memcmp(a, b, sizeof(pfm_uuid_t));
1148 }
1149
1150 static inline int
1151 pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1152 {
1153 int ret = 0;
1154 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1155 return ret;
1156 }
1157
1158 static inline int
1159 pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1160 {
1161 int ret = 0;
1162 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1163 return ret;
1164 }
1165
1166
1167 static inline int
1168 pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1169 int cpu, void *arg)
1170 {
1171 int ret = 0;
1172 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1173 return ret;
1174 }
1175
1176 static inline int
1177 pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1178 int cpu, void *arg)
1179 {
1180 int ret = 0;
1181 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1182 return ret;
1183 }
1184
1185 static inline int
1186 pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1187 {
1188 int ret = 0;
1189 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1190 return ret;
1191 }
1192
1193 static inline int
1194 pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1195 {
1196 int ret = 0;
1197 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1198 return ret;
1199 }
1200
1201 static pfm_buffer_fmt_t *
1202 __pfm_find_buffer_fmt(pfm_uuid_t uuid)
1203 {
1204 struct list_head * pos;
1205 pfm_buffer_fmt_t * entry;
1206
1207 list_for_each(pos, &pfm_buffer_fmt_list) {
1208 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1209 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1210 return entry;
1211 }
1212 return NULL;
1213 }
1214
1215
1216 /* find a buffer format based on its uuid */
1217
1218 static pfm_buffer_fmt_t *
1219 pfm_find_buffer_fmt(pfm_uuid_t uuid)
1220 {
1221 pfm_buffer_fmt_t * fmt;
1222 spin_lock(&pfm_buffer_fmt_lock);
1223 fmt = __pfm_find_buffer_fmt(uuid);
1224 spin_unlock(&pfm_buffer_fmt_lock);
1225 return fmt;
1226 }
1227
1228 int
1229 pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1230 {
1231 int ret = 0;
1232
1233
1234 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1235
1236
1237 if (fmt->fmt_handler == NULL) return -EINVAL;
1238
1239
1240
1241
1242
1243 spin_lock(&pfm_buffer_fmt_lock);
1244
1245 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1246 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1247 ret = -EBUSY;
1248 goto out;
1249 }
1250 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1251 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1252
1253 out:
1254 spin_unlock(&pfm_buffer_fmt_lock);
1255 return ret;
1256 }
1257 EXPORT_SYMBOL(pfm_register_buffer_fmt);
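/*
 * Illustrative registration sketch (hypothetical format module, not part
 * of this file): a sampling module fills in a pfm_buffer_fmt_t and
 * registers it; MY_FMT_UUID stands for a caller-chosen 16-byte id:
 *
 *	static pfm_buffer_fmt_t my_fmt = {
 *		.fmt_name    = "my-sampling-format",
 *		.fmt_uuid    = MY_FMT_UUID,
 *		.fmt_handler = my_ovfl_handler,	// mandatory, checked above
 *	};
 *
 *	ret = pfm_register_buffer_fmt(&my_fmt);
 *	...
 *	pfm_unregister_buffer_fmt(my_fmt.fmt_uuid);
 */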
1258
1259 int
1260 pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1261 {
1262 pfm_buffer_fmt_t *fmt;
1263 int ret = 0;
1264
1265 spin_lock(&pfm_buffer_fmt_lock);
1266
1267 fmt = __pfm_find_buffer_fmt(uuid);
1268 if (!fmt) {
1269 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1270 ret = -EINVAL;
1271 goto out;
1272 }
1273 list_del_init(&fmt->fmt_list);
1274 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1275
1276 out:
1277 spin_unlock(&pfm_buffer_fmt_lock);
1278 return ret;
1279
1280 }
1281 EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1282
1283 static int
1284 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1285 {
1286 unsigned long flags;
1287
1288
1289
1290 LOCK_PFS(flags);
1291
1292 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1293 pfm_sessions.pfs_sys_sessions,
1294 pfm_sessions.pfs_task_sessions,
1295 pfm_sessions.pfs_sys_use_dbregs,
1296 is_syswide,
1297 cpu));
1298
1299 if (is_syswide) {
1300 /*
1301  * cannot mix system-wide and per-task sessions
1302  */
1303 if (pfm_sessions.pfs_task_sessions > 0UL) {
1304 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1305 pfm_sessions.pfs_task_sessions));
1306 goto abort;
1307 }
1308
1309 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1310
1311 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1312
1313 pfm_sessions.pfs_sys_session[cpu] = task;
1314
1315 pfm_sessions.pfs_sys_sessions++;
1316
1317 } else {
1318 if (pfm_sessions.pfs_sys_sessions) goto abort;
1319 pfm_sessions.pfs_task_sessions++;
1320 }
1321
1322 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1323 pfm_sessions.pfs_sys_sessions,
1324 pfm_sessions.pfs_task_sessions,
1325 pfm_sessions.pfs_sys_use_dbregs,
1326 is_syswide,
1327 cpu));
1328
1329
1330 /* force idle into polling while a system-wide session is active */
1331
1332 cpu_idle_poll_ctrl(true);
1333
1334 UNLOCK_PFS(flags);
1335
1336 return 0;
1337
1338 error_conflict:
1339 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1340 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1341 cpu));
1342 abort:
1343 UNLOCK_PFS(flags);
1344
1345 return -EBUSY;
1346
1347 }
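/*
 * Session accounting summary (illustrative): pfm_sessions admits either
 * any number of per-task sessions (pfs_task_sessions > 0) or at most one
 * system-wide session per CPU (pfs_sys_session[cpu] != NULL), never both
 * at once; pfm_unreserve_session() below undoes exactly one reservation.
 */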
1348
1349 static int
1350 pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1351 {
1352 unsigned long flags;
1353
1354
1355
1356 LOCK_PFS(flags);
1357
1358 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1359 pfm_sessions.pfs_sys_sessions,
1360 pfm_sessions.pfs_task_sessions,
1361 pfm_sessions.pfs_sys_use_dbregs,
1362 is_syswide,
1363 cpu));
1364
1365
1366 if (is_syswide) {
1367 pfm_sessions.pfs_sys_session[cpu] = NULL;
1368
1369
1370
1371 if (ctx && ctx->ctx_fl_using_dbreg) {
1372 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1373 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1374 } else {
1375 pfm_sessions.pfs_sys_use_dbregs--;
1376 }
1377 }
1378 pfm_sessions.pfs_sys_sessions--;
1379 } else {
1380 pfm_sessions.pfs_task_sessions--;
1381 }
1382 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1383 pfm_sessions.pfs_sys_sessions,
1384 pfm_sessions.pfs_task_sessions,
1385 pfm_sessions.pfs_sys_use_dbregs,
1386 is_syswide,
1387 cpu));
1388
1389
1390 cpu_idle_poll_ctrl(false);
1391
1392 UNLOCK_PFS(flags);
1393
1394 return 0;
1395 }
1396
1397
1398 /*
1399  * remove the sampling buffer mapping from the user address space
1400  */
1401
1402 static int
1403 pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
1404 {
1405 struct task_struct *task = current;
1406 int r;
1407
1408
1409 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1410 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1411 return -EINVAL;
1412 }
1413
1414 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1415
1416
1417
1418
1419 r = vm_munmap((unsigned long)vaddr, size);
1420
1421 if (r != 0) {
1422 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1423 }
1424
1425 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1426
1427 return 0;
1428 }
1429
1430
1431
1432
1433 #if 0
1434 static int
1435 pfm_free_smpl_buffer(pfm_context_t *ctx)
1436 {
1437 pfm_buffer_fmt_t *fmt;
1438
1439 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1440
1441
1442
1443
1444 fmt = ctx->ctx_buf_fmt;
1445
1446 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1447 ctx->ctx_smpl_hdr,
1448 ctx->ctx_smpl_size,
1449 ctx->ctx_smpl_vaddr));
1450
1451 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1452
1453
1454
1455
1456 vfree(ctx->ctx_smpl_hdr);
1457
1458 ctx->ctx_smpl_hdr = NULL;
1459 ctx->ctx_smpl_size = 0UL;
1460
1461 return 0;
1462
1463 invalid_free:
1464 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1465 return -EINVAL;
1466 }
1467 #endif
1468
1469 static inline void
1470 pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1471 {
1472 if (fmt == NULL) return;
1473
1474 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1475
1476 }
1477
1478
1479 /*
1480  * pfmfs should never be mounted by userland - too much of a security
1481  * hassle. No operations are needed on the root directory, but we do
1482  * need a non-trivial d_name ("pfm:[inode]", see pfmfs_dname() below).
1483  */
1484 static struct vfsmount *pfmfs_mnt __read_mostly;
1485
1486 static int __init
1487 init_pfm_fs(void)
1488 {
1489 int err = register_filesystem(&pfm_fs_type);
1490 if (!err) {
1491 pfmfs_mnt = kern_mount(&pfm_fs_type);
1492 err = PTR_ERR(pfmfs_mnt);
1493 if (IS_ERR(pfmfs_mnt))
1494 unregister_filesystem(&pfm_fs_type);
1495 else
1496 err = 0;
1497 }
1498 return err;
1499 }
1500
1501 static ssize_t
1502 pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1503 {
1504 pfm_context_t *ctx;
1505 pfm_msg_t *msg;
1506 ssize_t ret;
1507 unsigned long flags;
1508 DECLARE_WAITQUEUE(wait, current);
1509 if (PFM_IS_FILE(filp) == 0) {
1510 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1511 return -EINVAL;
1512 }
1513
1514 ctx = filp->private_data;
1515 if (ctx == NULL) {
1516 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1517 return -EINVAL;
1518 }
1519
1520 /*
1521  * messages are delivered whole: refuse reads smaller than pfm_msg_t
1522  */
1523 if (size < sizeof(pfm_msg_t)) {
1524 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1525 return -EINVAL;
1526 }
1527
1528 PROTECT_CTX(ctx, flags);
1529
1530
1531
1532
1533 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1534
1535
1536 for(;;) {
1537
1538
1539
1540
1541 set_current_state(TASK_INTERRUPTIBLE);
1542
1543 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1544
1545 ret = 0;
1546 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1547
1548 UNPROTECT_CTX(ctx, flags);
1549
1550
1551
1552
1553 ret = -EAGAIN;
1554 if(filp->f_flags & O_NONBLOCK) break;
1555
1556
1557
1558
1559 if(signal_pending(current)) {
1560 ret = -EINTR;
1561 break;
1562 }
1563
1564
1565
1566 schedule();
1567
1568 PROTECT_CTX(ctx, flags);
1569 }
1570 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1571 set_current_state(TASK_RUNNING);
1572 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1573
1574 if (ret < 0) goto abort;
1575
1576 ret = -EINVAL;
1577 msg = pfm_get_next_msg(ctx);
1578 if (msg == NULL) {
1579 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1580 goto abort_locked;
1581 }
1582
1583 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1584
1585 ret = -EFAULT;
1586 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1587
1588 abort_locked:
1589 UNPROTECT_CTX(ctx, flags);
1590 abort:
1591 return ret;
1592 }
1593
1594 static ssize_t
1595 pfm_write(struct file *file, const char __user *ubuf,
1596 size_t size, loff_t *ppos)
1597 {
1598 DPRINT(("pfm_write called\n"));
1599 return -EINVAL;
1600 }
1601
1602 static __poll_t
1603 pfm_poll(struct file *filp, poll_table * wait)
1604 {
1605 pfm_context_t *ctx;
1606 unsigned long flags;
1607 __poll_t mask = 0;
1608
1609 if (PFM_IS_FILE(filp) == 0) {
1610 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1611 return 0;
1612 }
1613
1614 ctx = filp->private_data;
1615 if (ctx == NULL) {
1616 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1617 return 0;
1618 }
1619
1620
1621 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1622
1623 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1624
1625 PROTECT_CTX(ctx, flags);
1626
1627 if (PFM_CTXQ_EMPTY(ctx) == 0)
1628 mask = EPOLLIN | EPOLLRDNORM;
1629
1630 UNPROTECT_CTX(ctx, flags);
1631
1632 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1633
1634 return mask;
1635 }
1636
1637 static long
1638 pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1639 {
1640 DPRINT(("pfm_ioctl called\n"));
1641 return -EINVAL;
1642 }
1643
1644
1645
1646
1647 static inline int
1648 pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1649 {
1650 int ret;
1651
1652 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1653
1654 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1655 task_pid_nr(current),
1656 fd,
1657 on,
1658 ctx->ctx_async_queue, ret));
1659
1660 return ret;
1661 }
1662
1663 static int
1664 pfm_fasync(int fd, struct file *filp, int on)
1665 {
1666 pfm_context_t *ctx;
1667 int ret;
1668
1669 if (PFM_IS_FILE(filp) == 0) {
1670 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1671 return -EBADF;
1672 }
1673
1674 ctx = filp->private_data;
1675 if (ctx == NULL) {
1676 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1677 return -EBADF;
1678 }
1679
1680
1681
1682
1683
1684
1685
1686 ret = pfm_do_fasync(fd, filp, ctx, on);
1687
1688
1689 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1690 fd,
1691 on,
1692 ctx->ctx_async_queue, ret));
1693
1694 return ret;
1695 }
1696
1697 #ifdef CONFIG_SMP
1698
1699 /*
1700  * runs via smp_call_function_single() on the CPU that owns the
1701  * system-wide session; called exclusively from the close/flush path
1702  */
1703 static void
1704 pfm_syswide_force_stop(void *info)
1705 {
1706 pfm_context_t *ctx = (pfm_context_t *)info;
1707 struct pt_regs *regs = task_pt_regs(current);
1708 struct task_struct *owner;
1709 unsigned long flags;
1710 int ret;
1711
1712 if (ctx->ctx_cpu != smp_processor_id()) {
1713 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1714 ctx->ctx_cpu,
1715 smp_processor_id());
1716 return;
1717 }
1718 owner = GET_PMU_OWNER();
1719 if (owner != ctx->ctx_task) {
1720 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1721 smp_processor_id(),
1722 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1723 return;
1724 }
1725 if (GET_PMU_CTX() != ctx) {
1726 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1727 smp_processor_id(),
1728 GET_PMU_CTX(), ctx);
1729 return;
1730 }
1731
1732
1733 /*
1734  * interrupts must be masked while the context is unloaded on this CPU
1735  */
1738 local_irq_save(flags);
1739
1740 ret = pfm_context_unload(ctx, NULL, 0, regs);
1741 if (ret) {
1742 DPRINT(("context_unload returned %d\n", ret));
1743 }
1744
1745
1746
1747
1748 local_irq_restore(flags);
1749 }
1750
1751 static void
1752 pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1753 {
1754 int ret;
1755
1756 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1757 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
1758 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1759 }
1760 #endif
1761
1762 /*
1763  * called for each close(); when the caller is self-monitoring the
1764  * context is unloaded and the sampling buffer mapping removed
1765  */
1766 static int
1767 pfm_flush(struct file *filp, fl_owner_t id)
1768 {
1769 pfm_context_t *ctx;
1770 struct task_struct *task;
1771 struct pt_regs *regs;
1772 unsigned long flags;
1773 unsigned long smpl_buf_size = 0UL;
1774 void *smpl_buf_vaddr = NULL;
1775 int state, is_system;
1776
1777 if (PFM_IS_FILE(filp) == 0) {
1778 DPRINT(("bad magic for\n"));
1779 return -EBADF;
1780 }
1781
1782 ctx = filp->private_data;
1783 if (ctx == NULL) {
1784 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1785 return -EBADF;
1786 }
1787
1788
1789 /*
1790  * serialize against the overflow handler and context-switch code
1791  * before inspecting or unloading the context
1792  */
1801 PROTECT_CTX(ctx, flags);
1802
1803 state = ctx->ctx_state;
1804 is_system = ctx->ctx_fl_system;
1805
1806 task = PFM_CTX_TASK(ctx);
1807 regs = task_pt_regs(task);
1808
1809 DPRINT(("ctx_state=%d is_current=%d\n",
1810 state,
1811 task == current ? 1 : 0));
1812
1813
1814 /*
1815  * if the caller is the monitored task itself, it must stop and
1816  * unload first because it is about to lose access to the context
1817  */
1820 if (task == current) {
1821 #ifdef CONFIG_SMP
1822
1823 /*
1824  * a system-wide context must be unloaded on the CPU it is bound
1825  * to; if we migrated, ask that CPU to do the cleanup for us
1826  */
1829 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1830
1831 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1832
1833
1834
1835 local_irq_restore(flags);
1836
1837 pfm_syswide_cleanup_other_cpu(ctx);
1838
1839
1840
1841
1842 local_irq_save(flags);
1843
1844
1845
1846
1847 } else
1848 #endif
1849 {
1850
1851 DPRINT(("forcing unload\n"));
1852
1853
1854
1855
1856 pfm_context_unload(ctx, NULL, 0, regs);
1857
1858 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1859 }
1860 }
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873 if (ctx->ctx_smpl_vaddr && current->mm) {
1874 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1875 smpl_buf_size = ctx->ctx_smpl_size;
1876 }
1877
1878 UNPROTECT_CTX(ctx, flags);
1879
1880
1881
1882
1883
1884
1885
1886 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
1887
1888 return 0;
1889 }
1890
1891
1892 /*
1893  * called when the last reference to the file goes away, either on an
1894  * explicit close() or from exit_files(); nobody else can access the
1895  * file at this point.
1896  *
1897  * when called from exit_files(), the VMA has already been released
1898  * because exit_mm() runs before exit_files().
1899  */
1905 static int
1906 pfm_close(struct inode *inode, struct file *filp)
1907 {
1908 pfm_context_t *ctx;
1909 struct task_struct *task;
1910 struct pt_regs *regs;
1911 DECLARE_WAITQUEUE(wait, current);
1912 unsigned long flags;
1913 unsigned long smpl_buf_size = 0UL;
1914 void *smpl_buf_addr = NULL;
1915 int free_possible = 1;
1916 int state, is_system;
1917
1918 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1919
1920 if (PFM_IS_FILE(filp) == 0) {
1921 DPRINT(("bad magic\n"));
1922 return -EBADF;
1923 }
1924
1925 ctx = filp->private_data;
1926 if (ctx == NULL) {
1927 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1928 return -EBADF;
1929 }
1930
1931 PROTECT_CTX(ctx, flags);
1932
1933 state = ctx->ctx_state;
1934 is_system = ctx->ctx_fl_system;
1935
1936 task = PFM_CTX_TASK(ctx);
1937 regs = task_pt_regs(task);
1938
1939 DPRINT(("ctx_state=%d is_current=%d\n",
1940 state,
1941 task == current ? 1 : 0));
1942
1943
1944
1945
1946 if (state == PFM_CTX_UNLOADED) goto doit;
1947
1948
1949 /*
1950  * if the monitored task is blocked waiting for an overflow restart,
1951  * it must be woken up first: mark the context as going zombie and
1952  * post the restart completion, then wait for the task to clean up
1953  */
1960 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976 ctx->ctx_fl_going_zombie = 1;
1977
1978
1979
1980
1981 complete(&ctx->ctx_restart_done);
1982
1983 DPRINT(("waking up ctx_state=%d\n", state));
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993 set_current_state(TASK_INTERRUPTIBLE);
1994 add_wait_queue(&ctx->ctx_zombieq, &wait);
1995
1996 UNPROTECT_CTX(ctx, flags);
1997
1998
1999
2000
2001
2002
2003 schedule();
2004
2005
2006 PROTECT_CTX(ctx, flags);
2007
2008
2009 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2010 set_current_state(TASK_RUNNING);
2011
2012
2013
2014
2015 DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
2016 }
2017 else if (task != current) {
2018 #ifdef CONFIG_SMP
2019
2020 /* mark the context zombie; the monitored task will clean it up later */
2021
2022 ctx->ctx_state = PFM_CTX_ZOMBIE;
2023
2024 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2025
2026
2027
2028
2029 free_possible = 0;
2030 #else
2031 pfm_context_unload(ctx, NULL, 0, regs);
2032 #endif
2033 }
2034
2035 doit:
2036
2037 state = ctx->ctx_state;
2038
2039
2040 /*
2041  * the context is still locked here: grab the sampling buffer pointer
2042  * and size now, and do the actual vfree() only after unlocking,
2043  * because vfree() cannot run with interrupts disabled
2044  */
2053 if (ctx->ctx_smpl_hdr) {
2054 smpl_buf_addr = ctx->ctx_smpl_hdr;
2055 smpl_buf_size = ctx->ctx_smpl_size;
2056
2057 ctx->ctx_smpl_hdr = NULL;
2058 ctx->ctx_fl_is_sampling = 0;
2059 }
2060
2061 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2062 state,
2063 free_possible,
2064 smpl_buf_addr,
2065 smpl_buf_size));
2066
2067 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2068
2069
2070
2071
2072 if (state == PFM_CTX_ZOMBIE) {
2073 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2074 }
2075
2076
2077
2078
2079
2080 filp->private_data = NULL;
2081
2082
2083
2084
2085
2086
2087
2088
2089 UNPROTECT_CTX(ctx, flags);
2090
2091
2092
2093
2094
2095 vfree(smpl_buf_addr);
2096
2097
2098
2099
2100 if (free_possible) pfm_context_free(ctx);
2101
2102 return 0;
2103 }
2104
2105 static const struct file_operations pfm_file_ops = {
2106 .llseek = no_llseek,
2107 .read = pfm_read,
2108 .write = pfm_write,
2109 .poll = pfm_poll,
2110 .unlocked_ioctl = pfm_ioctl,
2111 .fasync = pfm_fasync,
2112 .release = pfm_close,
2113 .flush = pfm_flush
2114 };
2115
2116 static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
2117 {
2118 return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
2119 d_inode(dentry)->i_ino);
2120 }
2121
2122 static const struct dentry_operations pfmfs_dentry_operations = {
2123 .d_delete = always_delete_dentry,
2124 .d_dname = pfmfs_dname,
2125 };
2126
2127
2128 static struct file *
2129 pfm_alloc_file(pfm_context_t *ctx)
2130 {
2131 struct file *file;
2132 struct inode *inode;
2133 struct path path;
2134 struct qstr this = { .name = "" };
2135
2136
2137 /* allocate a new inode on the pfmfs pseudo filesystem */
2138
2139 inode = new_inode(pfmfs_mnt->mnt_sb);
2140 if (!inode)
2141 return ERR_PTR(-ENOMEM);
2142
2143 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2144
2145 inode->i_mode = S_IFCHR|S_IRUGO;
2146 inode->i_uid = current_fsuid();
2147 inode->i_gid = current_fsgid();
2148
2149
2150
2151
2152 path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
2153 if (!path.dentry) {
2154 iput(inode);
2155 return ERR_PTR(-ENOMEM);
2156 }
2157 path.mnt = mntget(pfmfs_mnt);
2158
2159 d_add(path.dentry, inode);
2160
2161 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
2162 if (IS_ERR(file)) {
2163 path_put(&path);
2164 return file;
2165 }
2166
2167 file->f_flags = O_RDONLY;
2168 file->private_data = ctx;
2169
2170 return file;
2171 }
2172
2173 static int
2174 pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2175 {
2176 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2177
2178 while (size > 0) {
2179 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2180
2181
2182 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2183 return -ENOMEM;
2184
2185 addr += PAGE_SIZE;
2186 buf += PAGE_SIZE;
2187 size -= PAGE_SIZE;
2188 }
2189 return 0;
2190 }
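/*
 * Note (illustrative): the buffer comes from vzalloc(), so it is only
 * virtually contiguous; each page is therefore translated individually
 * with ia64_tpa() and mapped read-only, one PAGE_SIZE at a time.
 */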
2191
2192
2193 /* allocate a sampling buffer and remap it into the task's user address space */
2194
2195 static int
2196 pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
2197 {
2198 struct mm_struct *mm = task->mm;
2199 struct vm_area_struct *vma = NULL;
2200 unsigned long size;
2201 void *smpl_buf;
2202
2203
2204 /* round the requested size up to a whole number of pages */
2207 size = PAGE_ALIGN(rsize);
2208
2209 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2210
2211
2212 /*
2213  * check the requested size against RLIMIT_MEMLOCK to avoid
2214  * denial-of-service through very large sampling buffers
2215  */
2219 if (size > task_rlimit(task, RLIMIT_MEMLOCK))
2220 return -ENOMEM;
2221
2222
2223
2224
2225 smpl_buf = vzalloc(size);
2226 if (smpl_buf == NULL) {
2227 DPRINT(("Can't allocate sampling buffer\n"));
2228 return -ENOMEM;
2229 }
2230
2231 DPRINT(("smpl_buf @%p\n", smpl_buf));
2232
2233
2234 vma = vm_area_alloc(mm);
2235 if (!vma) {
2236 DPRINT(("Cannot allocate vma\n"));
2237 goto error_kmem;
2238 }
2239
2240
2241
2242
2243 vma->vm_file = get_file(filp);
2244 vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
2245 vma->vm_page_prot = PAGE_READONLY;
2246
2247
2248
2249
2250
2251
2252 ctx->ctx_smpl_hdr = smpl_buf;
2253 ctx->ctx_smpl_size = size;
2254
2255
2256
2257
2258
2259
2260
2261 down_write(&task->mm->mmap_sem);
2262
2263
2264 vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
2265 if (IS_ERR_VALUE(vma->vm_start)) {
2266 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2267 up_write(&task->mm->mmap_sem);
2268 goto error;
2269 }
2270 vma->vm_end = vma->vm_start + size;
2271 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2272
2273 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2274
2275
2276 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2277 DPRINT(("Can't remap buffer\n"));
2278 up_write(&task->mm->mmap_sem);
2279 goto error;
2280 }
2281
2282
2283
2284
2285
2286 insert_vm_struct(mm, vma);
2287
2288 vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
2289 up_write(&task->mm->mmap_sem);
2290
2291
2292
2293
2294 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2295 *(unsigned long *)user_vaddr = vma->vm_start;
2296
2297 return 0;
2298
2299 error:
2300 vm_area_free(vma);
2301 error_kmem:
2302 vfree(smpl_buf);
2303
2304 return -ENOMEM;
2305 }
2306
2307
2308 /* returns non-zero when current lacks permission to attach to 'task' */
2309
2310 static int
2311 pfm_bad_permissions(struct task_struct *task)
2312 {
2313 const struct cred *tcred;
2314 kuid_t uid = current_uid();
2315 kgid_t gid = current_gid();
2316 int ret;
2317
2318 rcu_read_lock();
2319 tcred = __task_cred(task);
2320
2321
2322 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2323 from_kuid(&init_user_ns, uid),
2324 from_kgid(&init_user_ns, gid),
2325 from_kuid(&init_user_ns, tcred->euid),
2326 from_kuid(&init_user_ns, tcred->suid),
2327 from_kuid(&init_user_ns, tcred->uid),
2328 from_kgid(&init_user_ns, tcred->egid),
2329 from_kgid(&init_user_ns, tcred->sgid)));
2330
2331 ret = ((!uid_eq(uid, tcred->euid))
2332 || (!uid_eq(uid, tcred->suid))
2333 || (!uid_eq(uid, tcred->uid))
2334 || (!gid_eq(gid, tcred->egid))
2335 || (!gid_eq(gid, tcred->sgid))
2336 || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
2337
2338 rcu_read_unlock();
2339 return ret;
2340 }
2341
2342 static int
2343 pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2344 {
2345 int ctx_flags;
2346
2347
2348
2349 ctx_flags = pfx->ctx_flags;
2350
2351 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2352
2353
2354
2355
2356 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2357 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2358 return -EINVAL;
2359 }
2360 }
2362
2363
2364 return 0;
2365 }
2366
2367 static int
2368 pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
2369 unsigned int cpu, pfarg_context_t *arg)
2370 {
2371 pfm_buffer_fmt_t *fmt = NULL;
2372 unsigned long size = 0UL;
2373 void *uaddr = NULL;
2374 void *fmt_arg = NULL;
2375 int ret = 0;
2376 #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2377
2378
2379 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2380 if (fmt == NULL) {
2381 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2382 return -EINVAL;
2383 }
2384
2385
2386
2387
2388 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2389
2390 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2391
2392 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2393
2394 if (ret) goto error;
2395
2396
2397 ctx->ctx_buf_fmt = fmt;
2398 ctx->ctx_fl_is_sampling = 1;
2399
2400
2401
2402
2403 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2404 if (ret) goto error;
2405
2406 if (size) {
2407
2408
2409
2410 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2411 if (ret) goto error;
2412
2413
2414 arg->ctx_smpl_vaddr = uaddr;
2415 }
2416 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2417
2418 error:
2419 return ret;
2420 }
2421
2422 static void
2423 pfm_reset_pmu_state(pfm_context_t *ctx)
2424 {
2425 int i;
2426
2427 /*
2428  * install the default (reset) values for all implemented PMCs
2429  */
2430 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2431 if (PMC_IS_IMPL(i) == 0) continue;
2432 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2433 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2434 }
2435
2436 /*
2437  * bitmask of all PMCs accessible to this context.
2438  *
2439  * PMC0 is treated differently: it holds the freeze bit and the
2440  * overflow status and is never directly exposed, hence the & ~0x1
2441  */
2462 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2463
2464
2465 /* bitmask of all PMDs accessible to this context */
2466
2467 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2468
2469 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2470
2471
2472 /* no debug registers used yet */
2473
2474 ctx->ctx_used_ibrs[0] = 0UL;
2475 ctx->ctx_used_dbrs[0] = 0UL;
2476 }
2477
2478 static int
2479 pfm_ctx_getsize(void *arg, size_t *sz)
2480 {
2481 pfarg_context_t *req = (pfarg_context_t *)arg;
2482 pfm_buffer_fmt_t *fmt;
2483
2484 *sz = 0;
2485
2486 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2487
2488 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2489 if (fmt == NULL) {
2490 DPRINT(("cannot find buffer format\n"));
2491 return -EINVAL;
2492 }
2493
2494 *sz = fmt->fmt_arg_size;
2495 DPRINT(("arg_size=%lu\n", *sz));
2496
2497 return 0;
2498 }
2499
2500
2501 /*
2502  * cannot attach if:
2503  *	- the target is a kernel thread (no mm)
2504  *	- the caller lacks permission on the target
2505  *	- the context mode is incompatible with the target's state
2506  */
2507
2508 static int
2509 pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2510 {
2511
2512
2513
2514 if (task->mm == NULL) {
2515 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
2516 return -EPERM;
2517 }
2518 if (pfm_bad_permissions(task)) {
2519 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2520 return -EPERM;
2521 }
2522
2523 /* cannot use blocking notification when monitoring oneself */
2524
2525 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2526 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2527 return -EINVAL;
2528 }
2529
2530 if (task->exit_state == EXIT_ZOMBIE) {
2531 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2532 return -EBUSY;
2533 }
2534
2535
2536
2537
2538 if (task == current) return 0;
2539
2540 if (!task_is_stopped_or_traced(task)) {
2541 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2542 return -EBUSY;
2543 }
2544
2545
2546
2547 wait_task_inactive(task, 0);
2548
2549
2550
2551 return 0;
2552 }
2553
2554 static int
2555 pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2556 {
2557 struct task_struct *p = current;
2558 int ret;
2559
2560
2561 if (pid < 2) return -EPERM;
2562
2563 if (pid != task_pid_vnr(current)) {
2564
2565 p = find_get_task_by_vpid(pid);
2566 if (!p)
2567 return -ESRCH;
2568 }
2569
2570 ret = pfm_task_incompatible(ctx, p);
2571 if (ret == 0) {
2572 *task = p;
2573 } else if (p != current) {
2574 pfm_put_task(p);
2575 }
2576 return ret;
2577 }
2578
2579
2580
2581 static int
2582 pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2583 {
2584 pfarg_context_t *req = (pfarg_context_t *)arg;
2585 struct file *filp;
2586 struct path path;
2587 int ctx_flags;
2588 int fd;
2589 int ret;
2590
2591
2592 ret = pfarg_is_sane(current, req);
2593 if (ret < 0)
2594 return ret;
2595
2596 ctx_flags = req->ctx_flags;
2597
2598 ret = -ENOMEM;
2599
2600 fd = get_unused_fd_flags(0);
2601 if (fd < 0)
2602 return fd;
2603
2604 ctx = pfm_context_alloc(ctx_flags);
2605 if (!ctx)
2606 goto error;
2607
2608 filp = pfm_alloc_file(ctx);
2609 if (IS_ERR(filp)) {
2610 ret = PTR_ERR(filp);
2611 goto error_file;
2612 }
2613
2614 req->ctx_fd = ctx->ctx_fd = fd;
2615
2616
2617
2618
2619 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2620 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2621 if (ret)
2622 goto buffer_error;
2623 }
2624
2625 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2626 ctx,
2627 ctx_flags,
2628 ctx->ctx_fl_system,
2629 ctx->ctx_fl_block,
2630 ctx->ctx_fl_excl_idle,
2631 ctx->ctx_fl_no_msg,
2632 ctx->ctx_fd));
2633
2634
2635
2636
2637 pfm_reset_pmu_state(ctx);
2638
2639 fd_install(fd, filp);
2640
2641 return 0;
2642
2643 buffer_error:
2644 path = filp->f_path;
2645 put_filp(filp);
2646 path_put(&path);
2647
2648 if (ctx->ctx_buf_fmt) {
2649 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2650 }
2651 error_file:
2652 pfm_context_free(ctx);
2653
2654 error:
2655 put_unused_fd(fd);
2656 return ret;
2657 }
2658
2659 static inline unsigned long
2660 pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2661 {
2662 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2663 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2664 extern unsigned long carta_random32 (unsigned long seed);
2665
2666 if (reg->flags & PFM_REGFL_RANDOM) {
2667 new_seed = carta_random32(old_seed);
2668 val -= (old_seed & mask);
2669 if ((mask >> 32) != 0)
2670 /* construct a full 64-bit random value */
2671 new_seed |= carta_random32(old_seed >> 32) << 32;
2672 reg->seed = new_seed;
2673 }
2674 reg->lval = val;
2675 return val;
2676 }
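/*
 * The value computed above implements randomized sampling periods: a
 * counting PMD is normally reset to short_reset or long_reset, and
 * with PFM_REGFL_RANDOM a pseudo-random quantity (old_seed & mask) is
 * subtracted so that successive overflow periods vary. A sketch with
 * hypothetical numbers: for a short_reset of -1000 (overflow after
 * 1000 events) and a mask of 0xff, the effective period ranges from
 * 1000 to 1255 events depending on the current seed.
 */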
2677
2678 static void
2679 pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2680 {
2681 unsigned long mask = ovfl_regs[0];
2682 unsigned long reset_others = 0UL;
2683 unsigned long val;
2684 int i;
2685
2686
2687
2688
2689 mask >>= PMU_FIRST_COUNTER;
2690 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2691
2692 if ((mask & 0x1UL) == 0UL) continue;
2693
2694 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2695 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2696
2697 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2698 }
2699
2700
2701
2702
2703 for(i = 0; reset_others; i++, reset_others >>= 1) {
2704
2705 if ((reset_others & 0x1) == 0) continue;
2706
2707 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2708
2709 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2710 is_long_reset ? "long" : "short", i, val));
2711 }
2712 }
2713
2714 static void
2715 pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2716 {
2717 unsigned long mask = ovfl_regs[0];
2718 unsigned long reset_others = 0UL;
2719 unsigned long val;
2720 int i;
2721
2722 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2723
2724 if (ctx->ctx_state == PFM_CTX_MASKED) {
2725 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2726 return;
2727 }
2728
2729
2730
2731
2732 mask >>= PMU_FIRST_COUNTER;
2733 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2734
2735 if ((mask & 0x1UL) == 0UL) continue;
2736
2737 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2738 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2739
2740 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2741
2742 pfm_write_soft_counter(ctx, i, val);
2743 }
2744
2745
2746
2747
2748 for(i = 0; reset_others; i++, reset_others >>= 1) {
2749
2750 if ((reset_others & 0x1) == 0) continue;
2751
2752 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2753
2754 if (PMD_IS_COUNTING(i)) {
2755 pfm_write_soft_counter(ctx, i, val);
2756 } else {
2757 ia64_set_pmd(i, val);
2758 }
2759 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2760 is_long_reset ? "long" : "short", i, val));
2761 }
2762 ia64_srlz_d();
2763 }
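/*
 * Note the difference between the two reset paths: in the MASKED state
 * pfm_reset_regs_masked() only updates the 64-bit software state
 * (ctx_pmds[].val), since the hardware PMDs are rewritten when
 * monitoring is unmasked; in the LOADED state the new values are
 * pushed into the live PMD registers and ia64_srlz_d() makes the
 * writes visible before monitoring resumes.
 */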
2764
2765 static int
2766 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2767 {
2768 struct task_struct *task;
2769 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2770 unsigned long value, pmc_pm;
2771 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2772 unsigned int cnum, reg_flags, flags, pmc_type;
2773 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2774 int is_monitor, is_counting, state;
2775 int ret = -EINVAL;
2776 pfm_reg_check_t wr_func;
2777 #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2778
2779 state = ctx->ctx_state;
2780 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2781 is_system = ctx->ctx_fl_system;
2782 task = ctx->ctx_task;
2783 impl_pmds = pmu_conf->impl_pmds[0];
2784
2785 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2786
2787 if (is_loaded) {
2788
2789
2790
2791
2792
2793 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2794 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2795 return -EBUSY;
2796 }
2797 can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
2798 }
2799 expert_mode = pfm_sysctl.expert_mode;
2800
2801 for (i = 0; i < count; i++, req++) {
2802
2803 cnum = req->reg_num;
2804 reg_flags = req->reg_flags;
2805 value = req->reg_value;
2806 smpl_pmds = req->reg_smpl_pmds[0];
2807 reset_pmds = req->reg_reset_pmds[0];
2808 flags = 0;
2809
2810
2811 if (cnum >= PMU_MAX_PMCS) {
2812 DPRINT(("pmc%u is invalid\n", cnum));
2813 goto error;
2814 }
2815
2816 pmc_type = pmu_conf->pmc_desc[cnum].type;
2817 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2818 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2819 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2820
2821
2822
2823
2824
2825
2826 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2827 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2828 goto error;
2829 }
2830 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2831
2832
2833
2834
2835
2836 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2837 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2838 cnum,
2839 pmc_pm,
2840 is_system));
2841 goto error;
2842 }
2843
2844 if (is_counting) {
2845
2846
2847
2848
2849 value |= 1 << PMU_PMC_OI;
2850
2851 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2852 flags |= PFM_REGFL_OVFL_NOTIFY;
2853 }
2854
2855 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2856
2857
2858 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2859 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2860 goto error;
2861 }
2862
2863
2864 if ((reset_pmds & impl_pmds) != reset_pmds) {
2865 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2866 goto error;
2867 }
2868 } else {
2869 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2870 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2871 goto error;
2872 }
2873
2874 }
2875
2876
2877
2878
2879 if (likely(expert_mode == 0 && wr_func)) {
2880 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2881 if (ret) goto error;
2882 ret = -EINVAL;
2883 }
2884
2885
2886
2887
2888 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2889
2890
2891
2892
2893
2894
2895
2896
2897 if (is_counting) {
2898
2899
2900
2901 ctx->ctx_pmds[cnum].flags = flags;
2902
2903 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2904 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2905 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918 CTX_USED_PMD(ctx, reset_pmds);
2919 CTX_USED_PMD(ctx, smpl_pmds);
2920
2921
2922
2923
2924 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
2925 }
2926
2927
2928
2929
2930
2931 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
2946
2947
2948
2949
2950 ctx->ctx_pmcs[cnum] = value;
2951
2952 if (is_loaded) {
2953
2954
2955
2956 if (is_system == 0) ctx->th_pmcs[cnum] = value;
2957
2958
2959
2960
2961 if (can_access_pmu) {
2962 ia64_set_pmc(cnum, value);
2963 }
2964 #ifdef CONFIG_SMP
2965 else {
2966
2967
2968
2969
2970
2971
2972
2973 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
2974 }
2975 #endif
2976 }
2977
2978 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
2979 cnum,
2980 value,
2981 is_loaded,
2982 can_access_pmu,
2983 flags,
2984 ctx->ctx_all_pmcs[0],
2985 ctx->ctx_used_pmds[0],
2986 ctx->ctx_pmds[cnum].eventid,
2987 smpl_pmds,
2988 reset_pmds,
2989 ctx->ctx_reload_pmcs[0],
2990 ctx->ctx_used_monitors[0],
2991 ctx->ctx_ovfl_regs[0]));
2992 }
2993
2994
2995
2996
2997 if (can_access_pmu) ia64_srlz_d();
2998
2999 return 0;
3000 error:
3001 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3002 return ret;
3003 }
3004
3005 static int
3006 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3007 {
3008 struct task_struct *task;
3009 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3010 unsigned long value, hw_value, ovfl_mask;
3011 unsigned int cnum;
3012 int i, can_access_pmu = 0, state;
3013 int is_counting, is_loaded, is_system, expert_mode;
3014 int ret = -EINVAL;
3015 pfm_reg_check_t wr_func;
3016
3017
3018 state = ctx->ctx_state;
3019 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3020 is_system = ctx->ctx_fl_system;
3021 ovfl_mask = pmu_conf->ovfl_val;
3022 task = ctx->ctx_task;
3023
3024 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3025
3026
3027
3028
3029
3030 if (likely(is_loaded)) {
3031
3032
3033
3034
3035
3036 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3037 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3038 return -EBUSY;
3039 }
3040 can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3041 }
3042 expert_mode = pfm_sysctl.expert_mode;
3043
3044 for (i = 0; i < count; i++, req++) {
3045
3046 cnum = req->reg_num;
3047 value = req->reg_value;
3048
3049 if (!PMD_IS_IMPL(cnum)) {
3050 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3051 goto abort_mission;
3052 }
3053 is_counting = PMD_IS_COUNTING(cnum);
3054 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3055
3056
3057
3058
3059 if (unlikely(expert_mode == 0 && wr_func)) {
3060 unsigned long v = value;
3061
3062 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3063 if (ret) goto abort_mission;
3064
3065 value = v;
3066 ret = -EINVAL;
3067 }
3068
3069
3070
3071
3072 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3073
3074
3075
3076
3077 hw_value = value;
3078
3079
3080
3081
3082 if (is_counting) {
3083
3084
3085
3086 ctx->ctx_pmds[cnum].lval = value;
3087
3088
3089
3090
3091 if (is_loaded) {
3092 hw_value = value & ovfl_mask;
3093 value = value & ~ovfl_mask;
3094 }
3095 }
3096
3097
3098
3099 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3100 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3101
3102
3103
3104
3105 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3106 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3107
3108
3109
3110
3111 ctx->ctx_pmds[cnum].val = value;
3112
3113
3114
3115
3116
3117
3118
3119 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3120
3121
3122
3123
3124 CTX_USED_PMD(ctx, RDEP(cnum));
3125
3126
3127
3128
3129
3130 if (is_counting && state == PFM_CTX_MASKED) {
3131 ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3132 }
3133
3134 if (is_loaded) {
3135
3136
3137
3138 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3139
3140
3141
3142
3143 if (can_access_pmu) {
3144 ia64_set_pmd(cnum, hw_value);
3145 } else {
3146 #ifdef CONFIG_SMP
3147
3148
3149
3150
3151
3152 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3153 #endif
3154 }
3155 }
3156
3157 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3158 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3159 cnum,
3160 value,
3161 is_loaded,
3162 can_access_pmu,
3163 hw_value,
3164 ctx->ctx_pmds[cnum].val,
3165 ctx->ctx_pmds[cnum].short_reset,
3166 ctx->ctx_pmds[cnum].long_reset,
3167 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3168 ctx->ctx_pmds[cnum].seed,
3169 ctx->ctx_pmds[cnum].mask,
3170 ctx->ctx_used_pmds[0],
3171 ctx->ctx_pmds[cnum].reset_pmds[0],
3172 ctx->ctx_reload_pmds[0],
3173 ctx->ctx_all_pmds[0],
3174 ctx->ctx_ovfl_regs[0]));
3175 }
3176
3177
3178
3179
3180 if (can_access_pmu) ia64_srlz_d();
3181
3182 return 0;
3183
3184 abort_mission:
3185
3186
3187
3188 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3189 return ret;
3190 }
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201 static int
3202 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3203 {
3204 struct task_struct *task;
3205 unsigned long val = 0UL, lval, ovfl_mask, sval;
3206 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3207 unsigned int cnum, reg_flags = 0;
3208 int i, can_access_pmu = 0, state;
3209 int is_loaded, is_system, is_counting, expert_mode;
3210 int ret = -EINVAL;
3211 pfm_reg_check_t rd_func;
3212
3213
3214
3215
3216
3217
3218 state = ctx->ctx_state;
3219 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3220 is_system = ctx->ctx_fl_system;
3221 ovfl_mask = pmu_conf->ovfl_val;
3222 task = ctx->ctx_task;
3223
3224 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3225
3226 if (likely(is_loaded)) {
3227
3228
3229
3230
3231
3232 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3233 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3234 return -EBUSY;
3235 }
3236
3237
3238
3239 can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3240
3241 if (can_access_pmu) ia64_srlz_d();
3242 }
3243 expert_mode = pfm_sysctl.expert_mode;
3244
3245 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3246 is_loaded,
3247 can_access_pmu,
3248 state));
3249
3250
3251
3252
3253
3254
3255 for (i = 0; i < count; i++, req++) {
3256
3257 cnum = req->reg_num;
3258 reg_flags = req->reg_flags;
3259
3260 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3261
3262
3263
3264
3265
3266
3267
3268
3269 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3270
3271 sval = ctx->ctx_pmds[cnum].val;
3272 lval = ctx->ctx_pmds[cnum].lval;
3273 is_counting = PMD_IS_COUNTING(cnum);
3274
3275
3276
3277
3278
3279
3280 if (can_access_pmu){
3281 val = ia64_get_pmd(cnum);
3282 } else {
3283
3284
3285
3286
3287
3288 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3289 }
3290 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3291
3292 if (is_counting) {
3293
3294
3295
3296 val &= ovfl_mask;
3297 val += sval;
3298 }
3299
3300
3301
3302
3303 if (unlikely(expert_mode == 0 && rd_func)) {
3304 unsigned long v = val;
3305 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3306 if (ret) goto error;
3307 val = v;
3308 ret = -EINVAL;
3309 }
3310
3311 PFM_REG_RETFLAG_SET(reg_flags, 0);
3312
3313 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3314
3315
3316
3317
3318
3319
3320 req->reg_value = val;
3321 req->reg_flags = reg_flags;
3322 req->reg_last_reset_val = lval;
3323 }
3324
3325 return 0;
3326
3327 error:
3328 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3329 return ret;
3330 }
3331
3332 int
3333 pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3334 {
3335 pfm_context_t *ctx;
3336
3337 if (req == NULL) return -EINVAL;
3338
3339 ctx = GET_PMU_CTX();
3340
3341 if (ctx == NULL) return -EINVAL;
3342
3343
3344
3345
3346
3347 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3348
3349 return pfm_write_pmcs(ctx, req, nreq, regs);
3350 }
3351 EXPORT_SYMBOL(pfm_mod_write_pmcs);
3352
3353 int
3354 pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3355 {
3356 pfm_context_t *ctx;
3357
3358 if (req == NULL) return -EINVAL;
3359
3360 ctx = GET_PMU_CTX();
3361
3362 if (ctx == NULL) return -EINVAL;
3363
3364
3365
3366
3367
3368 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3369
3370 return pfm_read_pmds(ctx, req, nreq, regs);
3371 }
3372 EXPORT_SYMBOL(pfm_mod_read_pmds);
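/*
 * A minimal usage sketch for the two exported helpers above, assuming
 * a system-wide context is already loaded on this CPU (the register
 * number and value below are hypothetical):
 *
 *	pfarg_reg_t pc = { .reg_num = 4, .reg_value = 0UL };
 *	pfarg_reg_t pd = { .reg_num = 4 };
 *	struct pt_regs *regs = task_pt_regs(current);
 *
 *	if (pfm_mod_write_pmcs(current, &pc, 1, regs) == 0)
 *		pfm_mod_read_pmds(current, &pd, 1, regs);
 */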
3373
3374
3375
3376
3377
3378 int
3379 pfm_use_debug_registers(struct task_struct *task)
3380 {
3381 pfm_context_t *ctx = task->thread.pfm_context;
3382 unsigned long flags;
3383 int ret = 0;
3384
3385 if (pmu_conf->use_rr_dbregs == 0) return 0;
3386
3387 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3388
3389
3390
3391
3392 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3403
3404 LOCK_PFS(flags);
3405
3406
3407
3408
3409
3410 if (pfm_sessions.pfs_sys_use_dbregs > 0)
3411 ret = -1;
3412 else
3413 pfm_sessions.pfs_ptrace_use_dbregs++;
3414
3415 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3416 pfm_sessions.pfs_ptrace_use_dbregs,
3417 pfm_sessions.pfs_sys_use_dbregs,
3418 task_pid_nr(task), ret));
3419
3420 UNLOCK_PFS(flags);
3421
3422 return ret;
3423 }
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433 int
3434 pfm_release_debug_registers(struct task_struct *task)
3435 {
3436 unsigned long flags;
3437 int ret;
3438
3439 if (pmu_conf->use_rr_dbregs == 0) return 0;
3440
3441 LOCK_PFS(flags);
3442 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3443 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3444 ret = -1;
3445 } else {
3446 pfm_sessions.pfs_ptrace_use_dbregs--;
3447 ret = 0;
3448 }
3449 UNLOCK_PFS(flags);
3450
3451 return ret;
3452 }
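/*
 * The two helpers above arbitrate the debug registers between ptrace
 * and perfmon: pfm_use_debug_registers() fails if any system-wide
 * perfmon session has programmed them, otherwise it takes a reference
 * via pfs_ptrace_use_dbregs; pfm_release_debug_registers() drops that
 * reference. Perfmon performs the mirror-image check against
 * pfs_ptrace_use_dbregs in pfm_write_ibr_dbr() below.
 */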
3453
3454 static int
3455 pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3456 {
3457 struct task_struct *task;
3458 pfm_buffer_fmt_t *fmt;
3459 pfm_ovfl_ctrl_t rst_ctrl;
3460 int state, is_system;
3461 int ret = 0;
3462
3463 state = ctx->ctx_state;
3464 fmt = ctx->ctx_buf_fmt;
3465 is_system = ctx->ctx_fl_system;
3466 task = PFM_CTX_TASK(ctx);
3467
3468 switch(state) {
3469 case PFM_CTX_MASKED:
3470 break;
3471 case PFM_CTX_LOADED:
3472 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3473 /* fall through */
3474 case PFM_CTX_UNLOADED:
3475 case PFM_CTX_ZOMBIE:
3476 DPRINT(("invalid state=%d\n", state));
3477 return -EBUSY;
3478 default:
3479 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3480 return -EINVAL;
3481 }
3482
3483
3484
3485
3486
3487
3488 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3489 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3490 return -EBUSY;
3491 }
3492
3493
3494 if (unlikely(task == NULL)) {
3495 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3496 return -EINVAL;
3497 }
3498
3499 if (task == current || is_system) {
3500
3501 fmt = ctx->ctx_buf_fmt;
3502
3503 DPRINT(("restarting self %d ovfl=0x%lx\n",
3504 task_pid_nr(task),
3505 ctx->ctx_ovfl_regs[0]));
3506
3507 if (CTX_HAS_SMPL(ctx)) {
3508
3509 prefetch(ctx->ctx_smpl_hdr);
3510
3511 rst_ctrl.bits.mask_monitoring = 0;
3512 rst_ctrl.bits.reset_ovfl_pmds = 0;
3513
3514 if (state == PFM_CTX_LOADED)
3515 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3516 else
3517 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3518 } else {
3519 rst_ctrl.bits.mask_monitoring = 0;
3520 rst_ctrl.bits.reset_ovfl_pmds = 1;
3521 }
3522
3523 if (ret == 0) {
3524 if (rst_ctrl.bits.reset_ovfl_pmds)
3525 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3526
3527 if (rst_ctrl.bits.mask_monitoring == 0) {
3528 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3529
3530 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3531 } else {
3532 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3533
3534
3535 }
3536 }
3537
3538
3539
3540 ctx->ctx_ovfl_regs[0] = 0UL;
3541
3542
3543
3544
3545 ctx->ctx_state = PFM_CTX_LOADED;
3546
3547
3548
3549
3550 ctx->ctx_fl_can_restart = 0;
3551
3552 return 0;
3553 }
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563 if (state == PFM_CTX_MASKED) {
3564 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3565
3566
3567
3568
3569 ctx->ctx_fl_can_restart = 0;
3570 }
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3589 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3590 complete(&ctx->ctx_restart_done);
3591 } else {
3592 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3593
3594 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3595
3596 PFM_SET_WORK_PENDING(task, 1);
3597
3598 set_notify_resume(task);
3599
3600
3601
3602
3603 }
3604 return 0;
3605 }
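/*
 * pfm_restart() recap: when issued by the monitored task itself, or
 * for a system-wide session, the overflowed PMDs are reset and
 * monitoring is unmasked synchronously. When issued by the monitoring
 * tool for another task, the work is deferred to that task: either the
 * ctx_restart_done completion is signalled (blocking mode) or TIF work
 * is queued so the task resets its own state on the next return to
 * user mode.
 */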
3606
3607 static int
3608 pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3609 {
3610 unsigned int m = *(unsigned int *)arg;
3611
3612 pfm_sysctl.debug = m == 0 ? 0 : 1;
3613
3614 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3615
3616 if (m == 0) {
3617 memset(pfm_stats, 0, sizeof(pfm_stats));
3618 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3619 }
3620 return 0;
3621 }
3622
3623
3624
3625
3626 static int
3627 pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3628 {
3629 struct thread_struct *thread = NULL;
3630 struct task_struct *task;
3631 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3632 unsigned long flags;
3633 dbreg_t dbreg;
3634 unsigned int rnum;
3635 int first_time;
3636 int ret = 0, state;
3637 int i, can_access_pmu = 0;
3638 int is_system, is_loaded;
3639
3640 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3641
3642 state = ctx->ctx_state;
3643 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3644 is_system = ctx->ctx_fl_system;
3645 task = ctx->ctx_task;
3646
3647 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3648
3649
3650
3651
3652
3653 if (is_loaded) {
3654 thread = &task->thread;
3655
3656
3657
3658
3659
3660 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3661 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3662 return -EBUSY;
3663 }
3664 can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3665 }
3666
3667
3668
3669
3670
3671
3672
3673
3674 first_time = ctx->ctx_fl_using_dbreg == 0;
3675
3676
3677
3678
3679 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3680 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3681 return -EBUSY;
3682 }
3683
3684
3685
3686
3687
3688
3689
3690
3691 if (is_loaded) {
3692 LOCK_PFS(flags);
3693
3694 if (first_time && is_system) {
3695 if (pfm_sessions.pfs_ptrace_use_dbregs)
3696 ret = -EBUSY;
3697 else
3698 pfm_sessions.pfs_sys_use_dbregs++;
3699 }
3700 UNLOCK_PFS(flags);
3701 }
3702
3703 if (ret != 0) return ret;
3704
3705
3706
3707
3708
3709 ctx->ctx_fl_using_dbreg = 1;
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720 if (first_time && can_access_pmu) {
3721 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3722 for (i=0; i < pmu_conf->num_ibrs; i++) {
3723 ia64_set_ibr(i, 0UL);
3724 ia64_dv_serialize_instruction();
3725 }
3726 ia64_srlz_i();
3727 for (i=0; i < pmu_conf->num_dbrs; i++) {
3728 ia64_set_dbr(i, 0UL);
3729 ia64_dv_serialize_data();
3730 }
3731 ia64_srlz_d();
3732 }
3733
3734
3735
3736
3737 for (i = 0; i < count; i++, req++) {
3738
3739 rnum = req->dbreg_num;
3740 dbreg.val = req->dbreg_value;
3741
3742 ret = -EINVAL;
3743
3744 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3745 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3746 rnum, dbreg.val, mode, i, count));
3747
3748 goto abort_mission;
3749 }
3750
3751
3752
3753
3754 if (rnum & 0x1) {
3755 if (mode == PFM_CODE_RR)
3756 dbreg.ibr.ibr_x = 0;
3757 else
3758 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3759 }
3760
3761 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773 if (mode == PFM_CODE_RR) {
3774 CTX_USED_IBR(ctx, rnum);
3775
3776 if (can_access_pmu) {
3777 ia64_set_ibr(rnum, dbreg.val);
3778 ia64_dv_serialize_instruction();
3779 }
3780
3781 ctx->ctx_ibrs[rnum] = dbreg.val;
3782
3783 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3784 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3785 } else {
3786 CTX_USED_DBR(ctx, rnum);
3787
3788 if (can_access_pmu) {
3789 ia64_set_dbr(rnum, dbreg.val);
3790 ia64_dv_serialize_data();
3791 }
3792 ctx->ctx_dbrs[rnum] = dbreg.val;
3793
3794 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3795 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3796 }
3797 }
3798
3799 return 0;
3800
3801 abort_mission:
3802
3803
3804
3805 if (first_time) {
3806 LOCK_PFS(flags);
3807 if (ctx->ctx_fl_system) {
3808 pfm_sessions.pfs_sys_use_dbregs--;
3809 }
3810 UNLOCK_PFS(flags);
3811 ctx->ctx_fl_using_dbreg = 0;
3812 }
3813
3814
3815
3816 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3817
3818 return ret;
3819 }
3820
3821 static int
3822 pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3823 {
3824 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3825 }
3826
3827 static int
3828 pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3829 {
3830 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3831 }
3832
3833 int
3834 pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3835 {
3836 pfm_context_t *ctx;
3837
3838 if (req == NULL) return -EINVAL;
3839
3840 ctx = GET_PMU_CTX();
3841
3842 if (ctx == NULL) return -EINVAL;
3843
3844
3845
3846
3847
3848 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3849
3850 return pfm_write_ibrs(ctx, req, nreq, regs);
3851 }
3852 EXPORT_SYMBOL(pfm_mod_write_ibrs);
3853
3854 int
3855 pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3856 {
3857 pfm_context_t *ctx;
3858
3859 if (req == NULL) return -EINVAL;
3860
3861 ctx = GET_PMU_CTX();
3862
3863 if (ctx == NULL) return -EINVAL;
3864
3865
3866
3867
3868
3869 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3870
3871 return pfm_write_dbrs(ctx, req, nreq, regs);
3872 }
3873 EXPORT_SYMBOL(pfm_mod_write_dbrs);
3874
3875
3876 static int
3877 pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3878 {
3879 pfarg_features_t *req = (pfarg_features_t *)arg;
3880
3881 req->ft_version = PFM_VERSION;
3882 return 0;
3883 }
3884
3885 static int
3886 pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3887 {
3888 struct pt_regs *tregs;
3889 struct task_struct *task = PFM_CTX_TASK(ctx);
3890 int state, is_system;
3891
3892 state = ctx->ctx_state;
3893 is_system = ctx->ctx_fl_system;
3894
3895
3896
3897
3898 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3899
3900
3901
3902
3903
3904
3905 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3906 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3907 return -EBUSY;
3908 }
3909 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3910 task_pid_nr(PFM_CTX_TASK(ctx)),
3911 state,
3912 is_system));
3913
3914
3915
3916
3917
3918 if (is_system) {
3919
3920
3921
3922
3923
3924 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
3925 ia64_srlz_i();
3926
3927
3928
3929
3930 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
3931
3932
3933
3934
3935 pfm_clear_psr_pp();
3936
3937
3938
3939
3940 ia64_psr(regs)->pp = 0;
3941
3942 return 0;
3943 }
3944
3945
3946
3947
3948 if (task == current) {
3949
3950 pfm_clear_psr_up();
3951
3952
3953
3954
3955 ia64_psr(regs)->up = 0;
3956 } else {
3957 tregs = task_pt_regs(task);
3958
3959
3960
3961
3962 ia64_psr(tregs)->up = 0;
3963
3964
3965
3966
3967 ctx->ctx_saved_psr_up = 0;
3968 DPRINT(("task=[%d]\n", task_pid_nr(task)));
3969 }
3970 return 0;
3971 }
3972
3973
3974 static int
3975 pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3976 {
3977 struct pt_regs *tregs;
3978 int state, is_system;
3979
3980 state = ctx->ctx_state;
3981 is_system = ctx->ctx_fl_system;
3982
3983 if (state != PFM_CTX_LOADED) return -EINVAL;
3984
3985
3986
3987
3988
3989
3990 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3991 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3992 return -EBUSY;
3993 }
3994
3995
3996
3997
3998
3999
4000 if (is_system) {
4001
4002
4003
4004
4005 ia64_psr(regs)->pp = 1;
4006
4007
4008
4009
4010 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4011
4012
4013
4014
4015 pfm_set_psr_pp();
4016
4017
4018 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4019 ia64_srlz_i();
4020
4021 return 0;
4022 }
4023
4024
4025
4026
4027
4028 if (ctx->ctx_task == current) {
4029
4030
4031 pfm_set_psr_up();
4032
4033
4034
4035
4036 ia64_psr(regs)->up = 1;
4037
4038 } else {
4039 tregs = task_pt_regs(ctx->ctx_task);
4040
4041
4042
4043
4044
4045 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4046
4047
4048
4049
4050 ia64_psr(tregs)->up = 1;
4051 }
4052 return 0;
4053 }
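/*
 * pfm_stop()/pfm_start() toggle monitoring without unloading the
 * context. System-wide sessions are controlled through psr.pp (plus
 * dcr.pp, so privileged monitors follow); per-task sessions through
 * psr.up. For a monitored task other than current, only the saved psr
 * in its kernel stack (task_pt_regs()) and the context copy
 * (ctx_saved_psr_up) are touched; the bits take effect when the task
 * is next switched in.
 */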
4054
4055 static int
4056 pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4057 {
4058 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4059 unsigned int cnum;
4060 int i;
4061 int ret = -EINVAL;
4062
4063 for (i = 0; i < count; i++, req++) {
4064
4065 cnum = req->reg_num;
4066
4067 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4068
4069 req->reg_value = PMC_DFL_VAL(cnum);
4070
4071 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4072
4073 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4074 }
4075 return 0;
4076
4077 abort_mission:
4078 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4079 return ret;
4080 }
4081
4082 static int
4083 pfm_check_task_exist(pfm_context_t *ctx)
4084 {
4085 struct task_struct *g, *t;
4086 int ret = -ESRCH;
4087
4088 read_lock(&tasklist_lock);
4089
4090 do_each_thread (g, t) {
4091 if (t->thread.pfm_context == ctx) {
4092 ret = 0;
4093 goto out;
4094 }
4095 } while_each_thread (g, t);
4096 out:
4097 read_unlock(&tasklist_lock);
4098
4099 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4100
4101 return ret;
4102 }
4103
4104 static int
4105 pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4106 {
4107 struct task_struct *task;
4108 struct thread_struct *thread;
4109 pfm_context_t *old;
4110 unsigned long flags;
4111 #ifndef CONFIG_SMP
4112 struct task_struct *owner_task = NULL;
4113 #endif
4114 pfarg_load_t *req = (pfarg_load_t *)arg;
4115 unsigned long *pmcs_source, *pmds_source;
4116 int the_cpu;
4117 int ret = 0;
4118 int state, is_system, set_dbregs = 0;
4119
4120 state = ctx->ctx_state;
4121 is_system = ctx->ctx_fl_system;
4122
4123
4124
4125 if (state != PFM_CTX_UNLOADED) {
4126 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4127 req->load_pid,
4128 ctx->ctx_state));
4129 return -EBUSY;
4130 }
4131
4132 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4133
4134 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4135 DPRINT(("cannot use blocking mode on self\n"));
4136 return -EINVAL;
4137 }
4138
4139 ret = pfm_get_task(ctx, req->load_pid, &task);
4140 if (ret) {
4141 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4142 return ret;
4143 }
4144
4145 ret = -EINVAL;
4146
4147
4148
4149
4150 if (is_system && task != current) {
4151 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4152 req->load_pid));
4153 goto error;
4154 }
4155
4156 thread = &task->thread;
4157
4158 ret = 0;
4159
4160
4161
4162
4163 if (ctx->ctx_fl_using_dbreg) {
4164 if (thread->flags & IA64_THREAD_DBG_VALID) {
4165 ret = -EBUSY;
4166 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4167 goto error;
4168 }
4169 LOCK_PFS(flags);
4170
4171 if (is_system) {
4172 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4173 DPRINT(("cannot load [%d] dbregs in use\n",
4174 task_pid_nr(task)));
4175 ret = -EBUSY;
4176 } else {
4177 pfm_sessions.pfs_sys_use_dbregs++;
4178 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4179 set_dbregs = 1;
4180 }
4181 }
4182
4183 UNLOCK_PFS(flags);
4184
4185 if (ret) goto error;
4186 }
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203 the_cpu = ctx->ctx_cpu = smp_processor_id();
4204
4205 ret = -EBUSY;
4206
4207
4208
4209 ret = pfm_reserve_session(current, is_system, the_cpu);
4210 if (ret) goto error;
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4222 thread->pfm_context, ctx));
4223
4224 ret = -EBUSY;
4225 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4226 if (old != NULL) {
4227 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4228 goto error_unres;
4229 }
4230
4231 pfm_reset_msgq(ctx);
4232
4233 ctx->ctx_state = PFM_CTX_LOADED;
4234
4235
4236
4237
4238 ctx->ctx_task = task;
4239
4240 if (is_system) {
4241
4242
4243
4244 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4245 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4246
4247 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4248 } else {
4249 thread->flags |= IA64_THREAD_PM_VALID;
4250 }
4251
4252
4253
4254
4255 pfm_copy_pmds(task, ctx);
4256 pfm_copy_pmcs(task, ctx);
4257
4258 pmcs_source = ctx->th_pmcs;
4259 pmds_source = ctx->th_pmds;
4260
4261
4262
4263
4264 if (task == current) {
4265
4266 if (is_system == 0) {
4267
4268
4269 ia64_psr(regs)->sp = 0;
4270 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4271
4272 SET_LAST_CPU(ctx, smp_processor_id());
4273 INC_ACTIVATION();
4274 SET_ACTIVATION(ctx);
4275 #ifndef CONFIG_SMP
4276
4277
4278
4279 owner_task = GET_PMU_OWNER();
4280 if (owner_task) pfm_lazy_save_regs(owner_task);
4281 #endif
4282 }
4283
4284
4285
4286
4287 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4288 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4289
4290 ctx->ctx_reload_pmcs[0] = 0UL;
4291 ctx->ctx_reload_pmds[0] = 0UL;
4292
4293
4294
4295
4296 if (ctx->ctx_fl_using_dbreg) {
4297 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4298 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4299 }
4300
4301
4302
4303 SET_PMU_OWNER(task, ctx);
4304
4305 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4306 } else {
4307
4308
4309
4310 regs = task_pt_regs(task);
4311
4312
4313 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4314 SET_LAST_CPU(ctx, -1);
4315
4316
4317 ctx->ctx_saved_psr_up = 0UL;
4318 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4319 }
4320
4321 ret = 0;
4322
4323 error_unres:
4324 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4325 error:
4326
4327
4328
4329 if (ret && set_dbregs) {
4330 LOCK_PFS(flags);
4331 pfm_sessions.pfs_sys_use_dbregs--;
4332 UNLOCK_PFS(flags);
4333 }
4334
4335
4336
4337 if (is_system == 0 && task != current) {
4338 pfm_put_task(task);
4339
4340 if (ret == 0) {
4341 ret = pfm_check_task_exist(ctx);
4342 if (ret) {
4343 ctx->ctx_state = PFM_CTX_UNLOADED;
4344 ctx->ctx_task = NULL;
4345 }
4346 }
4347 }
4348 return ret;
4349 }
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359 static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4360
4361 static int
4362 pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4363 {
4364 struct task_struct *task = PFM_CTX_TASK(ctx);
4365 struct pt_regs *tregs;
4366 int prev_state, is_system;
4367 int ret;
4368
4369 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4370
4371 prev_state = ctx->ctx_state;
4372 is_system = ctx->ctx_fl_system;
4373
4374
4375
4376
4377 if (prev_state == PFM_CTX_UNLOADED) {
4378 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4379 return 0;
4380 }
4381
4382
4383
4384
4385 ret = pfm_stop(ctx, NULL, 0, regs);
4386 if (ret) return ret;
4387
4388 ctx->ctx_state = PFM_CTX_UNLOADED;
4389
4390
4391
4392
4393
4394
4395 if (is_system) {
4396
4397
4398
4399
4400
4401
4402 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4403 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4404
4405
4406
4407
4408
4409 pfm_flush_pmds(current, ctx);
4410
4411
4412
4413
4414
4415 if (prev_state != PFM_CTX_ZOMBIE)
4416 pfm_unreserve_session(ctx, 1 /* is_syswide */, ctx->ctx_cpu);
4417
4418
4419
4420
4421 task->thread.pfm_context = NULL;
4422
4423
4424
4425 ctx->ctx_task = NULL;
4426
4427
4428
4429
4430 return 0;
4431 }
4432
4433
4434
4435
4436 tregs = task == current ? regs : task_pt_regs(task);
4437
4438 if (task == current) {
4439
4440
4441
4442 ia64_psr(regs)->sp = 1;
4443
4444 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4445 }
4446
4447
4448
4449
4450 pfm_flush_pmds(task, ctx);
4451
4452
4453
4454
4455
4456
4457
4458 if (prev_state != PFM_CTX_ZOMBIE)
4459 pfm_unreserve_session(ctx, 0 /* is_syswide */, ctx->ctx_cpu);
4460
4461
4462
4463
4464 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4465 SET_LAST_CPU(ctx, -1);
4466
4467
4468
4469
4470 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4471
4472
4473
4474
4475 task->thread.pfm_context = NULL;
4476 ctx->ctx_task = NULL;
4477
4478 PFM_SET_WORK_PENDING(task, 0);
4479
4480 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4481 ctx->ctx_fl_can_restart = 0;
4482 ctx->ctx_fl_going_zombie = 0;
4483
4484 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4485
4486 return 0;
4487 }
4488
4489
4490
4491
4492
4493
4494 void
4495 pfm_exit_thread(struct task_struct *task)
4496 {
4497 pfm_context_t *ctx;
4498 unsigned long flags;
4499 struct pt_regs *regs = task_pt_regs(task);
4500 int ret, state;
4501 int free_ok = 0;
4502
4503 ctx = PFM_GET_CTX(task);
4504
4505 PROTECT_CTX(ctx, flags);
4506
4507 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4508
4509 state = ctx->ctx_state;
4510 switch(state) {
4511 case PFM_CTX_UNLOADED:
4512
4513
4514
4515
4516 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4517 break;
4518 case PFM_CTX_LOADED:
4519 case PFM_CTX_MASKED:
4520 ret = pfm_context_unload(ctx, NULL, 0, regs);
4521 if (ret) {
4522 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4523 }
4524 DPRINT(("ctx unloaded for current state was %d\n", state));
4525
4526 pfm_end_notify_user(ctx);
4527 break;
4528 case PFM_CTX_ZOMBIE:
4529 ret = pfm_context_unload(ctx, NULL, 0, regs);
4530 if (ret) {
4531 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4532 }
4533 free_ok = 1;
4534 break;
4535 default:
4536 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4537 break;
4538 }
4539 UNPROTECT_CTX(ctx, flags);
4540
4541 { u64 psr = pfm_get_psr();
4542 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4543 BUG_ON(GET_PMU_OWNER());
4544 BUG_ON(ia64_psr(regs)->up);
4545 BUG_ON(ia64_psr(regs)->pp);
4546 }
4547
4548
4549
4550
4551
4552 if (free_ok) pfm_context_free(ctx);
4553 }
4554
4555
4556
4557
4558 #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4559 #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4560 #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4561 #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4562 #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4563
4564 static pfm_cmd_desc_t pfm_cmd_tab[]={
4565 PFM_CMD_NONE,
4566 PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4567 PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4568 PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4569 PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4570 PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4571 PFM_CMD_NONE,
4572 PFM_CMD_NONE,
4573 PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4574 PFM_CMD_NONE,
4575 PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4576 PFM_CMD_NONE,
4577 PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4578 PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4579 PFM_CMD_NONE,
4580 PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4581 PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4582 PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4583 PFM_CMD_NONE,
4584 PFM_CMD_NONE,
4585 PFM_CMD_NONE,
4586 PFM_CMD_NONE,
4587 PFM_CMD_NONE,
4588 PFM_CMD_NONE,
4589 PFM_CMD_NONE,
4590 PFM_CMD_NONE,
4591 PFM_CMD_NONE,
4592 PFM_CMD_NONE,
4593 PFM_CMD_NONE,
4594 PFM_CMD_NONE,
4595 PFM_CMD_NONE,
4596 PFM_CMD_NONE,
4597 PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4598 PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4599 };
4600 #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
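/*
 * pfm_cmd_tab is indexed directly by the command number passed to
 * perfmonctl(). PFM_CMD_NONE entries mark unimplemented command slots
 * and make sys_perfmonctl() return -EINVAL for them. Commands with
 * PFM_CMD_ARG_MANY accept a vector of 'count' arguments of the listed
 * type, and a non-NULL getsize callback (context creation) appends a
 * variable-size trailer to the copied-in argument.
 */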
4601
4602 static int
4603 pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4604 {
4605 struct task_struct *task;
4606 int state, old_state;
4607
4608 recheck:
4609 state = ctx->ctx_state;
4610 task = ctx->ctx_task;
4611
4612 if (task == NULL) {
4613 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4614 return 0;
4615 }
4616
4617 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4618 ctx->ctx_fd,
4619 state,
4620 task_pid_nr(task),
4621 task->state, PFM_CMD_STOPPED(cmd)));
4622
4623
4624
4625
4626
4627
4628
4629
4630 if (task == current || ctx->ctx_fl_system) return 0;
4631
4632
4633
4634
4635 switch(state) {
4636 case PFM_CTX_UNLOADED:
4637
4638
4639
4640 return 0;
4641 case PFM_CTX_ZOMBIE:
4642
4643
4644
4645 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4646 return -EINVAL;
4647 case PFM_CTX_MASKED:
4648
4649
4650
4651
4652 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4653 }
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665 if (PFM_CMD_STOPPED(cmd)) {
4666 if (!task_is_stopped_or_traced(task)) {
4667 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4668 return -EBUSY;
4669 }
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684 old_state = state;
4685
4686 UNPROTECT_CTX(ctx, flags);
4687
4688 wait_task_inactive(task, 0);
4689
4690 PROTECT_CTX(ctx, flags);
4691
4692
4693
4694
4695 if (ctx->ctx_state != old_state) {
4696 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4697 goto recheck;
4698 }
4699 }
4700 return 0;
4701 }
4702
4703
4704
4705
4706 asmlinkage long
4707 sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4708 {
4709 struct fd f = {NULL, 0};
4710 pfm_context_t *ctx = NULL;
4711 unsigned long flags = 0UL;
4712 void *args_k = NULL;
4713 long ret;
4714 size_t base_sz, sz, xtra_sz = 0;
4715 int narg, completed_args = 0, call_made = 0, cmd_flags;
4716 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4717 int (*getsize)(void *arg, size_t *sz);
4718 #define PFM_MAX_ARGSIZE 4096
4719
4720
4721
4722
4723 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4724
4725 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4726 DPRINT(("invalid cmd=%d\n", cmd));
4727 return -EINVAL;
4728 }
4729
4730 func = pfm_cmd_tab[cmd].cmd_func;
4731 narg = pfm_cmd_tab[cmd].cmd_narg;
4732 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4733 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4734 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4735
4736 if (unlikely(func == NULL)) {
4737 DPRINT(("invalid cmd=%d\n", cmd));
4738 return -EINVAL;
4739 }
4740
4741 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4742 PFM_CMD_NAME(cmd),
4743 cmd,
4744 narg,
4745 base_sz,
4746 count));
4747
4748
4749
4750
4751 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4752 return -EINVAL;
4753
4754 restart_args:
4755 sz = xtra_sz + base_sz*count;
4756
4757
4758
4759 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4760 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4761 return -E2BIG;
4762 }
4763
4764
4765
4766
4767 if (likely(count && args_k == NULL)) {
4768 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4769 if (args_k == NULL) return -ENOMEM;
4770 }
4771
4772 ret = -EFAULT;
4773
4774
4775
4776
4777
4778
4779 if (sz && copy_from_user(args_k, arg, sz)) {
4780 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4781 goto error_args;
4782 }
4783
4784
4785
4786
4787 if (completed_args == 0 && getsize) {
4788
4789
4790
4791 ret = (*getsize)(args_k, &xtra_sz);
4792 if (ret) goto error_args;
4793
4794 completed_args = 1;
4795
4796 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4797
4798
4799 if (likely(xtra_sz)) goto restart_args;
4800 }
4801
4802 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4803
4804 ret = -EBADF;
4805
4806 f = fdget(fd);
4807 if (unlikely(f.file == NULL)) {
4808 DPRINT(("invalid fd %d\n", fd));
4809 goto error_args;
4810 }
4811 if (unlikely(PFM_IS_FILE(f.file) == 0)) {
4812 DPRINT(("fd %d not related to perfmon\n", fd));
4813 goto error_args;
4814 }
4815
4816 ctx = f.file->private_data;
4817 if (unlikely(ctx == NULL)) {
4818 DPRINT(("no context for fd %d\n", fd));
4819 goto error_args;
4820 }
4821 prefetch(&ctx->ctx_state);
4822
4823 PROTECT_CTX(ctx, flags);
4824
4825
4826
4827
4828 ret = pfm_check_task_state(ctx, cmd, flags);
4829 if (unlikely(ret)) goto abort_locked;
4830
4831 skip_fd:
4832 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4833
4834 call_made = 1;
4835
4836 abort_locked:
4837 if (likely(ctx)) {
4838 DPRINT(("context unlocked\n"));
4839 UNPROTECT_CTX(ctx, flags);
4840 }
4841
4842
4843 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4844
4845 error_args:
4846 if (f.file)
4847 fdput(f);
4848
4849 kfree(args_k);
4850
4851 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4852
4853 return ret;
4854 }
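/*
 * A sketch of the typical user-level calling sequence this system call
 * supports (per-task self-monitoring, error handling omitted; the
 * PFM_* command names are the user-level constants and this is not the
 * only legal ordering):
 *
 *	pfarg_context_t c = { ... };
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);  fd is ignored, c.ctx_fd returned
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMCS, pcs, npcs);
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMDS, pds, npds);
 *	perfmonctl(c.ctx_fd, PFM_LOAD_CONTEXT, &load, 1);
 *	perfmonctl(c.ctx_fd, PFM_START, NULL, 0);
 *	... workload runs ...
 *	perfmonctl(c.ctx_fd, PFM_STOP, NULL, 0);
 *	perfmonctl(c.ctx_fd, PFM_READ_PMDS, pds, npds);
 */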
4855
4856 static void
4857 pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4858 {
4859 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4860 pfm_ovfl_ctrl_t rst_ctrl;
4861 int state;
4862 int ret = 0;
4863
4864 state = ctx->ctx_state;
4865
4866
4867
4868
4869 if (CTX_HAS_SMPL(ctx)) {
4870
4871 rst_ctrl.bits.mask_monitoring = 0;
4872 rst_ctrl.bits.reset_ovfl_pmds = 0;
4873
4874 if (state == PFM_CTX_LOADED)
4875 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4876 else
4877 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4878 } else {
4879 rst_ctrl.bits.mask_monitoring = 0;
4880 rst_ctrl.bits.reset_ovfl_pmds = 1;
4881 }
4882
4883 if (ret == 0) {
4884 if (rst_ctrl.bits.reset_ovfl_pmds) {
4885 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4886 }
4887 if (rst_ctrl.bits.mask_monitoring == 0) {
4888 DPRINT(("resuming monitoring\n"));
4889 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4890 } else {
4891 DPRINT(("stopping monitoring\n"));
4892
4893 }
4894 ctx->ctx_state = PFM_CTX_LOADED;
4895 }
4896 }
4897
4898
4899
4900
4901
4902 static void
4903 pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4904 {
4905 int ret;
4906
4907 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
4908
4909 ret = pfm_context_unload(ctx, NULL, 0, regs);
4910 if (ret) {
4911 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
4912 }
4913
4914
4915
4916
4917 wake_up_interruptible(&ctx->ctx_zombieq);
4918
4919
4920
4921
4922
4923
4924 }
4925
4926 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937 void
4938 pfm_handle_work(void)
4939 {
4940 pfm_context_t *ctx;
4941 struct pt_regs *regs;
4942 unsigned long flags, dummy_flags;
4943 unsigned long ovfl_regs;
4944 unsigned int reason;
4945 int ret;
4946
4947 ctx = PFM_GET_CTX(current);
4948 if (ctx == NULL) {
4949 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
4950 task_pid_nr(current));
4951 return;
4952 }
4953
4954 PROTECT_CTX(ctx, flags);
4955
4956 PFM_SET_WORK_PENDING(current, 0);
4957
4958 regs = task_pt_regs(current);
4959
4960
4961
4962
4963 reason = ctx->ctx_fl_trap_reason;
4964 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4965 ovfl_regs = ctx->ctx_ovfl_regs[0];
4966
4967 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
4968
4969
4970
4971
4972 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
4973 goto do_zombie;
4974
4975
4976 if (reason == PFM_TRAP_REASON_RESET)
4977 goto skip_blocking;
4978
4979
4980
4981
4982
4983 UNPROTECT_CTX(ctx, flags);
4984
4985
4986
4987
4988 local_irq_enable();
4989
4990 DPRINT(("before block sleeping\n"));
4991
4992
4993
4994
4995
4996 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
4997
4998 DPRINT(("after block sleeping ret=%d\n", ret));
4999
5000
5001
5002
5003
5004
5005
5006 PROTECT_CTX(ctx, dummy_flags);
5007
5008
5009
5010
5011
5012
5013
5014 ovfl_regs = ctx->ctx_ovfl_regs[0];
5015
5016 if (ctx->ctx_fl_going_zombie) {
5017 do_zombie:
5018 DPRINT(("context is zombie, bailing out\n"));
5019 pfm_context_force_terminate(ctx, regs);
5020 goto nothing_to_do;
5021 }
5022
5023
5024
5025 if (ret < 0)
5026 goto nothing_to_do;
5027
5028 skip_blocking:
5029 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5030 ctx->ctx_ovfl_regs[0] = 0UL;
5031
5032 nothing_to_do:
5033
5034
5035
5036 UNPROTECT_CTX(ctx, flags);
5037 }
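/*
 * Blocking-mode flow recap: a monitored task that overflowed with
 * notification blocking enabled reaches pfm_handle_work() on its way
 * back to user mode, drops the context lock, and sleeps on
 * ctx_restart_done until the monitoring tool issues PFM_RESTART
 * (pfm_restart() does the complete()). Zombie contexts and
 * PFM_TRAP_REASON_RESET skip the sleep and go straight to resetting
 * the overflowed PMDs.
 */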
5038
5039 static int
5040 pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5041 {
5042 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5043 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5044 return 0;
5045 }
5046
5047 DPRINT(("waking up somebody\n"));
5048
5049 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5050
5051
5052
5053
5054
5055 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5056
5057 return 0;
5058 }
5059
5060 static int
5061 pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5062 {
5063 pfm_msg_t *msg = NULL;
5064
5065 if (ctx->ctx_fl_no_msg == 0) {
5066 msg = pfm_get_new_msg(ctx);
5067 if (msg == NULL) {
5068 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5069 return -1;
5070 }
5071
5072 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5073 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5074 msg->pfm_ovfl_msg.msg_active_set = 0;
5075 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5076 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5077 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5078 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5079 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5080 }
5081
5082 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5083 msg,
5084 ctx->ctx_fl_no_msg,
5085 ctx->ctx_fd,
5086 ovfl_pmds));
5087
5088 return pfm_notify_user(ctx, msg);
5089 }
5090
5091 static int
5092 pfm_end_notify_user(pfm_context_t *ctx)
5093 {
5094 pfm_msg_t *msg;
5095
5096 msg = pfm_get_new_msg(ctx);
5097 if (msg == NULL) {
5098 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5099 return -1;
5100 }
5101
5102 memset(msg, 0, sizeof(*msg));
5103
5104 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5105 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5106 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5107
5108 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5109 msg,
5110 ctx->ctx_fl_no_msg,
5111 ctx->ctx_fd));
5112
5113 return pfm_notify_user(ctx, msg);
5114 }
5115
5116
5117 /*
5118  * main overflow processing routine, called with the context locked
5119  */
5120 static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5121 unsigned long pmc0, struct pt_regs *regs)
5122 {
5123 pfm_ovfl_arg_t *ovfl_arg;
5124 unsigned long mask;
5125 unsigned long old_val, ovfl_val, new_val;
5126 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5127 unsigned long tstamp;
5128 pfm_ovfl_ctrl_t ovfl_ctrl;
5129 unsigned int i, has_smpl;
5130 int must_notify = 0;
5131
5132 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5133
5134
5135
5136
5137 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5138
5139 tstamp = ia64_get_itc();
5140 mask = pmc0 >> PMU_FIRST_COUNTER;
5141 ovfl_val = pmu_conf->ovfl_val;
5142 has_smpl = CTX_HAS_SMPL(ctx);
5143
5144 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5145 "used_pmds=0x%lx\n",
5146 pmc0,
5147 task ? task_pid_nr(task): -1,
5148 (regs ? regs->cr_iip : 0),
5149 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5150 ctx->ctx_used_pmds[0]));
5151
5152
5153
5154
5155
5156
5157 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5158
5159
5160 if ((mask & 0x1) == 0) continue;
5161
5162
5163 /*
5164  * update the 64-bit virtualized counter: the hardware counter wrapped,
5165  * so add one full hardware period (ovfl_val + 1) to the software value.
5166  * residual hardware counts are folded back in by pfm_read_pmds().
5167  */
5168 old_val = new_val = ctx->ctx_pmds[i].val;
5169 new_val += 1 + ovfl_val;
5170 ctx->ctx_pmds[i].val = new_val;
5171
5172 /*
5173  * check for 64-bit overflow: the software counter value wrapped
5174  */
5175 if (likely(old_val > new_val)) {
5176 ovfl_pmds |= 1UL << i;
5177 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5178 }
5179
5180 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5181 i,
5182 new_val,
5183 old_val,
5184 ia64_get_pmd(i) & ovfl_val,
5185 ovfl_pmds,
5186 ovfl_notify));
5187 }
5188
5189
5190
5191
5192 if (ovfl_pmds == 0UL) return;
5193
5194
5195
5196
5197 ovfl_ctrl.val = 0;
5198 reset_pmds = 0UL;
5199
5200
5201
5202
5203
5204 if (has_smpl) {
5205 unsigned long start_cycles, end_cycles;
5206 unsigned long pmd_mask;
5207 int j, k, ret = 0;
5208 int this_cpu = smp_processor_id();
5209
5210 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5211 ovfl_arg = &ctx->ctx_ovfl_arg;
5212
5213 prefetch(ctx->ctx_smpl_hdr);
5214
5215 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5216
5217 mask = 1UL << i;
5218
5219 if ((pmd_mask & 0x1) == 0) continue;
5220
5221 ovfl_arg->ovfl_pmd = (unsigned char )i;
5222 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5223 ovfl_arg->active_set = 0;
5224 ovfl_arg->ovfl_ctrl.val = 0;
5225 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5226
5227 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5228 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5229 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5230
5231
5232
5233
5234
5235 if (smpl_pmds) {
5236 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5237 if ((smpl_pmds & 0x1) == 0) continue;
5238 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5239 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5240 }
5241 }
5242
5243 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5244
5245 start_cycles = ia64_get_itc();
5246
5247
5248
5249
5250 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5251
5252 end_cycles = ia64_get_itc();
5253
5254
5255
5256
5257
5258 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5259 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5260 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5261
5262
5263
5264 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5265
5266 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5267 }
5268
5269
5270
5271 if (ret && pmd_mask) {
5272 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5273 pmd_mask<<PMU_FIRST_COUNTER));
5274 }
5275
5276
5277
5278 ovfl_pmds &= ~reset_pmds;
5279 } else {
5280
5281 /*
5282 * no sampling module: apply the default overflow behavior
5283 */
5284 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5285 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5286 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5287 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5288
5289
5290 /* without notification, the overflowed pmds are reset immediately */
5291 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5292 }
5293
5294 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5295
5296 /*
5297 * reset the requested pmds using the short (per-overflow) reset values
5298 */
5299 if (reset_pmds) {
5300 unsigned long bm = reset_pmds;
5301 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5302 }
5303
5304 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5305 /*
5306 * keep track of what to reset when unblocking
5307 */
5308 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5309
5310 /*
5311 * a blocking context stops the task until the user issues a restart
5312 */
5313 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5314
5315 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5316
5317 /*
5318 * arrange for the task to block on its way back to user mode
5319 */
5320 PFM_SET_WORK_PENDING(task, 1);
5321
5322 /*
5323 * make sure the task notices the pending work before it
5324 * returns to user mode
5325 */
5326 set_notify_resume(task);
5327 }
5328
5329 /*
5330 * defer the notification until after the state change below
5331 */
5332 must_notify = 1;
5333 }
5334
5335 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5336 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5337 PFM_GET_WORK_PENDING(task),
5338 ctx->ctx_fl_trap_reason,
5339 ovfl_pmds,
5340 ovfl_notify,
5341 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5342
5343
5344
5345 if (ovfl_ctrl.bits.mask_monitoring) {
5346 pfm_mask_monitoring(task);
5347 ctx->ctx_state = PFM_CTX_MASKED;
5348 ctx->ctx_fl_can_restart = 1;
5349 }
5350
5351 /*
5352 * send notification now
5353 */
5354 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5355
5356 return;
5357
5358 sanity_check:
5359 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5360 smp_processor_id(),
5361 task ? task_pid_nr(task) : -1,
5362 pmc0);
5363 return;
5364
5365 stop_monitoring:
5366 /*
5367 * the context is in the ZOMBIE state: its owner went away while
5368 * monitoring was still active on this CPU (e.g. the context file
5369 * descriptor was closed). The context may be reclaimed at any
5370 * time, so the overflow cannot be processed; instead the
5371 * interrupt is converted into a spurious one:
5372 *
5373 * - clear psr.up to stop per-task monitoring immediately,
5374 * - clear the saved psr.up so monitoring stays off when the
5375 * interrupted task resumes,
5376 * - set the saved psr.sp to re-secure the PMU from user level.
5377 *
5378 * the actual cleanup (freeing the context, releasing the PMU) is
5379 * done later on the context-switch path, see pfm_save_regs() and
5380 * pfm_load_regs().
5381 */
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5395 pfm_clear_psr_up();
5396 ia64_psr(regs)->up = 0;
5397 ia64_psr(regs)->sp = 1;
5398 return;
5399 }
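
[Editor's sketch, not part of this file.] The counter bookkeeping above is the core of the handler: ctx_pmds[i].val holds the upper bits of a 64-bit software counter, each hardware overflow credits 1 + ovfl_val events, and a user-visible overflow exists only when the 64-bit software value itself wraps (old_val > new_val). The following stand-alone C program (it assumes an LP64 unsigned long and a 47-bit counter width; the real mask comes from pmu_conf->ovfl_val) shows how programming a value of -period makes the wrap fire after exactly 'period' events:

#include <stdio.h>

int main(void)
{
	const unsigned long ovfl_val = (1UL << 47) - 1; /* assumed hw counter mask */
	unsigned long period = 1000;

	/* a 64-bit start value of -period, split into soft and hw parts */
	unsigned long start = 0UL - period;
	unsigned long soft  = start & ~ovfl_val; /* kept in ctx_pmds[i].val */
	unsigned long hw    = start &  ovfl_val; /* loaded into the PMD     */

	/* one hardware overflow means the PMD wrapped: credit 2^47 events */
	unsigned long old_val = soft;
	unsigned long new_val = old_val + 1 + ovfl_val;

	printf("hw=0x%lx soft=0x%lx\n", hw, soft);
	printf("after one hw overflow: old=0x%lx new=0x%lx 64-bit wrap=%d\n",
	       old_val, new_val, old_val > new_val);
	return 0;
}
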
5400
5401 static int
5402 pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5403 {
5404 struct task_struct *task;
5405 pfm_context_t *ctx;
5406 unsigned long flags;
5407 u64 pmc0;
5408 int this_cpu = smp_processor_id();
5409 int retval = 0;
5410
5411 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5412
5413 /*
5414 * srlz.d was done before arriving here
5415 */
5416 pmc0 = ia64_get_pmc(0);
5417
5418 task = GET_PMU_OWNER();
5419 ctx = GET_PMU_CTX();
5420
5421 /*
5422 * check for pending overflow bits;
5423 * if any of pmc0[63-1] is set, pmc0.fr is set as well
5424 */
5425 if (PMC0_HAS_OVFL(pmc0) && task) {
5426 /*
5427 * we assume pmc0.fr is set whenever an overflow bit is pending
5428 */
5429
5430 /* sanity check */
5431 if (!ctx) goto report_spurious1;
5432
5433 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5434 goto report_spurious2;
5435
5436 PROTECT_CTX_NOPRINT(ctx, flags);
5437
5438 pfm_overflow_handler(task, ctx, pmc0, regs);
5439
5440 UNPROTECT_CTX_NOPRINT(ctx, flags);
5441
5442 } else {
5443 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5444 retval = -1;
5445 }
5446 /*
5447 * keep the PMU unfrozen at all times
5448 */
5449 pfm_unfreeze_pmu();
5450
5451 return retval;
5452
5453 report_spurious1:
5454 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5455 this_cpu, task_pid_nr(task));
5456 pfm_unfreeze_pmu();
5457 return -1;
5458 report_spurious2:
5459 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5460 this_cpu,
5461 task_pid_nr(task));
5462 pfm_unfreeze_pmu();
5463 return -1;
5464 }
5465
5466 static irqreturn_t
5467 pfm_interrupt_handler(int irq, void *arg)
5468 {
5469 unsigned long start_cycles, total_cycles;
5470 unsigned long min, max;
5471 int this_cpu;
5472 int ret;
5473 struct pt_regs *regs = get_irq_regs();
5474
5475 this_cpu = get_cpu();
5476 if (likely(!pfm_alt_intr_handler)) {
5477 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5478 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5479
5480 start_cycles = ia64_get_itc();
5481
5482 ret = pfm_do_interrupt_handler(arg, regs);
5483
5484 total_cycles = ia64_get_itc();
5485
5486 /*
5487 * don't measure spurious interrupts
5488 */
5489 if (likely(ret == 0)) {
5490 total_cycles -= start_cycles;
5491
5492 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5493 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5494
5495 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5496 }
5497 }
5498 else {
5499 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
5500 }
5501
5502 put_cpu();
5503 return IRQ_HANDLED;
5504 }
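
[Editor's sketch.] The statistics path above is a standard min/max/total pattern over a per-CPU structure. A compilable reduction (struct and function names are illustrative, not kernel API), including the ~0UL seeding of the minimum that pfm_init() performs for every CPU:

#include <stdio.h>

struct ovfl_stats {
	unsigned long cycles, cycles_min, cycles_max;
};

static void account(struct ovfl_stats *st, unsigned long delta)
{
	if (delta < st->cycles_min) st->cycles_min = delta;
	if (delta > st->cycles_max) st->cycles_max = delta;
	st->cycles += delta;
}

int main(void)
{
	struct ovfl_stats st = { 0, ~0UL, 0 }; /* min seeded to ~0UL */
	account(&st, 120);
	account(&st, 80);
	printf("min=%lu max=%lu total=%lu\n", st.cycles_min, st.cycles_max, st.cycles);
	return 0;
}
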
5505
5506 /*
5507 * /proc/perfmon interface, mostly for debugging
5508 */
5509
5510 #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5511
5512 static void *
5513 pfm_proc_start(struct seq_file *m, loff_t *pos)
5514 {
5515 if (*pos == 0) {
5516 return PFM_PROC_SHOW_HEADER;
5517 }
5518
5519 while (*pos <= nr_cpu_ids) {
5520 if (cpu_online(*pos - 1)) {
5521 return (void *)*pos;
5522 }
5523 ++*pos;
5524 }
5525 return NULL;
5526 }
5527
5528 static void *
5529 pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5530 {
5531 ++*pos;
5532 return pfm_proc_start(m, pos);
5533 }
5534
5535 static void
5536 pfm_proc_stop(struct seq_file *m, void *v)
5537 {
5538 }
5539
5540 static void
5541 pfm_proc_show_header(struct seq_file *m)
5542 {
5543 struct list_head * pos;
5544 pfm_buffer_fmt_t * entry;
5545 unsigned long flags;
5546
5547 seq_printf(m,
5548 "perfmon version : %u.%u\n"
5549 "model : %s\n"
5550 "fastctxsw : %s\n"
5551 "expert mode : %s\n"
5552 "ovfl_mask : 0x%lx\n"
5553 "PMU flags : 0x%x\n",
5554 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5555 pmu_conf->pmu_name,
5556 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5557 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5558 pmu_conf->ovfl_val,
5559 pmu_conf->flags);
5560
5561 LOCK_PFS(flags);
5562
5563 seq_printf(m,
5564 "proc_sessions : %u\n"
5565 "sys_sessions : %u\n"
5566 "sys_use_dbregs : %u\n"
5567 "ptrace_use_dbregs : %u\n",
5568 pfm_sessions.pfs_task_sessions,
5569 pfm_sessions.pfs_sys_sessions,
5570 pfm_sessions.pfs_sys_use_dbregs,
5571 pfm_sessions.pfs_ptrace_use_dbregs);
5572
5573 UNLOCK_PFS(flags);
5574
5575 spin_lock(&pfm_buffer_fmt_lock);
5576
5577 list_for_each(pos, &pfm_buffer_fmt_list) {
5578 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5579 seq_printf(m, "format : %16phD %s\n",
5580 entry->fmt_uuid, entry->fmt_name);
5581 }
5582 spin_unlock(&pfm_buffer_fmt_lock);
5583
5584 }
5585
5586 static int
5587 pfm_proc_show(struct seq_file *m, void *v)
5588 {
5589 unsigned long psr;
5590 unsigned int i;
5591 int cpu;
5592
5593 if (v == PFM_PROC_SHOW_HEADER) {
5594 pfm_proc_show_header(m);
5595 return 0;
5596 }
5597
5598
5599 /* CPU slots are 1-based: entry v describes cpu v-1 */
5600 cpu = (long)v - 1;
5601 seq_printf(m,
5602 "CPU%-2d overflow intrs : %lu\n"
5603 "CPU%-2d overflow cycles : %lu\n"
5604 "CPU%-2d overflow min : %lu\n"
5605 "CPU%-2d overflow max : %lu\n"
5606 "CPU%-2d smpl handler calls : %lu\n"
5607 "CPU%-2d smpl handler cycles : %lu\n"
5608 "CPU%-2d spurious intrs : %lu\n"
5609 "CPU%-2d replay intrs : %lu\n"
5610 "CPU%-2d syst_wide : %d\n"
5611 "CPU%-2d dcr_pp : %d\n"
5612 "CPU%-2d exclude idle : %d\n"
5613 "CPU%-2d owner : %d\n"
5614 "CPU%-2d context : %p\n"
5615 "CPU%-2d activations : %lu\n",
5616 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5617 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5618 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5619 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5620 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5621 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5622 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5623 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5624 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5625 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5626 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5627 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5628 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5629 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5630
5631 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5632
5633 psr = pfm_get_psr();
5634
5635 ia64_srlz_d();
5636
5637 seq_printf(m,
5638 "CPU%-2d psr : 0x%lx\n"
5639 "CPU%-2d pmc0 : 0x%lx\n",
5640 cpu, psr,
5641 cpu, ia64_get_pmc(0));
5642
5643 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5644 if (PMC_IS_COUNTING(i) == 0) continue;
5645 seq_printf(m,
5646 "CPU%-2d pmc%u : 0x%lx\n"
5647 "CPU%-2d pmd%u : 0x%lx\n",
5648 cpu, i, ia64_get_pmc(i),
5649 cpu, i, ia64_get_pmd(i));
5650 }
5651 }
5652 return 0;
5653 }
5654
5655 const struct seq_operations pfm_seq_ops = {
5656 .start = pfm_proc_start,
5657 .next = pfm_proc_next,
5658 .stop = pfm_proc_stop,
5659 .show = pfm_proc_show
5660 };
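
[Editor's sketch.] The iterator above encodes two things in the seq_file position: position 0 yields a sentinel for the header, and positions 1..nr_cpu_ids map to "cpu = pos - 1", skipping offline CPUs. A plain-C model of just that position logic (online[], NR and the function names are stand-ins; no seq_file API involved):

#include <stdio.h>

#define NR 4
#define SHOW_HEADER ((long)NR + 1)	/* sentinel distinct from any slot */
static const int online[NR] = { 1, 0, 1, 1 };

static long start(long *pos)
{
	if (*pos == 0) return SHOW_HEADER;
	while (*pos <= NR) {
		if (online[*pos - 1]) return *pos;
		++*pos;
	}
	return 0;	/* iteration done */
}

int main(void)
{
	long pos = 0, v;

	/* next() is simply: advance the position, then re-run start() */
	for (v = start(&pos); v; ++pos, v = start(&pos)) {
		if (v == SHOW_HEADER) printf("header\n");
		else printf("cpu%ld\n", v - 1);
	}
	return 0;	/* prints: header, cpu0, cpu2, cpu3 */
}
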
5661
5662 /*
5663 * called on every context switch of a CPU running a system-wide
5664 * session; the per-CPU pfm_syst_info flags, not the task state,
5665 * tell us whether monitoring is active
5666 */
5667
5668 void
5669 pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5670 {
5671 struct pt_regs *regs;
5672 unsigned long dcr;
5673 unsigned long dcr_pp;
5674
5675 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5676
5677 /*
5678 * propagate psr.pp to the task, except when the idle task is
5679 * excluded: pid 0 identifies the per-CPU idle task
5680 */
5681 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5682 regs = task_pt_regs(task);
5683 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5684 return;
5685 }
5686
5687
5688 /* if monitoring has started */
5689 if (dcr_pp) {
5690 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5691 /*
5692 * context switching in?
5693 */
5694 if (is_ctxswin) {
5695 /* mask monitoring for the idle task */
5696 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5697 pfm_clear_psr_pp();
5698 ia64_srlz_i();
5699 return;
5700 }
5701
5702 /*
5703 * context switching out: restore monitoring before the next
5704 * (non-idle) task runs
5705 */
5706
5707
5708 ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
5709 pfm_set_psr_pp();
5710 ia64_srlz_i();
5711 }
5712 }
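
[Editor's sketch.] The function above makes a three-way decision: ordinary tasks just have psr.pp tracked in their saved registers, while the idle task (pid 0) has monitoring masked on switch-in and restored on switch-out when idle exclusion is active. An executable decision-table sketch (the flag values below are made up; only the branching mirrors the code above):

#include <stdio.h>

#define EXCL_IDLE 0x4	/* illustrative stand-in for PFM_CPUINFO_EXCL_IDLE */
#define DCR_PP    0x2	/* illustrative stand-in for PFM_CPUINFO_DCR_PP    */

static const char *action(unsigned long info, int pid, int is_ctxswin)
{
	int dcr_pp = (info & DCR_PP) ? 1 : 0;

	if ((info & EXCL_IDLE) == 0 || pid)
		return (is_ctxswin && dcr_pp) ? "set saved psr.pp" : "clear saved psr.pp";
	if (!dcr_pp)
		return "nothing (monitoring not started)";
	return is_ctxswin ? "mask monitoring for idle" : "restore monitoring";
}

int main(void)
{
	printf("%s\n", action(EXCL_IDLE | DCR_PP, 0, 1));	/* idle in  */
	printf("%s\n", action(EXCL_IDLE | DCR_PP, 0, 0));	/* idle out */
	printf("%s\n", action(DCR_PP, 1234, 1));		/* task in  */
	return 0;
}
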
5713
5714 #ifdef CONFIG_SMP
5715
5716 static void
5717 pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5718 {
5719 struct task_struct *task = ctx->ctx_task;
5720
5721 ia64_psr(regs)->up = 0;
5722 ia64_psr(regs)->sp = 1;
5723
5724 if (GET_PMU_OWNER() == task) {
5725 DPRINT(("cleared ownership for [%d]\n",
5726 task_pid_nr(ctx->ctx_task)));
5727 SET_PMU_OWNER(NULL, NULL);
5728 }
5729
5730 /*
5731 * disconnect the task from the context and vice-versa
5732 */
5733 PFM_SET_WORK_PENDING(task, 0);
5734
5735 task->thread.pfm_context = NULL;
5736 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5737
5738 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5739 }
5740
5741
5742
5743
5744
5745 void
5746 pfm_save_regs(struct task_struct *task)
5747 {
5748 pfm_context_t *ctx;
5749 unsigned long flags;
5750 u64 psr;
5751
5752
5753 ctx = PFM_GET_CTX(task);
5754 if (ctx == NULL) return;
5755
5756 /*
5757 * we always come here with interrupts ALREADY disabled by the
5758 * scheduler, so we only need to protect against concurrent
5759 * access to the context, not against CPU concurrency
5760 */
5761 flags = pfm_protect_ctx_ctxsw(ctx);
5762
5763 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5764 struct pt_regs *regs = task_pt_regs(task);
5765
5766 pfm_clear_psr_up();
5767
5768 pfm_force_cleanup(ctx, regs);
5769
5770 BUG_ON(ctx->ctx_smpl_hdr);
5771
5772 pfm_unprotect_ctx_ctxsw(ctx, flags);
5773
5774 pfm_context_free(ctx);
5775 return;
5776 }
5777
5778 /*
5779 * save the current PSR: needed because we modify it below
5780 */
5781 ia64_srlz_d();
5782 psr = pfm_get_psr();
5783
5784 BUG_ON(psr & (IA64_PSR_I));
5785
5786 /*
5787 * stop monitoring:
5788 * this is the last instruction which may generate an overflow.
5789 *
5790 * no need to set psr.sp here: it is irrelevant at the kernel
5791 * level and will be restored from ipsr on return to user mode
5792 */
5793 pfm_clear_psr_up();
5794
5795 /*
5796 * keep a copy of psr.up for the reload path
5797 */
5798 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5799
5800 /*
5801 * release ownership of this PMU.
5802 * PMU interrupts are masked, so nothing can happen here.
5803 */
5804
5805 SET_PMU_OWNER(NULL, NULL);
5806
5807 /*
5808 * systematically save the pmds we use: there is no guarantee
5809 * the task will be rescheduled on this same CPU
5810 */
5811
5812 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5813
5814 /*
5815 * save pmc0 (the ia64_srlz_d() was done in pfm_save_pmds());
5816 * the reload path needs it to detect pending overflows
5817 */
5818
5819 ctx->th_pmcs[0] = ia64_get_pmc(0);
5820
5821 /*
5822 * unfreeze the PMU if there were pending overflows
5823 */
5824 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5825
5826 /*
5827 * finally, allow context access again;
5828 * interrupts remain masked after this call
5829 */
5830 pfm_unprotect_ctx_ctxsw(ctx, flags);
5831 }
5832
5833 #else
5834 void
5835 pfm_save_regs(struct task_struct *task)
5836 {
5837 pfm_context_t *ctx;
5838 u64 psr;
5839
5840 ctx = PFM_GET_CTX(task);
5841 if (ctx == NULL) return;
5842
5843 /*
5844 * save the current PSR: needed because we modify it below
5845 */
5846 psr = pfm_get_psr();
5847
5848 BUG_ON(psr & (IA64_PSR_I));
5849
5850 /*
5851 * stop monitoring:
5852 * this is the last instruction which may generate an overflow.
5853 *
5854 * no need to set psr.sp here: it will be restored from ipsr on
5855 * return to user mode
5856 */
5857 pfm_clear_psr_up();
5858
5859 /*
5860 * keep a copy of psr.up for the reload path
5861 */
5862 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5863 }
5864
5865 static void
5866 pfm_lazy_save_regs (struct task_struct *task)
5867 {
5868 pfm_context_t *ctx;
5869 unsigned long flags;
5870
5871 { u64 psr = pfm_get_psr();
5872 BUG_ON(psr & IA64_PSR_UP);
5873 }
5874
5875 ctx = PFM_GET_CTX(task);
5876
5877 /*
5878 * on UP the previous owner's state is saved lazily: it stays
5879 * live in the PMU until another context needs the hardware.
5880 * pmc0 must be kept intact until it is saved below; with no
5881 * registered owner, any overflow interrupt arriving in the
5882 * meantime is treated as spurious.
5883 */
5884
5885
5886 PROTECT_CTX(ctx,flags);
5887
5888 /*
5889 * release ownership of this PMU.
5890 * must be done before saving the registers: from this point
5891 * on, any PMU interrupt is treated as spurious
5892 */
5893
5894
5895 SET_PMU_OWNER(NULL, NULL);
5896
5897 /*
5898 * save all the pmds we use
5899 */
5900 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5901
5902 /*
5903 * save pmc0 (srlz.d done in pfm_save_pmds()); the reload path
5904 * needs it to detect pending overflows
5905 */
5906
5907 ctx->th_pmcs[0] = ia64_get_pmc(0);
5908
5909 /*
5910 * unfreeze the PMU if there were pending overflows
5911 */
5912 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5913
5914 /*
5915 * allow context access again
5916 */
5917
5918
5919 UNPROTECT_CTX(ctx,flags);
5920 }
5921 #endif
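
[Editor's sketch.] The UP path above is lazy: the outgoing owner's registers stay live in the PMU and are written back only when a different context needs the hardware. A small ownership model of that policy (all names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct ctx { const char *name; int saved; };

static struct ctx *pmu_owner;	/* models GET_PMU_OWNER()/SET_PMU_OWNER() */

static void lazy_save(struct ctx *old) { old->saved = 1; pmu_owner = NULL; }

static void load_regs(struct ctx *incoming)
{
	if (pmu_owner == incoming) return;	/* registers still live: nothing to do */
	if (pmu_owner) lazy_save(pmu_owner);	/* evict the previous owner only now   */
	pmu_owner = incoming;
}

int main(void)
{
	struct ctx a = { "A", 0 }, b = { "B", 0 };

	load_regs(&a);
	load_regs(&a);	/* no save: A still owns the PMU */
	load_regs(&b);	/* A is lazily saved only here   */
	printf("A saved=%d, owner=%s\n", a.saved, pmu_owner->name);
	return 0;
}
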
5922
5923 #ifdef CONFIG_SMP
5924
5925
5926
5927 void
5928 pfm_load_regs (struct task_struct *task)
5929 {
5930 pfm_context_t *ctx;
5931 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
5932 unsigned long flags;
5933 u64 psr, psr_up;
5934 int need_irq_resend;
5935
5936 ctx = PFM_GET_CTX(task);
5937 if (unlikely(ctx == NULL)) return;
5938
5939 BUG_ON(GET_PMU_OWNER());
5940
5941 /*
5942 * possible during context unload: nothing to reload
5943 */
5944 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
5945
5946 /*
5947 * we always come here with interrupts ALREADY disabled by the
5948 * scheduler, so we only need to protect against concurrent
5949 * access to the context
5950 */
5951 flags = pfm_protect_ctx_ctxsw(ctx);
5952 psr = pfm_get_psr();
5953
5954 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
5955
5956 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
5957 BUG_ON(psr & IA64_PSR_I);
5958
5959 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
5960 struct pt_regs *regs = task_pt_regs(task);
5961
5962 BUG_ON(ctx->ctx_smpl_hdr);
5963
5964 pfm_force_cleanup(ctx, regs);
5965
5966 pfm_unprotect_ctx_ctxsw(ctx, flags);
5967
5968
5969
5970
5971 pfm_context_free(ctx);
5972
5973 return;
5974 }
5975
5976 /*
5977 * restore ALL the debug registers first to avoid picking up
5978 * stale state
5979 */
5980 if (ctx->ctx_fl_using_dbreg) {
5981 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
5982 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
5983 }
5984
5985
5986 /* retrieve the saved psr.up */
5987 psr_up = ctx->ctx_saved_psr_up;
5988
5989 /*
5990 * last user on this CPU and same activation number: the PMU
5991 * still holds our state, only partial reloads are needed
5992 */
5993 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
5994
5995 /*
5996 * retrieve the partial reload masks (due to user modifications)
5997 */
5998 pmc_mask = ctx->ctx_reload_pmcs[0];
5999 pmd_mask = ctx->ctx_reload_pmds[0];
6000
6001 } else {
6002 /*
6003 * the PMU holds some other context's state: reload either the
6004 * pmds we use (fastctxsw) or all implemented pmds, so that no
6005 * stale values are picked up
6006 */
6007
6008 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6009
6010 /*
6011 * all accessible pmcs are systematically reloaded: unused
6012 * registers keep their default values to avoid picking up
6013 * stale configuration
6014 */
6015
6016
6017 pmc_mask = ctx->ctx_all_pmcs[0];
6018 }
6019
6020 /*
6021 * restore only what is needed; both masks can be empty when
6022 * nothing changed since the last activation
6023 */
6024
6025
6026 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6027 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6028
6029 /*
6030 * check for a pending overflow recorded at save time
6031 */
6032
6033 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6034 /*
6035 * reload pmc0 with the saved overflow information
6036 */
6037
6038 ia64_set_pmc(0, ctx->th_pmcs[0]);
6039 ia64_srlz_d();
6040 ctx->th_pmcs[0] = 0UL;
6041
6042 /*
6043 * the interrupt was lost while the state was saved: replay it
6044 */
6045 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6046
6047 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6048 }
6049
6050 /*
6051 * the reload is done: clear the partial reload masks
6052 */
6053 ctx->ctx_reload_pmcs[0] = 0UL;
6054 ctx->ctx_reload_pmds[0] = 0UL;
6055
6056 SET_LAST_CPU(ctx, smp_processor_id());
6057
6058 /*
6059 * bump the activation number for this PMU
6060 */
6061 INC_ACTIVATION();
6062
6063
6064 /* record the current activation in this context */
6065 SET_ACTIVATION(ctx);
6066
6067 /*
6068 * establish new ownership
6069 */
6070 SET_PMU_OWNER(task, ctx);
6071
6072 /*
6073 * restore the psr.up bit: measurement becomes active again.
6074 * no PMU interrupt can occur here because interrupts are
6075 * still disabled
6076 */
6077
6078 if (likely(psr_up)) pfm_set_psr_up();
6079
6080 /*
6081 * allow concurrent access to the context again
6082 */
6083 pfm_unprotect_ctx_ctxsw(ctx, flags);
6084 }
6085 #else
6086
6087
6088
6089
6090 void
6091 pfm_load_regs (struct task_struct *task)
6092 {
6093 pfm_context_t *ctx;
6094 struct task_struct *owner;
6095 unsigned long pmd_mask, pmc_mask;
6096 u64 psr, psr_up;
6097 int need_irq_resend;
6098
6099 owner = GET_PMU_OWNER();
6100 ctx = PFM_GET_CTX(task);
6101 psr = pfm_get_psr();
6102
6103 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6104 BUG_ON(psr & IA64_PSR_I);
6105
6106 /*
6107 * restore ALL the debug registers first to avoid picking up
6108 * stale state
6109 */
6110
6111
6112
6113
6114 if (ctx->ctx_fl_using_dbreg) {
6115 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6116 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6117 }
6118
6119 /*
6120 * retrieve the saved psr.up
6121 */
6122 psr_up = ctx->ctx_saved_psr_up;
6123 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6124
6125 /*
6126 * short path: we still own the PMU, the registers are live,
6127 * only psr.up needs to be restored
6128 */
6129
6130
6131
6132
6133 if (likely(owner == task)) {
6134 if (likely(psr_up)) pfm_set_psr_up();
6135 return;
6136 }
6137
6138 /*
6139 * someone else owns the PMU: save that state lazily now,
6140 * then take over
6141 */
6142
6143
6144 if (owner) pfm_lazy_save_regs(owner);
6145
6146 /*
6147 * reload either the pmds we use (fastctxsw) or all implemented
6148 * pmds, so that no stale values are picked up
6149 */
6150
6151
6152 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6153
6154 /*
6155 * all accessible pmcs are systematically reloaded: unused
6156 * registers keep their default values to avoid stale
6157 * configuration
6158 */
6159
6160
6161 pmc_mask = ctx->ctx_all_pmcs[0];
6162
6163 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6164 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6165
6166 /*
6167 * check for a pending overflow recorded at save time
6168 */
6169
6170 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6171
6172 /*
6173 * reload pmc0 with the saved overflow information
6174 */
6175 ia64_set_pmc(0, ctx->th_pmcs[0]);
6176 ia64_srlz_d();
6177
6178 ctx->th_pmcs[0] = 0UL;
6179
6180 /*
6181 * the interrupt was lost while the state was saved: replay it
6182 */
6183 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6184
6185 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6186 }
6187
6188 /*
6189 * establish new ownership
6190 */
6191 SET_PMU_OWNER(task, ctx);
6192
6193 /*
6194 * restore the psr.up bit: measurement becomes active again.
6195 * no PMU interrupt can occur here because interrupts are
6196 * still disabled
6197 */
6198
6199 if (likely(psr_up)) pfm_set_psr_up();
6200 }
6201 #endif
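
[Editor's sketch.] The SMP variant of pfm_load_regs() above avoids touching the PMU when nothing invalidated the registers: per-CPU state is reusable only if this context was the last one active on this CPU and the CPU's activation number has not moved since. A compilable model of that test (names are illustrative):

#include <stdio.h>

struct ctx {
	int last_cpu;
	unsigned long last_activation;
};

static unsigned long cpu_activation[2];	/* per-CPU monotonic counter */

static int state_still_valid(const struct ctx *c, int this_cpu)
{
	return c->last_cpu == this_cpu &&
	       c->last_activation == cpu_activation[this_cpu];
}

int main(void)
{
	struct ctx c = { .last_cpu = 0, .last_activation = 7 };

	cpu_activation[0] = 7;
	printf("valid=%d\n", state_still_valid(&c, 0));	/* 1: partial reload only */

	cpu_activation[0]++;	/* some other context used the PMU meanwhile */
	printf("valid=%d\n", state_still_valid(&c, 0));	/* 0: full reload needed  */
	return 0;
}
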
6202
6203 /*
6204 * this function assumes monitoring is stopped
6205 */
6206 static void
6207 pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6208 {
6209 u64 pmc0;
6210 unsigned long mask2, val, pmd_val, ovfl_val;
6211 int i, can_access_pmu = 0;
6212 int is_self;
6213
6214 /*
6215 * is the caller the task being monitored (or, for system-wide
6216 * sessions, the task which created the session)?
6217 */
6218 is_self = ctx->ctx_task == task ? 1 : 0;
6219
6220 /*
6221 * the PMU can be accessed directly when the task owns the PMU
6222 * state on this CPU, or for a system-wide context when we run
6223 * on the CPU the context is bound to
6224 */
6225
6226
6227 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6228 if (can_access_pmu) {
6229 /*
6230 * give up ownership first: once cleared, any further PMU
6231 * interrupt is treated as spurious
6232 */
6233
6234
6235
6236
6237 SET_PMU_OWNER(NULL, NULL);
6238 DPRINT(("releasing ownership\n"));
6239
6240 /*
6241 * read the current overflow status; after the srlz.d we are
6242 * guaranteed a stable value
6243 */
6244
6245 ia64_srlz_d();
6246 pmc0 = ia64_get_pmc(0);
6247
6248 /*
6249 * reset the freeze bit
6250 */
6251 pfm_unfreeze_pmu();
6252 } else {
6253 pmc0 = ctx->th_pmcs[0];
6254
6255
6256 /* clear whatever overflow status bits were saved at ctxsw time */
6257 ctx->th_pmcs[0] = 0;
6258 }
6259 ovfl_val = pmu_conf->ovfl_val;
6260
6261 /*
6262 * save all used pmds, taking care of overflows for counting
6263 * pmds; sampling buffers are not flushed here
6264 */
6265
6266 mask2 = ctx->ctx_used_pmds[0];
6267
6268 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6269
6270 for (i = 0; mask2; i++, mask2>>=1) {
6271
6272 /* skip unused pmds */
6273 if ((mask2 & 0x1) == 0) continue;
6274
6275 /*
6276 * read the live value if we own the PMU, else the saved copy
6277 */
6278 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6279
6280 if (PMD_IS_COUNTING(i)) {
6281 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6282 task_pid_nr(task),
6283 i,
6284 ctx->ctx_pmds[i].val,
6285 val & ovfl_val));
6286
6287 /*
6288 * rebuild the full 64-bit value of the counter
6289 */
6290 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6291
6292 /*
6293 * everything now lives in ctx_pmds[]: clear the saved copy so
6294 * a later pfm_read_pmds() returns the correct value
6295 */
6296
6297 pmd_val = 0UL;
6298
6299 /*
6300 * take care of a pending overflow inline
6301 */
6302 if (pmc0 & (1UL << i)) {
6303 val += 1 + ovfl_val;
6304 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6305 }
6306 }
6307
6308 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6309
6310 if (is_self) ctx->th_pmds[i] = pmd_val;
6311
6312 ctx->ctx_pmds[i].val = val;
6313 }
6314 }
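
[Editor's sketch.] For a counting pmd, the flush above rebuilds the full 64-bit value as software part + live hardware bits, plus one extra wrap when pmc0 still flags a pending overflow. Reduced to a stand-alone computation (the 47-bit width is an assumption; the real mask is pmu_conf->ovfl_val):

#include <stdio.h>

int main(void)
{
	const unsigned long ovfl_val = (1UL << 47) - 1;
	unsigned long soft = 0x123UL << 47;	/* models ctx_pmds[i].val */
	unsigned long hw   = 0x42;		/* models ia64_get_pmd(i) */
	unsigned long pmc0 = 1UL << 5;		/* pending overflow, pmd5 */
	int i = 5;

	unsigned long val = soft + (hw & ovfl_val);
	if (pmc0 & (1UL << i))
		val += 1 + ovfl_val;	/* account the not-yet-processed wrap */

	printf("pmd%d flushed value=0x%lx\n", i, val);
	return 0;
}
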
6315
6316 static struct irqaction perfmon_irqaction = {
6317 .handler = pfm_interrupt_handler,
6318 .name = "perfmon"
6319 };
6320
6321 static void
6322 pfm_alt_save_pmu_state(void *data)
6323 {
6324 struct pt_regs *regs;
6325
6326 regs = task_pt_regs(current);
6327
6328 DPRINT(("called\n"));
6329
6330 /*
6331 * disable monitoring at the CPU level: clear psr.up/psr.pp,
6332 * including in the saved user registers
6333 */
6334 pfm_clear_psr_up();
6335 pfm_clear_psr_pp();
6336 ia64_psr(regs)->pp = 0;
6337
6338 /*
6339 * also stop monitoring at the PMU level
6340 */
6341
6342 pfm_freeze_pmu();
6343
6344 ia64_srlz_d();
6345 }
6346
6347 void
6348 pfm_alt_restore_pmu_state(void *data)
6349 {
6350 struct pt_regs *regs;
6351
6352 regs = task_pt_regs(current);
6353
6354 DPRINT(("called\n"));
6355
6356 /*
6357 * monitoring stays disabled at the CPU level
6358 */
6359
6360 pfm_clear_psr_up();
6361 pfm_clear_psr_pp();
6362 ia64_psr(regs)->pp = 0;
6363
6364 /*
6365 * return the PMU to its normal, unfrozen state
6366 */
6367 pfm_unfreeze_pmu();
6368
6369 ia64_srlz_d();
6370 }
6371
6372 int
6373 pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6374 {
6375 int ret, i;
6376 int reserve_cpu;
6377
6378 /* sanity checks */
6379 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6380
6381 /* fail fast if an alternate handler is already installed */
6382 if (pfm_alt_intr_handler) return -EBUSY;
6383
6384 /* one install/remove at a time; just fail the others */
6385 if (!spin_trylock(&pfm_alt_install_check)) {
6386 return -EBUSY;
6387 }
6388
6389 /* reserve a system-wide session on every online CPU */
6390 for_each_online_cpu(reserve_cpu) {
6391 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6392 if (ret) goto cleanup_reserve;
6393 }
6394
6395 /* save the current system-wide PMU state on every CPU */
6396 on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
6397
6398 /* officially switch to the alternate interrupt handler */
6399 pfm_alt_intr_handler = hdl;
6400
6401 spin_unlock(&pfm_alt_install_check);
6402
6403 return 0;
6404
6405 cleanup_reserve:
6406 for_each_online_cpu(i) {
6407 /* undo only the reservations made before the failure */
6408 if (i >= reserve_cpu) break;
6409
6410 pfm_unreserve_session(NULL, 1, i);
6411 }
6412
6413 spin_unlock(&pfm_alt_install_check);
6414
6415 return ret;
6416 }
6417 EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
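
[Editor's sketch.] pfm_install_alt_pmu_interrupt() uses an all-or-nothing reservation: it reserves a session on every online CPU and, on the first failure, releases exactly the CPUs already taken (the cleanup_reserve loop stops at the failing CPU). The generic pattern, runnable (reserve()/unreserve() stand in for pfm_reserve_session()/pfm_unreserve_session()):

#include <stdio.h>

#define NCPU 4

static int  reserve(int cpu)   { return cpu == 2 ? -1 : 0; }	/* fail on cpu2 */
static void unreserve(int cpu) { printf("released cpu%d\n", cpu); }

int main(void)
{
	int cpu, failed = -1;

	for (cpu = 0; cpu < NCPU; cpu++) {
		if (reserve(cpu)) { failed = cpu; break; }
	}
	if (failed < 0) { printf("all reserved\n"); return 0; }

	/* roll back only what was taken before the failure */
	for (cpu = 0; cpu < failed; cpu++)
		unreserve(cpu);
	return 1;
}
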
6418
6419 int
6420 pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6421 {
6422 int i;
6423
6424 if (hdl == NULL) return -EINVAL;
6425
6426 /* only the currently installed handler may be removed */
6427 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6428
6429 /* one install/remove at a time; just fail the others */
6430 if (!spin_trylock(&pfm_alt_install_check)) {
6431 return -EBUSY;
6432 }
6433
6434 pfm_alt_intr_handler = NULL;
6435
6436 on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
6437
6438 for_each_online_cpu(i) {
6439 pfm_unreserve_session(NULL, 1, i);
6440 }
6441
6442 spin_unlock(&pfm_alt_install_check);
6443
6444 return 0;
6445 }
6446 EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6447
6448
6449
6450
6451 static int init_pfm_fs(void);
6452
6453 static int __init
6454 pfm_probe_pmu(void)
6455 {
6456 pmu_config_t **p;
6457 int family;
6458
6459 family = local_cpu_data->family;
6460 p = pmu_confs;
6461
6462 while(*p) {
6463 if ((*p)->probe) {
6464 if ((*p)->probe() == 0) goto found;
6465 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6466 goto found;
6467 }
6468 p++;
6469 }
6470 return -1;
6471 found:
6472 pmu_conf = *p;
6473 return 0;
6474 }
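
[Editor's sketch.] The probe loop above walks a NULL-terminated table of PMU configurations: an entry with a probe() hook is selected when the hook returns 0, otherwise the CPU family is matched, with 0xff acting as a wildcard. A compilable miniature of the same selection (struct layout and names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct pmu_cfg {
	const char *name;
	int family;		/* 0xff matches any family */
	int (*probe)(void);	/* optional, 0 means found */
};

static int probe_mck(void) { return 0; }

static struct pmu_cfg mck     = { "Itanium2", 0x1f, probe_mck },
		      itanium = { "Itanium",  0x07, NULL },
		      generic = { "Generic",  0xff, NULL };
static struct pmu_cfg *confs[] = { &mck, &itanium, &generic, NULL };

int main(void)
{
	int family = 0x20;	/* pretend CPU family */
	struct pmu_cfg **p;

	for (p = confs; *p; p++) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) break;	/* probe hook wins */
		} else if ((*p)->family == family || (*p)->family == 0xff) {
			break;
		}
	}
	printf("selected: %s\n", *p ? (*p)->name : "none");
	return 0;
}
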
6475
6476 int __init
6477 pfm_init(void)
6478 {
6479 unsigned int n, n_counters, i;
6480
6481 printk("perfmon: version %u.%u IRQ %u\n",
6482 PFM_VERSION_MAJ,
6483 PFM_VERSION_MIN,
6484 IA64_PERFMON_VECTOR);
6485
6486 if (pfm_probe_pmu()) {
6487 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6488 local_cpu_data->family);
6489 return -ENODEV;
6490 }
6491
6492 /*
6493 * compute the number of implemented pmcs/pmds from the
6494 * description tables
6495 */
6496 n = 0;
6497 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6498 if (PMC_IS_IMPL(i) == 0) continue;
6499 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6500 n++;
6501 }
6502 pmu_conf->num_pmcs = n;
6503
6504 n = 0; n_counters = 0;
6505 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6506 if (PMD_IS_IMPL(i) == 0) continue;
6507 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6508 n++;
6509 if (PMD_IS_COUNTING(i)) n_counters++;
6510 }
6511 pmu_conf->num_pmds = n;
6512 pmu_conf->num_counters = n_counters;
6513
6514 /*
6515 * sanity checks on the number of debug registers
6516 */
6517 if (pmu_conf->use_rr_dbregs) {
6518 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6519 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6520 pmu_conf = NULL;
6521 return -1;
6522 }
6523 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6524 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
6525 pmu_conf = NULL;
6526 return -1;
6527 }
6528 }
6529
6530 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6531 pmu_conf->pmu_name,
6532 pmu_conf->num_pmcs,
6533 pmu_conf->num_pmds,
6534 pmu_conf->num_counters,
6535 ffz(pmu_conf->ovfl_val));
6536
6537 /* sanity check */
6538 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6539 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6540 pmu_conf = NULL;
6541 return -1;
6542 }
6543
6544 /*
6545 * create /proc/perfmon (mostly for debugging purposes)
6546 */
6547 perfmon_dir = proc_create_seq("perfmon", S_IRUGO, NULL, &pfm_seq_ops);
6548 if (perfmon_dir == NULL) {
6549 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6550 pmu_conf = NULL;
6551 return -1;
6552 }
6553
6554 /*
6555 * create /proc/sys/kernel/perfmon (for debugging purposes)
6556 */
6557 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6558
6559 /*
6560 * initialize all our spinlocks
6561 */
6562 spin_lock_init(&pfm_sessions.pfs_lock);
6563 spin_lock_init(&pfm_buffer_fmt_lock);
6564
6565 init_pfm_fs();
6566
6567 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6568
6569 return 0;
6570 }
6571
6572 __initcall(pfm_init);
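
[Editor's sketch.] pfm_init() records which registers are implemented in multi-word bitmaps: bit i lives in word i>>6 at position i&63, and the same arithmetic reads a bit back, which is typically how the PMC_IS_IMPL()/PMD_IS_IMPL() style macros are built. A tiny runnable version (names are illustrative):

#include <stdio.h>

#define NWORDS 4

static unsigned long impl[NWORDS];

static void set_impl(unsigned int i) { impl[i >> 6] |= 1UL << (i & 63); }
static int  is_impl(unsigned int i)  { return (impl[i >> 6] >> (i & 63)) & 1; }

int main(void)
{
	set_impl(4);
	set_impl(70);	/* word 1, bit 6 */
	printf("r4=%d r5=%d r70=%d\n", is_impl(4), is_impl(5), is_impl(70));
	return 0;
}
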
6573
6574 /*
6575 * per-CPU initialization, called before pfm_init()
6576 */
6577 void
6578 pfm_init_percpu (void)
6579 {
6580 static int first_time=1;
6581
6582 /*
6583 * make sure no measurement is active (state may be inherited from EFI)
6584 */
6585 pfm_clear_psr_pp();
6586 pfm_clear_psr_up();
6587
6588 /*
6589 * we run with the PMU unfrozen at all times
6590 */
6591 pfm_unfreeze_pmu();
6592
6593 if (first_time) {
6594 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6595 first_time=0;
6596 }
6597
6598 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6599 ia64_srlz_d();
6600 }
6601
6602 /*
6603 * used for debug purposes only
6604 */
6605 void
6606 dump_pmu_state(const char *from)
6607 {
6608 struct task_struct *task;
6609 struct pt_regs *regs;
6610 pfm_context_t *ctx;
6611 unsigned long psr, dcr, info, flags;
6612 int i, this_cpu;
6613
6614 local_irq_save(flags);
6615
6616 this_cpu = smp_processor_id();
6617 regs = task_pt_regs(current);
6618 info = PFM_CPUINFO_GET();
6619 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6620
6621 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6622 local_irq_restore(flags);
6623 return;
6624 }
6625
6626 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6627 this_cpu,
6628 from,
6629 task_pid_nr(current),
6630 regs->cr_iip,
6631 current->comm);
6632
6633 task = GET_PMU_OWNER();
6634 ctx = GET_PMU_CTX();
6635
6636 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6637
6638 psr = pfm_get_psr();
6639
6640 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6641 this_cpu,
6642 ia64_get_pmc(0),
6643 psr & IA64_PSR_PP ? 1 : 0,
6644 psr & IA64_PSR_UP ? 1 : 0,
6645 dcr & IA64_DCR_PP ? 1 : 0,
6646 info,
6647 ia64_psr(regs)->up,
6648 ia64_psr(regs)->pp);
6649
6650 ia64_psr(regs)->up = 0;
6651 ia64_psr(regs)->pp = 0;
6652
6653 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6654 if (PMC_IS_IMPL(i) == 0) continue;
6655 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6656 }
6657
6658 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6659 if (PMD_IS_IMPL(i) == 0) continue;
6660 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6661 }
6662
6663 if (ctx) {
6664 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
6665 this_cpu,
6666 ctx->ctx_state,
6667 ctx->ctx_smpl_vaddr,
6668 ctx->ctx_smpl_hdr,
6669 ctx->ctx_msgq_head,
6670 ctx->ctx_msgq_tail,
6671 ctx->ctx_saved_psr_up);
6672 }
6673 local_irq_restore(flags);
6674 }
6675
6676
6677
6678
6679 void
6680 pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6681 {
6682 struct thread_struct *thread;
6683
6684 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6685
6686 thread = &task->thread;
6687
6688 /*
6689 * cut the links inherited from the parent (current)
6690 */
6691 thread->pfm_context = NULL;
6692
6693 PFM_SET_WORK_PENDING(task, 0);
6694
6695 /*
6696 * the psr bits are already set properly by copy_thread()
6697 */
6698 }
6699 #else
6700 asmlinkage long
6701 sys_perfmonctl (int fd, int cmd, void *arg, int count)
6702 {
6703 return -ENOSYS;
6704 }
6705 #endif