pmu_conf          100 arch/ia64/kernel/perfmon.c #define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
pmu_conf          101 arch/ia64/kernel/perfmon.c #define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)
pmu_conf          106 arch/ia64/kernel/perfmon.c #define PMC_IS_IMPL(i)	  (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
pmu_conf          107 arch/ia64/kernel/perfmon.c #define PMD_IS_IMPL(i)	  (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
pmu_conf          110 arch/ia64/kernel/perfmon.c #define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
pmu_conf          111 arch/ia64/kernel/perfmon.c #define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
pmu_conf          112 arch/ia64/kernel/perfmon.c #define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
pmu_conf          113 arch/ia64/kernel/perfmon.c #define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)
pmu_conf          115 arch/ia64/kernel/perfmon.c #define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
pmu_conf          116 arch/ia64/kernel/perfmon.c #define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
pmu_conf          117 arch/ia64/kernel/perfmon.c #define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
pmu_conf          118 arch/ia64/kernel/perfmon.c #define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]
pmu_conf          405 arch/ia64/kernel/perfmon.c #define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
pmu_conf          522 arch/ia64/kernel/perfmon.c static pmu_config_t		*pmu_conf;
pmu_conf          744 arch/ia64/kernel/perfmon.c 	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
pmu_conf          753 arch/ia64/kernel/perfmon.c 	unsigned long ovfl_val = pmu_conf->ovfl_val;
pmu_conf          886 arch/ia64/kernel/perfmon.c 	ovfl_mask = pmu_conf->ovfl_val;
pmu_conf          960 arch/ia64/kernel/perfmon.c 	ovfl_mask = pmu_conf->ovfl_val;
pmu_conf         1032 arch/ia64/kernel/perfmon.c 		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pmu_conf         1033 arch/ia64/kernel/perfmon.c 		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
pmu_conf         1066 arch/ia64/kernel/perfmon.c 	unsigned long val, ovfl_val = pmu_conf->ovfl_val;
pmu_conf         1082 arch/ia64/kernel/perfmon.c 	unsigned long ovfl_val = pmu_conf->ovfl_val;
pmu_conf         2462 arch/ia64/kernel/perfmon.c 	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
pmu_conf         2467 arch/ia64/kernel/perfmon.c 	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
pmu_conf         2783 arch/ia64/kernel/perfmon.c 	impl_pmds = pmu_conf->impl_pmds[0];
pmu_conf         2816 arch/ia64/kernel/perfmon.c 		pmc_type   = pmu_conf->pmc_desc[cnum].type;
pmu_conf         2817 arch/ia64/kernel/perfmon.c 		pmc_pm     = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
pmu_conf         2830 arch/ia64/kernel/perfmon.c 		wr_func = pmu_conf->pmc_desc[cnum].write_check;
pmu_conf         2931 arch/ia64/kernel/perfmon.c 		CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
pmu_conf         3021 arch/ia64/kernel/perfmon.c 	ovfl_mask = pmu_conf->ovfl_val;
pmu_conf         3054 arch/ia64/kernel/perfmon.c 		wr_func     = pmu_conf->pmd_desc[cnum].write_check;
pmu_conf         3221 arch/ia64/kernel/perfmon.c 	ovfl_mask = pmu_conf->ovfl_val;
pmu_conf         3290 arch/ia64/kernel/perfmon.c 		rd_func = pmu_conf->pmd_desc[cnum].read_check;
pmu_conf         3385 arch/ia64/kernel/perfmon.c 	if (pmu_conf->use_rr_dbregs == 0) return 0;
pmu_conf         3439 arch/ia64/kernel/perfmon.c 	if (pmu_conf->use_rr_dbregs == 0) return 0;
pmu_conf         3640 arch/ia64/kernel/perfmon.c 	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
pmu_conf         3722 arch/ia64/kernel/perfmon.c 		for (i=0; i < pmu_conf->num_ibrs; i++) {
pmu_conf         3727 arch/ia64/kernel/perfmon.c 		for (i=0; i < pmu_conf->num_dbrs; i++) {
pmu_conf         4297 arch/ia64/kernel/perfmon.c 			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pmu_conf         4298 arch/ia64/kernel/perfmon.c 			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
pmu_conf         4723 arch/ia64/kernel/perfmon.c 	if (unlikely(pmu_conf == NULL)) return -ENOSYS;
pmu_conf         5141 arch/ia64/kernel/perfmon.c 	ovfl_val = pmu_conf->ovfl_val;
pmu_conf         5555 arch/ia64/kernel/perfmon.c 		pmu_conf->pmu_name,
pmu_conf         5558 arch/ia64/kernel/perfmon.c 		pmu_conf->ovfl_val,
pmu_conf         5559 arch/ia64/kernel/perfmon.c 		pmu_conf->flags);
pmu_conf         5954 arch/ia64/kernel/perfmon.c 	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
pmu_conf         5981 arch/ia64/kernel/perfmon.c 		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pmu_conf         5982 arch/ia64/kernel/perfmon.c 		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
pmu_conf         6115 arch/ia64/kernel/perfmon.c 		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pmu_conf         6116 arch/ia64/kernel/perfmon.c 		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
pmu_conf         6123 arch/ia64/kernel/perfmon.c 	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
pmu_conf         6259 arch/ia64/kernel/perfmon.c 	ovfl_val = pmu_conf->ovfl_val;
pmu_conf         6472 arch/ia64/kernel/perfmon.c 	pmu_conf = *p;
pmu_conf         6499 arch/ia64/kernel/perfmon.c 		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
pmu_conf         6502 arch/ia64/kernel/perfmon.c 	pmu_conf->num_pmcs = n;
pmu_conf         6507 arch/ia64/kernel/perfmon.c 		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
pmu_conf         6511 arch/ia64/kernel/perfmon.c 	pmu_conf->num_pmds      = n;
pmu_conf         6512 arch/ia64/kernel/perfmon.c 	pmu_conf->num_counters  = n_counters;
pmu_conf         6517 arch/ia64/kernel/perfmon.c 	if (pmu_conf->use_rr_dbregs) {
pmu_conf         6518 arch/ia64/kernel/perfmon.c 		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
pmu_conf         6519 arch/ia64/kernel/perfmon.c 			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
pmu_conf         6520 arch/ia64/kernel/perfmon.c 			pmu_conf = NULL;
pmu_conf         6523 arch/ia64/kernel/perfmon.c 		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
pmu_conf         6524 arch/ia64/kernel/perfmon.c 			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
pmu_conf         6525 arch/ia64/kernel/perfmon.c 			pmu_conf = NULL;
pmu_conf         6531 arch/ia64/kernel/perfmon.c 	       pmu_conf->pmu_name,
pmu_conf         6532 arch/ia64/kernel/perfmon.c 	       pmu_conf->num_pmcs,
pmu_conf         6533 arch/ia64/kernel/perfmon.c 	       pmu_conf->num_pmds,
pmu_conf         6534 arch/ia64/kernel/perfmon.c 	       pmu_conf->num_counters,
pmu_conf         6535 arch/ia64/kernel/perfmon.c 	       ffz(pmu_conf->ovfl_val));
pmu_conf         6538 arch/ia64/kernel/perfmon.c 	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
pmu_conf         6540 arch/ia64/kernel/perfmon.c 		pmu_conf = NULL;
pmu_conf         6550 arch/ia64/kernel/perfmon.c 		pmu_conf = NULL;