perf_iommu 185 arch/x86/events/amd/iommu.c static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
perf_iommu 192 arch/x86/events/amd/iommu.c     max_banks = perf_iommu->max_banks;
perf_iommu 193 arch/x86/events/amd/iommu.c     max_cntrs = perf_iommu->max_counters;
perf_iommu 200 arch/x86/events/amd/iommu.c     raw_spin_lock_irqsave(&perf_iommu->lock, flags);
perf_iommu 201 arch/x86/events/amd/iommu.c     perf_iommu->cntr_assign_mask &= ~(1ULL<<shift);
perf_iommu 202 arch/x86/events/amd/iommu.c     raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);
perf_iommu 362 arch/x86/events/amd/iommu.c     struct perf_amd_iommu *perf_iommu =
perf_iommu 368 arch/x86/events/amd/iommu.c     clear_avail_iommu_bnk_cntr(perf_iommu,
perf_iommu 414 arch/x86/events/amd/iommu.c     struct perf_amd_iommu *perf_iommu;
perf_iommu 417 arch/x86/events/amd/iommu.c     perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
perf_iommu 418 arch/x86/events/amd/iommu.c     if (!perf_iommu)
perf_iommu 421 arch/x86/events/amd/iommu.c     raw_spin_lock_init(&perf_iommu->lock);
perf_iommu 423 arch/x86/events/amd/iommu.c     perf_iommu->pmu = iommu_pmu;
perf_iommu 424 arch/x86/events/amd/iommu.c     perf_iommu->iommu = get_amd_iommu(idx);
perf_iommu 425 arch/x86/events/amd/iommu.c     perf_iommu->max_banks = amd_iommu_pc_get_max_banks(idx);
perf_iommu 426 arch/x86/events/amd/iommu.c     perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);
perf_iommu 428 arch/x86/events/amd/iommu.c     if (!perf_iommu->iommu ||
perf_iommu 429 arch/x86/events/amd/iommu.c         !perf_iommu->max_banks ||
perf_iommu 430 arch/x86/events/amd/iommu.c         !perf_iommu->max_counters) {
perf_iommu 431 arch/x86/events/amd/iommu.c     kfree(perf_iommu);
perf_iommu 435 arch/x86/events/amd/iommu.c     snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);
perf_iommu 437 arch/x86/events/amd/iommu.c     ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
perf_iommu 440 arch/x86/events/amd/iommu.c     idx, perf_iommu->max_banks, perf_iommu->max_counters);
perf_iommu 441 arch/x86/events/amd/iommu.c     list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
perf_iommu 444 arch/x86/events/amd/iommu.c     kfree(perf_iommu);
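
The references above cluster into two pieces of the AMD IOMMU PMU driver: a helper (lines 185-202) that releases a bank/counter assignment by clearing a bit in cntr_assign_mask under the per-IOMMU raw spinlock, and a per-IOMMU init path (lines 414-444) that allocates the perf_amd_iommu instance, probes the hardware limits, registers the PMU and links it onto perf_amd_iommu_list. The sketch below is reconstructed only from the lines listed here, not copied from the upstream file: the struct field types, the bit-shift layout, the IOMMU_NAME_SIZE value, the init function name, the log message, and the driver-local "iommu.h" include are assumptions.

    /* Minimal sketch assembled from the cross-reference entries above. */
    #include <linux/types.h>
    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/printk.h>
    #include <linux/perf_event.h>

    #include "iommu.h"      /* assumed: driver-local header declaring get_amd_iommu(),
                             * amd_iommu_pc_get_max_banks/counters() */

    #define IOMMU_NAME_SIZE 16              /* exact size is an assumption */

    struct perf_amd_iommu {
            struct list_head list;
            struct pmu pmu;
            struct amd_iommu *iommu;
            char name[IOMMU_NAME_SIZE];
            u8 max_banks;                   /* counter banks on this IOMMU */
            u8 max_counters;                /* counters per bank */
            u64 cntr_assign_mask;           /* one bit per bank/counter slot */
            raw_spinlock_t lock;            /* protects cntr_assign_mask */
    };

    static LIST_HEAD(perf_amd_iommu_list);
    static struct pmu iommu_pmu;            /* template copied into each instance;
                                             * callbacks omitted in this sketch */

    /* Lines 185-202: give a bank/counter slot back to the allocator. */
    static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
                                          u8 bank, u8 cntr)
    {
            unsigned long flags;
            int max_banks = perf_iommu->max_banks;
            int max_cntrs = perf_iommu->max_counters;
            int shift;

            if (bank > max_banks || cntr > max_cntrs)
                    return -EINVAL;

            shift = bank + cntr * max_banks;        /* bit layout is an assumption */

            raw_spin_lock_irqsave(&perf_iommu->lock, flags);
            perf_iommu->cntr_assign_mask &= ~(1ULL << shift);
            raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);
            return 0;
    }

    /* Lines 414-444: allocate, probe and register one per-IOMMU PMU.
     * The function name here is hypothetical. */
    static __init int init_one_perf_amd_iommu(unsigned int idx)
    {
            struct perf_amd_iommu *perf_iommu;
            int ret;

            perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
            if (!perf_iommu)
                    return -ENOMEM;

            raw_spin_lock_init(&perf_iommu->lock);

            perf_iommu->pmu          = iommu_pmu;
            perf_iommu->iommu        = get_amd_iommu(idx);
            perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
            perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

            if (!perf_iommu->iommu ||
                !perf_iommu->max_banks ||
                !perf_iommu->max_counters) {
                    kfree(perf_iommu);
                    return -EINVAL;
            }

            snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

            ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
            if (!ret) {
                    pr_info("Detected amd_iommu_%u: %d banks, %d counters/bank\n",
                            idx, perf_iommu->max_banks, perf_iommu->max_counters);
                    list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
            } else {
                    kfree(perf_iommu);
            }
            return ret;
    }

The remaining references (lines 362 and 368) show the caller side of the helper: an event teardown path recovers its perf_amd_iommu, most likely via container_of() on the event's pmu pointer (that detail is not visible in the listing), and calls clear_avail_iommu_bnk_cntr() to return the bank/counter slot.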