pr 89 arch/arm/mach-ux500/pm.c u32 pr; /* Pending register */
pr 96 arch/arm/mach-ux500/pm.c pr = readl_relaxed(dist_base + GIC_DIST_PENDING_SET + i * 4);
pr 99 arch/arm/mach-ux500/pm.c if (pr & er)
pr 54 arch/arm64/kernel/cpuidle.c struct acpi_processor *pr = per_cpu(processors, cpu);
pr 63 arch/arm64/kernel/cpuidle.c if (unlikely(!pr || !pr->flags.has_lpi))
pr 66 arch/arm64/kernel/cpuidle.c count = pr->power.count - 1;
pr 73 arch/arm64/kernel/cpuidle.c lpi = &pr->power.lpi_states[i + 1];
pr 77 arch/ia64/include/asm/asmmacro.h #define LOAD_PHYSICAL(pr, reg, obj) \
pr 78 arch/ia64/include/asm/asmmacro.h [1:](pr)movl reg = obj; \
pr 454 arch/ia64/include/asm/pal.h pr : 1, /* Predicate registers
pr 688 arch/ia64/include/asm/pal.h #define pmci_proc_predicate_regs_valid pme_processor.pr
pr 326 arch/ia64/include/asm/processor.h regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \
pr 884 arch/ia64/include/asm/sal.h u64 pr; /* Predicate registers */
pr 64 arch/ia64/include/asm/unwind.h unsigned long pr; /* current predicate values */
pr 104 arch/ia64/include/uapi/asm/ptrace.h unsigned long pr; /* 64 predicate registers (1 bit each) */
pr 204 arch/ia64/include/uapi/asm/ptrace.h unsigned long pr; /* 64 predicate registers (1 bit each) */
pr 214 arch/ia64/include/uapi/asm/ptrace.h unsigned long pr;
pr 91 arch/ia64/kernel/asm-offsets.c DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
pr 169 arch/ia64/kernel/asm-offsets.c DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));
pr 89 arch/ia64/kernel/brl_emu.c tmp_taken = regs->pr & (1L << qp);
pr 35 arch/ia64/kernel/entry.h .spillsp pr, PT(PR)+16+(off);
pr 64 arch/ia64/kernel/entry.h .spillsp pr,SW(PR)+16+(off)
pr 915 arch/ia64/kernel/mca.c regs->pr = ms->pmsa_pr;
pr 1120 arch/ia64/kernel/mca.c old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
pr 114 arch/ia64/kernel/process.c regs->ar_rnat, regs->ar_bspstore, regs->pr);
pr 396 arch/ia64/kernel/process.c child_stack->pr = (1 << PRED_KERNEL_STACK);
pr 733 arch/ia64/kernel/ptrace.c unsigned long ip, sp, pr;
pr 748 arch/ia64/kernel/ptrace.c if (unw_get_pr (&prev_info, &pr) < 0) {
pr 756 arch/ia64/kernel/ptrace.c && (pr & (1UL << PRED_USER_STACK)))
pr 766 arch/ia64/kernel/ptrace.c unw_get_pr(&prev_info, &pr);
pr 767 arch/ia64/kernel/ptrace.c pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
pr 768 arch/ia64/kernel/ptrace.c pr |= (1UL << PRED_NON_SYSCALL);
pr 769 arch/ia64/kernel/ptrace.c unw_set_pr(&prev_info, pr);
pr 961 arch/ia64/kernel/ptrace.c retval |= __put_user(pt->pr, &ppr->pr);
pr 1098 arch/ia64/kernel/ptrace.c retval |= __get_user(pt->pr, &ppr->pr);
pr 1478 arch/ia64/kernel/ptrace.c ptr = &pt->pr;
pr 62 arch/ia64/kernel/signal.c err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */
pr 196 arch/ia64/kernel/signal.c err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */
pr 233 arch/ia64/kernel/traps.c fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
pr 265 arch/ia64/kernel/traps.c (unsigned long *) isr, (unsigned long *) pr,
pr 328 arch/ia64/kernel/traps.c exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
pr 455 arch/ia64/kernel/traps.c regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
pr 598 arch/ia64/kernel/unwind.c addr = &info->sw->pr;
pr 1214 arch/ia64/kernel/unwind.c cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
pr 1217 arch/ia64/kernel/unwind.c if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
pr 1229 arch/ia64/kernel/unwind.c unsigned long ip, pr;
pr 1237 arch/ia64/kernel/unwind.c pr = info->pr;
pr 1239 arch/ia64/kernel/unwind.c if (cache_match(script, ip, pr)) {
pr 1250 arch/ia64/kernel/unwind.c if (cache_match(script, ip, pr)) {
pr 1548 arch/ia64/kernel/unwind.c sr.pr_val = info->pr;
pr 1886 arch/ia64/kernel/unwind.c unsigned long ip, pr, num_regs;
pr 1922 arch/ia64/kernel/unwind.c pr = info->pr;
pr 1926 arch/ia64/kernel/unwind.c if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
pr 1961 arch/ia64/kernel/unwind.c unw_get_pr(info, &info->pr);
pr 1972 arch/ia64/kernel/unwind.c unsigned long ip, sp, pr = info->pr;
pr 1983 arch/ia64/kernel/unwind.c (pr & (1UL << PRED_USER_STACK)))
pr 1985 arch/ia64/kernel/unwind.c if (unw_get_pr (info, &pr) < 0) {
pr 2038 arch/ia64/kernel/unwind.c info->pr = sw->pr;
pr 2047 arch/ia64/kernel/unwind.c info->pr, (unsigned long) info->sw, info->sp);
pr 678 arch/microblaze/pci/pci-common.c struct resource *res, *pr;
pr 688 arch/microblaze/pci/pci-common.c pr = (res->flags & IORESOURCE_IO) ?
pr 697 arch/microblaze/pci/pci-common.c pr = pci_find_parent_resource(bus->self, res);
pr 698 arch/microblaze/pci/pci-common.c if (pr == res) {
pr 714 arch/microblaze/pci/pci-common.c pr, (pr && pr->name) ? pr->name : "nil");
pr 716 arch/microblaze/pci/pci-common.c if (pr && !(pr->flags & IORESOURCE_UNSET)) {
pr 719 arch/microblaze/pci/pci-common.c if (request_resource(pr, res) == 0)
pr 726 arch/microblaze/pci/pci-common.c if (reparent_resources(pr, res) == 0)
pr 747 arch/microblaze/pci/pci-common.c struct resource *pr, *r = &dev->resource[idx];
pr 755 arch/microblaze/pci/pci-common.c pr = pci_find_parent_resource(dev, r);
pr 756 arch/microblaze/pci/pci-common.c if (!pr || (pr->flags & IORESOURCE_UNSET) ||
pr 757 arch/microblaze/pci/pci-common.c request_resource(pr, r) < 0) {
pr 760 arch/microblaze/pci/pci-common.c if (pr)
pr 762 arch/microblaze/pci/pci-common.c pr,
pr 763 arch/microblaze/pci/pci-common.c (unsigned long long)pr->start,
pr 764 arch/microblaze/pci/pci-common.c (unsigned long long)pr->end,
pr 765 arch/microblaze/pci/pci-common.c (unsigned int)pr->flags);
pr 399 arch/mips/alchemy/common/clock.c long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
pr 425 arch/mips/alchemy/common/clock.c pr = clk_hw_get_rate(pc);
pr 426 arch/mips/alchemy/common/clock.c if (pr < req->rate)
pr 430 arch/mips/alchemy/common/clock.c tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
pr 431 arch/mips/alchemy/common/clock.c nr = pr / tdv;
pr 438 arch/mips/alchemy/common/clock.c bpr = pr;
pr 455 arch/mips/alchemy/common/clock.c pr = clk_hw_round_rate(free, tpr);
pr 457 arch/mips/alchemy/common/clock.c tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
pr 459 arch/mips/alchemy/common/clock.c nr = pr / tdv;
pr 465 arch/mips/alchemy/common/clock.c bpr = pr;
pr 75 arch/mips/include/asm/octeon/cvmx-wqe.h uint64_t pr:4;
pr 186 arch/mips/include/asm/octeon/cvmx-wqe.h uint64_t pr:4;
pr 289 arch/mips/include/asm/octeon/cvmx-wqe.h uint64_t pr:4;
pr 392 arch/mips/include/asm/octeon/cvmx-wqe.h uint64_t pr:4;
pr 193 arch/mips/kernel/pm-cps.c struct uasm_reloc **pr,
pr 232 arch/mips/kernel/pm-cps.c uasm_il_bne(pp, pr, t0, t1, lbl);
pr 237 arch/mips/kernel/pm-cps.c struct uasm_reloc **pr,
pr 317 arch/mips/kernel/pm-cps.c uasm_il_beqz(pp, pr, t1, lbl);
pr 330 arch/mips/kernel/pm-cps.c struct uasm_reloc **pr,
pr 338 arch/mips/kernel/pm-cps.c uasm_il_beqz(pp, pr, t1, lbl);
pr 78 arch/powerpc/include/asm/book3s/32/kup.h lwz \gpr, KUAP(thread)
pr 79 arch/powerpc/include/asm/book3s/32/kup.h 999: twnei \gpr, 0
pr 17 arch/powerpc/include/asm/book3s/64/kup-radix.h ld \gpr, STACK_REGS_KUAP(r1)
pr 18 arch/powerpc/include/asm/book3s/64/kup-radix.h mtspr SPRN_AMR, \gpr
pr 25 arch/powerpc/include/asm/nohash/32/kup-8xx.h mfspr \gpr, SPRN_MD_AP
pr 26 arch/powerpc/include/asm/nohash/32/kup-8xx.h rlwinm \gpr, \gpr, 16, 0xffff
pr 27 arch/powerpc/include/asm/nohash/32/kup-8xx.h 999: twnei \gpr, MD_APG_KUAP@h
pr 1163 arch/powerpc/kernel/pci-common.c struct resource *res, *pr;
pr 1177 arch/powerpc/kernel/pci-common.c pr = (res->flags & IORESOURCE_IO) ?
pr 1180 arch/powerpc/kernel/pci-common.c pr = pci_find_parent_resource(bus->self, res);
pr 1181 arch/powerpc/kernel/pci-common.c if (pr == res) {
pr 1192 arch/powerpc/kernel/pci-common.c i, res, pr, (pr && pr->name) ? pr->name : "nil");
pr 1194 arch/powerpc/kernel/pci-common.c if (pr && !(pr->flags & IORESOURCE_UNSET)) {
pr 1197 arch/powerpc/kernel/pci-common.c if (request_resource(pr, res) == 0)
pr 1204 arch/powerpc/kernel/pci-common.c if (reparent_resources(pr, res) == 0)
pr 1232 arch/powerpc/kernel/pci-common.c struct resource *pr, *r = &dev->resource[idx];
pr 1237 arch/powerpc/kernel/pci-common.c pr = pci_find_parent_resource(dev, r);
pr 1238 arch/powerpc/kernel/pci-common.c if (!pr || (pr->flags & IORESOURCE_UNSET) ||
pr 1239 arch/powerpc/kernel/pci-common.c request_resource(pr, r) < 0) {
pr 1242 arch/powerpc/kernel/pci-common.c if (pr)
pr 1243 arch/powerpc/kernel/pci-common.c pr_debug("PCI: parent is %p: %pR\n", pr, pr);
pr 161 arch/powerpc/kvm/e500.c int as, int pid, int pr)
pr 167 arch/powerpc/kvm/e500.c BUG_ON(pr >= 2);
pr 169 arch/powerpc/kvm/e500.c idt->id[as][pid][pr].val = 0;
pr 170 arch/powerpc/kvm/e500.c idt->id[as][pid][pr].pentry = NULL;
pr 187 arch/powerpc/kvm/e500.c unsigned int pr, int avoid_recursion)
pr 194 arch/powerpc/kvm/e500.c BUG_ON(pr >= 2);
pr 196 arch/powerpc/kvm/e500.c sid = local_sid_lookup(&idt->id[as][gid][pr]);
pr 200 arch/powerpc/kvm/e500.c sid = local_sid_setup_one(&idt->id[as][gid][pr]);
pr 236 arch/powerpc/kvm/e500.c unsigned int pr, tid, ts;
pr 247 arch/powerpc/kvm/e500.c for (pr = 0; pr < 2; pr++) {
pr 257 arch/powerpc/kvm/e500.c pid = local_sid_lookup(&idt->id[ts][tid][pr]);
pr 259 arch/powerpc/kvm/e500.c kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
pr 146 arch/powerpc/kvm/e500.h unsigned int pr, int avoid_recursion);
pr 311 arch/powerpc/kvm/e500_mmu_host.c u32 pr = vcpu->arch.shared->msr & MSR_PR;
pr 319 arch/powerpc/kvm/e500_mmu_host.c e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
pr 636 arch/powerpc/kvm/e500_mmu_host.c bool pr;
pr 672 arch/powerpc/kvm/e500_mmu_host.c pr = vcpu->arch.shared->msr & MSR_PR;
pr 673 arch/powerpc/kvm/e500_mmu_host.c if (unlikely((pr && !(mas3 & MAS3_UX)) ||
pr 674 arch/powerpc/kvm/e500_mmu_host.c (!pr && !(mas3 & MAS3_SX)))) {
pr 124 arch/powerpc/platforms/4xx/uic.c u32 tr, pr, mask;
pr 151 arch/powerpc/platforms/4xx/uic.c pr = mfdcr(uic->dcrbase + UIC_PR);
pr 153 arch/powerpc/platforms/4xx/uic.c pr = (pr & mask) | (polarity << (31-src));
pr 155 arch/powerpc/platforms/4xx/uic.c mtdcr(uic->dcrbase + UIC_PR, pr);
pr 976 arch/powerpc/platforms/pseries/hotplug-memory.c static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
pr 991 arch/powerpc/platforms/pseries/hotplug-memory.c if (!pr->old_prop)
pr 994 arch/powerpc/platforms/pseries/hotplug-memory.c p = (__be32 *) pr->old_prop->value;
pr 1006 arch/powerpc/platforms/pseries/hotplug-memory.c p = (__be32 *)pr->prop->value;
pr 75 arch/s390/include/asm/nmi.h u64 pr : 1; /* 42 tod programmable register validity */
pr 53 arch/s390/mm/dump_pagetables.c static void print_prot(struct seq_file *m, unsigned int pr, int level)
pr 59 arch/s390/mm/dump_pagetables.c if (pr & _PAGE_INVALID) {
pr 63 arch/s390/mm/dump_pagetables.c seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
pr 64 arch/s390/mm/dump_pagetables.c seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
pr 58 arch/sh/include/asm/kexec.h __asm__ __volatile__ ("sts pr, %0" : "=r" (newregs->pr));
pr 48 arch/sh/include/uapi/asm/ptrace_32.h unsigned long pr;
pr 419 arch/sh/kernel/cpu/sh2a/fpu.c regs->pr = regs->pc + 4;
pr 444 arch/sh/kernel/cpu/sh2a/fpu.c nextpc = regs->pr;
pr 187 arch/sh/kernel/cpu/sh4/fpu.c regs->pr = regs->pc + 4; /* bsr & jsr */
pr 219 arch/sh/kernel/cpu/sh4/fpu.c nextpc = regs->pr;
pr 112 arch/sh/kernel/kgdb.c addr = linux_regs->pr;
pr 185 arch/sh/kernel/kgdb.c { "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
pr 248 arch/sh/kernel/kgdb.c gdb_regs[GDB_PR] = thread_regs->pr;
pr 171 arch/sh/kernel/kprobes.c op1->addr = (kprobe_opcode_t *) regs->pr;
pr 206 arch/sh/kernel/kprobes.c ri->ret_addr = (kprobe_opcode_t *) regs->pr;
pr 209 arch/sh/kernel/kprobes.c regs->pr = (unsigned long)kretprobe_trampoline;
pr 37 arch/sh/kernel/process_32.c printk("PR is at %pS\n", (void *)regs->pr);
pr 60 arch/sh/kernel/process_32.c regs->mach, regs->macl, regs->gbr, regs->pr);
pr 69 arch/sh/kernel/process_32.c regs->pr = 0;
pr 295 arch/sh/kernel/ptrace_32.c REG_OFFSET_NAME(pr),
pr 130 arch/sh/kernel/signal_32.c COPY(macl); COPY(pr);
pr 234 arch/sh/kernel/signal_32.c COPY(macl); COPY(pr);
pr 287 arch/sh/kernel/signal_32.c regs->pr = (unsigned long) ksig->ka.sa.sa_restorer;
pr 290 arch/sh/kernel/signal_32.c regs->pr = VDSO_SYM(&__kernel_sigreturn);
pr 302 arch/sh/kernel/signal_32.c regs->pr = (unsigned long) frame->retcode;
pr 303 arch/sh/kernel/signal_32.c flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
pr 328 arch/sh/kernel/signal_32.c current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
pr 357 arch/sh/kernel/signal_32.c regs->pr = (unsigned long) ksig->ka.sa.sa_restorer;
pr 360 arch/sh/kernel/signal_32.c regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
pr 372 arch/sh/kernel/signal_32.c regs->pr = (unsigned long) frame->retcode;
pr 373 arch/sh/kernel/signal_32.c flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
pr 398 arch/sh/kernel/signal_32.c current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
pr 338 arch/sh/kernel/traps_32.c regs->pc = regs->pr;
pr 350 arch/sh/kernel/traps_32.c regs->pr = regs->pc + 4;
pr 377 arch/sh/kernel/traps_32.c regs->pr = regs->pc + 4;
pr 442 arch/sh/kernel/traps_32.c regs->pr = regs->pc + 4;
pr 534 arch/sh/kernel/traps_32.c regs->pr);
pr 664 arch/sh/kernel/traps_32.c regs->pr = regs->pc + 4;
pr 687 arch/sh/kernel/traps_32.c regs->pc = regs->pr;
pr 518 arch/sh/math-emu/math.c regs->pr = regs->pc + 4;
pr 544 arch/sh/math-emu/math.c nextpc = regs->pr;
pr 378 arch/sparc/kernel/pci_common.c const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
pr 385 arch/sparc/kernel/pci_common.c parent_phys_hi = pr->parent_phys_hi;
pr 386 arch/sparc/kernel/pci_common.c parent_phys_lo = pr->parent_phys_lo;
pr 387 arch/sparc/kernel/pci_common.c child_phys_mid = pr->child_phys_mid;
pr 388 arch/sparc/kernel/pci_common.c child_phys_lo = pr->child_phys_lo;
pr 392 arch/sparc/kernel/pci_common.c size_hi = pr->size_hi;
pr 393 arch/sparc/kernel/pci_common.c size_lo = pr->size_lo;
pr 395 arch/sparc/kernel/pci_common.c type = (pr->child_phys_hi >> 24) & 0x3;
pr 542 arch/sparc/kernel/sbus.c const struct linux_prom64_registers *pr;
pr 550 arch/sparc/kernel/sbus.c pr = of_get_property(dp, "reg", NULL);
pr 551 arch/sparc/kernel/sbus.c if (!pr) {
pr 556 arch/sparc/kernel/sbus.c regs = pr->phys_addr;
pr 227 arch/x86/include/asm/inst.h R32_NUM extrd_opd1 \gpr
pr 370 arch/x86/kernel/dumpstack.c const char *pr = "";
pr 377 arch/x86/kernel/dumpstack.c pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
pr 381 arch/x86/kernel/dumpstack.c pr,
pr 2694 arch/x86/kvm/x86.c bool pr = false;
pr 2870 arch/x86/kvm/x86.c pr = true; /* fall through */
pr 2876 arch/x86/kvm/x86.c if (pr || data != 0)
pr 179 arch/x86/mm/dump_pagetables.c pgprotval_t pr = pgprot_val(prot);
pr 183 arch/x86/mm/dump_pagetables.c if (!(pr & _PAGE_PRESENT)) {
pr 187 arch/x86/mm/dump_pagetables.c if (pr & _PAGE_USER)
pr 191 arch/x86/mm/dump_pagetables.c if (pr & _PAGE_RW)
pr 195 arch/x86/mm/dump_pagetables.c if (pr & _PAGE_PWT)
pr 199 arch/x86/mm/dump_pagetables.c if (pr & _PAGE_PCD)
pr 205 arch/x86/mm/dump_pagetables.c if (level <= 4 && pr & _PAGE_PSE)
pr 209 arch/x86/mm/dump_pagetables.c if ((level == 5 && pr & _PAGE_PAT) ||
pr 210 arch/x86/mm/dump_pagetables.c ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
pr 214 arch/x86/mm/dump_pagetables.c if (pr & _PAGE_GLOBAL)
pr 218 arch/x86/mm/dump_pagetables.c if (pr & _PAGE_NX)
pr 111 crypto/async_tx/raid6test.c pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
pr 115 crypto/async_tx/raid6test.c pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
pr 134 crypto/async_tx/raid6test.c pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
pr 169 crypto/async_tx/raid6test.c pr("error: initial gen_syndrome(%d) timed out\n", disks);
pr 173 crypto/async_tx/raid6test.c pr("testing the %d-disk case...\n", disks);
pr 221 crypto/async_tx/raid6test.c pr("\n");
pr 222 crypto/async_tx/raid6test.c pr("complete (%d tests, %d failure%s)\n",
pr 1378 crypto/drbg.c if (drbg->pr || !drbg->seeded) {
pr 1381 crypto/drbg.c drbg->pr ? "true" : "false",
pr 1546 crypto/drbg.c int coreref, bool pr)
pr 1552 crypto/drbg.c "%s\n", coreref, pr ? "enabled" : "disabled");
pr 1567 crypto/drbg.c drbg->pr = pr;
pr 1880 crypto/drbg.c int *coreref, bool *pr)
pr 1886 crypto/drbg.c *pr = true;
pr 1890 crypto/drbg.c *pr = false;
pr 1956 crypto/drbg.c bool pr = false;
pr 1962 crypto/drbg.c &pr);
pr 1968 crypto/drbg.c return drbg_instantiate(drbg, seed_string, coreref, pr);
pr 1993 crypto/drbg.c bool pr = false;
pr 2003 crypto/drbg.c drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
pr 2005 crypto/drbg.c drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
pr 2007 crypto/drbg.c drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
pr 2057 crypto/drbg.c const struct drbg_core *core, int pr)
pr 2063 crypto/drbg.c if (pr) {
pr 3380 crypto/testmgr.c static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
pr 3410 crypto/testmgr.c if (pr) {
pr 3425 crypto/testmgr.c if (pr) {
pr 3452 crypto/testmgr.c int pr = 0;
pr 3458 crypto/testmgr.c pr = 1;
pr 3461 crypto/testmgr.c err = drbg_cavs_test(&template[i], pr, driver, type, mask);
pr 183 drivers/acpi/acpi_processor.c static int acpi_processor_hotadd_init(struct acpi_processor *pr)
pr 189 drivers/acpi/acpi_processor.c if (invalid_phys_cpuid(pr->phys_id))
pr 192 drivers/acpi/acpi_processor.c status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
pr 199 drivers/acpi/acpi_processor.c ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
pr 203 drivers/acpi/acpi_processor.c ret = arch_register_cpu(pr->id);
pr 205 drivers/acpi/acpi_processor.c acpi_unmap_cpu(pr->id);
pr 214 drivers/acpi/acpi_processor.c pr_info("CPU%d has been hot-added\n", pr->id);
pr 215 drivers/acpi/acpi_processor.c pr->flags.need_hotplug_init = 1;
pr 223 drivers/acpi/acpi_processor.c static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
pr 233 drivers/acpi/acpi_processor.c struct acpi_processor *pr = acpi_driver_data(device);
pr 246 drivers/acpi/acpi_processor.c pr->flags.bm_control = 1;
pr 255 drivers/acpi/acpi_processor.c status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
pr 263 drivers/acpi/acpi_processor.c pr->acpi_id = object.processor.proc_id;
pr 269 drivers/acpi/acpi_processor.c status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
pr 278 drivers/acpi/acpi_processor.c pr->acpi_id = value;
pr 281 drivers/acpi/acpi_processor.c if (acpi_duplicate_processor_id(pr->acpi_id)) {
pr 282 drivers/acpi/acpi_processor.c if (pr->acpi_id == 0xff)
pr 288 drivers/acpi/acpi_processor.c pr->acpi_id);
pr 292 drivers/acpi/acpi_processor.c pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
pr 293 drivers/acpi/acpi_processor.c pr->acpi_id);
pr 294 drivers/acpi/acpi_processor.c if (invalid_phys_cpuid(pr->phys_id))
pr 295 drivers/acpi/acpi_processor.c acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
pr 297 drivers/acpi/acpi_processor.c pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
pr 304 drivers/acpi/acpi_processor.c if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
pr 305 drivers/acpi/acpi_processor.c pr->id = 0;
pr 316 drivers/acpi/acpi_processor.c if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
pr 317 drivers/acpi/acpi_processor.c int ret = acpi_processor_hotadd_init(pr);
pr 331 drivers/acpi/acpi_processor.c sprintf(acpi_device_bid(device), "CPU%X", pr->id);
pr 332 drivers/acpi/acpi_processor.c ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
pr 333 drivers/acpi/acpi_processor.c pr->acpi_id));
pr 341 drivers/acpi/acpi_processor.c pr->throttling.address = object.processor.pblk_address;
pr 342 drivers/acpi/acpi_processor.c pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
pr 343 drivers/acpi/acpi_processor.c pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr 345 drivers/acpi/acpi_processor.c pr->pblk = object.processor.pblk_address;
pr 353 drivers/acpi/acpi_processor.c status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
pr 355 drivers/acpi/acpi_processor.c arch_fix_phys_package_id(pr->id, value);
pr 371 drivers/acpi/acpi_processor.c struct acpi_processor *pr;
pr 375 drivers/acpi/acpi_processor.c pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
pr 376 drivers/acpi/acpi_processor.c if (!pr)
pr 379 drivers/acpi/acpi_processor.c if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
pr 384 drivers/acpi/acpi_processor.c pr->handle = device->handle;
pr 387 drivers/acpi/acpi_processor.c device->driver_data = pr;
pr 393 drivers/acpi/acpi_processor.c BUG_ON(pr->id >= nr_cpu_ids);
pr 400 drivers/acpi/acpi_processor.c if (per_cpu(processor_device_array, pr->id) != NULL &&
pr 401 drivers/acpi/acpi_processor.c per_cpu(processor_device_array, pr->id) != device) {
pr 404 drivers/acpi/acpi_processor.c pr->id);
pr 412 drivers/acpi/acpi_processor.c per_cpu(processor_device_array, pr->id) = device;
pr 413 drivers/acpi/acpi_processor.c per_cpu(processors, pr->id) = pr;
pr 415 drivers/acpi/acpi_processor.c dev = get_cpu_device(pr->id);
pr 425 drivers/acpi/acpi_processor.c pr->dev = dev;
pr 435 drivers/acpi/acpi_processor.c free_cpumask_var(pr->throttling.shared_cpu_map);
pr 437 drivers/acpi/acpi_processor.c per_cpu(processors, pr->id) = NULL;
pr 439 drivers/acpi/acpi_processor.c kfree(pr);
pr 450 drivers/acpi/acpi_processor.c struct acpi_processor *pr;
pr 455 drivers/acpi/acpi_processor.c pr = acpi_driver_data(device);
pr 456 drivers/acpi/acpi_processor.c if (pr->id >= nr_cpu_ids)
pr 467 drivers/acpi/acpi_processor.c device_release_driver(pr->dev);
pr 468 drivers/acpi/acpi_processor.c acpi_unbind_one(pr->dev);
pr 471 drivers/acpi/acpi_processor.c per_cpu(processor_device_array, pr->id) = NULL;
pr 472 drivers/acpi/acpi_processor.c per_cpu(processors, pr->id) = NULL;
pr 478 drivers/acpi/acpi_processor.c arch_unregister_cpu(pr->id);
pr 479 drivers/acpi/acpi_processor.c acpi_unmap_cpu(pr->id);
pr 484 drivers/acpi/acpi_processor.c try_offline_node(cpu_to_node(pr->id));
pr 487 drivers/acpi/acpi_processor.c free_cpumask_var(pr->throttling.shared_cpu_map);
pr 488 drivers/acpi/acpi_processor.c kfree(pr);
pr 428 drivers/acpi/cppc_acpi.c struct cppc_cpudata *pr, *match_pr;
pr 441 drivers/acpi/cppc_acpi.c pr = all_cpu_data[i];
pr 442 drivers/acpi/cppc_acpi.c if (!pr)
pr 455 drivers/acpi/cppc_acpi.c cpumask_set_cpu(i, pr->shared_cpu_map);
pr 463 drivers/acpi/cppc_acpi.c pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
pr 465 drivers/acpi/cppc_acpi.c pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr 467 drivers/acpi/cppc_acpi.c pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
pr 495 drivers/acpi/cppc_acpi.c cpumask_set_cpu(j, pr->shared_cpu_map);
pr 516 drivers/acpi/cppc_acpi.c match_pr->shared_type = pr->shared_type;
pr 518 drivers/acpi/cppc_acpi.c pr->shared_cpu_map);
pr 524 drivers/acpi/cppc_acpi.c pr = all_cpu_data[i];
pr 525 drivers/acpi/cppc_acpi.c if (!pr)
pr 530 drivers/acpi/cppc_acpi.c cpumask_clear(pr->shared_cpu_map);
pr 531 drivers/acpi/cppc_acpi.c cpumask_set_cpu(i, pr->shared_cpu_map);
pr 532 drivers/acpi/cppc_acpi.c pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
pr 716 drivers/acpi/cppc_acpi.c int acpi_cppc_processor_probe(struct acpi_processor *pr)
pr 723 drivers/acpi/cppc_acpi.c acpi_handle handle = pr->handle;
pr 816 drivers/acpi/cppc_acpi.c pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id);
pr 820 drivers/acpi/cppc_acpi.c per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
pr 834 drivers/acpi/cppc_acpi.c cpc_ptr->cpu_id = pr->id;
pr 852 drivers/acpi/cppc_acpi.c pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
pr 855 drivers/acpi/cppc_acpi.c cpu_dev = get_cpu_device(pr->id);
pr 862 drivers/acpi/cppc_acpi.c per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
pr 867 drivers/acpi/cppc_acpi.c per_cpu(cpc_desc_ptr, pr->id) = NULL;
pr 897 drivers/acpi/cppc_acpi.c void acpi_cppc_processor_exit(struct acpi_processor *pr)
pr 902 drivers/acpi/cppc_acpi.c int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
pr 915 drivers/acpi/cppc_acpi.c cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
pr 59 drivers/acpi/processor_driver.c struct acpi_processor *pr;
pr 65 drivers/acpi/processor_driver.c pr = acpi_driver_data(device);
pr 66 drivers/acpi/processor_driver.c if (!pr)
pr 71 drivers/acpi/processor_driver.c saved = pr->performance_platform_limit;
pr 72 drivers/acpi/processor_driver.c acpi_processor_ppc_has_changed(pr, 1);
pr 73 drivers/acpi/processor_driver.c if (saved == pr->performance_platform_limit)
pr 77 drivers/acpi/processor_driver.c pr->performance_platform_limit);
pr 80 drivers/acpi/processor_driver.c acpi_processor_power_state_has_changed(pr);
pr 85 drivers/acpi/processor_driver.c acpi_processor_tstate_has_changed(pr);
pr 102 drivers/acpi/processor_driver.c struct acpi_processor *pr = per_cpu(processors, cpu);
pr 105 drivers/acpi/processor_driver.c if (!pr || acpi_bus_get_device(pr->handle, &device))
pr 111 drivers/acpi/processor_driver.c if (pr->flags.need_hotplug_init) {
pr 115 drivers/acpi/processor_driver.c pr->id);
pr 116 drivers/acpi/processor_driver.c pr->flags.need_hotplug_init = 0;
pr 118 drivers/acpi/processor_driver.c WARN(ret, "Failed to start CPU: %d\n", pr->id);
pr 121 drivers/acpi/processor_driver.c acpi_processor_ppc_has_changed(pr, 0);
pr 122 drivers/acpi/processor_driver.c acpi_processor_hotplug(pr);
pr 123 drivers/acpi/processor_driver.c acpi_processor_reevaluate_tstate(pr, false);
pr 124 drivers/acpi/processor_driver.c acpi_processor_tstate_has_changed(pr);
pr 131 drivers/acpi/processor_driver.c struct acpi_processor *pr = per_cpu(processors, cpu);
pr 134 drivers/acpi/processor_driver.c if (!pr || acpi_bus_get_device(pr->handle, &device))
pr 137 drivers/acpi/processor_driver.c acpi_processor_reevaluate_tstate(pr, true);
pr 142 drivers/acpi/processor_driver.c static int acpi_pss_perf_init(struct acpi_processor *pr,
pr 147 drivers/acpi/processor_driver.c acpi_processor_ppc_has_changed(pr, 0);
pr 149 drivers/acpi/processor_driver.c acpi_processor_get_throttling_info(pr);
pr 151 drivers/acpi/processor_driver.c if (pr->flags.throttling)
pr 152 drivers/acpi/processor_driver.c pr->flags.limit = 1;
pr 154 drivers/acpi/processor_driver.c pr->cdev = thermal_cooling_device_register("Processor", device,
pr 156 drivers/acpi/processor_driver.c if (IS_ERR(pr->cdev)) {
pr 157 drivers/acpi/processor_driver.c result = PTR_ERR(pr->cdev);
pr 162 drivers/acpi/processor_driver.c pr->cdev->id);
pr 165 drivers/acpi/processor_driver.c &pr->cdev->device.kobj,
pr 173 drivers/acpi/processor_driver.c result = sysfs_create_link(&pr->cdev->device.kobj,
pr 177 drivers/acpi/processor_driver.c dev_err(&pr->cdev->device,
pr 187 drivers/acpi/processor_driver.c thermal_cooling_device_unregister(pr->cdev);
pr 192 drivers/acpi/processor_driver.c static void acpi_pss_perf_exit(struct acpi_processor *pr,
pr 195 drivers/acpi/processor_driver.c if (pr->cdev) {
pr 197 drivers/acpi/processor_driver.c sysfs_remove_link(&pr->cdev->device.kobj, "device");
pr 198 drivers/acpi/processor_driver.c thermal_cooling_device_unregister(pr->cdev);
pr 199 drivers/acpi/processor_driver.c pr->cdev = NULL;
pr 203 drivers/acpi/processor_driver.c static inline int acpi_pss_perf_init(struct acpi_processor *pr,
pr 209 drivers/acpi/processor_driver.c static inline void acpi_pss_perf_exit(struct acpi_processor *pr,
pr 215 drivers/acpi/processor_driver.c struct acpi_processor *pr = acpi_driver_data(device);
pr 219 drivers/acpi/processor_driver.c if (!pr)
pr 222 drivers/acpi/processor_driver.c if (pr->flags.need_hotplug_init)
pr 225 drivers/acpi/processor_driver.c result = acpi_cppc_processor_probe(pr);
pr 230 drivers/acpi/processor_driver.c acpi_processor_power_init(pr);
pr 232 drivers/acpi/processor_driver.c result = acpi_pss_perf_init(pr, device);
pr 242 drivers/acpi/processor_driver.c acpi_pss_perf_exit(pr, device);
pr 245 drivers/acpi/processor_driver.c acpi_processor_power_exit(pr);
pr 267 drivers/acpi/processor_driver.c struct acpi_processor *pr;
pr 275 drivers/acpi/processor_driver.c pr = acpi_driver_data(device);
pr 276 drivers/acpi/processor_driver.c if (!pr)
pr 278 drivers/acpi/processor_driver.c acpi_processor_power_exit(pr);
pr 280 drivers/acpi/processor_driver.c acpi_pss_perf_exit(pr, device);
pr 282 drivers/acpi/processor_driver.c acpi_cppc_processor_exit(pr);
pr 124 drivers/acpi/processor_idle.c static void lapic_timer_check_state(int state, struct acpi_processor *pr,
pr 127 drivers/acpi/processor_idle.c struct acpi_processor_power *pwr = &pr->power;
pr 130 drivers/acpi/processor_idle.c if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
pr 144 drivers/acpi/processor_idle.c pr->power.timer_broadcast_on_state = state;
pr 149 drivers/acpi/processor_idle.c struct acpi_processor *pr = (struct acpi_processor *) arg;
pr 151 drivers/acpi/processor_idle.c if (pr->power.timer_broadcast_on_state < INT_MAX)
pr 157 drivers/acpi/processor_idle.c static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
pr 159 drivers/acpi/processor_idle.c smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
pr 160 drivers/acpi/processor_idle.c (void *)pr, 1);
pr 164 drivers/acpi/processor_idle.c static void lapic_timer_state_broadcast(struct acpi_processor *pr,
pr 168 drivers/acpi/processor_idle.c int state = cx - pr->power.states;
pr 170 drivers/acpi/processor_idle.c if (state >= pr->power.timer_broadcast_on_state) {
pr 180 drivers/acpi/processor_idle.c static void lapic_timer_check_state(int state, struct acpi_processor *pr,
pr 182 drivers/acpi/processor_idle.c static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
pr 183 drivers/acpi/processor_idle.c static void lapic_timer_state_broadcast(struct acpi_processor *pr,
pr 218 drivers/acpi/processor_idle.c static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
pr 221 drivers/acpi/processor_idle.c if (!pr->pblk)
pr 225 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
pr 226 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
pr 239 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
pr 240 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
pr 243 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
pr 244 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
pr 254 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C2].address = 0;
pr 265 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C3].address = 0;
pr 270 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C2].address,
pr 271 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C3].address));
pr 273 drivers/acpi/processor_idle.c snprintf(pr->power.states[ACPI_STATE_C2].desc,
pr 275 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C2].address);
pr 276 drivers/acpi/processor_idle.c snprintf(pr->power.states[ACPI_STATE_C3].desc,
pr 278 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C3].address);
pr 283 drivers/acpi/processor_idle.c static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
pr 285 drivers/acpi/processor_idle.c if (!pr->power.states[ACPI_STATE_C1].valid) {
pr 288 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
pr 289 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C1].valid = 1;
pr 290 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
pr 292 drivers/acpi/processor_idle.c snprintf(pr->power.states[ACPI_STATE_C1].desc,
pr 296 drivers/acpi/processor_idle.c pr->power.states[ACPI_STATE_C0].valid = 1;
pr 300 drivers/acpi/processor_idle.c static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
pr 314 drivers/acpi/processor_idle.c status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
pr 339 drivers/acpi/processor_idle.c pr->flags.has_cst = 1;
pr 386 drivers/acpi/processor_idle.c (pr->id, &cx, reg) == 0) {
pr 435 drivers/acpi/processor_idle.c memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
pr 462 drivers/acpi/processor_idle.c static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
pr 488 drivers/acpi/processor_idle.c acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
pr 489 drivers/acpi/processor_idle.c bm_check_flag = pr->flags.bm_check;
pr 490 drivers/acpi/processor_idle.c bm_control_flag = pr->flags.bm_control;
pr 492 drivers/acpi/processor_idle.c pr->flags.bm_check = bm_check_flag;
pr 493 drivers/acpi/processor_idle.c pr->flags.bm_control = bm_control_flag;
pr 496 drivers/acpi/processor_idle.c if (pr->flags.bm_check) {
pr 497 drivers/acpi/processor_idle.c if (!pr->flags.bm_control) {
pr 498 drivers/acpi/processor_idle.c if (pr->flags.has_cst != 1) {
pr 543 drivers/acpi/processor_idle.c static int acpi_processor_power_verify(struct acpi_processor *pr)
pr 548 drivers/acpi/processor_idle.c pr->power.timer_broadcast_on_state = INT_MAX;
pr 551 drivers/acpi/processor_idle.c struct acpi_processor_cx *cx = &pr->power.states[i];
pr 565 drivers/acpi/processor_idle.c acpi_processor_power_verify_c3(pr, cx);
pr 571 drivers/acpi/processor_idle.c lapic_timer_check_state(i, pr, cx);
pr 576 drivers/acpi/processor_idle.c lapic_timer_propagate_broadcast(pr);
pr 581 drivers/acpi/processor_idle.c static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
pr 591 drivers/acpi/processor_idle.c memset(pr->power.states, 0, sizeof(pr->power.states));
pr 593 drivers/acpi/processor_idle.c result = acpi_processor_get_power_info_cst(pr);
pr 595 drivers/acpi/processor_idle.c result = acpi_processor_get_power_info_fadt(pr);
pr 600 drivers/acpi/processor_idle.c acpi_processor_get_power_info_default(pr);
pr 602 drivers/acpi/processor_idle.c pr->power.count = acpi_processor_power_verify(pr);
pr 609 drivers/acpi/processor_idle.c if (pr->power.states[i].valid) {
pr 610 drivers/acpi/processor_idle.c pr->power.count = i;
pr 611 drivers/acpi/processor_idle.c if (pr->power.states[i].type >= ACPI_STATE_C2)
pr 612 drivers/acpi/processor_idle.c pr->flags.power = 1;
pr 695 drivers/acpi/processor_idle.c static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
pr 697 drivers/acpi/processor_idle.c return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
pr 710 drivers/acpi/processor_idle.c static void acpi_idle_enter_bm(struct acpi_processor *pr,
pr 720 drivers/acpi/processor_idle.c lapic_timer_state_broadcast(pr, cx, 1);
pr 731 drivers/acpi/processor_idle.c if (pr->flags.bm_control) {
pr 743 drivers/acpi/processor_idle.c if (pr->flags.bm_control) {
pr 751 drivers/acpi/processor_idle.c lapic_timer_state_broadcast(pr, cx, 0);
pr 758 drivers/acpi/processor_idle.c struct acpi_processor *pr;
pr 760 drivers/acpi/processor_idle.c pr = __this_cpu_read(processors);
pr 761 drivers/acpi/processor_idle.c if (unlikely(!pr))
pr 765 drivers/acpi/processor_idle.c if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
pr 768 drivers/acpi/processor_idle.c } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
pr 770 drivers/acpi/processor_idle.c acpi_idle_enter_bm(pr, cx, true);
pr 782 drivers/acpi/processor_idle.c lapic_timer_state_broadcast(pr, cx, 1);
pr 789 drivers/acpi/processor_idle.c lapic_timer_state_broadcast(pr, cx, 0);
pr 800 drivers/acpi/processor_idle.c struct acpi_processor *pr = __this_cpu_read(processors);
pr 802 drivers/acpi/processor_idle.c if (unlikely(!pr))
pr 805 drivers/acpi/processor_idle.c if (pr->flags.bm_check) {
pr 806 drivers/acpi/processor_idle.c acpi_idle_enter_bm(pr, cx, false);
pr 815 drivers/acpi/processor_idle.c static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
pr 825 drivers/acpi/processor_idle.c cx = &pr->power.states[i];
pr 843 drivers/acpi/processor_idle.c static int acpi_processor_setup_cstates(struct acpi_processor *pr)
pr 861 drivers/acpi/processor_idle.c cx = &pr->power.states[i];
pr 885 drivers/acpi/processor_idle.c if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
pr 927 drivers/acpi/processor_idle.c static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
pr 932 drivers/acpi/processor_idle.c static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
pr 938 drivers/acpi/processor_idle.c static int acpi_processor_setup_cstates(struct acpi_processor *pr)
pr 1119 drivers/acpi/processor_idle.c static int flatten_lpi_states(struct acpi_processor *pr,
pr 1140 drivers/acpi/processor_idle.c flpi = &pr->power.lpi_states[flat_state_cnt];
pr 1164 drivers/acpi/processor_idle.c static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
pr 1168 drivers/acpi/processor_idle.c acpi_handle handle = pr->handle, pr_ahandle;
pr 1181 drivers/acpi/processor_idle.c handle = pr->handle;
pr 1185 drivers/acpi/processor_idle.c flatten_lpi_states(pr, prev, NULL);
pr 1204 drivers/acpi/processor_idle.c flatten_lpi_states(pr, curr, prev);
pr 1211 drivers/acpi/processor_idle.c pr->power.count = flat_state_cnt;
pr 1213 drivers/acpi/processor_idle.c for (i = 0; i < pr->power.count; i++)
pr 1214 drivers/acpi/processor_idle.c pr->power.lpi_states[i].index = i;
pr 1217 drivers/acpi/processor_idle.c pr->flags.has_lpi = 1;
pr 1218 drivers/acpi/processor_idle.c pr->flags.power = 1;
pr 1244 drivers/acpi/processor_idle.c struct acpi_processor *pr;
pr 1247 drivers/acpi/processor_idle.c pr = __this_cpu_read(processors);
pr 1249 drivers/acpi/processor_idle.c if (unlikely(!pr))
pr 1252 drivers/acpi/processor_idle.c lpi = &pr->power.lpi_states[index];
pr 1259 drivers/acpi/processor_idle.c static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
pr 1266 drivers/acpi/processor_idle.c if (!pr->flags.has_lpi)
pr 1269 drivers/acpi/processor_idle.c for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
pr 1270 drivers/acpi/processor_idle.c lpi = &pr->power.lpi_states[i];
pr 1294 drivers/acpi/processor_idle.c static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
pr 1299 drivers/acpi/processor_idle.c if (!pr->flags.power_setup_done || !pr->flags.power)
pr 1308 drivers/acpi/processor_idle.c if (pr->flags.has_lpi)
pr 1309 drivers/acpi/processor_idle.c return acpi_processor_setup_lpi_states(pr);
pr 1311 drivers/acpi/processor_idle.c return acpi_processor_setup_cstates(pr);
pr 1321 drivers/acpi/processor_idle.c static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
pr 1324 drivers/acpi/processor_idle.c if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
pr 1327 drivers/acpi/processor_idle.c dev->cpu = pr->id;
pr 1328 drivers/acpi/processor_idle.c if (pr->flags.has_lpi)
pr 1329 drivers/acpi/processor_idle.c return acpi_processor_ffh_lpi_probe(pr->id);
pr 1331 drivers/acpi/processor_idle.c return acpi_processor_setup_cpuidle_cx(pr, dev);
pr 1334 drivers/acpi/processor_idle.c static int acpi_processor_get_power_info(struct acpi_processor *pr)
pr 1338 drivers/acpi/processor_idle.c ret = acpi_processor_get_lpi_info(pr);
pr 1340 drivers/acpi/processor_idle.c ret = acpi_processor_get_cstate_info(pr);
pr 1345 drivers/acpi/processor_idle.c int acpi_processor_hotplug(struct acpi_processor *pr)
pr 1353 drivers/acpi/processor_idle.c if (!pr->flags.power_setup_done)
pr 1356 drivers/acpi/processor_idle.c dev = per_cpu(acpi_cpuidle_device, pr->id);
pr 1359 drivers/acpi/processor_idle.c ret = acpi_processor_get_power_info(pr);
pr 1360 drivers/acpi/processor_idle.c if (!ret && pr->flags.power) {
pr 1361 drivers/acpi/processor_idle.c acpi_processor_setup_cpuidle_dev(pr, dev);
pr 1369 drivers/acpi/processor_idle.c int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
pr 1378 drivers/acpi/processor_idle.c if (!pr->flags.power_setup_done)
pr 1387 drivers/acpi/processor_idle.c if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
pr 1403 drivers/acpi/processor_idle.c acpi_processor_get_power_info(pr);
pr 1404 drivers/acpi/processor_idle.c acpi_processor_setup_cpuidle_states(pr);
pr 1427 drivers/acpi/processor_idle.c int acpi_processor_power_init(struct acpi_processor *pr)
pr 1437 drivers/acpi/processor_idle.c if (!acpi_processor_get_power_info(pr))
pr 1438 drivers/acpi/processor_idle.c pr->flags.power_setup_done = 1;
pr 1445 drivers/acpi/processor_idle.c if (pr->flags.power) {
pr 1448 drivers/acpi/processor_idle.c acpi_processor_setup_cpuidle_states(pr);
pr 1459 drivers/acpi/processor_idle.c per_cpu(acpi_cpuidle_device, pr->id) = dev;
pr 1461 drivers/acpi/processor_idle.c acpi_processor_setup_cpuidle_dev(pr, dev);
pr 1477 drivers/acpi/processor_idle.c int acpi_processor_power_exit(struct acpi_processor *pr)
pr 1479 drivers/acpi/processor_idle.c struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
pr 1484 drivers/acpi/processor_idle.c if (pr->flags.power) {
pr 1491 drivers/acpi/processor_idle.c pr->flags.power_setup_done = 0;
pr 55 drivers/acpi/processor_perflib.c static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
pr 61 drivers/acpi/processor_perflib.c if (!pr)
pr 68 drivers/acpi/processor_perflib.c status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
pr 78 drivers/acpi/processor_perflib.c pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
pr 81 drivers/acpi/processor_perflib.c pr->performance_platform_limit = (int)ppc;
pr 83 drivers/acpi/processor_perflib.c if (ppc >= pr->performance->state_count ||
pr 84 drivers/acpi/processor_perflib.c unlikely(!freq_qos_request_active(&pr->perflib_req)))
pr 87 drivers/acpi/processor_perflib.c ret = freq_qos_update_request(&pr->perflib_req,
pr 88 drivers/acpi/processor_perflib.c pr->performance->states[ppc].core_frequency * 1000);
pr 91 drivers/acpi/processor_perflib.c pr->id, ret);
pr 112 drivers/acpi/processor_perflib.c void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
pr 116 drivers/acpi/processor_perflib.c if (ignore_ppc || !pr->performance) {
pr 122 drivers/acpi/processor_perflib.c acpi_processor_ppc_ost(pr->handle, 1);
pr 126 drivers/acpi/processor_perflib.c ret = acpi_processor_get_platform_limit(pr);
pr 133 drivers/acpi/processor_perflib.c acpi_processor_ppc_ost(pr->handle, 1);
pr 135 drivers/acpi/processor_perflib.c acpi_processor_ppc_ost(pr->handle, 0);
pr 138 drivers/acpi/processor_perflib.c cpufreq_update_limits(pr->id);
pr 143 drivers/acpi/processor_perflib.c struct acpi_processor *pr;
pr 145 drivers/acpi/processor_perflib.c pr = per_cpu(processors, cpu);
pr 146 drivers/acpi/processor_perflib.c if (!pr || !pr->performance || !pr->performance->state_count)
pr 148 drivers/acpi/processor_perflib.c *limit = pr->performance->states[pr->performance_platform_limit].
pr 165 drivers/acpi/processor_perflib.c struct acpi_processor *pr = per_cpu(processors, cpu);
pr 168 drivers/acpi/processor_perflib.c if (!pr)
pr 172 drivers/acpi/processor_perflib.c &pr->perflib_req,
pr 185 drivers/acpi/processor_perflib.c struct acpi_processor *pr = per_cpu(processors, cpu);
pr 187 drivers/acpi/processor_perflib.c if (pr)
pr 188 drivers/acpi/processor_perflib.c freq_qos_remove_request(&pr->perflib_req);
pr 192 drivers/acpi/processor_perflib.c static int acpi_processor_get_performance_control(struct acpi_processor *pr)
pr 201 drivers/acpi/processor_perflib.c status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
pr 228 drivers/acpi/processor_perflib.c memcpy(&pr->performance->control_register, obj.buffer.pointer,
pr 245 drivers/acpi/processor_perflib.c memcpy(&pr->performance->status_register, obj.buffer.pointer,
pr 289 drivers/acpi/processor_perflib.c static int acpi_processor_get_performance_states(struct acpi_processor *pr)
pr 301 drivers/acpi/processor_perflib.c status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
pr 317 drivers/acpi/processor_perflib.c pr->performance->state_count = pss->package.count;
pr 318 drivers/acpi/processor_perflib.c pr->performance->states =
pr 322 drivers/acpi/processor_perflib.c if (!pr->performance->states) {
pr 327 drivers/acpi/processor_perflib.c for (i = 0; i < pr->performance->state_count; i++) {
pr 329 drivers/acpi/processor_perflib.c struct acpi_processor_px *px = &(pr->performance->states[i]);
pr 341 drivers/acpi/processor_perflib.c kfree(pr->performance->states);
pr 364 drivers/acpi/processor_perflib.c pr->id, px->core_frequency);
pr 372 drivers/acpi/processor_perflib.c memcpy(&(pr->performance->states[last_invalid]),
pr 381 drivers/acpi/processor_perflib.c "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
pr 383 drivers/acpi/processor_perflib.c kfree(pr->performance->states);
pr 384 drivers/acpi/processor_perflib.c pr->performance->states = NULL;
pr 388 drivers/acpi/processor_perflib.c pr->performance->state_count = last_invalid;
pr 396 drivers/acpi/processor_perflib.c int acpi_processor_get_performance_info(struct acpi_processor *pr)
pr 400 drivers/acpi/processor_perflib.c if (!pr || !pr->performance || !pr->handle)
pr 403 drivers/acpi/processor_perflib.c if (!acpi_has_method(pr->handle, "_PCT")) {
pr 409 drivers/acpi/processor_perflib.c result = acpi_processor_get_performance_control(pr);
pr 413 drivers/acpi/processor_perflib.c result = acpi_processor_get_performance_states(pr);
pr 419 drivers/acpi/processor_perflib.c result = acpi_processor_get_platform_limit(pr);
pr 429 drivers/acpi/processor_perflib.c if (acpi_has_method(pr->handle, "_PPC")) {
pr 580 drivers/acpi/processor_perflib.c struct acpi_processor *pr;
pr 595 drivers/acpi/processor_perflib.c pr = per_cpu(processors, i);
pr 596 drivers/acpi/processor_perflib.c if (!pr) {
pr 601 drivers/acpi/processor_perflib.c if (pr->performance) {
pr 614 drivers/acpi/processor_perflib.c pr = per_cpu(processors, i);
pr 615 drivers/acpi/processor_perflib.c if (!pr)
pr 618 drivers/acpi/processor_perflib.c pr->performance = per_cpu_ptr(performance, i);
pr 619 drivers/acpi/processor_perflib.c cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pr 620 drivers/acpi/processor_perflib.c pdomain = &(pr->performance->domain_info);
pr 621 drivers/acpi/processor_perflib.c if (acpi_processor_get_psd(pr->handle, pdomain)) {
pr 634 drivers/acpi/processor_perflib.c pr = per_cpu(processors, i);
pr 635 drivers/acpi/processor_perflib.c if (!pr)
pr 641 drivers/acpi/processor_perflib.c pdomain = &(pr->performance->domain_info);
pr 642 drivers/acpi/processor_perflib.c cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pr 650 drivers/acpi/processor_perflib.c pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
pr 652 drivers/acpi/processor_perflib.c pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr 654 drivers/acpi/processor_perflib.c pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
pr 681 drivers/acpi/processor_perflib.c cpumask_set_cpu(j, pr->performance->shared_cpu_map);
pr 697 drivers/acpi/processor_perflib.c pr->performance->shared_type;
pr 699 drivers/acpi/processor_perflib.c pr->performance->shared_cpu_map);
pr 705 drivers/acpi/processor_perflib.c pr = per_cpu(processors, i);
pr 706 drivers/acpi/processor_perflib.c if (!pr || !pr->performance)
pr 711 drivers/acpi/processor_perflib.c cpumask_clear(pr->performance->shared_cpu_map);
pr 712 drivers/acpi/processor_perflib.c cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pr 713 drivers/acpi/processor_perflib.c pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
pr 715 drivers/acpi/processor_perflib.c pr->performance = NULL; /* Will be set for real in register */
pr 729 drivers/acpi/processor_perflib.c struct acpi_processor *pr;
pr 736 drivers/acpi/processor_perflib.c pr = per_cpu(processors, cpu);
pr 737 drivers/acpi/processor_perflib.c if (!pr) {
pr 742 drivers/acpi/processor_perflib.c if (pr->performance) {
pr 749 drivers/acpi/processor_perflib.c pr->performance = performance;
pr 751 drivers/acpi/processor_perflib.c if (acpi_processor_get_performance_info(pr)) {
pr 752 drivers/acpi/processor_perflib.c pr->performance = NULL;
pr 765 drivers/acpi/processor_perflib.c struct acpi_processor *pr;
pr 769 drivers/acpi/processor_perflib.c pr = per_cpu(processors, cpu);
pr 770 drivers/acpi/processor_perflib.c if (!pr) {
pr 775 drivers/acpi/processor_perflib.c if (pr->performance)
pr 776 drivers/acpi/processor_perflib.c kfree(pr->performance->states);
pr 777 drivers/acpi/processor_perflib.c pr->performance = NULL;
pr 87 drivers/acpi/processor_thermal.c struct acpi_processor *pr;
pr 106 drivers/acpi/processor_thermal.c pr = per_cpu(processors, i);
pr 108 drivers/acpi/processor_thermal.c if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
pr 119 drivers/acpi/processor_thermal.c ret = freq_qos_update_request(&pr->thermal_req, max_freq);
pr 122 drivers/acpi/processor_thermal.c pr->id, ret);
pr 133 drivers/acpi/processor_thermal.c struct acpi_processor *pr = per_cpu(processors, cpu);
pr 136 drivers/acpi/processor_thermal.c if (!pr)
pr 140 drivers/acpi/processor_thermal.c &pr->thermal_req,
pr 153 drivers/acpi/processor_thermal.c struct acpi_processor *pr = per_cpu(processors, policy->cpu);
pr 155 drivers/acpi/processor_thermal.c if (pr)
pr 156 drivers/acpi/processor_thermal.c freq_qos_remove_request(&pr->thermal_req);
pr 178 drivers/acpi/processor_thermal.c static int acpi_processor_max_state(struct acpi_processor *pr)
pr 186 drivers/acpi/processor_thermal.c max_state += cpufreq_get_max_state(pr->id);
pr 187 drivers/acpi/processor_thermal.c if (pr->flags.throttling)
pr 188 drivers/acpi/processor_thermal.c max_state += (pr->throttling.state_count -1);
pr 197 drivers/acpi/processor_thermal.c struct acpi_processor *pr;
pr 202 drivers/acpi/processor_thermal.c pr = acpi_driver_data(device);
pr 203 drivers/acpi/processor_thermal.c if (!pr)
pr 206 drivers/acpi/processor_thermal.c *state = acpi_processor_max_state(pr);
pr 215 drivers/acpi/processor_thermal.c struct acpi_processor *pr;
pr 220 drivers/acpi/processor_thermal.c pr = acpi_driver_data(device);
pr 221 drivers/acpi/processor_thermal.c if (!pr)
pr 224 drivers/acpi/processor_thermal.c *cur_state = cpufreq_get_cur_state(pr->id);
pr 225 drivers/acpi/processor_thermal.c if (pr->flags.throttling)
pr 226 drivers/acpi/processor_thermal.c *cur_state += pr->throttling.state;
pr 235 drivers/acpi/processor_thermal.c struct acpi_processor *pr;
pr 242 drivers/acpi/processor_thermal.c pr = acpi_driver_data(device);
pr 243 drivers/acpi/processor_thermal.c if (!pr)
pr 246 drivers/acpi/processor_thermal.c max_pstate = cpufreq_get_max_state(pr->id);
pr 248 drivers/acpi/processor_thermal.c if (state > acpi_processor_max_state(pr))
pr 252 drivers/acpi/processor_thermal.c if (pr->flags.throttling && pr->throttling.state)
pr 253 drivers/acpi/processor_thermal.c result = acpi_processor_set_throttling(pr, 0, false);
pr 254 drivers/acpi/processor_thermal.c cpufreq_set_cur_state(pr->id, state);
pr 256 drivers/acpi/processor_thermal.c cpufreq_set_cur_state(pr->id, max_pstate);
pr 257 drivers/acpi/processor_thermal.c result = acpi_processor_set_throttling(pr,
pr 43 drivers/acpi/processor_throttling.c struct acpi_processor *pr;
pr 51 drivers/acpi/processor_throttling.c static int acpi_processor_get_throttling(struct acpi_processor *pr);
pr 52 drivers/acpi/processor_throttling.c static int __acpi_processor_set_throttling(struct acpi_processor *pr,
pr 61 drivers/acpi/processor_throttling.c struct acpi_processor *pr, *match_pr;
pr 73 drivers/acpi/processor_throttling.c pr = per_cpu(processors, i);
pr 74 drivers/acpi/processor_throttling.c if (!pr)
pr 78 drivers/acpi/processor_throttling.c pthrottling = &(pr->throttling);
pr 94 drivers/acpi/processor_throttling.c pr = per_cpu(processors, i);
pr 95 drivers/acpi/processor_throttling.c if (!pr)
pr 100 drivers/acpi/processor_throttling.c pthrottling = &pr->throttling;
pr 175 drivers/acpi/processor_throttling.c pr = per_cpu(processors, i);
pr 176 drivers/acpi/processor_throttling.c if (!pr)
pr 184 drivers/acpi/processor_throttling.c pthrottling = &(pr->throttling);
pr 211 drivers/acpi/processor_throttling.c struct acpi_processor *pr;
pr 218 drivers/acpi/processor_throttling.c pr = per_cpu(processors, cpu);
pr 219 drivers/acpi/processor_throttling.c if (!pr) {
pr 223 drivers/acpi/processor_throttling.c if (!pr->flags.throttling) {
pr 229 drivers/acpi/processor_throttling.c p_throttling = &(pr->throttling);
pr 236 drivers/acpi/processor_throttling.c p_limit = &pr->limit;
pr 241 drivers/acpi/processor_throttling.c if (pr->throttling_platform_limit > target_state)
pr 242 drivers/acpi/processor_throttling.c target_state = pr->throttling_platform_limit;
pr 275 drivers/acpi/processor_throttling.c static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
pr 280 drivers/acpi/processor_throttling.c if (!pr)
pr 286 drivers/acpi/processor_throttling.c status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
pr 295 drivers/acpi/processor_throttling.c pr->throttling_platform_limit = (int)tpc;
pr 299 drivers/acpi/processor_throttling.c int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
pr 310 drivers/acpi/processor_throttling.c result = acpi_processor_get_platform_limit(pr);
pr 316 drivers/acpi/processor_throttling.c throttling_limit = pr->throttling_platform_limit;
pr 317 drivers/acpi/processor_throttling.c if (throttling_limit >= pr->throttling.state_count) {
pr 322 drivers/acpi/processor_throttling.c current_state = pr->throttling.state;
pr 331 drivers/acpi/processor_throttling.c limit = &pr->limit;
pr 352 drivers/acpi/processor_throttling.c return acpi_processor_set_throttling(pr, target_state, false);
pr 364 drivers/acpi/processor_throttling.c void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
pr 373 drivers/acpi/processor_throttling.c pr->flags.throttling = 0;
pr 379 drivers/acpi/processor_throttling.c if (!pr->throttling.state_count) {
pr 383 drivers/acpi/processor_throttling.c pr->flags.throttling = 0;
pr 386 drivers/acpi/processor_throttling.c pr->flags.throttling = 1;
pr 393 drivers/acpi/processor_throttling.c result = acpi_processor_get_throttling(pr);
pr 397 drivers/acpi/processor_throttling.c if (pr->throttling.state) {
pr 398 drivers/acpi/processor_throttling.c result = acpi_processor_set_throttling(pr, 0, false);
pr 405 drivers/acpi/processor_throttling.c pr->flags.throttling = 0;
pr 410 drivers/acpi/processor_throttling.c static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
pr 419 drivers/acpi/processor_throttling.c status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
pr 449 drivers/acpi/processor_throttling.c memcpy(&pr->throttling.control_register, obj.buffer.pointer,
pr 466 drivers/acpi/processor_throttling.c memcpy(&pr->throttling.status_register, obj.buffer.pointer,
pr 469 drivers/acpi/processor_throttling.c throttling = &pr->throttling;
pr 494 drivers/acpi/processor_throttling.c static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
pr 504 drivers/acpi/processor_throttling.c status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
pr 522 drivers/acpi/processor_throttling.c pr->throttling.state_count = tss->package.count;
pr 523 drivers/acpi/processor_throttling.c pr->throttling.states_tss =
pr 527 drivers/acpi/processor_throttling.c if (!pr->throttling.states_tss) {
pr 532 drivers/acpi/processor_throttling.c for (i = 0; i < pr->throttling.state_count; i++) {
pr 535 drivers/acpi/processor_throttling.c (struct acpi_processor_tx_tss *)&(pr->throttling.
pr 548 drivers/acpi/processor_throttling.c kfree(pr->throttling.states_tss);
pr 556 drivers/acpi/processor_throttling.c kfree(pr->throttling.states_tss);
pr 570 drivers/acpi/processor_throttling.c static int acpi_processor_get_tsd(struct acpi_processor *pr)
pr 581 drivers/acpi/processor_throttling.c pthrottling = &pr->throttling;
pr 584 drivers/acpi/processor_throttling.c status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
pr 605 drivers/acpi/processor_throttling.c pdomain = &(pr->throttling.domain_info);
pr 630 drivers/acpi/processor_throttling.c pthrottling = &pr->throttling;
pr 633 drivers/acpi/processor_throttling.c cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
pr 654 drivers/acpi/processor_throttling.c static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
pr 661 drivers/acpi/processor_throttling.c if (!pr)
pr 664 drivers/acpi/processor_throttling.c if (!pr->flags.throttling)
pr 674 drivers/acpi/processor_throttling.c request_region(pr->throttling.address, 6, "ACPI CPU throttle");
pr 676 drivers/acpi/processor_throttling.c pr->throttling.state = 0;
pr 678 drivers/acpi/processor_throttling.c duty_mask = pr->throttling.state_count - 1;
pr 680 drivers/acpi/processor_throttling.c duty_mask <<= pr->throttling.duty_offset;
pr 684 drivers/acpi/processor_throttling.c value = inl(pr->throttling.address);
pr 692 drivers/acpi/processor_throttling.c duty_value >>= pr->throttling.duty_offset;
pr 695 drivers/acpi/processor_throttling.c state = pr->throttling.state_count - duty_value;
pr 698 drivers/acpi/processor_throttling.c pr->throttling.state = state;
pr 704 drivers/acpi/processor_throttling.c state, pr->throttling.states[state].performance));
pr 765 drivers/acpi/processor_throttling.c static int acpi_read_throttling_status(struct acpi_processor *pr,
pr 774 drivers/acpi/processor_throttling.c throttling = &pr->throttling;
pr 797 drivers/acpi/processor_throttling.c static int acpi_write_throttling_state(struct acpi_processor *pr,
pr 806 drivers/acpi/processor_throttling.c throttling = &pr->throttling;
pr 830 drivers/acpi/processor_throttling.c static int acpi_get_throttling_state(struct acpi_processor *pr,
pr 835 drivers/acpi/processor_throttling.c for (i = 0; i < pr->throttling.state_count; i++) {
pr 837 drivers/acpi/processor_throttling.c (struct acpi_processor_tx_tss *)&(pr->throttling.
pr 845 drivers/acpi/processor_throttling.c static int acpi_get_throttling_value(struct acpi_processor *pr,
pr 850 drivers/acpi/processor_throttling.c if (state >= 0 && state <= pr->throttling.state_count) {
pr 852 drivers/acpi/processor_throttling.c (struct acpi_processor_tx_tss *)&(pr->throttling.
pr 860 drivers/acpi/processor_throttling.c static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
pr 866 drivers/acpi/processor_throttling.c if (!pr)
pr 869 drivers/acpi/processor_throttling.c if (!pr->flags.throttling)
pr 872 drivers/acpi/processor_throttling.c pr->throttling.state = 0;
pr 875 drivers/acpi/processor_throttling.c ret = acpi_read_throttling_status(pr, &value);
pr 877 drivers/acpi/processor_throttling.c state = acpi_get_throttling_state(pr, value);
pr 882 drivers/acpi/processor_throttling.c ret = __acpi_processor_set_throttling(pr, state, true,
pr 887 drivers/acpi/processor_throttling.c pr->throttling.state = state;
pr 895 drivers/acpi/processor_throttling.c struct acpi_processor *pr = data;
pr 897 drivers/acpi/processor_throttling.c return pr->throttling.acpi_processor_get_throttling(pr);
pr 900 drivers/acpi/processor_throttling.c static int acpi_processor_get_throttling(struct acpi_processor *pr)
pr 902 drivers/acpi/processor_throttling.c if (!pr)
pr 905 drivers/acpi/processor_throttling.c if (!pr->flags.throttling)
pr 914 drivers/acpi/processor_throttling.c if (!cpu_online(pr->id))
pr 917 drivers/acpi/processor_throttling.c return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
pr 920 drivers/acpi/processor_throttling.c static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
pr 924 drivers/acpi/processor_throttling.c if (!pr->throttling.address) {
pr 927 drivers/acpi/processor_throttling.c } else if (!pr->throttling.duty_width) {
pr 932 drivers/acpi/processor_throttling.c else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
pr 937 drivers/acpi/processor_throttling.c pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
pr 945 drivers/acpi/processor_throttling.c step = (1000 / pr->throttling.state_count);
pr 947 drivers/acpi/processor_throttling.c for (i = 0; i < pr->throttling.state_count; i++) {
pr 948 drivers/acpi/processor_throttling.c pr->throttling.states[i].performance = 1000 - step * i;
pr 949 drivers/acpi/processor_throttling.c pr->throttling.states[i].power = 1000 - step * i;
pr 954 drivers/acpi/processor_throttling.c static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
pr 961 drivers/acpi/processor_throttling.c if (!pr)
pr 964 drivers/acpi/processor_throttling.c if ((state < 0) || (state > (pr->throttling.state_count - 1)))
pr 967 drivers/acpi/processor_throttling.c if (!pr->flags.throttling)
pr 970 drivers/acpi/processor_throttling.c if (!force && (state == pr->throttling.state))
pr 973 drivers/acpi/processor_throttling.c if (state < pr->throttling_platform_limit)
pr 979 drivers/acpi/processor_throttling.c duty_value = pr->throttling.state_count - state;
pr 981 drivers/acpi/processor_throttling.c duty_value <<= pr->throttling.duty_offset;
pr 984 drivers/acpi/processor_throttling.c duty_mask = pr->throttling.state_count - 1;
pr 996 drivers/acpi/processor_throttling.c value = inl(pr->throttling.address);
pr 999 drivers/acpi/processor_throttling.c outl(value, pr->throttling.address);
pr 1009 drivers/acpi/processor_throttling.c outl(value, pr->throttling.address);
pr 1012 drivers/acpi/processor_throttling.c outl(value, pr->throttling.address);
pr 1015 drivers/acpi/processor_throttling.c pr->throttling.state = state;
pr 1021 drivers/acpi/processor_throttling.c (pr->throttling.states[state].performance ? pr->
pr 1027 drivers/acpi/processor_throttling.c static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
pr 1033 drivers/acpi/processor_throttling.c if (!pr)
pr 1036 drivers/acpi/processor_throttling.c if ((state < 0) || (state > (pr->throttling.state_count - 1)))
pr 1039 drivers/acpi/processor_throttling.c if (!pr->flags.throttling)
pr 1042 drivers/acpi/processor_throttling.c if (!force && (state == pr->throttling.state))
pr 1045 drivers/acpi/processor_throttling.c if (state < pr->throttling_platform_limit)
pr 1049 drivers/acpi/processor_throttling.c ret = acpi_get_throttling_value(pr, state, &value);
pr 1051 drivers/acpi/processor_throttling.c acpi_write_throttling_state(pr, value);
pr 1052 drivers/acpi/processor_throttling.c pr->throttling.state = state;
pr 1061 drivers/acpi/processor_throttling.c struct acpi_processor *pr = arg->pr;
pr 1063 drivers/acpi/processor_throttling.c return pr->throttling.acpi_processor_set_throttling(pr,
pr 1067 drivers/acpi/processor_throttling.c static int __acpi_processor_set_throttling(struct acpi_processor *pr,
pr 1077 drivers/acpi/processor_throttling.c if (!pr)
pr 1080 drivers/acpi/processor_throttling.c if (!pr->flags.throttling)
pr 1083 drivers/acpi/processor_throttling.c if ((state < 0) || (state > (pr->throttling.state_count - 1)))
pr 1086 drivers/acpi/processor_throttling.c if (cpu_is_offline(pr->id)) {
pr 1095 drivers/acpi/processor_throttling.c p_throttling = &(pr->throttling);
pr 1114 drivers/acpi/processor_throttling.c arg.pr = pr;
pr 1117 drivers/acpi/processor_throttling.c ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
pr 1148 drivers/acpi/processor_throttling.c arg.pr = match_pr;
pr 1151 drivers/acpi/processor_throttling.c ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
pr 1170 drivers/acpi/processor_throttling.c int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
pr 1173 drivers/acpi/processor_throttling.c return __acpi_processor_set_throttling(pr, state, force, false);
pr 1176 drivers/acpi/processor_throttling.c int acpi_processor_get_throttling_info(struct acpi_processor *pr)
pr 1183 drivers/acpi/processor_throttling.c pr->throttling.address,
pr 1184 drivers/acpi/processor_throttling.c pr->throttling.duty_offset,
pr 1185 drivers/acpi/processor_throttling.c pr->throttling.duty_width));
pr 1191 drivers/acpi/processor_throttling.c if (acpi_processor_get_throttling_control(pr) ||
pr 1192 drivers/acpi/processor_throttling.c acpi_processor_get_throttling_states(pr) ||
pr 1193 drivers/acpi/processor_throttling.c acpi_processor_get_platform_limit(pr))
pr 1195 drivers/acpi/processor_throttling.c pr->throttling.acpi_processor_get_throttling =
pr 1197 drivers/acpi/processor_throttling.c pr->throttling.acpi_processor_set_throttling =
pr 1199 drivers/acpi/processor_throttling.c if (acpi_processor_get_fadt_info(pr))
pr 1202 drivers/acpi/processor_throttling.c pr->throttling.acpi_processor_get_throttling =
pr 1204 drivers/acpi/processor_throttling.c pr->throttling.acpi_processor_set_throttling =
pr 1212 drivers/acpi/processor_throttling.c if (acpi_processor_get_tsd(pr)) {
pr 1213 drivers/acpi/processor_throttling.c pthrottling = &pr->throttling;
pr 1215 drivers/acpi/processor_throttling.c cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
pr 1231 drivers/acpi/processor_throttling.c pr->throttling.state_count));
pr 1233 drivers/acpi/processor_throttling.c pr->flags.throttling = 1;
pr 1241 drivers/acpi/processor_throttling.c result = acpi_processor_get_throttling(pr);
pr 1245
drivers/acpi/processor_throttling.c if (pr->throttling.state) { pr 1248 drivers/acpi/processor_throttling.c pr->throttling.state)); pr 1249 drivers/acpi/processor_throttling.c result = acpi_processor_set_throttling(pr, 0, false); pr 1256 drivers/acpi/processor_throttling.c pr->flags.throttling = 0; pr 2264 drivers/atm/horizon.c rounding pr; pr 2268 drivers/atm/horizon.c pr = round_nearest; pr 2271 drivers/atm/horizon.c pr = round_down; pr 2274 drivers/atm/horizon.c pr = round_up; pr 2276 drivers/atm/horizon.c error = make_rate_with_tolerance (dev, pcr, pr, 10, pr 210 drivers/block/paride/paride.c int paride_register(PIP * pr) pr 215 drivers/block/paride/paride.c if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) { pr 217 drivers/block/paride/paride.c pr->name); pr 227 drivers/block/paride/paride.c protocols[k] = pr; pr 228 drivers/block/paride/paride.c pr->index = k; pr 229 drivers/block/paride/paride.c printk("paride: %s registered as protocol %d\n", pr->name, k); pr 235 drivers/block/paride/paride.c void paride_unregister(PIP * pr) pr 237 drivers/block/paride/paride.c if (!pr) pr 239 drivers/block/paride/paride.c if (protocols[pr->index] != pr) { pr 240 drivers/block/paride/paride.c printk("paride: %s not registered\n", pr->name); pr 243 drivers/block/paride/paride.c protocols[pr->index] = NULL; pr 17 drivers/char/ipmi/ipmi_plat_data.c struct property_entry pr[6]; pr 21 drivers/char/ipmi/ipmi_plat_data.c memset(pr, 0, sizeof(pr)); pr 35 drivers/char/ipmi/ipmi_plat_data.c pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type); pr 37 drivers/char/ipmi/ipmi_plat_data.c pr[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", p->addr); pr 41 drivers/char/ipmi/ipmi_plat_data.c pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr); pr 42 drivers/char/ipmi/ipmi_plat_data.c pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source); pr 44 drivers/char/ipmi/ipmi_plat_data.c pr[pidx++] = PROPERTY_ENTRY_U8("reg-shift", p->regshift); pr 45 drivers/char/ipmi/ipmi_plat_data.c pr[pidx++] = PROPERTY_ENTRY_U8("reg-size", p->regsize); pr 105 drivers/char/ipmi/ipmi_plat_data.c rv = platform_device_add_properties(pdev, pr); pr 2599 drivers/cpufreq/intel_pstate.c struct acpi_processor *pr = per_cpu(processors, i); pr 2601 drivers/cpufreq/intel_pstate.c if (!pr) pr 2604 drivers/cpufreq/intel_pstate.c status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); pr 2643 drivers/cpufreq/intel_pstate.c struct acpi_processor *pr = per_cpu(processors, i); pr 2645 drivers/cpufreq/intel_pstate.c if (!pr) pr 2647 drivers/cpufreq/intel_pstate.c if (acpi_has_method(pr->handle, "_PPC")) pr 70 drivers/cpufreq/longhaul.c static struct acpi_processor *pr; pr 287 drivers/cpufreq/longhaul.c || ((pr != NULL) && pr->flags.bm_control))) { pr 301 drivers/cpufreq/longhaul.c } else if ((pr != NULL) && pr->flags.bm_control) { pr 337 drivers/cpufreq/longhaul.c } else if ((pr != NULL) && pr->flags.bm_control) { pr 862 drivers/cpufreq/longhaul.c NULL, (void *)&pr); pr 865 drivers/cpufreq/longhaul.c if (pr != NULL && longhaul_version == TYPE_POWERSAVER) { pr 866 drivers/cpufreq/longhaul.c cx = &pr->power.states[ACPI_STATE_C3]; pr 880 drivers/cpufreq/longhaul.c && ((pr == NULL) || !(pr->flags.bm_control))) { pr 254 drivers/cpufreq/pcc-cpufreq.c struct acpi_processor *pr; pr 257 drivers/cpufreq/pcc-cpufreq.c pr = per_cpu(processors, cpu); pr 260 drivers/cpufreq/pcc-cpufreq.c if (!pr) pr 263 drivers/cpufreq/pcc-cpufreq.c status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); pr 2909 drivers/dma/pl330.c int chans, pchs, 
ch, pr; pr 2921 drivers/dma/pl330.c for (pr = 0; pr < pchs; pr++) { pr 2922 drivers/dma/pl330.c struct dma_pl330_chan *pch = &pl330->peripherals[pr]; pr 2927 drivers/dma/pl330.c found = pr; pr 29 drivers/extcon/extcon-usbc-cros-ec.c bool pr; /* power role (true if VBUS enabled) */ pr 253 drivers/extcon/extcon-usbc-cros-ec.c bool pr = false; pr 277 drivers/extcon/extcon-usbc-cros-ec.c pr = (role & PD_CTRL_RESP_ROLE_POWER); pr 288 drivers/extcon/extcon-usbc-cros-ec.c role, power_type, dr, pr, polarity, mux, dp, hpd); pr 299 drivers/extcon/extcon-usbc-cros-ec.c if (force || info->dr != dr || info->pr != pr || info->dp != dp || pr 307 drivers/extcon/extcon-usbc-cros-ec.c info->pr = pr; pr 322 drivers/extcon/extcon-usbc-cros-ec.c (union extcon_property_value)(int)pr); pr 325 drivers/extcon/extcon-usbc-cros-ec.c (union extcon_property_value)(int)pr); pr 455 drivers/extcon/extcon-usbc-cros-ec.c info->pr = false; pr 426 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c unsigned int yq, cn, pr; pr 453 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c pr = (ptr[4] >> 0) & 0xf; pr 469 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0); pr 704 drivers/hwtracing/coresight/coresight-platform.c struct acpi_processor *pr; pr 707 drivers/hwtracing/coresight/coresight-platform.c pr = per_cpu(processors, i); pr 708 drivers/hwtracing/coresight/coresight-platform.c if (pr && pr->handle == handle) pr 382 drivers/input/joystick/xpad.c #define XPAD_XBOX360_VENDOR_PROTOCOL(vend, pr) \ pr 387 drivers/input/joystick/xpad.c .bInterfaceProtocol = (pr) pr 393 drivers/input/joystick/xpad.c #define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \ pr 398 drivers/input/joystick/xpad.c .bInterfaceProtocol = (pr) pr 36 drivers/input/tablet/hanwang.c #define HANWANG_TABLET_DEVICE(vend, cl, sc, pr) \ pr 42 drivers/input/tablet/hanwang.c .bInterfaceProtocol = (pr) pr 587 drivers/isdn/hardware/mISDN/w6692.c w6692_mode(struct w6692_ch *wch, u32 pr) pr 593 drivers/isdn/hardware/mISDN/w6692.c wch->bch.nr, wch->bch.state, pr); pr 594 drivers/isdn/hardware/mISDN/w6692.c switch (pr) { pr 624 drivers/isdn/hardware/mISDN/w6692.c pr_info("%s: protocol %x not known\n", card->name, pr); pr 627 drivers/isdn/hardware/mISDN/w6692.c wch->bch.state = pr; pr 384 drivers/isdn/mISDN/hwchannel.c queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb) pr 389 drivers/isdn/mISDN/hwchannel.c _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC); pr 393 drivers/isdn/mISDN/hwchannel.c hh->prim = pr; pr 679 drivers/isdn/mISDN/layer2.c int pr; pr 682 drivers/isdn/mISDN/layer2.c pr = DL_RELEASE_CNF; pr 684 drivers/isdn/mISDN/layer2.c pr = DL_RELEASE_IND; pr 685 drivers/isdn/mISDN/layer2.c l2up_create(l2, pr, 0, NULL); pr 1002 drivers/isdn/mISDN/layer2.c int pr = -1; pr 1012 drivers/isdn/mISDN/layer2.c pr = DL_ESTABLISH_CNF; pr 1015 drivers/isdn/mISDN/layer2.c pr = DL_ESTABLISH_IND; pr 1024 drivers/isdn/mISDN/layer2.c if (pr != -1) pr 1025 drivers/isdn/mISDN/layer2.c l2up_create(l2, pr, 0, NULL); pr 1370 drivers/md/dm-cache-policy-smq.c enum promote_result pr; pr 1390 drivers/md/dm-cache-policy-smq.c pr = should_promote(mq, hs_e, data_dir, fast_copy); pr 1391 drivers/md/dm-cache-policy-smq.c if (pr != PROMOTE_NOT) { pr 3087 drivers/md/dm.c struct dm_pr *pr = data; pr 3092 drivers/md/dm.c return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); pr 3098 drivers/md/dm.c struct dm_pr pr = { pr 3106 drivers/md/dm.c ret = dm_call_pr(bdev, __dm_pr_register, &pr); pr 3109 drivers/md/dm.c pr.old_key = new_key; pr 3110 
drivers/md/dm.c pr.new_key = 0; pr 3111 drivers/md/dm.c pr.flags = 0; pr 3112 drivers/md/dm.c pr.fail_early = false; pr 3113 drivers/md/dm.c dm_call_pr(bdev, __dm_pr_register, &pr); pr 21 drivers/media/pci/ttpci/dvb_filter.c int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr) pr 40 drivers/media/pci/ttpci/dvb_filter.c if (pr) pr 52 drivers/media/pci/ttpci/dvb_filter.c if (pr) pr 58 drivers/media/pci/ttpci/dvb_filter.c if (pr) pr 64 drivers/media/pci/ttpci/dvb_filter.c if (pr) pr 239 drivers/media/pci/ttpci/dvb_filter.h int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr); pr 2677 drivers/message/fusion/mptscsih.c MpiRaidActionReply_t *pr; pr 2715 drivers/message/fusion/mptscsih.c pr = (MpiRaidActionReply_t *)reply; pr 2716 drivers/message/fusion/mptscsih.c if (le16_to_cpu(pr->ActionStatus) == pr 110 drivers/net/can/usb/peak_usb/pcan_usb_pro.c u8 *pr = pcan_msg_init(pm, buffer_addr, buffer_size); pr 112 drivers/net/can/usb/peak_usb/pcan_usb_pro.c if (pr) { pr 116 drivers/net/can/usb/peak_usb/pcan_usb_pro.c return pr; pr 244 drivers/net/can/usb/peak_usb/pcan_usb_pro.c union pcan_usb_pro_rec *pr; pr 276 drivers/net/can/usb/peak_usb/pcan_usb_pro.c pr = (union pcan_usb_pro_rec *)pc; pr 277 drivers/net/can/usb/peak_usb/pcan_usb_pro.c rec_len = pcan_usb_pro_sizeof_rec[pr->data_type]; pr 287 drivers/net/can/usb/peak_usb/pcan_usb_pro.c if (pr->data_type != req_data_type) pr 290 drivers/net/can/usb/peak_usb/pcan_usb_pro.c pr->data_type); pr 294 drivers/net/can/usb/peak_usb/pcan_usb_pro.c (pr->bus_act.channel != req_channel)) pr 297 drivers/net/can/usb/peak_usb/pcan_usb_pro.c req_data_type, pr->bus_act.channel); pr 705 drivers/net/can/usb/peak_usb/pcan_usb_pro.c union pcan_usb_pro_rec *pr = (union pcan_usb_pro_rec *)rec_ptr; pr 706 drivers/net/can/usb/peak_usb/pcan_usb_pro.c u16 sizeof_rec = pcan_usb_pro_sizeof_rec[pr->data_type]; pr 723 drivers/net/can/usb/peak_usb/pcan_usb_pro.c switch (pr->data_type) { pr 728 drivers/net/can/usb/peak_usb/pcan_usb_pro.c err = pcan_usb_pro_handle_canmsg(usb_if, &pr->rx_msg); pr 734 drivers/net/can/usb/peak_usb/pcan_usb_pro.c err = pcan_usb_pro_handle_error(usb_if, &pr->rx_status); pr 740 drivers/net/can/usb/peak_usb/pcan_usb_pro.c pcan_usb_pro_handle_ts(usb_if, &pr->rx_ts); pr 746 drivers/net/can/usb/peak_usb/pcan_usb_pro.c pr->data_type, pr->data_type); pr 675 drivers/net/ethernet/apple/mace.c while (in_8(&mb->pr) & XMTSV) { pr 22 drivers/net/ethernet/apple/mace.h REG(pr); /* poll register */ pr 190 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = &port->port_res[l]; pr 193 drivers/net/ethernet/ibm/ehea/ehea_main.c arr[i++].fwh = pr->qp->fw_handle; pr 195 drivers/net/ethernet/ibm/ehea/ehea_main.c arr[i++].fwh = pr->send_cq->fw_handle; pr 197 drivers/net/ethernet/ibm/ehea/ehea_main.c arr[i++].fwh = pr->recv_cq->fw_handle; pr 199 drivers/net/ethernet/ibm/ehea/ehea_main.c arr[i++].fwh = pr->eq->fw_handle; pr 201 drivers/net/ethernet/ibm/ehea/ehea_main.c arr[i++].fwh = pr->send_mr.handle; pr 203 drivers/net/ethernet/ibm/ehea/ehea_main.c arr[i++].fwh = pr->recv_mr.handle; pr 379 drivers/net/ethernet/ibm/ehea/ehea_main.c static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) pr 381 drivers/net/ethernet/ibm/ehea/ehea_main.c struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; pr 382 drivers/net/ethernet/ibm/ehea/ehea_main.c struct net_device *dev = pr->port->netdev; pr 383 drivers/net/ethernet/ibm/ehea/ehea_main.c int max_index_mask = pr->rq1_skba.len - 1; pr 384 
drivers/net/ethernet/ibm/ehea/ehea_main.c int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; pr 388 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rq1_skba.os_skbs = 0; pr 392 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rq1_skba.index = index; pr 393 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rq1_skba.os_skbs = fill_wqes; pr 402 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rq1_skba.os_skbs = fill_wqes - i; pr 415 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_update_rq1a(pr->qp, adder); pr 418 drivers/net/ethernet/ibm/ehea/ehea_main.c static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) pr 420 drivers/net/ethernet/ibm/ehea/ehea_main.c struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; pr 421 drivers/net/ethernet/ibm/ehea/ehea_main.c struct net_device *dev = pr->port->netdev; pr 424 drivers/net/ethernet/ibm/ehea/ehea_main.c if (nr_rq1a > pr->rq1_skba.len) { pr 435 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_update_rq1a(pr->qp, i - 1); pr 438 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_refill_rq_def(struct ehea_port_res *pr, pr 442 drivers/net/ethernet/ibm/ehea/ehea_main.c struct net_device *dev = pr->port->netdev; pr 443 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_qp *qp = pr->qp; pr 468 drivers/net/ethernet/ibm/ehea/ehea_main.c netdev_info(pr->port->netdev, pr 488 drivers/net/ethernet/ibm/ehea/ehea_main.c rwqe->sg_list[0].l_key = pr->recv_mr.lkey; pr 505 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_update_rq2a(pr->qp, adder); pr 507 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_update_rq3a(pr->qp, adder); pr 513 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) pr 515 drivers/net/ethernet/ibm/ehea/ehea_main.c return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, pr 521 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) pr 523 drivers/net/ethernet/ibm/ehea/ehea_main.c return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, pr 541 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr) pr 556 drivers/net/ethernet/ibm/ehea/ehea_main.c skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); pr 613 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, pr 620 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->p_stats.err_tcp_cksum++; pr 622 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->p_stats.err_ip_cksum++; pr 624 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->p_stats.err_frame_crc++; pr 628 drivers/net/ethernet/ibm/ehea/ehea_main.c skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); pr 632 drivers/net/ethernet/ibm/ehea/ehea_main.c skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); pr 637 drivers/net/ethernet/ibm/ehea/ehea_main.c if (netif_msg_rx_err(pr->port)) { pr 639 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->qp->init_attr.qp_nr); pr 642 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_schedule_port_reset(pr->port); pr 650 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr, pr 653 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port *port = pr->port; pr 654 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_qp *qp = pr->qp; pr 657 drivers/net/ethernet/ibm/ehea/ehea_main.c struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; pr 658 drivers/net/ethernet/ibm/ehea/ehea_main.c struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; pr 659 drivers/net/ethernet/ibm/ehea/ehea_main.c struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; pr 660 
drivers/net/ethernet/ibm/ehea/ehea_main.c int skb_arr_rq1_len = pr->rq1_skba.len; pr 661 drivers/net/ethernet/ibm/ehea/ehea_main.c int skb_arr_rq2_len = pr->rq2_skba.len; pr 662 drivers/net/ethernet/ibm/ehea/ehea_main.c int skb_arr_rq3_len = pr->rq3_skba.len; pr 697 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_fill_skb(dev, skb, cqe, pr); pr 707 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_fill_skb(dev, skb, cqe, pr); pr 718 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_fill_skb(dev, skb, cqe, pr); pr 728 drivers/net/ethernet/ibm/ehea/ehea_main.c napi_gro_receive(&pr->napi, skb); pr 730 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->p_stats.poll_receive_errors++; pr 731 drivers/net/ethernet/ibm/ehea/ehea_main.c port_reset = ehea_treat_poll_error(pr, rq, cqe, pr 740 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rx_packets += processed; pr 741 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rx_bytes += processed_bytes; pr 743 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_refill_rq1(pr, last_wqe_index, processed_rq1); pr 744 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_refill_rq2(pr, processed_rq2); pr 745 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_refill_rq3(pr, processed_rq3); pr 757 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = &port->port_res[i]; pr 758 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_restart_flag = 0; pr 770 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = &port->port_res[i]; pr 772 drivers/net/ethernet/ibm/ehea/ehea_main.c swqe = ehea_get_swqe(pr->qp, &swqe_index); pr 774 drivers/net/ethernet/ibm/ehea/ehea_main.c atomic_dec(&pr->swqe_avail); pr 782 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_post_swqe(pr->qp, swqe); pr 785 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_restart_flag == 0, pr 790 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_schedule_port_reset(pr->port); pr 797 drivers/net/ethernet/ibm/ehea/ehea_main.c static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) pr 800 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_cq *send_cq = pr->send_cq; pr 806 drivers/net/ethernet/ibm/ehea/ehea_main.c struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, pr 807 drivers/net/ethernet/ibm/ehea/ehea_main.c pr - &pr->port->port_res[0]); pr 817 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_restart_flag = 1; pr 826 drivers/net/ethernet/ibm/ehea/ehea_main.c if (netif_msg_tx_err(pr->port)) pr 831 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_schedule_port_reset(pr->port); pr 836 drivers/net/ethernet/ibm/ehea/ehea_main.c if (netif_msg_tx_done(pr->port)) pr 843 drivers/net/ethernet/ibm/ehea/ehea_main.c skb = pr->sq_skba.arr[index]; pr 845 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_skba.arr[index] = NULL; pr 855 drivers/net/ethernet/ibm/ehea/ehea_main.c atomic_add(swqe_av, &pr->swqe_avail); pr 858 drivers/net/ethernet/ibm/ehea/ehea_main.c (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { pr 861 drivers/net/ethernet/ibm/ehea/ehea_main.c (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) pr 866 drivers/net/ethernet/ibm/ehea/ehea_main.c wake_up(&pr->port->swqe_avail_wq); pr 875 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, pr 877 drivers/net/ethernet/ibm/ehea/ehea_main.c struct net_device *dev = pr->port->netdev; pr 883 drivers/net/ethernet/ibm/ehea/ehea_main.c cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); pr 884 drivers/net/ethernet/ibm/ehea/ehea_main.c rx += ehea_proc_rwqes(dev, pr, budget - rx); pr 
888 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_reset_cq_ep(pr->recv_cq); pr 889 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_reset_cq_ep(pr->send_cq); pr 890 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_reset_cq_n1(pr->recv_cq); pr 891 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_reset_cq_n1(pr->send_cq); pr 893 drivers/net/ethernet/ibm/ehea/ehea_main.c cqe = ehea_poll_rq1(pr->qp, &wqe_index); pr 894 drivers/net/ethernet/ibm/ehea/ehea_main.c cqe_skb = ehea_poll_cq(pr->send_cq); pr 902 drivers/net/ethernet/ibm/ehea/ehea_main.c cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); pr 903 drivers/net/ethernet/ibm/ehea/ehea_main.c rx += ehea_proc_rwqes(dev, pr, budget - rx); pr 911 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = param; pr 913 drivers/net/ethernet/ibm/ehea/ehea_main.c napi_schedule(&pr->napi); pr 1247 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_fill_port_res(struct ehea_port_res *pr) pr 1250 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; pr 1252 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_init_fill_rq1(pr, pr->rq1_skba.len); pr 1254 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); pr 1256 drivers/net/ethernet/ibm/ehea/ehea_main.c ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); pr 1264 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr; pr 1286 drivers/net/ethernet/ibm/ehea/ehea_main.c pr = &port->port_res[i]; pr 1287 drivers/net/ethernet/ibm/ehea/ehea_main.c snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, pr 1289 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ibmebus_request_irq(pr->eq->attr.ist1, pr 1291 drivers/net/ethernet/ibm/ehea/ehea_main.c 0, pr->int_send_name, pr); pr 1294 drivers/net/ethernet/ibm/ehea/ehea_main.c i, pr->eq->attr.ist1); pr 1299 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->eq->attr.ist1, i); pr 1322 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr; pr 1328 drivers/net/ethernet/ibm/ehea/ehea_main.c pr = &port->port_res[i]; pr 1329 drivers/net/ethernet/ibm/ehea/ehea_main.c ibmebus_free_irq(pr->eq->attr.ist1, pr); pr 1332 drivers/net/ethernet/ibm/ehea/ehea_main.c i, pr->eq->attr.ist1); pr 1390 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_gen_smrs(struct ehea_port_res *pr) pr 1393 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_adapter *adapter = pr->port->adapter; pr 1395 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); pr 1399 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); pr 1406 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_rem_mr(&pr->send_mr); pr 1412 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_rem_smrs(struct ehea_port_res *pr) pr 1414 drivers/net/ethernet/ibm/ehea/ehea_main.c if ((ehea_rem_mr(&pr->send_mr)) || pr 1415 drivers/net/ethernet/ibm/ehea/ehea_main.c (ehea_rem_mr(&pr->recv_mr))) pr 1436 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, pr 1445 drivers/net/ethernet/ibm/ehea/ehea_main.c tx_bytes = pr->tx_bytes; pr 1446 drivers/net/ethernet/ibm/ehea/ehea_main.c tx_packets = pr->tx_packets; pr 1447 drivers/net/ethernet/ibm/ehea/ehea_main.c rx_bytes = pr->rx_bytes; pr 1448 drivers/net/ethernet/ibm/ehea/ehea_main.c rx_packets = pr->rx_packets; pr 1450 drivers/net/ethernet/ibm/ehea/ehea_main.c memset(pr, 0, sizeof(struct ehea_port_res)); pr 1452 
drivers/net/ethernet/ibm/ehea/ehea_main.c pr->tx_bytes = tx_bytes; pr 1453 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->tx_packets = tx_packets; pr 1454 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rx_bytes = rx_bytes; pr 1455 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->rx_packets = rx_packets; pr 1457 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->port = port; pr 1459 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); pr 1460 drivers/net/ethernet/ibm/ehea/ehea_main.c if (!pr->eq) { pr 1465 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, pr 1466 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->eq->fw_handle, pr 1468 drivers/net/ethernet/ibm/ehea/ehea_main.c if (!pr->recv_cq) { pr 1473 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, pr 1474 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->eq->fw_handle, pr 1476 drivers/net/ethernet/ibm/ehea/ehea_main.c if (!pr->send_cq) { pr 1483 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->send_cq->attr.act_nr_of_cqes, pr 1484 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->recv_cq->attr.act_nr_of_cqes); pr 1508 drivers/net/ethernet/ibm/ehea/ehea_main.c init_attr->send_cq_handle = pr->send_cq->fw_handle; pr 1509 drivers/net/ethernet/ibm/ehea/ehea_main.c init_attr->recv_cq_handle = pr->recv_cq->fw_handle; pr 1512 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); pr 1513 drivers/net/ethernet/ibm/ehea/ehea_main.c if (!pr->qp) { pr 1527 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; pr 1529 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); pr 1530 drivers/net/ethernet/ibm/ehea/ehea_main.c ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); pr 1531 drivers/net/ethernet/ibm/ehea/ehea_main.c ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); pr 1532 drivers/net/ethernet/ibm/ehea/ehea_main.c ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); pr 1536 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; pr 1537 drivers/net/ethernet/ibm/ehea/ehea_main.c if (ehea_gen_smrs(pr) != 0) { pr 1542 drivers/net/ethernet/ibm/ehea/ehea_main.c atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); pr 1546 drivers/net/ethernet/ibm/ehea/ehea_main.c netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); pr 1553 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->sq_skba.arr); pr 1554 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->rq1_skba.arr); pr 1555 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->rq2_skba.arr); pr 1556 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->rq3_skba.arr); pr 1557 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_destroy_qp(pr->qp); pr 1558 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_destroy_cq(pr->send_cq); pr 1559 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_destroy_cq(pr->recv_cq); pr 1560 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_destroy_eq(pr->eq); pr 1565 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) pr 1569 drivers/net/ethernet/ibm/ehea/ehea_main.c if (pr->qp) pr 1570 drivers/net/ethernet/ibm/ehea/ehea_main.c netif_napi_del(&pr->napi); pr 1572 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ehea_destroy_qp(pr->qp); pr 1575 
drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_destroy_cq(pr->send_cq); pr 1576 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_destroy_cq(pr->recv_cq); pr 1577 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_destroy_eq(pr->eq); pr 1579 drivers/net/ethernet/ibm/ehea/ehea_main.c for (i = 0; i < pr->rq1_skba.len; i++) pr 1580 drivers/net/ethernet/ibm/ehea/ehea_main.c dev_kfree_skb(pr->rq1_skba.arr[i]); pr 1582 drivers/net/ethernet/ibm/ehea/ehea_main.c for (i = 0; i < pr->rq2_skba.len; i++) pr 1583 drivers/net/ethernet/ibm/ehea/ehea_main.c dev_kfree_skb(pr->rq2_skba.arr[i]); pr 1585 drivers/net/ethernet/ibm/ehea/ehea_main.c for (i = 0; i < pr->rq3_skba.len; i++) pr 1586 drivers/net/ethernet/ibm/ehea/ehea_main.c dev_kfree_skb(pr->rq3_skba.arr[i]); pr 1588 drivers/net/ethernet/ibm/ehea/ehea_main.c for (i = 0; i < pr->sq_skba.len; i++) pr 1589 drivers/net/ethernet/ibm/ehea/ehea_main.c dev_kfree_skb(pr->sq_skba.arr[i]); pr 1591 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->rq1_skba.arr); pr 1592 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->rq2_skba.arr); pr 1593 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->rq3_skba.arr); pr 1594 drivers/net/ethernet/ibm/ehea/ehea_main.c vfree(pr->sq_skba.arr); pr 1595 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ehea_rem_smrs(pr); pr 2017 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr; pr 2020 drivers/net/ethernet/ibm/ehea/ehea_main.c pr = &port->port_res[skb_get_queue_mapping(skb)]; pr 2023 drivers/net/ethernet/ibm/ehea/ehea_main.c swqe = ehea_get_swqe(pr->qp, &swqe_index); pr 2025 drivers/net/ethernet/ibm/ehea/ehea_main.c atomic_dec(&pr->swqe_avail); pr 2032 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->tx_packets++; pr 2033 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->tx_bytes += skb->len; pr 2037 drivers/net/ethernet/ibm/ehea/ehea_main.c u32 swqe_num = pr->swqe_id_counter; pr 2041 drivers/net/ethernet/ibm/ehea/ehea_main.c if (pr->swqe_ll_count >= (sig_iv - 1)) { pr 2045 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->swqe_ll_count = 0; pr 2047 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->swqe_ll_count += 1; pr 2051 drivers/net/ethernet/ibm/ehea/ehea_main.c | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) pr 2053 drivers/net/ethernet/ibm/ehea/ehea_main.c | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); pr 2054 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_skba.arr[pr->sq_skba.index] = skb; pr 2056 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_skba.index++; pr 2057 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->sq_skba.index &= (pr->sq_skba.len - 1); pr 2059 drivers/net/ethernet/ibm/ehea/ehea_main.c lkey = pr->send_mr.lkey; pr 2063 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->swqe_id_counter += 1; pr 2066 drivers/net/ethernet/ibm/ehea/ehea_main.c "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); pr 2075 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_post_swqe(pr->qp, swqe); pr 2077 drivers/net/ethernet/ibm/ehea/ehea_main.c if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { pr 2078 drivers/net/ethernet/ibm/ehea/ehea_main.c pr->p_stats.queue_stopped++; pr 2490 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = &port->port_res[i]; pr 2491 drivers/net/ethernet/ibm/ehea/ehea_main.c int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; pr 2495 drivers/net/ethernet/ibm/ehea/ehea_main.c atomic_read(&pr->swqe_avail) >= swqe_max, pr 2524 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = &port->port_res[i]; pr 2525 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_qp 
*qp = pr->qp; pr 2560 drivers/net/ethernet/ibm/ehea/ehea_main.c dret = ehea_rem_smrs(pr); pr 2574 drivers/net/ethernet/ibm/ehea/ehea_main.c static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) pr 2579 drivers/net/ethernet/ibm/ehea/ehea_main.c struct sk_buff **skba_rq2 = pr->rq2_skba.arr; pr 2580 drivers/net/ethernet/ibm/ehea/ehea_main.c struct sk_buff **skba_rq3 = pr->rq3_skba.arr; pr 2582 drivers/net/ethernet/ibm/ehea/ehea_main.c u32 lkey = pr->recv_mr.lkey; pr 2626 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = &port->port_res[i]; pr 2627 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_qp *qp = pr->qp; pr 2629 drivers/net/ethernet/ibm/ehea/ehea_main.c ret = ehea_gen_smrs(pr); pr 2635 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_update_rqs(qp, pr); pr 2667 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_refill_rq1(pr, pr->rq1_skba.index, 0); pr 2668 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_refill_rq2(pr, 0); pr 2669 drivers/net/ethernet/ibm/ehea/ehea_main.c ehea_refill_rq3(pr, 0); pr 1236 drivers/net/ethernet/intel/ice/ice_flex_pipe.c struct ice_prof_redir_section *pr; pr 1304 drivers/net/ethernet/intel/ice/ice_flex_pipe.c pr = (struct ice_prof_redir_section *)sect; pr 1305 drivers/net/ethernet/intel/ice/ice_flex_pipe.c src = pr->redir_value; pr 1306 drivers/net/ethernet/intel/ice/ice_flex_pipe.c sect_len = le16_to_cpu(pr->count) * pr 178 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c struct mlxsw_sp_sb_pr *pr; pr 189 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pr 190 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c pr->mode = mode; pr 191 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c pr->size = size; pr 633 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pr 635 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c return pr->mode == MLXSW_REG_SBPR_MODE_STATIC; pr 992 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c struct mlxsw_sp_sb_pr *pr; pr 995 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pr 997 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size); pr 998 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode; pr 1010 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c const struct mlxsw_sp_sb_pr *pr; pr 1014 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c pr = &mlxsw_sp->sb_vals->prs[pool_index]; pr 1021 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c if (pr->freeze_mode && pr->mode != mode) { pr 1026 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c if (pr->freeze_size && pr->size != size) { pr 1040 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pr 1042 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) pr 1051 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pr 1053 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) { pr 307 drivers/net/usb/cx82310_eth.c #define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ pr 314 drivers/net/usb/cx82310_eth.c .bDeviceProtocol = (pr) pr 80 drivers/of/dynamic.c struct 
of_reconfig_data *pr = p; pr 86 drivers/of/dynamic.c pr->dn); pr 92 drivers/of/dynamic.c pr->dn, pr->prop->name); pr 110 drivers/of/dynamic.c int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr) pr 119 drivers/of/dynamic.c prop = of_find_property(pr->dn, "status", NULL); pr 123 drivers/of/dynamic.c prop = pr->prop; pr 126 drivers/of/dynamic.c prop = pr->prop; pr 127 drivers/of/dynamic.c old_prop = pr->old_prop; pr 191 drivers/of/dynamic.c struct of_reconfig_data pr; pr 197 drivers/of/dynamic.c pr.dn = np; pr 198 drivers/of/dynamic.c pr.prop = prop; pr 199 drivers/of/dynamic.c pr.old_prop = oldprop; pr 200 drivers/of/dynamic.c return of_reconfig_notify(action, &pr); pr 285 drivers/parport/parport_ip32.c #define __pr_trace(pr, p, fmt, ...) \ pr 286 drivers/parport/parport_ip32.c pr("%s: %s" fmt "\n", \ pr 140 drivers/pinctrl/meson/pinctrl-meson.h #define BANK_DS(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib, \ pr 150 drivers/pinctrl/meson/pinctrl-meson.h [REG_PULL] = { pr, pb }, \ pr 158 drivers/pinctrl/meson/pinctrl-meson.h #define BANK(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib) \ pr 159 drivers/pinctrl/meson/pinctrl-meson.h BANK_DS(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib, 0, 0) pr 199 drivers/pinctrl/sirf/pinctrl-atlas7.c #define PADCONF(pad, t, mr, pr, dsr, adr, mb, pb, dsb, adb) \ pr 204 drivers/pinctrl/sirf/pinctrl-atlas7.c .pupd_reg = pr, \ pr 112 drivers/sh/intc/handle.c struct intc_prio_reg *pr = desc->hw.prio_regs; pr 116 drivers/sh/intc/handle.c while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) { pr 117 drivers/sh/intc/handle.c pr = desc->hw.prio_regs + *reg_idx; pr 119 drivers/sh/intc/handle.c for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) { pr 120 drivers/sh/intc/handle.c if (pr->enum_ids[*fld_idx] != enum_id) pr 123 drivers/sh/intc/handle.c if (pr->set_reg && pr->clr_reg) { pr 126 drivers/sh/intc/handle.c reg_e = pr->set_reg; pr 127 drivers/sh/intc/handle.c reg_d = pr->clr_reg; pr 131 drivers/sh/intc/handle.c if (!pr->set_reg) pr 133 drivers/sh/intc/handle.c reg_e = pr->set_reg; pr 134 drivers/sh/intc/handle.c reg_d = pr->set_reg; pr 137 drivers/sh/intc/handle.c fn += (pr->reg_width >> 3) - 1; pr 140 drivers/sh/intc/handle.c BUG_ON(n * pr->field_width > pr->reg_width); pr 142 drivers/sh/intc/handle.c bit = pr->reg_width - (n * pr->field_width); pr 147 drivers/sh/intc/handle.c pr->field_width, bit); pr 70 drivers/staging/most/i2c/i2c.c unsigned int delay, pr; pr 101 drivers/staging/most/i2c/i2c.c pr = MSEC_PER_SEC / jiffies_to_msecs(dev->rx.delay); pr 102 drivers/staging/most/i2c/i2c.c pr_info("polling rate is %u Hz\n", pr); pr 54 drivers/staging/octeon/octeon-stubs.h uint64_t pr:4; pr 107 drivers/staging/octeon/octeon-stubs.h uint64_t pr:4; pr 62 drivers/usb/renesas_usbhs/pipe.h #define usbhs_priv_to_pipeinfo(pr) (&(pr)->pipe_info) pr 113 drivers/usb/storage/usb.c #define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ pr 30 drivers/usb/storage/usual-tables.c #define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ pr 39 drivers/usb/storage/usual-tables.c .bInterfaceProtocol = (pr), \ pr 418 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c unsigned yq, cn, pr; pr 445 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c pr = (ptr[4] >> 0) & 0xf; pr 461 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0); pr 27 drivers/xen/xen-acpi-cpuhotplug.c static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr); pr 39 drivers/xen/xen-acpi-cpuhotplug.c 
struct acpi_processor *pr = acpi_driver_data(device); pr 43 drivers/xen/xen-acpi-cpuhotplug.c status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); pr 49 drivers/xen/xen-acpi-cpuhotplug.c pr->acpi_id = object.processor.proc_id; pr 52 drivers/xen/xen-acpi-cpuhotplug.c status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, pr 59 drivers/xen/xen-acpi-cpuhotplug.c pr->acpi_id = value; pr 62 drivers/xen/xen-acpi-cpuhotplug.c pr->id = xen_pcpu_id(pr->acpi_id); pr 64 drivers/xen/xen-acpi-cpuhotplug.c if (invalid_logical_cpuid(pr->id)) pr 66 drivers/xen/xen-acpi-cpuhotplug.c if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) { pr 68 drivers/xen/xen-acpi-cpuhotplug.c pr->acpi_id); pr 78 drivers/xen/xen-acpi-cpuhotplug.c struct acpi_processor *pr; pr 83 drivers/xen/xen-acpi-cpuhotplug.c pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); pr 84 drivers/xen/xen-acpi-cpuhotplug.c if (!pr) pr 87 drivers/xen/xen-acpi-cpuhotplug.c pr->handle = device->handle; pr 90 drivers/xen/xen-acpi-cpuhotplug.c device->driver_data = pr; pr 101 drivers/xen/xen-acpi-cpuhotplug.c struct acpi_processor *pr; pr 106 drivers/xen/xen-acpi-cpuhotplug.c pr = acpi_driver_data(device); pr 107 drivers/xen/xen-acpi-cpuhotplug.c if (!pr) pr 110 drivers/xen/xen-acpi-cpuhotplug.c kfree(pr); pr 175 drivers/xen/xen-acpi-cpuhotplug.c static int xen_hotadd_cpu(struct acpi_processor *pr) pr 180 drivers/xen/xen-acpi-cpuhotplug.c apic_id = xen_apic_id(pr->handle); pr 183 drivers/xen/xen-acpi-cpuhotplug.c pr->acpi_id); pr 187 drivers/xen/xen-acpi-cpuhotplug.c pxm = xen_acpi_get_pxm(pr->handle); pr 190 drivers/xen/xen-acpi-cpuhotplug.c pr->acpi_id); pr 196 drivers/xen/xen-acpi-cpuhotplug.c op.u.cpu_add.acpi_id = pr->acpi_id; pr 202 drivers/xen/xen-acpi-cpuhotplug.c pr->acpi_id); pr 207 drivers/xen/xen-acpi-cpuhotplug.c static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr) pr 209 drivers/xen/xen-acpi-cpuhotplug.c if (!is_processor_present(pr->handle)) pr 212 drivers/xen/xen-acpi-cpuhotplug.c pr->id = xen_hotadd_cpu(pr); pr 213 drivers/xen/xen-acpi-cpuhotplug.c if (invalid_logical_cpuid(pr->id)) pr 235 drivers/xen/xen-acpi-cpuhotplug.c struct acpi_processor *pr; pr 279 drivers/xen/xen-acpi-cpuhotplug.c pr = acpi_driver_data(device); pr 280 drivers/xen/xen-acpi-cpuhotplug.c if (!pr) { pr 550 drivers/xen/xen-acpi-processor.c struct acpi_processor *pr; pr 553 drivers/xen/xen-acpi-processor.c pr = per_cpu(processors, i); pr 555 drivers/xen/xen-acpi-processor.c if (!pr) pr 558 drivers/xen/xen-acpi-processor.c pr->performance = perf; pr 559 drivers/xen/xen-acpi-processor.c rc = acpi_processor_get_performance_info(pr); pr 1120 fs/ext4/extents_status.c struct pending_reservation *pr = NULL; pr 1123 fs/ext4/extents_status.c pr = rb_entry(node, struct pending_reservation, rb_node); pr 1124 fs/ext4/extents_status.c if (lclu < pr->lclu) pr 1126 fs/ext4/extents_status.c else if (lclu > pr->lclu) pr 1129 fs/ext4/extents_status.c return pr; pr 1131 fs/ext4/extents_status.c if (pr && lclu < pr->lclu) pr 1132 fs/ext4/extents_status.c return pr; pr 1133 fs/ext4/extents_status.c if (pr && lclu > pr->lclu) { pr 1134 fs/ext4/extents_status.c node = rb_next(&pr->rb_node); pr 1162 fs/ext4/extents_status.c struct pending_reservation *pr; pr 1255 fs/ext4/extents_status.c pr = __pr_tree_search(&tree->root, first_lclu); pr 1256 fs/ext4/extents_status.c while (pr && pr->lclu <= last_lclu) { pr 1258 fs/ext4/extents_status.c node = rb_next(&pr->rb_node); pr 1259 fs/ext4/extents_status.c rb_erase(&pr->rb_node, &tree->root); pr 1260 
fs/ext4/extents_status.c kmem_cache_free(ext4_pending_cachep, pr); pr 1263 fs/ext4/extents_status.c pr = rb_entry(node, struct pending_reservation, pr 1773 fs/ext4/extents_status.c struct pending_reservation *pr; pr 1779 fs/ext4/extents_status.c pr = rb_entry(node, struct pending_reservation, rb_node); pr 1780 fs/ext4/extents_status.c printk(KERN_DEBUG " %u", pr->lclu); pr 1823 fs/ext4/extents_status.c struct pending_reservation *pr = NULL; pr 1829 fs/ext4/extents_status.c pr = rb_entry(node, struct pending_reservation, rb_node); pr 1830 fs/ext4/extents_status.c if (lclu < pr->lclu) pr 1832 fs/ext4/extents_status.c else if (lclu > pr->lclu) pr 1834 fs/ext4/extents_status.c else if (lclu == pr->lclu) pr 1835 fs/ext4/extents_status.c return pr; pr 1856 fs/ext4/extents_status.c struct pending_reservation *pr; pr 1864 fs/ext4/extents_status.c pr = rb_entry(parent, struct pending_reservation, rb_node); pr 1866 fs/ext4/extents_status.c if (lclu < pr->lclu) { pr 1868 fs/ext4/extents_status.c } else if (lclu > pr->lclu) { pr 1876 fs/ext4/extents_status.c pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC); pr 1877 fs/ext4/extents_status.c if (pr == NULL) { pr 1881 fs/ext4/extents_status.c pr->lclu = lclu; pr 1883 fs/ext4/extents_status.c rb_link_node(&pr->rb_node, parent, p); pr 1884 fs/ext4/extents_status.c rb_insert_color(&pr->rb_node, &tree->root); pr 1902 fs/ext4/extents_status.c struct pending_reservation *pr; pr 1905 fs/ext4/extents_status.c pr = __get_pending(inode, EXT4_B2C(sbi, lblk)); pr 1906 fs/ext4/extents_status.c if (pr != NULL) { pr 1908 fs/ext4/extents_status.c rb_erase(&pr->rb_node, &tree->root); pr 1909 fs/ext4/extents_status.c kmem_cache_free(ext4_pending_cachep, pr); pr 3766 fs/jfs/jfs_dtree.c wchar_t *pl, *pr, *kname; pr 3800 fs/jfs/jfs_dtree.c for (pl = lkey.name, pr = rkey.name; pr 3801 fs/jfs/jfs_dtree.c namlen; pl++, pr++, namlen--, klen++, kname++) { pr 3802 fs/jfs/jfs_dtree.c *kname = *pr; pr 3803 fs/jfs/jfs_dtree.c if (*pl != *pr) { pr 3811 fs/jfs/jfs_dtree.c *kname = *pr; pr 179 include/acpi/processor.h int (*acpi_processor_get_throttling) (struct acpi_processor * pr); pr 180 include/acpi/processor.h int (*acpi_processor_set_throttling) (struct acpi_processor * pr, pr 265 include/acpi/processor.h extern int acpi_processor_get_performance_info(struct acpi_processor *pr); pr 315 include/acpi/processor.h void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); pr 330 include/acpi/processor.h static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr, pr 356 include/acpi/processor.h extern int acpi_cppc_processor_probe(struct acpi_processor *pr); pr 357 include/acpi/processor.h extern void acpi_cppc_processor_exit(struct acpi_processor *pr); pr 359 include/acpi/processor.h static inline int acpi_cppc_processor_probe(struct acpi_processor *pr) pr 363 include/acpi/processor.h static inline void acpi_cppc_processor_exit(struct acpi_processor *pr) pr 374 include/acpi/processor.h int acpi_processor_tstate_has_changed(struct acpi_processor *pr); pr 375 include/acpi/processor.h int acpi_processor_get_throttling_info(struct acpi_processor *pr); pr 376 include/acpi/processor.h extern int acpi_processor_set_throttling(struct acpi_processor *pr, pr 382 include/acpi/processor.h extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, pr 387 include/acpi/processor.h static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr) pr 392 include/acpi/processor.h static inline int 
acpi_processor_get_throttling_info(struct acpi_processor *pr) pr 397 include/acpi/processor.h static inline int acpi_processor_set_throttling(struct acpi_processor *pr, pr 403 include/acpi/processor.h static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, pr 412 include/acpi/processor.h int acpi_processor_power_init(struct acpi_processor *pr); pr 413 include/acpi/processor.h int acpi_processor_power_exit(struct acpi_processor *pr); pr 414 include/acpi/processor.h int acpi_processor_power_state_has_changed(struct acpi_processor *pr); pr 415 include/acpi/processor.h int acpi_processor_hotplug(struct acpi_processor *pr); pr 417 include/acpi/processor.h static inline int acpi_processor_power_init(struct acpi_processor *pr) pr 422 include/acpi/processor.h static inline int acpi_processor_power_exit(struct acpi_processor *pr) pr 427 include/acpi/processor.h static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr) pr 432 include/acpi/processor.h static inline int acpi_processor_hotplug(struct acpi_processor *pr) pr 439 include/acpi/processor.h int acpi_processor_get_limit_info(struct acpi_processor *pr); pr 131 include/crypto/drbg.h bool pr; /* Prediction resistance enabled? */ pr 143 include/linux/dio.h #define DIO_ENCODE_ID(pr,sec) ((((int)sec & 0xff) << 8) | ((int)pr & 0xff)) pr 990 include/linux/usb.h #define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \ pr 995 include/linux/usb.h .bInterfaceProtocol = (pr) pr 1022 include/linux/usb.h #define USB_DEVICE_INFO(cl, sc, pr) \ pr 1026 include/linux/usb.h .bDeviceProtocol = (pr) pr 1037 include/linux/usb.h #define USB_INTERFACE_INFO(cl, sc, pr) \ pr 1041 include/linux/usb.h .bInterfaceProtocol = (pr) pr 1057 include/linux/usb.h #define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \ pr 1064 include/linux/usb.h .bInterfaceProtocol = (pr) pr 1079 include/linux/usb.h #define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \ pr 1085 include/linux/usb.h .bInterfaceProtocol = (pr) pr 503 include/sound/wavefront.h wavefront_program pr; pr 78 net/ceph/crush/mapper.c unsigned int pr = r % bucket->size; pr 87 net/ceph/crush/mapper.c if (pr == 0) { pr 109 net/ceph/crush/mapper.c while (work->perm_n <= pr) { pr 127 net/ceph/crush/mapper.c s = work->perm[pr]; pr 130 net/ceph/crush/mapper.c bucket->size, x, r, pr, s); pr 228 net/core/fib_rules.c const struct fib_rule_port_range *pr = nla_data(pattr); pr 230 net/core/fib_rules.c if (!fib_rule_port_range_valid(pr)) pr 233 net/core/fib_rules.c port_range->start = pr->start; pr 234 net/core/fib_rules.c port_range->end = pr->end; pr 822 sound/isa/wavefront/wavefront_synth.c if (header->hdr.pr.layer[i].mute) { pr 823 sound/isa/wavefront/wavefront_synth.c dev->patch_status[header->hdr.pr.layer[i].patch_number] |= pr 833 sound/isa/wavefront/wavefront_synth.c munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES); pr 1442 sound/isa/wavefront/wavefront_synth.c if (copy_from_user (&header->hdr.pr, header->hdrptr, pr 5520 tools/lib/traceevent/event-parse.c int prec = 0, pr; pr 5534 tools/lib/traceevent/event-parse.c pr = prec; pr 5535 tools/lib/traceevent/event-parse.c while (pr--) pr 672 tools/perf/util/data-convert-bt.c pr("Flush stream for cpu %d (%u samples)\n", pr 1161 tools/perf/util/data-convert-bt.c pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type); pr 1185 tools/perf/util/data-convert-bt.c pr("Failed to add event class into stream.\n"); pr 1232 tools/perf/util/data-convert-bt.c pr("Adding "#_name" event\n"); \ pr 1240 
tools/perf/util/data-convert-bt.c pr("Failed to add event class '"#_name"' into stream.\n");\ pr 1502 tools/perf/util/data-convert-bt.c pr("Failed to create CTF clock.\n"); pr 1509 tools/perf/util/data-convert-bt.c pr("Failed to setup CTF clock.\n"); pr 1516 tools/perf/util/data-convert-bt.c pr("Failed to create CTF stream class.\n"); pr 1524 tools/perf/util/data-convert-bt.c pr("Failed to assign CTF clock to stream class.\n"); pr 1543 tools/perf/util/data-convert-bt.c pr("Failed to assign CTF clock to writer.\n"); pr 154 tools/perf/util/ordered-events.c pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n", pr 163 tools/perf/util/ordered-events.c pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size); pr 1733 tools/perf/util/probe-event.c char pr; pr 1765 tools/perf/util/probe-event.c pr = fmt1_str[0]; pr 1772 tools/perf/util/probe-event.c pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr); pr 1774 tools/perf/util/probe-event.c tp->retprobe = (pr == 'r');
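The closing tools/perf/util/probe-event.c entries (pr 1733 through pr 1774) show the simplest use of the name in this listing: the first character of a kprobe_events-style definition is stashed in pr and a return probe is recognized by pr == 'r'. A small sketch of that check, with invented sample lines and deliberately simplified parsing:

/* Hedged sketch of the retprobe check above: in perf's probe-event
 * parser the leading character of a kprobe_events entry
 * ("p:group/event ..." vs "r:group/event ...") marks a return probe.
 * Sample lines are invented; real parsing handles far more syntax.
 */
#include <stdio.h>
#include <stdbool.h>

static bool is_retprobe(const char *line)
{
	char pr = line[0];	/* mirrors: pr = fmt1_str[0]; */
	return pr == 'r';	/* mirrors: tp->retprobe = (pr == 'r'); */
}

int main(void)
{
	const char *samples[] = {
		"p:probe/vfs_read _text+123456",
		"r:probe/vfs_read__return _text+123456",
	};

	for (int i = 0; i < 2; i++)
		printf("%-40s retprobe=%d\n", samples[i], is_retprobe(samples[i]));
	return 0;
}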