tr 376 arch/arm/include/asm/assembler.h \instr\()b\t\cond\().w \reg, [\ptr, #\off] tr 378 arch/arm/include/asm/assembler.h \instr\t\cond\().w \reg, [\ptr, #\off] tr 403 arch/arm/include/asm/assembler.h usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort tr 405 arch/arm/include/asm/assembler.h usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort tr 408 arch/arm/include/asm/assembler.h add\cond \ptr, #\rept * \inc tr 417 arch/arm/include/asm/assembler.h \instr\()b\t\cond \reg, [\ptr], #\inc tr 419 arch/arm/include/asm/assembler.h \instr\t\cond \reg, [\ptr], #\inc tr 434 arch/arm/include/asm/assembler.h usracc str, \reg, \ptr, \inc, \cond, \rept, \abort tr 438 arch/arm/include/asm/assembler.h usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort tr 318 arch/arm/kvm/guest.c struct kvm_translation *tr) tr 807 arch/arm64/kvm/guest.c struct kvm_translation *tr) tr 448 arch/ia64/include/asm/pal.h tr : 1, /* Translation regs tr 692 arch/ia64/include/asm/pal.h #define pmci_proc_translation_regs_valid pme_processor.tr tr 855 arch/ia64/kernel/mca.c copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat) tr 858 arch/ia64/kernel/mca.c *tr = *fr; tr 860 arch/ia64/kernel/mca.c tslot = ((unsigned long)tr >> 3) & 63; tr 27 arch/mips/include/asm/txx9/tx3927.h volatile unsigned long tr[3]; tr 81 arch/mips/include/asm/txx9/tx4927.h u64 tr; tr 1256 arch/mips/kvm/mips.c struct kvm_translation *tr) tr 208 arch/mips/txx9/generic/setup_tx4927.c pr_cont(" TR:%09llx\n", ____raw_readq(&tx4927_sdramcptr->tr)); tr 221 arch/mips/txx9/generic/setup_tx4938.c pr_cont(" TR:%09llx\n", ____raw_readq(&tx4938_sdramcptr->tr)); tr 128 arch/powerpc/include/asm/kvm_ppc.h struct kvm_translation *tr); tr 797 arch/powerpc/kvm/book3s.c struct kvm_translation *tr) tr 1789 arch/powerpc/kvm/booke.c struct kvm_translation *tr) tr 1794 arch/powerpc/kvm/booke.c r = kvmppc_core_vcpu_translate(vcpu, tr); tr 470 arch/powerpc/kvm/e500_mmu.c struct kvm_translation *tr) tr 477 arch/powerpc/kvm/e500_mmu.c eaddr = tr->linear_address; tr 478 arch/powerpc/kvm/e500_mmu.c pid = (tr->linear_address >> 32) & 0xff; tr 479 arch/powerpc/kvm/e500_mmu.c as = (tr->linear_address >> 40) & 0x1; tr 483 arch/powerpc/kvm/e500_mmu.c tr->valid = 0; tr 487 arch/powerpc/kvm/e500_mmu.c tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); tr 489 arch/powerpc/kvm/e500_mmu.c tr->valid = 1; tr 124 arch/powerpc/platforms/4xx/uic.c u32 tr, pr, mask; tr 150 arch/powerpc/platforms/4xx/uic.c tr = mfdcr(uic->dcrbase + UIC_TR); tr 152 arch/powerpc/platforms/4xx/uic.c tr = (tr & mask) | (trigger << (31-src)); tr 156 arch/powerpc/platforms/4xx/uic.c mtdcr(uic->dcrbase + UIC_TR, tr); tr 3393 arch/s390/kvm/kvm-s390.c struct kvm_translation *tr) tr 59 arch/sh/include/asm/ptrace.h {.name = __stringify(tr##num), .offset = offsetof(struct pt_regs, tregs[num])} tr 102 arch/unicore32/include/asm/assembler.h \instr\()b.u \reg, [\ptr], #\inc tr 104 arch/unicore32/include/asm/assembler.h \instr\()w.u \reg, [\ptr], #\inc tr 117 arch/unicore32/include/asm/assembler.h usracc st, \reg, \ptr, \inc, \cond, \rept, \abort tr 121 arch/unicore32/include/asm/assembler.h usracc ld, \reg, \ptr, \inc, \cond, \rept, \abort tr 801 arch/x86/crypto/camellia_glue.c u32 dw, tl, tr; tr 914 arch/x86/crypto/camellia_glue.c tr = subRL[10] ^ rol32(dw, 1); tr 915 arch/x86/crypto/camellia_glue.c tt = (tr | ((u64)tl << 32)); tr 923 arch/x86/crypto/camellia_glue.c tr = subRL[7] ^ rol32(dw, 1); tr 924 arch/x86/crypto/camellia_glue.c tt = (tr | ((u64)tl << 32)); tr 934 
arch/x86/crypto/camellia_glue.c tr = subRL[18] ^ rol32(dw, 1); tr 935 arch/x86/crypto/camellia_glue.c tt = (tr | ((u64)tl << 32)); tr 943 arch/x86/crypto/camellia_glue.c tr = subRL[15] ^ rol32(dw, 1); tr 944 arch/x86/crypto/camellia_glue.c tt = (tr | ((u64)tl << 32)); tr 958 arch/x86/crypto/camellia_glue.c tr = subRL[26] ^ rol32(dw, 1); tr 959 arch/x86/crypto/camellia_glue.c tt = (tr | ((u64)tl << 32)); tr 967 arch/x86/crypto/camellia_glue.c tr = subRL[23] ^ rol32(dw, 1); tr 968 arch/x86/crypto/camellia_glue.c tt = (tr | ((u64)tl << 32)); tr 117 arch/x86/include/asm/desc.h #define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) tr 121 arch/x86/include/asm/desc.h #define store_tr(tr) (tr = native_store_tr()) tr 269 arch/x86/include/asm/desc.h unsigned long tr; tr 271 arch/x86/include/asm/desc.h asm volatile("str %0":"=r" (tr)); tr 273 arch/x86/include/asm/desc.h return tr; tr 144 arch/x86/include/asm/nospec-branch.h \ftr tr 268 arch/x86/include/asm/paravirt.h #define store_tr(tr) ((tr) = paravirt_store_tr()) tr 30 arch/x86/include/asm/suspend_32.h unsigned long tr; tr 48 arch/x86/include/asm/suspend_64.h unsigned long tr; tr 171 arch/x86/include/asm/svm.h struct vmcb_seg tr; tr 152 arch/x86/include/uapi/asm/kvm.h struct kvm_segment tr, ldt; tr 379 arch/x86/kernel/cpu/mce/amd.c struct thresh_restart *tr = _tr; tr 382 arch/x86/kernel/cpu/mce/amd.c rdmsr(tr->b->address, lo, hi); tr 384 arch/x86/kernel/cpu/mce/amd.c if (tr->b->threshold_limit < (hi & THRESHOLD_MAX)) tr 385 arch/x86/kernel/cpu/mce/amd.c tr->reset = 1; /* limit cannot be lower than err count */ tr 387 arch/x86/kernel/cpu/mce/amd.c if (tr->reset) { /* reset err count and overflow bit */ tr 390 arch/x86/kernel/cpu/mce/amd.c (THRESHOLD_MAX - tr->b->threshold_limit); tr 391 arch/x86/kernel/cpu/mce/amd.c } else if (tr->old_limit) { /* change limit w/o reset */ tr 393 arch/x86/kernel/cpu/mce/amd.c (tr->old_limit - tr->b->threshold_limit); tr 402 arch/x86/kernel/cpu/mce/amd.c if (!tr->b->interrupt_capable) tr 405 arch/x86/kernel/cpu/mce/amd.c if (tr->set_lvt_off) { tr 406 arch/x86/kernel/cpu/mce/amd.c if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) { tr 409 arch/x86/kernel/cpu/mce/amd.c hi |= tr->lvt_off << 20; tr 413 arch/x86/kernel/cpu/mce/amd.c if (tr->b->interrupt_enable) tr 419 arch/x86/kernel/cpu/mce/amd.c wrmsr(tr->b->address, lo, hi); tr 424 arch/x86/kernel/cpu/mce/amd.c struct thresh_restart tr = { tr 431 arch/x86/kernel/cpu/mce/amd.c threshold_restart_bank(&tr); tr 989 arch/x86/kernel/cpu/mce/amd.c struct thresh_restart tr; tr 1005 arch/x86/kernel/cpu/mce/amd.c memset(&tr, 0, sizeof(tr)); tr 1006 arch/x86/kernel/cpu/mce/amd.c tr.b = block; tr 1007 arch/x86/kernel/cpu/mce/amd.c threshold_restart_bank(&tr); tr 1058 arch/x86/kernel/cpu/mce/amd.c struct thresh_restart tr; tr 1069 arch/x86/kernel/cpu/mce/amd.c memset(&tr, 0, sizeof(tr)); tr 1070 arch/x86/kernel/cpu/mce/amd.c tr.b = b; tr 1072 arch/x86/kernel/cpu/mce/amd.c smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); tr 1080 arch/x86/kernel/cpu/mce/amd.c struct thresh_restart tr; tr 1091 arch/x86/kernel/cpu/mce/amd.c memset(&tr, 0, sizeof(tr)); tr 1092 arch/x86/kernel/cpu/mce/amd.c tr.old_limit = b->threshold_limit; tr 1094 arch/x86/kernel/cpu/mce/amd.c tr.b = b; tr 1096 arch/x86/kernel/cpu/mce/amd.c smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); tr 2977 arch/x86/kvm/emulate.c u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; tr 2989 arch/x86/kvm/emulate.c ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); tr 1623 arch/x86/kvm/svm.c 
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); tr 2444 arch/x86/kvm/svm.c case VCPU_SREG_TR: return &save->tr; tr 3735 arch/x86/kvm/svm.c to_vmcb->save.tr = from_vmcb->save.tr; tr 4935 arch/x86/kvm/svm.c save->tr.selector, save->tr.attrib, tr 4936 arch/x86/kvm/svm.c save->tr.limit, save->tr.base); tr 3328 arch/x86/kvm/vmx/vmx.c struct kvm_segment tr; tr 3330 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); tr 3332 arch/x86/kvm/vmx/vmx.c if (tr.unusable) tr 3334 arch/x86/kvm/vmx/vmx.c if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ tr 3336 arch/x86/kvm/vmx/vmx.c if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ tr 3338 arch/x86/kvm/vmx/vmx.c if (!tr.present) tr 8733 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); tr 8934 arch/x86/kvm/x86.c kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); tr 9024 arch/x86/kvm/x86.c struct kvm_translation *tr) tr 9026 arch/x86/kvm/x86.c unsigned long vaddr = tr->linear_address; tr 9035 arch/x86/kvm/x86.c tr->physical_address = gpa; tr 9036 arch/x86/kvm/x86.c tr->valid = gpa != UNMAPPED_GVA; tr 9037 arch/x86/kvm/x86.c tr->writeable = 1; tr 9038 arch/x86/kvm/x86.c tr->usermode = 0; tr 654 arch/x86/mm/fault.c u16 ldtr, tr; tr 677 arch/x86/mm/fault.c store_tr(tr); tr 678 arch/x86/mm/fault.c show_ldttss(&gdt, "TR", tr); tr 96 arch/x86/power/cpu.c store_tr(ctxt->tr); tr 38 arch/xtensa/include/asm/coprocessor.h addi \clb, \ptr, \offset tr 45 arch/xtensa/include/asm/coprocessor.h addi \clb, \ptr, \offset tr 57 arch/xtensa/include/asm/coprocessor.h addi \clb, \ptr, \offset tr 64 arch/xtensa/include/asm/coprocessor.h addi \clb, \ptr, \offset tr 80 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 82 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 85 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 90 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 92 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 94 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 97 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 102 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 104 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 106 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 108 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+8 tr 110 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+12 tr 112 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+16 tr 114 arch/xtensa/variants/csp/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+20 tr 117 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 146 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 147 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 151 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 156 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 157 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 159 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, 
.Lxchal_ofs_+4 tr 163 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 168 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 169 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 171 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 173 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+8 tr 175 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+12 tr 177 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+16 tr 179 arch/xtensa/variants/csp/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+20 tr 183 arch/xtensa/variants/csp/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 40 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-8, 4, 4 tr 43 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 44 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at2, \ptr, .Lxchal_ofs_ + 4 tr 48 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-16, 4, 4 tr 51 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 52 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at2, \ptr, .Lxchal_ofs_ + 4 tr 55 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 8 tr 56 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at2, \ptr, .Lxchal_ofs_ + 12 tr 60 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 62 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 66 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 68 arch/xtensa/variants/dc232b/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 81 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-8, 4, 4 tr 82 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 83 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at2, \ptr, .Lxchal_ofs_ + 4 tr 89 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-16, 4, 4 tr 90 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 91 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at2, \ptr, .Lxchal_ofs_ + 4 tr 94 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 8 tr 95 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at2, \ptr, .Lxchal_ofs_ + 12 tr 101 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 102 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 107 arch/xtensa/variants/dc232b/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 108 arch/xtensa/variants/dc232b/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 81 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 83 arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 86 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 91 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 93 
arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 95 arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 98 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 103 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 105 arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 107 arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 109 arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+8 tr 111 arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+12 tr 113 arch/xtensa/variants/dc233c/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+16 tr 116 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 145 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 146 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 150 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 155 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 156 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 158 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 162 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 167 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 168 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 170 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 172 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+8 tr 174 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+12 tr 176 arch/xtensa/variants/dc233c/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+16 tr 180 arch/xtensa/variants/dc233c/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 80 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 82 arch/xtensa/variants/de212/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 84 arch/xtensa/variants/de212/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 87 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 92 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 94 arch/xtensa/variants/de212/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 96 arch/xtensa/variants/de212/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 98 arch/xtensa/variants/de212/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+8 tr 100 arch/xtensa/variants/de212/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+12 tr 102 arch/xtensa/variants/de212/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+16 tr 105 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 134 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 135 arch/xtensa/variants/de212/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 137 arch/xtensa/variants/de212/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 
141 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 146 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 147 arch/xtensa/variants/de212/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 149 arch/xtensa/variants/de212/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 151 arch/xtensa/variants/de212/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+8 tr 153 arch/xtensa/variants/de212/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+12 tr 155 arch/xtensa/variants/de212/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+16 tr 159 arch/xtensa/variants/de212/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1004, 4, 4 tr 40 arch/xtensa/variants/fsf/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 42 arch/xtensa/variants/fsf/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 55 arch/xtensa/variants/fsf/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 56 arch/xtensa/variants/fsf/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 80 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 82 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 85 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 90 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 92 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 94 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 97 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 102 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 104 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 106 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 108 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+8 tr 110 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+12 tr 112 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+16 tr 114 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+20 tr 117 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 146 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 147 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 151 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 156 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 157 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 159 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 163 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 168 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 169 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 171 
arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 173 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+8 tr 175 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+12 tr 177 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+16 tr 179 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+20 tr 183 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 205 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 207 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 209 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 211 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+8 tr 213 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+12 tr 215 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+16 tr 217 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+20 tr 218 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep0, \ptr, .Lxchal_ofs_+24 tr 219 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep1, \ptr, .Lxchal_ofs_+32 tr 220 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep2, \ptr, .Lxchal_ofs_+40 tr 221 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep3, \ptr, .Lxchal_ofs_+48 tr 222 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep4, \ptr, .Lxchal_ofs_+56 tr 223 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h addi \ptr, \ptr, 64 tr 224 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep5, \ptr, .Lxchal_ofs_+0 tr 225 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep6, \ptr, .Lxchal_ofs_+8 tr 226 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sp24x2s.i aep7, \ptr, .Lxchal_ofs_+16 tr 227 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sq56s.i aeq0, \ptr, .Lxchal_ofs_+24 tr 228 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sq56s.i aeq1, \ptr, .Lxchal_ofs_+32 tr 229 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sq56s.i aeq2, \ptr, .Lxchal_ofs_+40 tr 230 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_sq56s.i aeq3, \ptr, .Lxchal_ofs_+48 tr 234 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 253 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 254 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 256 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 258 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+8 tr 260 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+12 tr 262 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+16 tr 264 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+20 tr 266 
arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep0, \ptr, .Lxchal_ofs_+24 tr 267 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep1, \ptr, .Lxchal_ofs_+32 tr 268 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep2, \ptr, .Lxchal_ofs_+40 tr 269 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep3, \ptr, .Lxchal_ofs_+48 tr 270 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep4, \ptr, .Lxchal_ofs_+56 tr 271 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h addi \ptr, \ptr, 64 tr 272 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep5, \ptr, .Lxchal_ofs_+0 tr 273 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep6, \ptr, .Lxchal_ofs_+8 tr 274 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lp24x2.i aep7, \ptr, .Lxchal_ofs_+16 tr 275 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h addi \ptr, \ptr, 24 tr 276 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lq56.i aeq0, \ptr, .Lxchal_ofs_+0 tr 277 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lq56.i aeq1, \ptr, .Lxchal_ofs_+8 tr 278 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lq56.i aeq2, \ptr, .Lxchal_ofs_+16 tr 279 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h ae_lq56.i aeq3, \ptr, .Lxchal_ofs_+24 tr 283 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 81 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 83 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 86 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 91 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 93 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 95 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 98 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 103 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 105 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 107 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 109 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+8 tr 111 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+12 tr 113 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+16 tr 115 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+20 tr 118 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 147 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 148 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 152 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1020, 4, 4 tr 157 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 158 
arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 160 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 164 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1016, 4, 4 tr 169 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 170 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 172 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 174 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+8 tr 176 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+12 tr 178 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+16 tr 180 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+20 tr 184 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1000, 4, 4 tr 209 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 211 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+0 tr 213 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+4 tr 215 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+8 tr 217 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+12 tr 219 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+16 tr 221 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_+20 tr 222 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed0, \ptr, .Lxchal_ofs_+24 tr 223 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed1, \ptr, .Lxchal_ofs_+32 tr 224 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed2, \ptr, .Lxchal_ofs_+40 tr 225 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed3, \ptr, .Lxchal_ofs_+48 tr 226 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed4, \ptr, .Lxchal_ofs_+56 tr 227 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h addi \ptr, \ptr, 64 tr 228 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed5, \ptr, .Lxchal_ofs_+0 tr 229 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed6, \ptr, .Lxchal_ofs_+8 tr 230 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed7, \ptr, .Lxchal_ofs_+16 tr 231 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed8, \ptr, .Lxchal_ofs_+24 tr 232 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed9, \ptr, .Lxchal_ofs_+32 tr 233 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed10, \ptr, .Lxchal_ofs_+40 tr 234 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed11, \ptr, .Lxchal_ofs_+48 tr 235 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed12, \ptr, .Lxchal_ofs_+56 tr 236 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h addi \ptr, \ptr, 64 tr 237 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed13, \ptr, .Lxchal_ofs_+0 tr 238 
arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed14, \ptr, .Lxchal_ofs_+8 tr 239 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_S64.I aed15, \ptr, .Lxchal_ofs_+16 tr 240 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_SALIGN64.I u0, \ptr, .Lxchal_ofs_+24 tr 241 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_SALIGN64.I u1, \ptr, .Lxchal_ofs_+32 tr 242 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_SALIGN64.I u2, \ptr, .Lxchal_ofs_+40 tr 243 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_SALIGN64.I u3, \ptr, .Lxchal_ofs_+48 tr 247 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 266 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 267 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+0 tr 269 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+4 tr 271 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+8 tr 273 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+12 tr 275 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+16 tr 277 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_+20 tr 279 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed0, \ptr, .Lxchal_ofs_+24 tr 280 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed1, \ptr, .Lxchal_ofs_+32 tr 281 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed2, \ptr, .Lxchal_ofs_+40 tr 282 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed3, \ptr, .Lxchal_ofs_+48 tr 283 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed4, \ptr, .Lxchal_ofs_+56 tr 284 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h addi \ptr, \ptr, 64 tr 285 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed5, \ptr, .Lxchal_ofs_+0 tr 286 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed6, \ptr, .Lxchal_ofs_+8 tr 287 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed7, \ptr, .Lxchal_ofs_+16 tr 288 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed8, \ptr, .Lxchal_ofs_+24 tr 289 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed9, \ptr, .Lxchal_ofs_+32 tr 290 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed10, \ptr, .Lxchal_ofs_+40 tr 291 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed11, \ptr, .Lxchal_ofs_+48 tr 292 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed12, \ptr, .Lxchal_ofs_+56 tr 293 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h addi \ptr, \ptr, 64 tr 294 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed13, \ptr, .Lxchal_ofs_+0 tr 295 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed14, \ptr, .Lxchal_ofs_+8 tr 296 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_L64.I aed15, \ptr, .Lxchal_ofs_+16 tr 297 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_LALIGN64.I u0, \ptr, .Lxchal_ofs_+24 tr 298 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_LALIGN64.I 
u1, \ptr, .Lxchal_ofs_+32 tr 299 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_LALIGN64.I u2, \ptr, .Lxchal_ofs_+40 tr 300 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h AE_LALIGN64.I u3, \ptr, .Lxchal_ofs_+48 tr 304 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 8, 8 tr 39 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 41 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 45 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 47 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 51 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 53 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h s32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 66 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 67 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 72 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 73 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 78 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 1024-4, 4, 4 tr 79 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h l32i \at1, \ptr, .Lxchal_ofs_ + 0 tr 100 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 1, 8 tr 102 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h s32i \at1, \ptr, 0 tr 104 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h s32i \at1, \ptr, 4 tr 106 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h s32i \at1, \ptr, 8 tr 108 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h s32i \at1, \ptr, 12 tr 109 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep0, \ptr, 16 tr 110 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep1, \ptr, 24 tr 111 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep2, \ptr, 32 tr 112 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep3, \ptr, 40 tr 113 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep4, \ptr, 48 tr 114 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep5, \ptr, 56 tr 115 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h addi \ptr, \ptr, 64 tr 116 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep6, \ptr, 0 tr 117 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SP24X2S.I aep7, \ptr, 8 tr 118 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SQ56S.I aeq0, \ptr, 16 tr 119 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SQ56S.I aeq1, \ptr, 24 tr 120 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SQ56S.I aeq2, \ptr, 32 tr 121 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_SQ56S.I aeq3, \ptr, 40 tr 136 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h xchal_sa_align \ptr, 0, 0, 1, 8 tr 137 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h l32i \at1, \ptr, 0 tr 139 
arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h l32i \at1, \ptr, 4 tr 141 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h l32i \at1, \ptr, 8 tr 143 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h l32i \at1, \ptr, 12 tr 145 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h addi \ptr, \ptr, 80 tr 146 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LQ56.I aeq0, \ptr, 0 tr 147 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LQ56.I aeq1, \ptr, 8 tr 148 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LQ56.I aeq2, \ptr, 16 tr 149 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LQ56.I aeq3, \ptr, 24 tr 150 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep0, \ptr, -64 tr 151 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep1, \ptr, -56 tr 152 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep2, \ptr, -48 tr 153 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep3, \ptr, -40 tr 154 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep4, \ptr, -32 tr 155 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep5, \ptr, -24 tr 156 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep6, \ptr, -16 tr 157 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h AE_LP24X2.I aep7, \ptr, -8 tr 367 crypto/camellia_generic.c u32 dw, tl, tr; tr 468 crypto/camellia_generic.c tr = subR[10] ^ rol32(dw, 1); tr 470 crypto/camellia_generic.c SUBKEY_R(7) = subR[6] ^ tr; tr 477 crypto/camellia_generic.c tr = subR[7] ^ rol32(dw, 1); tr 479 crypto/camellia_generic.c SUBKEY_R(10) = tr ^ subR[11]; tr 490 crypto/camellia_generic.c tr = subR[18] ^ rol32(dw, 1); tr 492 crypto/camellia_generic.c SUBKEY_R(15) = subR[14] ^ tr; tr 499 crypto/camellia_generic.c tr = subR[15] ^ rol32(dw, 1); tr 501 crypto/camellia_generic.c SUBKEY_R(18) = tr ^ subR[19]; tr 518 crypto/camellia_generic.c tr = subR[26] ^ rol32(dw, 1); tr 520 crypto/camellia_generic.c SUBKEY_R(23) = subR[22] ^ tr; tr 527 crypto/camellia_generic.c tr = subR[23] ^ rol32(dw, 1); tr 529 crypto/camellia_generic.c SUBKEY_R(26) = tr ^ subR[27]; tr 2843 drivers/android/binder.c struct binder_transaction_data *tr, int reply, tr 2871 drivers/android/binder.c e->call_type = reply ? 
2 : !!(tr->flags & TF_ONE_WAY); tr 2874 drivers/android/binder.c e->target_handle = tr->target.handle; tr 2875 drivers/android/binder.c e->data_size = tr->data_size; tr 2876 drivers/android/binder.c e->offsets_size = tr->offsets_size; tr 2936 drivers/android/binder.c if (tr->target.handle) { tr 2947 drivers/android/binder.c ref = binder_get_ref_olocked(proc, tr->target.handle, tr 2998 drivers/android/binder.c if (!(tr->flags & TF_ONE_WAY) && w && tr 3018 drivers/android/binder.c if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { tr 3085 drivers/android/binder.c (u64)tr->data.ptr.buffer, tr 3086 drivers/android/binder.c (u64)tr->data.ptr.offsets, tr 3087 drivers/android/binder.c (u64)tr->data_size, (u64)tr->offsets_size, tr 3094 drivers/android/binder.c (u64)tr->data.ptr.buffer, tr 3095 drivers/android/binder.c (u64)tr->data.ptr.offsets, tr 3096 drivers/android/binder.c (u64)tr->data_size, (u64)tr->offsets_size, tr 3099 drivers/android/binder.c if (!reply && !(tr->flags & TF_ONE_WAY)) tr 3106 drivers/android/binder.c t->code = tr->code; tr 3107 drivers/android/binder.c t->flags = tr->flags; tr 3135 drivers/android/binder.c t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr 3136 drivers/android/binder.c tr->offsets_size, extra_buffers_size, tr 3151 drivers/android/binder.c size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + tr 3152 drivers/android/binder.c ALIGN(tr->offsets_size, sizeof(void *)) + tr 3176 drivers/android/binder.c (uintptr_t)tr->data.ptr.buffer, tr 3177 drivers/android/binder.c tr->data_size)) { tr 3188 drivers/android/binder.c ALIGN(tr->data_size, sizeof(void *)), tr 3190 drivers/android/binder.c (uintptr_t)tr->data.ptr.offsets, tr 3191 drivers/android/binder.c tr->offsets_size)) { tr 3199 drivers/android/binder.c if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { tr 3201 drivers/android/binder.c proc->pid, thread->pid, (u64)tr->offsets_size); tr 3216 drivers/android/binder.c off_start_offset = ALIGN(tr->data_size, sizeof(void *)); tr 3218 drivers/android/binder.c off_end_offset = off_start_offset + tr->offsets_size; tr 3523 drivers/android/binder.c (u64)tr->data_size, (u64)tr->offsets_size, tr 3793 drivers/android/binder.c struct binder_transaction_data_sg tr; tr 3795 drivers/android/binder.c if (copy_from_user(&tr, ptr, sizeof(tr))) tr 3797 drivers/android/binder.c ptr += sizeof(tr); tr 3798 drivers/android/binder.c binder_transaction(proc, thread, &tr.transaction_data, tr 3799 drivers/android/binder.c cmd == BC_REPLY_SG, tr.buffers_size); tr 3804 drivers/android/binder.c struct binder_transaction_data tr; tr 3806 drivers/android/binder.c if (copy_from_user(&tr, ptr, sizeof(tr))) tr 3808 drivers/android/binder.c ptr += sizeof(tr); tr 3809 drivers/android/binder.c binder_transaction(proc, thread, &tr, tr 4228 drivers/android/binder.c struct binder_transaction_data_secctx tr; tr 4229 drivers/android/binder.c struct binder_transaction_data *trd = &tr.transaction_data; tr 4251 drivers/android/binder.c if (end - ptr < sizeof(tr) + 4) { tr 4499 drivers/android/binder.c tr.secctx = t->security_ctx; tr 4502 drivers/android/binder.c trsize = sizeof(tr); tr 4514 drivers/android/binder.c if (copy_to_user(ptr, &tr, trsize)) { tr 384 drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c void dal_hw_translate_dce110_init(struct hw_translate *tr) tr 386 drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c tr->funcs = &funcs; tr 32 drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h void 
dal_hw_translate_dce110_init(struct hw_translate *tr); tr 406 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c void dal_hw_translate_dce120_init(struct hw_translate *tr) tr 408 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c tr->funcs = &funcs; tr 32 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h void dal_hw_translate_dce120_init(struct hw_translate *tr); tr 30 drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h struct hw_translate *tr); tr 406 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c void dal_hw_translate_dcn10_init(struct hw_translate *tr) tr 408 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c tr->funcs = &funcs; tr 32 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h void dal_hw_translate_dcn10_init(struct hw_translate *tr); tr 377 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c void dal_hw_translate_dcn20_init(struct hw_translate *tr) tr 379 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c tr->funcs = &funcs; tr 32 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h void dal_hw_translate_dcn20_init(struct hw_translate *tr); tr 380 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c void dal_hw_translate_dcn21_init(struct hw_translate *tr) tr 382 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c tr->funcs = &funcs; tr 32 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h void dal_hw_translate_dcn21_init(struct hw_translate *tr); tr 37 drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c void dal_hw_translate_diag_fpga_init(struct hw_translate *tr) tr 39 drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c tr->funcs = &funcs; tr 32 drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h void dal_hw_translate_diag_fpga_init(struct hw_translate *tr); tr 252 drivers/gpu/drm/drm_mipi_dbi.c void *tr; tr 266 drivers/gpu/drm/drm_mipi_dbi.c tr = dbidev->tx_buf; tr 271 drivers/gpu/drm/drm_mipi_dbi.c tr = cma_obj->vaddr; tr 281 drivers/gpu/drm/drm_mipi_dbi.c ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr, tr 777 drivers/gpu/drm/drm_mipi_dbi.c struct spi_transfer tr = { tr 790 drivers/gpu/drm/drm_mipi_dbi.c tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len); tr 791 drivers/gpu/drm/drm_mipi_dbi.c spi_message_init_with_transfers(&m, &tr, 1); tr 801 drivers/gpu/drm/drm_mipi_dbi.c tr.len = 9; tr 878 drivers/gpu/drm/drm_mipi_dbi.c tr.len = chunk + added; tr 893 drivers/gpu/drm/drm_mipi_dbi.c struct spi_transfer tr = { tr 906 drivers/gpu/drm/drm_mipi_dbi.c tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len); tr 916 drivers/gpu/drm/drm_mipi_dbi.c spi_message_init_with_transfers(&m, &tr, 1); tr 917 drivers/gpu/drm/drm_mipi_dbi.c tr.tx_buf = dst16; tr 940 drivers/gpu/drm/drm_mipi_dbi.c tr.len = chunk; tr 977 drivers/gpu/drm/drm_mipi_dbi.c struct spi_transfer tr[2] = { tr 1003 drivers/gpu/drm/drm_mipi_dbi.c tr[1].len = len + 1; tr 1006 drivers/gpu/drm/drm_mipi_dbi.c buf = kmalloc(tr[1].len, GFP_KERNEL); tr 1010 drivers/gpu/drm/drm_mipi_dbi.c tr[1].rx_buf = buf; tr 1013 drivers/gpu/drm/drm_mipi_dbi.c spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr)); tr 1018 drivers/gpu/drm/drm_mipi_dbi.c if (tr[1].len == len) { tr 1151 drivers/gpu/drm/drm_mipi_dbi.c struct spi_transfer tr = { tr 1159 drivers/gpu/drm/drm_mipi_dbi.c spi_message_init_with_transfers(&m, &tr, 1); tr 1164 drivers/gpu/drm/drm_mipi_dbi.c tr.tx_buf = buf; tr 1165 drivers/gpu/drm/drm_mipi_dbi.c tr.len = 
chunk; tr 90 drivers/gpu/drm/tiny/ili9225.c void *tr; tr 104 drivers/gpu/drm/tiny/ili9225.c tr = dbidev->tx_buf; tr 109 drivers/gpu/drm/tiny/ili9225.c tr = cma_obj->vaddr; tr 155 drivers/gpu/drm/tiny/ili9225.c ret = mipi_dbi_command_buf(dbi, ILI9225_WRITE_DATA_TO_GRAM, tr, tr 104 drivers/gpu/drm/tiny/repaper.c struct spi_transfer tr[2] = {}; tr 113 drivers/gpu/drm/tiny/repaper.c tr[0].tx_buf = headerbuf; tr 114 drivers/gpu/drm/tiny/repaper.c tr[0].len = 1; tr 133 drivers/gpu/drm/tiny/repaper.c tr[1].tx_buf = txbuf ? txbuf : tx; tr 134 drivers/gpu/drm/tiny/repaper.c tr[1].rx_buf = rxbuf; tr 135 drivers/gpu/drm/tiny/repaper.c tr[1].len = len; tr 138 drivers/gpu/drm/tiny/repaper.c ret = spi_sync_transfer(spi, tr, 2); tr 511 drivers/ide/pmac.c u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time); tr 512 drivers/ide/pmac.c t = (t & ~TR_133_PIOREG_PIO_MASK) | tr; tr 518 drivers/ide/pmac.c u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time); tr 519 drivers/ide/pmac.c t = (t & ~TR_100_PIOREG_PIO_MASK) | tr; tr 606 drivers/ide/pmac.c u32 tr; tr 610 drivers/ide/pmac.c tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma); tr 611 drivers/ide/pmac.c *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr; tr 624 drivers/ide/pmac.c u32 tr; tr 628 drivers/ide/pmac.c tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma); tr 629 drivers/ide/pmac.c *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr; tr 701 drivers/ide/pmac.c u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime); tr 702 drivers/ide/pmac.c *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr; tr 709 drivers/ide/pmac.c u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime); tr 710 drivers/ide/pmac.c *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr; tr 640 drivers/infiniband/hw/hfi1/tid_rdma.c struct tid_rdma_qp_params *tr; tr 644 drivers/infiniband/hw/hfi1/tid_rdma.c tr = container_of(work, struct tid_rdma_qp_params, trigger_work); tr 645 drivers/infiniband/hw/hfi1/tid_rdma.c priv = container_of(tr, struct hfi1_qp_priv, tid_rdma); tr 56 drivers/input/touchscreen/touchright.c struct tr *tr = serio_get_drvdata(serio); tr 57 drivers/input/touchscreen/touchright.c struct input_dev *dev = tr->dev; tr 59 drivers/input/touchscreen/touchright.c tr->data[tr->idx] = data; tr 61 drivers/input/touchscreen/touchright.c if ((tr->data[0] & TR_FORMAT_STATUS_MASK) == TR_FORMAT_STATUS_BYTE) { tr 62 drivers/input/touchscreen/touchright.c if (++tr->idx == TR_LENGTH) { tr 64 drivers/input/touchscreen/touchright.c (tr->data[1] << 5) | (tr->data[2] >> 1)); tr 66 drivers/input/touchscreen/touchright.c (tr->data[3] << 5) | (tr->data[4] >> 1)); tr 68 drivers/input/touchscreen/touchright.c tr->data[0] & TR_FORMAT_TOUCH_BIT); tr 70 drivers/input/touchscreen/touchright.c tr->idx = 0; tr 83 drivers/input/touchscreen/touchright.c struct tr *tr = serio_get_drvdata(serio); tr 85 drivers/input/touchscreen/touchright.c input_get_device(tr->dev); tr 86 drivers/input/touchscreen/touchright.c input_unregister_device(tr->dev); tr 89 drivers/input/touchscreen/touchright.c input_put_device(tr->dev); tr 90 drivers/input/touchscreen/touchright.c kfree(tr); tr 101 drivers/input/touchscreen/touchright.c struct tr *tr; tr 105 drivers/input/touchscreen/touchright.c tr = kzalloc(sizeof(struct tr), GFP_KERNEL); tr 107 drivers/input/touchscreen/touchright.c if (!tr || !input_dev) { tr 112 drivers/input/touchscreen/touchright.c tr->serio = serio; tr 113 drivers/input/touchscreen/touchright.c tr->dev = 
input_dev; tr 114 drivers/input/touchscreen/touchright.c snprintf(tr->phys, sizeof(tr->phys), "%s/input0", serio->phys); tr 117 drivers/input/touchscreen/touchright.c input_dev->phys = tr->phys; tr 125 drivers/input/touchscreen/touchright.c input_set_abs_params(tr->dev, ABS_X, TR_MIN_XC, TR_MAX_XC, 0, 0); tr 126 drivers/input/touchscreen/touchright.c input_set_abs_params(tr->dev, ABS_Y, TR_MIN_YC, TR_MAX_YC, 0, 0); tr 128 drivers/input/touchscreen/touchright.c serio_set_drvdata(serio, tr); tr 134 drivers/input/touchscreen/touchright.c err = input_register_device(tr->dev); tr 143 drivers/input/touchscreen/touchright.c kfree(tr); tr 43 drivers/media/radio/radio-timb.c struct timbradio *tr = video_drvdata(file); tr 44 drivers/media/radio/radio-timb.c return v4l2_subdev_call(tr->sd_tuner, tuner, g_tuner, v); tr 50 drivers/media/radio/radio-timb.c struct timbradio *tr = video_drvdata(file); tr 51 drivers/media/radio/radio-timb.c return v4l2_subdev_call(tr->sd_tuner, tuner, s_tuner, v); tr 57 drivers/media/radio/radio-timb.c struct timbradio *tr = video_drvdata(file); tr 58 drivers/media/radio/radio-timb.c return v4l2_subdev_call(tr->sd_tuner, tuner, s_frequency, f); tr 64 drivers/media/radio/radio-timb.c struct timbradio *tr = video_drvdata(file); tr 65 drivers/media/radio/radio-timb.c return v4l2_subdev_call(tr->sd_tuner, tuner, g_frequency, f); tr 90 drivers/media/radio/radio-timb.c struct timbradio *tr; tr 99 drivers/media/radio/radio-timb.c tr = devm_kzalloc(&pdev->dev, sizeof(*tr), GFP_KERNEL); tr 100 drivers/media/radio/radio-timb.c if (!tr) { tr 105 drivers/media/radio/radio-timb.c tr->pdata = *pdata; tr 106 drivers/media/radio/radio-timb.c mutex_init(&tr->lock); tr 108 drivers/media/radio/radio-timb.c strscpy(tr->video_dev.name, "Timberdale Radio", tr 109 drivers/media/radio/radio-timb.c sizeof(tr->video_dev.name)); tr 110 drivers/media/radio/radio-timb.c tr->video_dev.fops = &timbradio_fops; tr 111 drivers/media/radio/radio-timb.c tr->video_dev.ioctl_ops = &timbradio_ioctl_ops; tr 112 drivers/media/radio/radio-timb.c tr->video_dev.release = video_device_release_empty; tr 113 drivers/media/radio/radio-timb.c tr->video_dev.minor = -1; tr 114 drivers/media/radio/radio-timb.c tr->video_dev.lock = &tr->lock; tr 115 drivers/media/radio/radio-timb.c tr->video_dev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO; tr 117 drivers/media/radio/radio-timb.c strscpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name)); tr 118 drivers/media/radio/radio-timb.c err = v4l2_device_register(NULL, &tr->v4l2_dev); tr 122 drivers/media/radio/radio-timb.c tr->video_dev.v4l2_dev = &tr->v4l2_dev; tr 124 drivers/media/radio/radio-timb.c tr->sd_tuner = v4l2_i2c_new_subdev_board(&tr->v4l2_dev, tr 126 drivers/media/radio/radio-timb.c tr->sd_dsp = v4l2_i2c_new_subdev_board(&tr->v4l2_dev, tr 128 drivers/media/radio/radio-timb.c if (tr->sd_tuner == NULL || tr->sd_dsp == NULL) { tr 133 drivers/media/radio/radio-timb.c tr->v4l2_dev.ctrl_handler = tr->sd_dsp->ctrl_handler; tr 135 drivers/media/radio/radio-timb.c err = video_register_device(&tr->video_dev, VFL_TYPE_RADIO, -1); tr 141 drivers/media/radio/radio-timb.c video_set_drvdata(&tr->video_dev, tr); tr 143 drivers/media/radio/radio-timb.c platform_set_drvdata(pdev, tr); tr 147 drivers/media/radio/radio-timb.c v4l2_device_unregister(&tr->v4l2_dev); tr 156 drivers/media/radio/radio-timb.c struct timbradio *tr = platform_get_drvdata(pdev); tr 158 drivers/media/radio/radio-timb.c video_unregister_device(&tr->video_dev); tr 159 drivers/media/radio/radio-timb.c 
v4l2_device_unregister(&tr->v4l2_dev); tr 58 drivers/media/radio/radio-trust.c struct trust *tr = kzalloc(sizeof(*tr), GFP_KERNEL); tr 60 drivers/media/radio/radio-trust.c return tr ? &tr->isa : NULL; tr 67 drivers/media/radio/radio-trust.c #define TR_DELAY do { inb(tr->isa.io); inb(tr->isa.io); inb(tr->isa.io); } while (0) tr 68 drivers/media/radio/radio-trust.c #define TR_SET_SCL outb(tr->ioval |= 2, tr->isa.io) tr 69 drivers/media/radio/radio-trust.c #define TR_CLR_SCL outb(tr->ioval &= 0xfd, tr->isa.io) tr 70 drivers/media/radio/radio-trust.c #define TR_SET_SDA outb(tr->ioval |= 1, tr->isa.io) tr 71 drivers/media/radio/radio-trust.c #define TR_CLR_SDA outb(tr->ioval &= 0xfe, tr->isa.io) tr 73 drivers/media/radio/radio-trust.c static void write_i2c(struct trust *tr, int n, ...) tr 121 drivers/media/radio/radio-trust.c struct trust *tr = container_of(isa, struct trust, isa); tr 123 drivers/media/radio/radio-trust.c tr->ioval = (tr->ioval & 0xf7) | (mute << 3); tr 124 drivers/media/radio/radio-trust.c outb(tr->ioval, isa->io); tr 125 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, vol ^ 0x1f); tr 131 drivers/media/radio/radio-trust.c struct trust *tr = container_of(isa, struct trust, isa); tr 133 drivers/media/radio/radio-trust.c tr->ioval = (tr->ioval & 0xfb) | (!stereo << 2); tr 134 drivers/media/radio/radio-trust.c outb(tr->ioval, isa->io); tr 149 drivers/media/radio/radio-trust.c struct trust *tr = container_of(isa, struct trust, isa); tr 153 drivers/media/radio/radio-trust.c write_i2c(tr, 5, TSA6060T_ADDR, (freq << 1) | 1, tr 166 drivers/media/radio/radio-trust.c struct trust *tr = container_of(isa, struct trust, isa); tr 170 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, 0x60 | basstreble2chip[ctrl->val]); tr 173 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, 0x70 | basstreble2chip[ctrl->val]); tr 185 drivers/media/radio/radio-trust.c struct trust *tr = container_of(isa, struct trust, isa); tr 187 drivers/media/radio/radio-trust.c tr->ioval = 0xf; tr 188 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, 0x80); /* speaker att. LF = 0 dB */ tr 189 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, 0xa0); /* speaker att. RF = 0 dB */ tr 190 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, 0xc0); /* speaker att. LR = 0 dB */ tr 191 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, 0xe0); /* speaker att. 
RR = 0 dB */ tr 192 drivers/media/radio/radio-trust.c write_i2c(tr, 2, TDA7318_ADDR, 0x40); /* stereo 1 input, gain = 18.75 dB */ tr 536 drivers/mmc/core/mmc_test.c struct mmc_test_transfer_result *tr; tr 541 drivers/mmc/core/mmc_test.c tr = kmalloc(sizeof(*tr), GFP_KERNEL); tr 542 drivers/mmc/core/mmc_test.c if (!tr) tr 545 drivers/mmc/core/mmc_test.c tr->count = count; tr 546 drivers/mmc/core/mmc_test.c tr->sectors = sectors; tr 547 drivers/mmc/core/mmc_test.c tr->ts = ts; tr 548 drivers/mmc/core/mmc_test.c tr->rate = rate; tr 549 drivers/mmc/core/mmc_test.c tr->iops = iops; tr 551 drivers/mmc/core/mmc_test.c list_add_tail(&tr->link, &test->gr->tr_lst); tr 3020 drivers/mmc/core/mmc_test.c struct mmc_test_transfer_result *tr, *trs; tr 3025 drivers/mmc/core/mmc_test.c list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { tr 3026 drivers/mmc/core/mmc_test.c list_del(&tr->link); tr 3027 drivers/mmc/core/mmc_test.c kfree(tr); tr 3047 drivers/mmc/core/mmc_test.c struct mmc_test_transfer_result *tr; tr 3054 drivers/mmc/core/mmc_test.c list_for_each_entry(tr, &gr->tr_lst, link) { tr 3056 drivers/mmc/core/mmc_test.c tr->count, tr->sectors, tr 3057 drivers/mmc/core/mmc_test.c (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, tr 3058 drivers/mmc/core/mmc_test.c tr->rate, tr->iops / 100, tr->iops % 100); tr 1006 drivers/mtd/ftl.c static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 1030 drivers/mtd/ftl.c partition->mbd.tr = tr; tr 35 drivers/mtd/inftlcore.c static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 63 drivers/mtd/inftlcore.c inftl->mbd.tr = tr; tr 65 drivers/mtd/mtd_blkdevs.c static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr, tr 72 drivers/mtd/mtd_blkdevs.c block = blk_rq_pos(req) << 9 >> tr->blkshift; tr 73 drivers/mtd/mtd_blkdevs.c nsect = blk_rq_cur_bytes(req) >> tr->blkshift; tr 76 drivers/mtd/mtd_blkdevs.c if (tr->flush(dev)) tr 87 drivers/mtd/mtd_blkdevs.c if (tr->discard(dev, block, nsect)) tr 92 drivers/mtd/mtd_blkdevs.c for (; nsect > 0; nsect--, block++, buf += tr->blksize) { tr 93 drivers/mtd/mtd_blkdevs.c if (tr->readsect(dev, block, buf)) { tr 102 drivers/mtd/mtd_blkdevs.c if (!tr->writesect) tr 107 drivers/mtd/mtd_blkdevs.c for (; nsect > 0; nsect--, block++, buf += tr->blksize) { tr 108 drivers/mtd/mtd_blkdevs.c if (tr->writesect(dev, block, buf)) { tr 144 drivers/mtd/mtd_blkdevs.c struct mtd_blktrans_ops *tr = dev->tr; tr 153 drivers/mtd/mtd_blkdevs.c if (tr->background && !background_done) { tr 156 drivers/mtd/mtd_blkdevs.c tr->background(dev); tr 172 drivers/mtd/mtd_blkdevs.c res = do_blktrans_request(dev->tr, dev, req); tr 219 drivers/mtd/mtd_blkdevs.c __module_get(dev->tr->owner); tr 224 drivers/mtd/mtd_blkdevs.c if (dev->tr->open) { tr 225 drivers/mtd/mtd_blkdevs.c ret = dev->tr->open(dev); tr 243 drivers/mtd/mtd_blkdevs.c if (dev->tr->release) tr 244 drivers/mtd/mtd_blkdevs.c dev->tr->release(dev); tr 246 drivers/mtd/mtd_blkdevs.c module_put(dev->tr->owner); tr 268 drivers/mtd/mtd_blkdevs.c module_put(dev->tr->owner); tr 271 drivers/mtd/mtd_blkdevs.c if (dev->tr->release) tr 272 drivers/mtd/mtd_blkdevs.c dev->tr->release(dev); tr 294 drivers/mtd/mtd_blkdevs.c ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY; tr 317 drivers/mtd/mtd_blkdevs.c ret = dev->tr->flush ? 
dev->tr->flush(dev) : 0; tr 342 drivers/mtd/mtd_blkdevs.c struct mtd_blktrans_ops *tr = new->tr; tr 354 drivers/mtd/mtd_blkdevs.c list_for_each_entry(d, &tr->devs, list) { tr 382 drivers/mtd/mtd_blkdevs.c if (new->devnum > (MINORMASK >> tr->part_bits) || tr 383 drivers/mtd/mtd_blkdevs.c (tr->part_bits && new->devnum >= 27 * 26)) { tr 388 drivers/mtd/mtd_blkdevs.c list_add_tail(&new->list, &tr->devs); tr 394 drivers/mtd/mtd_blkdevs.c if (!tr->writesect) tr 399 drivers/mtd/mtd_blkdevs.c gd = alloc_disk(1 << tr->part_bits); tr 406 drivers/mtd/mtd_blkdevs.c gd->major = tr->major; tr 407 drivers/mtd/mtd_blkdevs.c gd->first_minor = (new->devnum) << tr->part_bits; tr 410 drivers/mtd/mtd_blkdevs.c if (tr->part_bits) tr 413 drivers/mtd/mtd_blkdevs.c "%s%c", tr->name, 'a' + new->devnum); tr 416 drivers/mtd/mtd_blkdevs.c "%s%c%c", tr->name, tr 421 drivers/mtd/mtd_blkdevs.c "%s%d", tr->name, new->devnum); tr 423 drivers/mtd/mtd_blkdevs.c set_capacity(gd, ((u64)new->size * tr->blksize) >> 9); tr 441 drivers/mtd/mtd_blkdevs.c if (tr->flush) tr 445 drivers/mtd/mtd_blkdevs.c blk_queue_logical_block_size(new->rq, tr->blksize); tr 450 drivers/mtd/mtd_blkdevs.c if (tr->discard) { tr 509 drivers/mtd/mtd_blkdevs.c if (old->tr->release) tr 510 drivers/mtd/mtd_blkdevs.c old->tr->release(old); tr 523 drivers/mtd/mtd_blkdevs.c struct mtd_blktrans_ops *tr; tr 526 drivers/mtd/mtd_blkdevs.c list_for_each_entry(tr, &blktrans_majors, list) tr 527 drivers/mtd/mtd_blkdevs.c list_for_each_entry_safe(dev, next, &tr->devs, list) tr 529 drivers/mtd/mtd_blkdevs.c tr->remove_dev(dev); tr 534 drivers/mtd/mtd_blkdevs.c struct mtd_blktrans_ops *tr; tr 539 drivers/mtd/mtd_blkdevs.c list_for_each_entry(tr, &blktrans_majors, list) tr 540 drivers/mtd/mtd_blkdevs.c tr->add_mtd(tr, mtd); tr 548 drivers/mtd/mtd_blkdevs.c int register_mtd_blktrans(struct mtd_blktrans_ops *tr) tr 562 drivers/mtd/mtd_blkdevs.c ret = register_blkdev(tr->major, tr->name); tr 565 drivers/mtd/mtd_blkdevs.c tr->name, tr->major, ret); tr 571 drivers/mtd/mtd_blkdevs.c tr->major = ret; tr 573 drivers/mtd/mtd_blkdevs.c tr->blkshift = ffs(tr->blksize) - 1; tr 575 drivers/mtd/mtd_blkdevs.c INIT_LIST_HEAD(&tr->devs); tr 576 drivers/mtd/mtd_blkdevs.c list_add(&tr->list, &blktrans_majors); tr 580 drivers/mtd/mtd_blkdevs.c tr->add_mtd(tr, mtd); tr 586 drivers/mtd/mtd_blkdevs.c int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) tr 593 drivers/mtd/mtd_blkdevs.c list_del(&tr->list); tr 595 drivers/mtd/mtd_blkdevs.c list_for_each_entry_safe(dev, next, &tr->devs, list) tr 596 drivers/mtd/mtd_blkdevs.c tr->remove_dev(dev); tr 598 drivers/mtd/mtd_blkdevs.c unregister_blkdev(tr->major, tr->name); tr 601 drivers/mtd/mtd_blkdevs.c BUG_ON(!list_empty(&tr->devs)); tr 305 drivers/mtd/mtdblock.c static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 316 drivers/mtd/mtdblock.c dev->mbd.tr = tr; tr 35 drivers/mtd/mtdblock_ro.c static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 46 drivers/mtd/mtdblock_ro.c dev->tr = tr; tr 1340 drivers/mtd/mtdswap.c static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 1440 drivers/mtd/mtdswap.c mbd_dev->tr = tr; tr 35 drivers/mtd/nftlcore.c static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 56 drivers/mtd/nftlcore.c nftl->mbd.tr = tr; tr 722 drivers/mtd/rfd_ftl.c static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 747 drivers/mtd/rfd_ftl.c part->mbd.tr = tr; tr 1118 drivers/mtd/sm_ftl.c static void 
sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 1168 drivers/mtd/sm_ftl.c trans->tr = tr; tr 284 drivers/mtd/ssfdc.c static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) tr 305 drivers/mtd/ssfdc.c ssfdc->mbd.tr = tr; tr 506 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 510 drivers/net/ethernet/toshiba/tc35815.c tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); tr 512 drivers/net/ethernet/toshiba/tc35815.c while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { tr 517 drivers/net/ethernet/toshiba/tc35815.c return tc_readl(&tr->MD_Data) & 0xffff; tr 523 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 527 drivers/net/ethernet/toshiba/tc35815.c tc_writel(val, &tr->MD_Data); tr 529 drivers/net/ethernet/toshiba/tc35815.c &tr->MD_CA); tr 531 drivers/net/ethernet/toshiba/tc35815.c while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { tr 549 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 553 drivers/net/ethernet/toshiba/tc35815.c reg = tc_readl(&tr->MAC_Ctl); tr 555 drivers/net/ethernet/toshiba/tc35815.c tc_writel(reg, &tr->MAC_Ctl); tr 560 drivers/net/ethernet/toshiba/tc35815.c tc_writel(reg, &tr->MAC_Ctl); tr 562 drivers/net/ethernet/toshiba/tc35815.c tc_writel(reg, &tr->MAC_Ctl); tr 575 drivers/net/ethernet/toshiba/tc35815.c tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, tr 576 drivers/net/ethernet/toshiba/tc35815.c &tr->Tx_Ctl); tr 727 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 731 drivers/net/ethernet/toshiba/tc35815.c while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) tr 735 drivers/net/ethernet/toshiba/tc35815.c tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl); tr 736 drivers/net/ethernet/toshiba/tc35815.c while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) tr 738 drivers/net/ethernet/toshiba/tc35815.c data = tc_readl(&tr->PROM_Data); tr 1180 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1186 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->Int_En); tr 1187 drivers/net/ethernet/toshiba/tc35815.c tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); tr 1194 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1198 drivers/net/ethernet/toshiba/tc35815.c dev->name, tc_readl(&tr->Tx_Stat)); tr 1306 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1315 drivers/net/ethernet/toshiba/tc35815.c tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); tr 1426 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1428 drivers/net/ethernet/toshiba/tc35815.c u32 dmactl = tc_readl(&tr->DMA_Ctl); tr 1432 drivers/net/ethernet/toshiba/tc35815.c tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); tr 1440 drivers/net/ethernet/toshiba/tc35815.c (void)tc_readl(&tr->Int_Src); /* flush */ tr 1619 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1628 drivers/net/ethernet/toshiba/tc35815.c status = tc_readl(&tr->Int_Src); tr 1632 drivers/net/ethernet/toshiba/tc35815.c &tr->Int_Src); /* write to clear */ tr 1637 drivers/net/ethernet/toshiba/tc35815.c &tr->Int_Src); tr 1643 drivers/net/ethernet/toshiba/tc35815.c status = tc_readl(&tr->Int_Src); tr 1650 drivers/net/ethernet/toshiba/tc35815.c tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); tr 1693 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1695 drivers/net/ethernet/toshiba/tc35815.c tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh); tr 1778 
drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1804 drivers/net/ethernet/toshiba/tc35815.c tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); tr 1846 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1850 drivers/net/ethernet/toshiba/tc35815.c dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt); tr 1858 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1864 drivers/net/ethernet/toshiba/tc35815.c saved_addr = tc_readl(&tr->CAM_Adr); tr 1871 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_index - 2, &tr->CAM_Adr); tr 1872 drivers/net/ethernet/toshiba/tc35815.c cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000; tr 1874 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_data, &tr->CAM_Data); tr 1876 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_index + 2, &tr->CAM_Adr); tr 1878 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_data, &tr->CAM_Data); tr 1881 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_index, &tr->CAM_Adr); tr 1883 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_data, &tr->CAM_Data); tr 1885 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_index + 4, &tr->CAM_Adr); tr 1886 drivers/net/ethernet/toshiba/tc35815.c cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff; tr 1888 drivers/net/ethernet/toshiba/tc35815.c tc_writel(cam_data, &tr->CAM_Data); tr 1891 drivers/net/ethernet/toshiba/tc35815.c tc_writel(saved_addr, &tr->CAM_Adr); tr 1905 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 1916 drivers/net/ethernet/toshiba/tc35815.c tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); tr 1921 drivers/net/ethernet/toshiba/tc35815.c tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); tr 1927 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->CAM_Ctl); tr 1936 drivers/net/ethernet/toshiba/tc35815.c tc_writel(ena_bits, &tr->CAM_Ena); tr 1937 drivers/net/ethernet/toshiba/tc35815.c tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); tr 1939 drivers/net/ethernet/toshiba/tc35815.c tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); tr 1940 drivers/net/ethernet/toshiba/tc35815.c tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); tr 2023 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 2027 drivers/net/ethernet/toshiba/tc35815.c tc_writel(MAC_Reset, &tr->MAC_Ctl); tr 2030 drivers/net/ethernet/toshiba/tc35815.c while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) { tr 2037 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->MAC_Ctl); tr 2040 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->DMA_Ctl); tr 2041 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->TxThrsh); tr 2042 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->TxPollCtr); tr 2043 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->RxFragSize); tr 2044 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->Int_En); tr 2045 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->FDA_Bas); tr 2046 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->FDA_Lim); tr 2047 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */ tr 2048 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->CAM_Ctl); tr 2049 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->Tx_Ctl); tr 2050 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->Rx_Ctl); tr 2051 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->CAM_Ena); tr 2052 
drivers/net/ethernet/toshiba/tc35815.c (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */ tr 2055 drivers/net/ethernet/toshiba/tc35815.c tc_writel(DMA_TestMode, &tr->DMA_Ctl); tr 2057 drivers/net/ethernet/toshiba/tc35815.c tc_writel(i, &tr->CAM_Adr); tr 2058 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->CAM_Data); tr 2060 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->DMA_Ctl); tr 2066 drivers/net/ethernet/toshiba/tc35815.c struct tc35815_regs __iomem *tr = tr 2074 drivers/net/ethernet/toshiba/tc35815.c tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); tr 2075 drivers/net/ethernet/toshiba/tc35815.c tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); tr 2079 drivers/net/ethernet/toshiba/tc35815.c tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); tr 2081 drivers/net/ethernet/toshiba/tc35815.c tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); tr 2082 drivers/net/ethernet/toshiba/tc35815.c tc_writel(0, &tr->TxPollCtr); /* Batch mode */ tr 2083 drivers/net/ethernet/toshiba/tc35815.c tc_writel(TX_THRESHOLD, &tr->TxThrsh); tr 2084 drivers/net/ethernet/toshiba/tc35815.c tc_writel(INT_EN_CMD, &tr->Int_En); tr 2087 drivers/net/ethernet/toshiba/tc35815.c tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas); tr 2089 drivers/net/ethernet/toshiba/tc35815.c &tr->FDA_Lim); tr 2095 drivers/net/ethernet/toshiba/tc35815.c tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */ tr 2096 drivers/net/ethernet/toshiba/tc35815.c tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ tr 2105 drivers/net/ethernet/toshiba/tc35815.c tc_writel(txctl, &tr->Tx_Ctl); tr 162 drivers/net/fddi/skfp/hwt.c u_short tr ; tr 167 drivers/net/fddi/skfp/hwt.c tr = (u_short)((inpd(ADDR(B2_TI_VAL))/200) & 0xffff) ; tr 171 drivers/net/fddi/skfp/hwt.c if ((tr > smc->hw.t_start) || (is & IS_TIMINT)) { tr 176 drivers/net/fddi/skfp/hwt.c smc->hw.t_stop = smc->hw.t_start - tr ; tr 2995 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct brcmf_trap_info tr; tr 3002 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr, tr 3014 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.type), le32_to_cpu(tr.epc), tr 3015 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr), tr 3016 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r13), le32_to_cpu(tr.r14), tr 3017 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.pc), sh->trap_addr, tr 3018 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r0), le32_to_cpu(tr.r1), tr 3019 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r2), le32_to_cpu(tr.r3), tr 3020 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), tr 3021 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); tr 3028 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.type), le32_to_cpu(tr.epc), tr 3029 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr), tr 3030 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r13), le32_to_cpu(tr.r14), tr 3031 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.pc), sh->trap_addr, tr 3032 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r0), le32_to_cpu(tr.r1), tr 3033 
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r2), le32_to_cpu(tr.r3), tr 3034 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), tr 3035 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); tr 815 drivers/net/wireless/marvell/mwl8k.c struct mwl8k_dma_data *tr; tr 818 drivers/net/wireless/marvell/mwl8k.c tr = (struct mwl8k_dma_data *)skb->data; tr 819 drivers/net/wireless/marvell/mwl8k.c hdrlen = ieee80211_hdrlen(tr->wh.frame_control); tr 821 drivers/net/wireless/marvell/mwl8k.c if (hdrlen != sizeof(tr->wh)) { tr 822 drivers/net/wireless/marvell/mwl8k.c if (ieee80211_is_data_qos(tr->wh.frame_control)) { tr 823 drivers/net/wireless/marvell/mwl8k.c memmove(tr->data - hdrlen, &tr->wh, hdrlen - 2); tr 824 drivers/net/wireless/marvell/mwl8k.c *((__le16 *)(tr->data - 2)) = qos; tr 826 drivers/net/wireless/marvell/mwl8k.c memmove(tr->data - hdrlen, &tr->wh, hdrlen); tr 830 drivers/net/wireless/marvell/mwl8k.c if (hdrlen != sizeof(*tr)) tr 831 drivers/net/wireless/marvell/mwl8k.c skb_pull(skb, sizeof(*tr) - hdrlen); tr 843 drivers/net/wireless/marvell/mwl8k.c struct mwl8k_dma_data *tr; tr 870 drivers/net/wireless/marvell/mwl8k.c reqd_hdrlen = sizeof(*tr) + head_pad; tr 878 drivers/net/wireless/marvell/mwl8k.c tr = (struct mwl8k_dma_data *)skb->data; tr 879 drivers/net/wireless/marvell/mwl8k.c if (wh != &tr->wh) tr 880 drivers/net/wireless/marvell/mwl8k.c memmove(&tr->wh, wh, hdrlen); tr 881 drivers/net/wireless/marvell/mwl8k.c if (hdrlen != sizeof(tr->wh)) tr 882 drivers/net/wireless/marvell/mwl8k.c memset(((void *)&tr->wh) + hdrlen, 0, sizeof(tr->wh) - hdrlen); tr 889 drivers/net/wireless/marvell/mwl8k.c tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad); tr 1392 drivers/net/wireless/marvell/mwl8k.c struct mwl8k_dma_data *tr; tr 1393 drivers/net/wireless/marvell/mwl8k.c tr = (struct mwl8k_dma_data *)skb->data; tr 1394 drivers/net/wireless/marvell/mwl8k.c memset((void *)&(tr->data), 0, 4); tr 95 drivers/rtc/rtc-stm32.c u16 tr; tr 274 drivers/rtc/rtc-stm32.c unsigned int tr, dr; tr 277 drivers/rtc/rtc-stm32.c tr = readl_relaxed(rtc->base + regs->tr); tr 280 drivers/rtc/rtc-stm32.c tm->tm_sec = (tr & STM32_RTC_TR_SEC) >> STM32_RTC_TR_SEC_SHIFT; tr 281 drivers/rtc/rtc-stm32.c tm->tm_min = (tr & STM32_RTC_TR_MIN) >> STM32_RTC_TR_MIN_SHIFT; tr 282 drivers/rtc/rtc-stm32.c tm->tm_hour = (tr & STM32_RTC_TR_HOUR) >> STM32_RTC_TR_HOUR_SHIFT; tr 300 drivers/rtc/rtc-stm32.c unsigned int tr, dr; tr 306 drivers/rtc/rtc-stm32.c tr = ((tm->tm_sec << STM32_RTC_TR_SEC_SHIFT) & STM32_RTC_TR_SEC) | tr 324 drivers/rtc/rtc-stm32.c writel_relaxed(tr, rtc->base + regs->tr); tr 435 drivers/rtc/rtc-stm32.c unsigned int tr = readl_relaxed(rtc->base + regs->tr); tr 440 drivers/rtc/rtc-stm32.c cur_sec = (tr & STM32_RTC_TR_SEC) >> STM32_RTC_TR_SEC_SHIFT; tr 441 drivers/rtc/rtc-stm32.c cur_min = (tr & STM32_RTC_TR_MIN) >> STM32_RTC_TR_MIN_SHIFT; tr 442 drivers/rtc/rtc-stm32.c cur_hour = (tr & STM32_RTC_TR_HOUR) >> STM32_RTC_TR_HOUR_SHIFT; tr 552 drivers/rtc/rtc-stm32.c .tr = 0x00, tr 574 drivers/rtc/rtc-stm32.c .tr = 0x00, tr 605 drivers/rtc/rtc-stm32.c .tr = 0x00, tr 642 drivers/scsi/mesh.c int v, tr; tr 663 drivers/scsi/mesh.c tr = (ms->clk_freq + 250000) / 500000; tr 669 drivers/scsi/mesh.c tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000; tr 676 drivers/scsi/mesh.c ms->conn_tgt, tr/10, tr%10); tr 173 drivers/staging/wilc1000/wilc_spi.c struct spi_transfer tr = { tr 183 
drivers/staging/wilc1000/wilc_spi.c tr.rx_buf = r_buffer; tr 190 drivers/staging/wilc1000/wilc_spi.c spi_message_add_tail(&tr, &msg); tr 214 drivers/staging/wilc1000/wilc_spi.c struct spi_transfer tr = { tr 225 drivers/staging/wilc1000/wilc_spi.c tr.tx_buf = t_buffer; tr 231 drivers/staging/wilc1000/wilc_spi.c spi_message_add_tail(&tr, &msg); tr 254 drivers/staging/wilc1000/wilc_spi.c struct spi_transfer tr = { tr 268 drivers/staging/wilc1000/wilc_spi.c spi_message_add_tail(&tr, &msg); tr 121 drivers/thermal/ti-soc-thermal/ti-thermal-common.c int id, tr, ret = 0; tr 126 drivers/thermal/ti-soc-thermal/ti-thermal-common.c ret = ti_bandgap_get_trend(bgp, id, &tr); tr 130 drivers/thermal/ti-soc-thermal/ti-thermal-common.c if (tr > 0) tr 132 drivers/thermal/ti-soc-thermal/ti-thermal-common.c else if (tr < 0) tr 1494 drivers/usb/gadget/udc/lpc32xx_udc.c u32 tr, bufferspace; tr 1517 drivers/usb/gadget/udc/lpc32xx_udc.c tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual, tr 1521 drivers/usb/gadget/udc/lpc32xx_udc.c if (tr < ep0->ep.maxpacket) { tr 1045 fs/gfs2/bmap.c struct gfs2_trans *tr = current->journal_info; tr 1052 fs/gfs2/bmap.c if (tr->tr_num_buf_new) tr 1080 fs/gfs2/bmap.c struct gfs2_trans *tr; tr 1128 fs/gfs2/bmap.c tr = current->journal_info; tr 1129 fs/gfs2/bmap.c if (tr->tr_num_buf_new) tr 1380 fs/gfs2/bmap.c struct gfs2_trans *tr; tr 1394 fs/gfs2/bmap.c tr = current->journal_info; tr 1395 fs/gfs2/bmap.c if (!test_bit(TR_TOUCHED, &tr->tr_flags)) tr 1500 fs/gfs2/bmap.c struct gfs2_trans *tr; tr 1573 fs/gfs2/bmap.c tr = current->journal_info; tr 1574 fs/gfs2/bmap.c if (tr->tr_num_buf_new + RES_STATFS + tr 2412 fs/gfs2/bmap.c struct gfs2_trans *tr; tr 2428 fs/gfs2/bmap.c tr = current->journal_info; tr 2429 fs/gfs2/bmap.c if (!test_bit(TR_TOUCHED, &tr->tr_flags)) tr 85 fs/gfs2/glops.c struct gfs2_trans tr; tr 87 fs/gfs2/glops.c memset(&tr, 0, sizeof(tr)); tr 88 fs/gfs2/glops.c INIT_LIST_HEAD(&tr.tr_buf); tr 89 fs/gfs2/glops.c INIT_LIST_HEAD(&tr.tr_databuf); tr 90 fs/gfs2/glops.c tr.tr_revokes = atomic_read(&gl->gl_ail_count); tr 92 fs/gfs2/glops.c if (!tr.tr_revokes) { tr 122 fs/gfs2/glops.c tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64)); tr 123 fs/gfs2/glops.c tr.tr_ip = _RET_IP_; tr 124 fs/gfs2/glops.c if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0) tr 127 fs/gfs2/glops.c current->journal_info = &tr; tr 129 fs/gfs2/glops.c __gfs2_ail_flush(gl, 0, tr.tr_revokes); tr 52 fs/gfs2/incore.h void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr); tr 53 fs/gfs2/incore.h void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr); tr 92 fs/gfs2/log.c struct gfs2_trans *tr, tr 102 fs/gfs2/log.c list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) { tr 105 fs/gfs2/log.c gfs2_assert(sdp, bd->bd_tr == tr); tr 114 fs/gfs2/log.c list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list); tr 123 fs/gfs2/log.c list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list); tr 151 fs/gfs2/log.c struct gfs2_trans *tr; tr 159 fs/gfs2/log.c list_for_each_entry_reverse(tr, head, tr_list) { tr 162 fs/gfs2/log.c if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw)) tr 196 fs/gfs2/log.c static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr, tr 202 fs/gfs2/log.c list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, tr 205 fs/gfs2/log.c gfs2_assert(sdp, bd->bd_tr == tr); tr 213 fs/gfs2/log.c list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list); tr 226 fs/gfs2/log.c struct gfs2_trans *tr, *s; tr 232 fs/gfs2/log.c 
list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) { tr 233 fs/gfs2/log.c gfs2_ail1_empty_one(sdp, tr, &withdraw); tr 234 fs/gfs2/log.c if (list_empty(&tr->tr_ail1_list) && oldest_tr) tr 235 fs/gfs2/log.c list_move(&tr->tr_list, &sdp->sd_ail2_list); tr 250 fs/gfs2/log.c struct gfs2_trans *tr; tr 255 fs/gfs2/log.c list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) { tr 256 fs/gfs2/log.c list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) { tr 277 fs/gfs2/log.c static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 279 fs/gfs2/log.c struct list_head *head = &tr->tr_ail2_list; tr 285 fs/gfs2/log.c gfs2_assert(sdp, bd->bd_tr == tr); tr 292 fs/gfs2/log.c struct gfs2_trans *tr, *safe; tr 299 fs/gfs2/log.c list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) { tr 300 fs/gfs2/log.c a = (old_tail <= tr->tr_first); tr 301 fs/gfs2/log.c b = (tr->tr_first < new_tail); tr 306 fs/gfs2/log.c gfs2_ail2_empty_one(sdp, tr); tr 307 fs/gfs2/log.c list_del(&tr->tr_list); tr 308 fs/gfs2/log.c gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list)); tr 309 fs/gfs2/log.c gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list)); tr 310 fs/gfs2/log.c kfree(tr); tr 461 fs/gfs2/log.c struct gfs2_trans *tr = sdp->sd_log_tr; tr 463 fs/gfs2/log.c if (tr) { tr 464 fs/gfs2/log.c mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm; tr 465 fs/gfs2/log.c dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm; tr 483 fs/gfs2/log.c struct gfs2_trans *tr; tr 491 fs/gfs2/log.c tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans, tr 493 fs/gfs2/log.c tail = tr->tr_first; tr 622 fs/gfs2/log.c struct gfs2_trans *tr; tr 629 fs/gfs2/log.c list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) { tr 630 fs/gfs2/log.c list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) { tr 653 fs/gfs2/log.c list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) { tr 654 fs/gfs2/log.c list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) { tr 794 fs/gfs2/log.c struct gfs2_trans *tr; tr 810 fs/gfs2/log.c tr = sdp->sd_log_tr; tr 811 fs/gfs2/log.c if (tr) { tr 813 fs/gfs2/log.c INIT_LIST_HEAD(&tr->tr_ail1_list); tr 814 fs/gfs2/log.c INIT_LIST_HEAD(&tr->tr_ail2_list); tr 815 fs/gfs2/log.c tr->tr_first = sdp->sd_log_flush_head; tr 817 fs/gfs2/log.c gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new); tr 826 fs/gfs2/log.c lops_before_commit(sdp, tr); tr 837 fs/gfs2/log.c lops_after_commit(sdp, tr); tr 845 fs/gfs2/log.c if (tr && !list_empty(&tr->tr_ail1_list)) { tr 846 fs/gfs2/log.c list_add(&tr->tr_list, &sdp->sd_ail1_list); tr 847 fs/gfs2/log.c tr = NULL; tr 875 fs/gfs2/log.c kfree(tr); tr 898 fs/gfs2/log.c static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 907 fs/gfs2/log.c gfs2_merge_trans(sdp->sd_log_tr, tr); tr 908 fs/gfs2/log.c } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { tr 909 fs/gfs2/log.c gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); tr 910 fs/gfs2/log.c sdp->sd_log_tr = tr; tr 911 fs/gfs2/log.c set_bit(TR_ATTACHED, &tr->tr_flags); tr 914 fs/gfs2/log.c sdp->sd_log_commited_revoke += tr->tr_num_revoke; tr 916 fs/gfs2/log.c maxres = sdp->sd_log_blks_reserved + tr->tr_reserved; tr 943 fs/gfs2/log.c void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 945 fs/gfs2/log.c log_refund(sdp, tr); tr 99 fs/gfs2/lops.c struct gfs2_trans *tr) tr 122 fs/gfs2/lops.c bd->bd_tr = tr; tr 123 fs/gfs2/lops.c list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list); tr 712 fs/gfs2/lops.c static 
void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 716 fs/gfs2/lops.c if (tr == NULL) tr 718 fs/gfs2/lops.c nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm; tr 719 fs/gfs2/lops.c gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0); tr 722 fs/gfs2/lops.c static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 727 fs/gfs2/lops.c if (tr == NULL) tr 730 fs/gfs2/lops.c head = &tr->tr_buf; tr 734 fs/gfs2/lops.c gfs2_unpin(sdp, bd->bd_bh, tr); tr 855 fs/gfs2/lops.c static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 895 fs/gfs2/lops.c static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 992 fs/gfs2/lops.c static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 996 fs/gfs2/lops.c if (tr == NULL) tr 998 fs/gfs2/lops.c nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm; tr 999 fs/gfs2/lops.c gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1); tr 1071 fs/gfs2/lops.c static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) tr 1076 fs/gfs2/lops.c if (tr == NULL) tr 1079 fs/gfs2/lops.c head = &tr->tr_databuf; tr 1083 fs/gfs2/lops.c gfs2_unpin(sdp, bd->bd_bh, tr); tr 47 fs/gfs2/lops.h struct gfs2_trans *tr) tr 52 fs/gfs2/lops.h gfs2_log_ops[x]->lo_before_commit(sdp, tr); tr 56 fs/gfs2/lops.h struct gfs2_trans *tr) tr 61 fs/gfs2/lops.h gfs2_log_ops[x]->lo_after_commit(sdp, tr); tr 291 fs/gfs2/meta_io.c struct gfs2_trans *tr = current->journal_info; tr 292 fs/gfs2/meta_io.c if (tr && test_bit(TR_TOUCHED, &tr->tr_flags)) tr 318 fs/gfs2/meta_io.c struct gfs2_trans *tr = current->journal_info; tr 319 fs/gfs2/meta_io.c if (tr && test_bit(TR_TOUCHED, &tr->tr_flags)) tr 334 fs/gfs2/meta_io.c struct gfs2_trans *tr = current->journal_info; tr 342 fs/gfs2/meta_io.c tr->tr_num_buf_rm++; tr 344 fs/gfs2/meta_io.c tr->tr_num_databuf_rm++; tr 345 fs/gfs2/meta_io.c set_bit(TR_TOUCHED, &tr->tr_flags); tr 31 fs/gfs2/trans.c struct gfs2_trans *tr; tr 40 fs/gfs2/trans.c tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS); tr 41 fs/gfs2/trans.c if (!tr) tr 44 fs/gfs2/trans.c tr->tr_ip = _RET_IP_; tr 45 fs/gfs2/trans.c tr->tr_blocks = blocks; tr 46 fs/gfs2/trans.c tr->tr_revokes = revokes; tr 47 fs/gfs2/trans.c tr->tr_reserved = 1; tr 48 fs/gfs2/trans.c set_bit(TR_ALLOCED, &tr->tr_flags); tr 50 fs/gfs2/trans.c tr->tr_reserved += 6 + blocks; tr 52 fs/gfs2/trans.c tr->tr_reserved += gfs2_struct2blk(sdp, revokes, tr 54 fs/gfs2/trans.c INIT_LIST_HEAD(&tr->tr_databuf); tr 55 fs/gfs2/trans.c INIT_LIST_HEAD(&tr->tr_buf); tr 59 fs/gfs2/trans.c error = gfs2_log_reserve(sdp, tr->tr_reserved); tr 63 fs/gfs2/trans.c current->journal_info = tr; tr 69 fs/gfs2/trans.c kfree(tr); tr 74 fs/gfs2/trans.c static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr) tr 76 fs/gfs2/trans.c fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip); tr 78 fs/gfs2/trans.c tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, tr 79 fs/gfs2/trans.c test_bit(TR_TOUCHED, &tr->tr_flags)); tr 81 fs/gfs2/trans.c tr->tr_num_buf_new, tr->tr_num_buf_rm, tr 82 fs/gfs2/trans.c tr->tr_num_databuf_new, tr->tr_num_databuf_rm, tr 83 fs/gfs2/trans.c tr->tr_num_revoke); tr 88 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; tr 90 fs/gfs2/trans.c int alloced = test_bit(TR_ALLOCED, &tr->tr_flags); tr 94 fs/gfs2/trans.c if (!test_bit(TR_TOUCHED, &tr->tr_flags)) { tr 95 fs/gfs2/trans.c gfs2_log_release(sdp, tr->tr_reserved); tr 97 fs/gfs2/trans.c kfree(tr); tr 103 
fs/gfs2/trans.c nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new; tr 104 fs/gfs2/trans.c nbuf -= tr->tr_num_buf_rm; tr 105 fs/gfs2/trans.c nbuf -= tr->tr_num_databuf_rm; tr 107 fs/gfs2/trans.c if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) && tr 108 fs/gfs2/trans.c (tr->tr_num_revoke <= tr->tr_revokes))) tr 109 fs/gfs2/trans.c gfs2_print_trans(sdp, tr); tr 111 fs/gfs2/trans.c gfs2_log_commit(sdp, tr); tr 112 fs/gfs2/trans.c if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags)) tr 113 fs/gfs2/trans.c kfree(tr); tr 152 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; tr 158 fs/gfs2/trans.c set_bit(TR_TOUCHED, &tr->tr_flags); tr 174 fs/gfs2/trans.c set_bit(TR_TOUCHED, &tr->tr_flags); tr 179 fs/gfs2/trans.c tr->tr_num_databuf_new++; tr 180 fs/gfs2/trans.c list_add_tail(&bd->bd_list, &tr->tr_databuf); tr 193 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; tr 198 fs/gfs2/trans.c set_bit(TR_TOUCHED, &tr->tr_flags); tr 216 fs/gfs2/trans.c set_bit(TR_TOUCHED, &tr->tr_flags); tr 235 fs/gfs2/trans.c list_add(&bd->bd_list, &tr->tr_buf); tr 236 fs/gfs2/trans.c tr->tr_num_buf_new++; tr 245 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; tr 249 fs/gfs2/trans.c set_bit(TR_TOUCHED, &tr->tr_flags); tr 250 fs/gfs2/trans.c tr->tr_num_revoke++; tr 256 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; tr 268 fs/gfs2/trans.c tr->tr_num_revoke--; tr 837 include/linux/kvm_host.h struct kvm_translation *tr); tr 20 include/linux/mtd/blktrans.h struct mtd_blktrans_ops *tr; tr 66 include/linux/mtd/blktrans.h void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd); tr 74 include/linux/mtd/blktrans.h extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr); tr 75 include/linux/mtd/blktrans.h extern int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr); tr 75 include/linux/trace_events.h struct trace_array *tr; tr 551 include/linux/trace_events.h int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); tr 77 include/uapi/scsi/scsi_bsg_ufs.h struct utp_upiu_query tr; tr 2201 kernel/sysctl.c const char *perm_tr, unsigned perm_tr_len, char *tr) tr 2239 kernel/sysctl.c if (tr && (len < *size)) tr 2240 kernel/sysctl.c *tr = *p; tr 1460 kernel/trace/blktrace.c static void blk_tracer_start(struct trace_array *tr) tr 1465 kernel/trace/blktrace.c static int blk_tracer_init(struct trace_array *tr) tr 1467 kernel/trace/blktrace.c blk_tr = tr; tr 1468 kernel/trace/blktrace.c blk_tracer_start(tr); tr 1472 kernel/trace/blktrace.c static void blk_tracer_stop(struct trace_array *tr) tr 1477 kernel/trace/blktrace.c static void blk_tracer_reset(struct trace_array *tr) tr 1479 kernel/trace/blktrace.c blk_tracer_stop(tr); tr 1507 kernel/trace/blktrace.c struct trace_array *tr = iter->tr; tr 1517 kernel/trace/blktrace.c long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE); tr 1576 kernel/trace/blktrace.c blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) tr 1581 kernel/trace/blktrace.c tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO; tr 1583 kernel/trace/blktrace.c tr->trace_flags |= TRACE_ITER_CONTEXT_INFO; tr 100 kernel/trace/ftrace.c struct trace_array *tr; tr 105 kernel/trace/ftrace.c tr = ops->private; tr 107 kernel/trace/ftrace.c return tr->function_pids != NULL; tr 147 kernel/trace/ftrace.c struct trace_array *tr = op->private; tr 149 kernel/trace/ftrace.c if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid)) tr 1029 kernel/trace/ftrace.c struct trace_array *tr; tr 1299 kernel/trace/ftrace.c static int 
ftrace_add_mod(struct trace_array *tr, tr 1304 kernel/trace/ftrace.c struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; tr 3061 kernel/trace/ftrace.c struct trace_array *tr; tr 3072 kernel/trace/ftrace.c struct trace_array *tr = iter->ops->private; tr 3083 kernel/trace/ftrace.c if (!tr) tr 3086 kernel/trace/ftrace.c func_probes = &tr->func_probes; tr 3203 kernel/trace/ftrace.c struct trace_array *tr = iter->tr; tr 3210 kernel/trace/ftrace.c if (iter->mod_list == &tr->mod_trace || tr 3211 kernel/trace/ftrace.c iter->mod_list == &tr->mod_notrace) { tr 3233 kernel/trace/ftrace.c if (!iter->tr) tr 3256 kernel/trace/ftrace.c struct trace_array *tr = iter->tr; tr 3259 kernel/trace/ftrace.c iter->mod_list == &tr->mod_trace || tr 3260 kernel/trace/ftrace.c iter->mod_list == &tr->mod_notrace) tr 3557 kernel/trace/ftrace.c struct trace_array *tr = ops->private; tr 3565 kernel/trace/ftrace.c if (tracing_check_open_get_tr(tr)) tr 3577 kernel/trace/ftrace.c iter->tr = tr; tr 3583 kernel/trace/ftrace.c mod_head = tr ? &tr->mod_notrace : NULL; tr 3586 kernel/trace/ftrace.c mod_head = tr ? &tr->mod_trace : NULL; tr 3631 kernel/trace/ftrace.c if (tr) tr 3632 kernel/trace/ftrace.c trace_array_put(tr); tr 3926 kernel/trace/ftrace.c static int cache_mod(struct trace_array *tr, tr 3930 kernel/trace/ftrace.c struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; tr 3963 kernel/trace/ftrace.c ret = ftrace_add_mod(tr, func, module, enable); tr 4047 kernel/trace/ftrace.c struct trace_array *tr; tr 4055 kernel/trace/ftrace.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 4056 kernel/trace/ftrace.c if (!list_empty(&tr->mod_trace)) tr 4057 kernel/trace/ftrace.c process_mod_list(&tr->mod_trace, tr->ops, mod, true); tr 4058 kernel/trace/ftrace.c if (!list_empty(&tr->mod_notrace)) tr 4059 kernel/trace/ftrace.c process_mod_list(&tr->mod_notrace, tr->ops, mod, false); tr 4073 kernel/trace/ftrace.c ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, tr 4095 kernel/trace/ftrace.c return cache_mod(tr, func_orig, module, enable); tr 4127 kernel/trace/ftrace.c probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); tr 4293 kernel/trace/ftrace.c probe_ops->free(probe_ops, probe->tr, 0, probe->data); tr 4310 kernel/trace/ftrace.c register_ftrace_function_probe(char *glob, struct trace_array *tr, tr 4324 kernel/trace/ftrace.c if (WARN_ON(!tr)) tr 4334 kernel/trace/ftrace.c list_for_each_entry(probe, &tr->func_probes, list) { tr 4338 kernel/trace/ftrace.c if (&probe->list == &tr->func_probes) { tr 4346 kernel/trace/ftrace.c probe->tr = tr; tr 4348 kernel/trace/ftrace.c list_add(&probe->list, &tr->func_probes); tr 4390 kernel/trace/ftrace.c ret = probe_ops->init(probe_ops, tr, tr 4395 kernel/trace/ftrace.c probe_ops->free(probe_ops, tr, tr 4446 kernel/trace/ftrace.c probe_ops->free(probe_ops, tr, entry->ip, probe->data); tr 4453 kernel/trace/ftrace.c unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, tr 4486 kernel/trace/ftrace.c list_for_each_entry(probe, &tr->func_probes, list) { tr 4490 kernel/trace/ftrace.c if (&probe->list == &tr->func_probes) tr 4563 kernel/trace/ftrace.c probe_ops->free(probe_ops, tr, entry->ip, probe->data); tr 4581 kernel/trace/ftrace.c void clear_ftrace_function_probes(struct trace_array *tr) tr 4585 kernel/trace/ftrace.c list_for_each_entry_safe(probe, n, &tr->func_probes, list) tr 4586 kernel/trace/ftrace.c unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); tr 4642 kernel/trace/ftrace.c 
struct trace_array *tr = iter->ops->private; tr 4665 kernel/trace/ftrace.c ret = p->func(tr, hash, func, command, next, enable); tr 5048 kernel/trace/ftrace.c if (iter->tr && !list_empty(&iter->tr->mod_trace)) tr 5064 kernel/trace/ftrace.c if (iter->tr) tr 5065 kernel/trace/ftrace.c trace_array_put(iter->tr); tr 5735 kernel/trace/ftrace.c struct trace_array *tr; tr 5738 kernel/trace/ftrace.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 5739 kernel/trace/ftrace.c if (!tr->ops || !tr->ops->func_hash) tr 5741 kernel/trace/ftrace.c mutex_lock(&tr->ops->func_hash->regex_lock); tr 5742 kernel/trace/ftrace.c clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); tr 5743 kernel/trace/ftrace.c clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); tr 5744 kernel/trace/ftrace.c mutex_unlock(&tr->ops->func_hash->regex_lock); tr 6089 kernel/trace/ftrace.c struct trace_array *tr; tr 6092 kernel/trace/ftrace.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 6093 kernel/trace/ftrace.c if (!tr->ops || !tr->ops->func_hash) tr 6095 kernel/trace/ftrace.c mutex_lock(&tr->ops->func_hash->regex_lock); tr 6096 kernel/trace/ftrace.c clear_func_from_hash(func, tr->ops->func_hash->filter_hash); tr 6097 kernel/trace/ftrace.c clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); tr 6098 kernel/trace/ftrace.c mutex_unlock(&tr->ops->func_hash->regex_lock); tr 6241 kernel/trace/ftrace.c void ftrace_init_trace_array(struct trace_array *tr) tr 6243 kernel/trace/ftrace.c INIT_LIST_HEAD(&tr->func_probes); tr 6244 kernel/trace/ftrace.c INIT_LIST_HEAD(&tr->mod_trace); tr 6245 kernel/trace/ftrace.c INIT_LIST_HEAD(&tr->mod_notrace); tr 6276 kernel/trace/ftrace.c __init void ftrace_init_global_array_ops(struct trace_array *tr) tr 6278 kernel/trace/ftrace.c tr->ops = &global_ops; tr 6279 kernel/trace/ftrace.c tr->ops->private = tr; tr 6280 kernel/trace/ftrace.c ftrace_init_trace_array(tr); tr 6283 kernel/trace/ftrace.c void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) tr 6286 kernel/trace/ftrace.c if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { tr 6287 kernel/trace/ftrace.c if (WARN_ON(tr->ops->func != ftrace_stub)) tr 6289 kernel/trace/ftrace.c tr->ops->func); tr 6291 kernel/trace/ftrace.c tr->ops->func = func; tr 6292 kernel/trace/ftrace.c tr->ops->private = tr; tr 6295 kernel/trace/ftrace.c void ftrace_reset_array_ops(struct trace_array *tr) tr 6297 kernel/trace/ftrace.c tr->ops->func = ftrace_stub; tr 6426 kernel/trace/ftrace.c struct trace_array *tr = data; tr 6429 kernel/trace/ftrace.c pid_list = rcu_dereference_sched(tr->function_pids); tr 6431 kernel/trace/ftrace.c this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, tr 6441 kernel/trace/ftrace.c struct trace_array *tr = data; tr 6443 kernel/trace/ftrace.c pid_list = rcu_dereference_sched(tr->function_pids); tr 6451 kernel/trace/ftrace.c struct trace_array *tr = data; tr 6453 kernel/trace/ftrace.c pid_list = rcu_dereference_sched(tr->function_pids); tr 6457 kernel/trace/ftrace.c void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) tr 6461 kernel/trace/ftrace.c tr); tr 6463 kernel/trace/ftrace.c tr); tr 6466 kernel/trace/ftrace.c tr); tr 6468 kernel/trace/ftrace.c tr); tr 6472 kernel/trace/ftrace.c static void clear_ftrace_pids(struct trace_array *tr) tr 6477 kernel/trace/ftrace.c pid_list = rcu_dereference_protected(tr->function_pids, tr 6482 kernel/trace/ftrace.c unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); tr 6485 kernel/trace/ftrace.c per_cpu_ptr(tr->trace_buffer.data, 
cpu)->ftrace_ignore_pid = false; tr 6487 kernel/trace/ftrace.c rcu_assign_pointer(tr->function_pids, NULL); tr 6495 kernel/trace/ftrace.c void ftrace_clear_pids(struct trace_array *tr) tr 6499 kernel/trace/ftrace.c clear_ftrace_pids(tr); tr 6504 kernel/trace/ftrace.c static void ftrace_pid_reset(struct trace_array *tr) tr 6507 kernel/trace/ftrace.c clear_ftrace_pids(tr); tr 6522 kernel/trace/ftrace.c struct trace_array *tr = m->private; tr 6527 kernel/trace/ftrace.c pid_list = rcu_dereference_sched(tr->function_pids); tr 6537 kernel/trace/ftrace.c struct trace_array *tr = m->private; tr 6538 kernel/trace/ftrace.c struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); tr 6574 kernel/trace/ftrace.c struct trace_array *tr = inode->i_private; tr 6578 kernel/trace/ftrace.c ret = tracing_check_open_get_tr(tr); tr 6584 kernel/trace/ftrace.c ftrace_pid_reset(tr); tr 6588 kernel/trace/ftrace.c trace_array_put(tr); tr 6592 kernel/trace/ftrace.c m->private = tr; tr 6600 kernel/trace/ftrace.c struct trace_array *tr = data; tr 6607 kernel/trace/ftrace.c pid_list = rcu_dereference_protected(tr->function_pids, tr 6610 kernel/trace/ftrace.c this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, tr 6619 kernel/trace/ftrace.c struct trace_array *tr = m->private; tr 6629 kernel/trace/ftrace.c filtered_pids = rcu_dereference_protected(tr->function_pids, tr 6636 kernel/trace/ftrace.c rcu_assign_pointer(tr->function_pids, pid_list); tr 6643 kernel/trace/ftrace.c register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); tr 6651 kernel/trace/ftrace.c on_each_cpu(ignore_task_cpu, tr, 1); tr 6667 kernel/trace/ftrace.c struct trace_array *tr = inode->i_private; tr 6669 kernel/trace/ftrace.c trace_array_put(tr); tr 6682 kernel/trace/ftrace.c void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) tr 6685 kernel/trace/ftrace.c tr, &ftrace_pid_fops); tr 6688 kernel/trace/ftrace.c void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, tr 6692 kernel/trace/ftrace.c WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); tr 83 kernel/trace/trace.c dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) tr 162 kernel/trace/trace.c static int tracing_set_tracer(struct trace_array *tr, const char *buf); tr 279 kernel/trace/trace.c struct trace_array *tr; tr 283 kernel/trace/trace.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 284 kernel/trace/trace.c if (tr == this_tr) { tr 285 kernel/trace/trace.c tr->ref++; tr 308 kernel/trace/trace.c int tracing_check_open_get_tr(struct trace_array *tr) tr 319 kernel/trace/trace.c if (tr && trace_array_get(tr) < 0) tr 738 kernel/trace/trace.c static inline void ftrace_trace_stack(struct trace_array *tr, tr 749 kernel/trace/trace.c static inline void ftrace_trace_stack(struct trace_array *tr, tr 782 kernel/trace/trace.c void tracer_tracing_on(struct trace_array *tr) tr 784 kernel/trace/trace.c if (tr->trace_buffer.buffer) tr 785 kernel/trace/trace.c ring_buffer_record_on(tr->trace_buffer.buffer); tr 794 kernel/trace/trace.c tr->buffer_disabled = 0; tr 919 kernel/trace/trace.c void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data) tr 921 kernel/trace/trace.c struct tracer *tracer = tr->current_trace; tr 930 kernel/trace/trace.c if (!tr->allocated_snapshot) { tr 945 kernel/trace/trace.c update_max_tr(tr, current, smp_processor_id(), cond_data); tr 949 kernel/trace/trace.c void tracing_snapshot_instance(struct trace_array *tr) tr 951 kernel/trace/trace.c tracing_snapshot_instance_cond(tr, 
NULL); tr 970 kernel/trace/trace.c struct trace_array *tr = &global_trace; tr 972 kernel/trace/trace.c tracing_snapshot_instance(tr); tr 989 kernel/trace/trace.c void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) tr 991 kernel/trace/trace.c tracing_snapshot_instance_cond(tr, cond_data); tr 1009 kernel/trace/trace.c void *tracing_cond_snapshot_data(struct trace_array *tr) tr 1013 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 1015 kernel/trace/trace.c if (tr->cond_snapshot) tr 1016 kernel/trace/trace.c cond_data = tr->cond_snapshot->cond_data; tr 1018 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 1028 kernel/trace/trace.c int tracing_alloc_snapshot_instance(struct trace_array *tr) tr 1032 kernel/trace/trace.c if (!tr->allocated_snapshot) { tr 1035 kernel/trace/trace.c ret = resize_buffer_duplicate_size(&tr->max_buffer, tr 1036 kernel/trace/trace.c &tr->trace_buffer, RING_BUFFER_ALL_CPUS); tr 1040 kernel/trace/trace.c tr->allocated_snapshot = true; tr 1046 kernel/trace/trace.c static void free_snapshot(struct trace_array *tr) tr 1053 kernel/trace/trace.c ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); tr 1054 kernel/trace/trace.c set_buffer_entries(&tr->max_buffer, 1); tr 1055 kernel/trace/trace.c tracing_reset_online_cpus(&tr->max_buffer); tr 1056 kernel/trace/trace.c tr->allocated_snapshot = false; tr 1071 kernel/trace/trace.c struct trace_array *tr = &global_trace; tr 1074 kernel/trace/trace.c ret = tracing_alloc_snapshot_instance(tr); tr 1117 kernel/trace/trace.c int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, tr 1132 kernel/trace/trace.c ret = tracing_alloc_snapshot_instance(tr); tr 1136 kernel/trace/trace.c if (tr->current_trace->use_max_tr) { tr 1149 kernel/trace/trace.c if (tr->cond_snapshot) { tr 1154 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 1155 kernel/trace/trace.c tr->cond_snapshot = cond_snapshot; tr 1156 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 1179 kernel/trace/trace.c int tracing_snapshot_cond_disable(struct trace_array *tr) tr 1183 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 1185 kernel/trace/trace.c if (!tr->cond_snapshot) tr 1188 kernel/trace/trace.c kfree(tr->cond_snapshot); tr 1189 kernel/trace/trace.c tr->cond_snapshot = NULL; tr 1192 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 1203 kernel/trace/trace.c void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) tr 1220 kernel/trace/trace.c void *tracing_cond_snapshot_data(struct trace_array *tr) tr 1225 kernel/trace/trace.c int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) tr 1230 kernel/trace/trace.c int tracing_snapshot_cond_disable(struct trace_array *tr) tr 1237 kernel/trace/trace.c void tracer_tracing_off(struct trace_array *tr) tr 1239 kernel/trace/trace.c if (tr->trace_buffer.buffer) tr 1240 kernel/trace/trace.c ring_buffer_record_off(tr->trace_buffer.buffer); tr 1249 kernel/trace/trace.c tr->buffer_disabled = 1; tr 1280 kernel/trace/trace.c bool tracer_tracing_is_on(struct trace_array *tr) tr 1282 kernel/trace/trace.c if (tr->trace_buffer.buffer) tr 1283 kernel/trace/trace.c return ring_buffer_record_is_on(tr->trace_buffer.buffer); tr 1284 kernel/trace/trace.c return !tr->buffer_disabled; tr 1362 kernel/trace/trace.c bool trace_clock_in_ns(struct trace_array *tr) tr 1364 kernel/trace/trace.c if (trace_clocks[tr->clock_id].in_ns) tr 1508 kernel/trace/trace.c __update_max_tr(struct trace_array *tr, struct task_struct *tsk, 
int cpu) tr 1510 kernel/trace/trace.c struct trace_buffer *trace_buf = &tr->trace_buffer; tr 1511 kernel/trace/trace.c struct trace_buffer *max_buf = &tr->max_buffer; tr 1518 kernel/trace/trace.c max_data->saved_latency = tr->max_latency; tr 1552 kernel/trace/trace.c update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, tr 1555 kernel/trace/trace.c if (tr->stop_count) tr 1560 kernel/trace/trace.c if (!tr->allocated_snapshot) { tr 1562 kernel/trace/trace.c WARN_ON_ONCE(tr->current_trace != &nop_trace); tr 1566 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 1569 kernel/trace/trace.c if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) tr 1570 kernel/trace/trace.c ring_buffer_record_on(tr->max_buffer.buffer); tr 1572 kernel/trace/trace.c ring_buffer_record_off(tr->max_buffer.buffer); tr 1575 kernel/trace/trace.c if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) tr 1578 kernel/trace/trace.c swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); tr 1580 kernel/trace/trace.c __update_max_tr(tr, tsk, cpu); tr 1583 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 1595 kernel/trace/trace.c update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) tr 1599 kernel/trace/trace.c if (tr->stop_count) tr 1603 kernel/trace/trace.c if (!tr->allocated_snapshot) { tr 1605 kernel/trace/trace.c WARN_ON_ONCE(tr->current_trace != &nop_trace); tr 1609 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 1611 kernel/trace/trace.c ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); tr 1620 kernel/trace/trace.c trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, tr 1626 kernel/trace/trace.c __update_max_tr(tr, tsk, cpu); tr 1627 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 1666 kernel/trace/trace.c struct trace_array *tr = &global_trace; tr 1667 kernel/trace/trace.c struct tracer *saved_tracer = tr->current_trace; tr 1688 kernel/trace/trace.c tracing_reset_online_cpus(&tr->trace_buffer); tr 1690 kernel/trace/trace.c tr->current_trace = type; tr 1696 kernel/trace/trace.c ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, tr 1698 kernel/trace/trace.c tr->allocated_snapshot = true; tr 1704 kernel/trace/trace.c ret = type->selftest(type, tr); tr 1706 kernel/trace/trace.c tr->current_trace = saved_tracer; tr 1714 kernel/trace/trace.c tracing_reset_online_cpus(&tr->trace_buffer); tr 1718 kernel/trace/trace.c tr->allocated_snapshot = false; tr 1722 kernel/trace/trace.c ring_buffer_resize(tr->max_buffer.buffer, 1, tr 1784 kernel/trace/trace.c static void add_tracer_options(struct trace_array *tr, struct tracer *t); tr 1923 kernel/trace/trace.c struct trace_array *tr; tr 1925 kernel/trace/trace.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 1926 kernel/trace/trace.c if (!tr->clear_trace) tr 1928 kernel/trace/trace.c tr->clear_trace = false; tr 1929 kernel/trace/trace.c tracing_reset_online_cpus(&tr->trace_buffer); tr 1931 kernel/trace/trace.c tracing_reset_online_cpus(&tr->max_buffer); tr 2054 kernel/trace/trace.c static void tracing_start_tr(struct trace_array *tr) tr 2063 kernel/trace/trace.c if (tr->flags & TRACE_ARRAY_FL_GLOBAL) tr 2066 kernel/trace/trace.c raw_spin_lock_irqsave(&tr->start_lock, flags); tr 2068 kernel/trace/trace.c if (--tr->stop_count) { tr 2069 kernel/trace/trace.c if (tr->stop_count < 0) { tr 2072 kernel/trace/trace.c tr->stop_count = 0; tr 2077 kernel/trace/trace.c buffer = tr->trace_buffer.buffer; tr 2082 kernel/trace/trace.c 
raw_spin_unlock_irqrestore(&tr->start_lock, flags); tr 2119 kernel/trace/trace.c static void tracing_stop_tr(struct trace_array *tr) tr 2125 kernel/trace/trace.c if (tr->flags & TRACE_ARRAY_FL_GLOBAL) tr 2128 kernel/trace/trace.c raw_spin_lock_irqsave(&tr->start_lock, flags); tr 2129 kernel/trace/trace.c if (tr->stop_count++) tr 2132 kernel/trace/trace.c buffer = tr->trace_buffer.buffer; tr 2137 kernel/trace/trace.c raw_spin_unlock_irqrestore(&tr->start_lock, flags); tr 2493 kernel/trace/trace.c *current_rb = trace_file->tr->trace_buffer.buffer; tr 2609 kernel/trace/trace.c void trace_buffer_unlock_commit_regs(struct trace_array *tr, tr 2623 kernel/trace/trace.c ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); tr 2761 kernel/trace/trace.c trace_function(struct trace_array *tr, tr 2766 kernel/trace/trace.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 2878 kernel/trace/trace.c static inline void ftrace_trace_stack(struct trace_array *tr, tr 2883 kernel/trace/trace.c if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) tr 2889 kernel/trace/trace.c void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, tr 2892 kernel/trace/trace.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 3110 kernel/trace/trace.c struct trace_array *tr = &global_trace; tr 3138 kernel/trace/trace.c buffer = tr->trace_buffer.buffer; tr 3150 kernel/trace/trace.c ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); tr 3220 kernel/trace/trace.c int trace_array_vprintk(struct trace_array *tr, tr 3223 kernel/trace/trace.c return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); tr 3227 kernel/trace/trace.c int trace_array_printk(struct trace_array *tr, tr 3237 kernel/trace/trace.c ret = trace_array_vprintk(tr, ip, fmt, ap); tr 3445 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3458 kernel/trace/trace.c if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) tr 3459 kernel/trace/trace.c *iter->trace = *tr->current_trace; tr 3558 kernel/trace/trace.c unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) tr 3562 kernel/trace/trace.c if (!tr) tr 3563 kernel/trace/trace.c tr = &global_trace; tr 3565 kernel/trace/trace.c get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu); tr 3570 kernel/trace/trace.c unsigned long trace_total_entries(struct trace_array *tr) tr 3574 kernel/trace/trace.c if (!tr) tr 3575 kernel/trace/trace.c tr = &global_trace; tr 3577 kernel/trace/trace.c get_total_entries(&tr->trace_buffer, &total, &entries); tr 3699 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3701 kernel/trace/trace.c if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) tr 3725 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3727 kernel/trace/trace.c unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); tr 3737 kernel/trace/trace.c if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { tr 3757 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3764 kernel/trace/trace.c if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) tr 3782 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3790 kernel/trace/trace.c if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { tr 3812 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3819 kernel/trace/trace.c if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { tr 3868 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3869 kernel/trace/trace.c unsigned long trace_flags = tr->trace_flags; tr 3915 kernel/trace/trace.c struct trace_array *tr 
= iter->tr; tr 3924 kernel/trace/trace.c if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) tr 3931 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 3932 kernel/trace/trace.c unsigned long trace_flags = tr->trace_flags; tr 3992 kernel/trace/trace.c if (iter->tr->allocated_snapshot) tr 4014 kernel/trace/trace.c if (iter->tr) { tr 4073 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4098 kernel/trace/trace.c *iter->trace = *tr->current_trace; tr 4103 kernel/trace/trace.c iter->tr = tr; tr 4107 kernel/trace/trace.c if (tr->current_trace->print_max || snapshot) tr 4108 kernel/trace/trace.c iter->trace_buffer = &tr->max_buffer; tr 4111 kernel/trace/trace.c iter->trace_buffer = &tr->trace_buffer; tr 4126 kernel/trace/trace.c if (trace_clocks[tr->clock_id].in_ns) tr 4131 kernel/trace/trace.c tracing_stop_tr(tr); tr 4190 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4193 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 4204 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4210 kernel/trace/trace.c trace_array_put(tr); tr 4228 kernel/trace/trace.c tracing_start_tr(tr); tr 4230 kernel/trace/trace.c __trace_array_put(tr); tr 4245 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4247 kernel/trace/trace.c trace_array_put(tr); tr 4253 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4255 kernel/trace/trace.c trace_array_put(tr); tr 4262 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4266 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 4273 kernel/trace/trace.c struct trace_buffer *trace_buf = &tr->trace_buffer; tr 4276 kernel/trace/trace.c if (tr->current_trace->print_max) tr 4277 kernel/trace/trace.c trace_buf = &tr->max_buffer; tr 4290 kernel/trace/trace.c else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) tr 4295 kernel/trace/trace.c trace_array_put(tr); tr 4306 kernel/trace/trace.c trace_ok_for_array(struct tracer *t, struct trace_array *tr) tr 4308 kernel/trace/trace.c return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; tr 4313 kernel/trace/trace.c get_tracer_for_array(struct trace_array *tr, struct tracer *t) tr 4315 kernel/trace/trace.c while (t && !trace_ok_for_array(t, tr)) tr 4324 kernel/trace/trace.c struct trace_array *tr = m->private; tr 4330 kernel/trace/trace.c t = get_tracer_for_array(tr, t->next); tr 4337 kernel/trace/trace.c struct trace_array *tr = m->private; tr 4343 kernel/trace/trace.c t = get_tracer_for_array(tr, trace_types); tr 4380 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4384 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 4390 kernel/trace/trace.c trace_array_put(tr); tr 4395 kernel/trace/trace.c m->private = tr; tr 4402 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4404 kernel/trace/trace.c trace_array_put(tr); tr 4446 kernel/trace/trace.c struct trace_array *tr = file_inode(filp)->i_private; tr 4451 kernel/trace/trace.c cpumask_pr_args(tr->tracing_cpumask)) + 1; tr 4457 kernel/trace/trace.c cpumask_pr_args(tr->tracing_cpumask)); tr 4474 kernel/trace/trace.c struct trace_array *tr = file_inode(filp)->i_private; tr 4486 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 4492 kernel/trace/trace.c if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && tr 4494 kernel/trace/trace.c atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); tr 4495 kernel/trace/trace.c ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); tr 4497 kernel/trace/trace.c if 
(!cpumask_test_cpu(cpu, tr->tracing_cpumask) && tr 4499 kernel/trace/trace.c atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); tr 4500 kernel/trace/trace.c ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); tr 4503 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 4506 kernel/trace/trace.c cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); tr 4528 kernel/trace/trace.c struct trace_array *tr = m->private; tr 4533 kernel/trace/trace.c tracer_flags = tr->current_trace->flags->val; tr 4534 kernel/trace/trace.c trace_opts = tr->current_trace->flags->opts; tr 4537 kernel/trace/trace.c if (tr->trace_flags & (1 << i)) tr 4554 kernel/trace/trace.c static int __set_tracer_option(struct trace_array *tr, tr 4561 kernel/trace/trace.c ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); tr 4573 kernel/trace/trace.c static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) tr 4575 kernel/trace/trace.c struct tracer *trace = tr->current_trace; tr 4584 kernel/trace/trace.c return __set_tracer_option(tr, trace->flags, opts, neg); tr 4599 kernel/trace/trace.c int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) tr 4606 kernel/trace/trace.c if (!!(tr->trace_flags & mask) == !!enabled) tr 4610 kernel/trace/trace.c if (tr->current_trace->flag_changed) tr 4611 kernel/trace/trace.c if (tr->current_trace->flag_changed(tr, mask, !!enabled)) tr 4615 kernel/trace/trace.c tr->trace_flags |= mask; tr 4617 kernel/trace/trace.c tr->trace_flags &= ~mask; tr 4628 kernel/trace/trace.c tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; tr 4636 kernel/trace/trace.c trace_event_follow_fork(tr, enabled); tr 4639 kernel/trace/trace.c ftrace_pid_follow_fork(tr, enabled); tr 4642 kernel/trace/trace.c ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); tr 4644 kernel/trace/trace.c ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); tr 4656 kernel/trace/trace.c static int trace_set_options(struct trace_array *tr, char *option) tr 4678 kernel/trace/trace.c ret = set_tracer_option(tr, cmp, neg); tr 4680 kernel/trace/trace.c ret = set_tracer_flag(tr, 1 << ret, !neg); tr 4720 kernel/trace/trace.c struct trace_array *tr = m->private; tr 4732 kernel/trace/trace.c ret = trace_set_options(tr, buf); tr 4743 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 4746 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 4752 kernel/trace/trace.c trace_array_put(tr); tr 5437 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 5442 kernel/trace/trace.c r = sprintf(buf, "%s\n", tr->current_trace->name); tr 5448 kernel/trace/trace.c int tracer_init(struct tracer *t, struct trace_array *tr) tr 5450 kernel/trace/trace.c tracing_reset_online_cpus(&tr->trace_buffer); tr 5451 kernel/trace/trace.c return t->init(tr); tr 5490 kernel/trace/trace.c static int __tracing_resize_ring_buffer(struct trace_array *tr, tr 5503 kernel/trace/trace.c if (!tr->trace_buffer.buffer) tr 5506 kernel/trace/trace.c ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); tr 5511 kernel/trace/trace.c if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || tr 5512 kernel/trace/trace.c !tr->current_trace->use_max_tr) tr 5515 kernel/trace/trace.c ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); tr 5517 kernel/trace/trace.c int r = resize_buffer_duplicate_size(&tr->trace_buffer, tr 5518 kernel/trace/trace.c &tr->trace_buffer, cpu); tr 5541 kernel/trace/trace.c set_buffer_entries(&tr->max_buffer, size); tr 5543 kernel/trace/trace.c 
per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; tr 5549 kernel/trace/trace.c set_buffer_entries(&tr->trace_buffer, size); tr 5551 kernel/trace/trace.c per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; tr 5556 kernel/trace/trace.c static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, tr 5571 kernel/trace/trace.c ret = __tracing_resize_ring_buffer(tr, size, cpu_id); tr 5608 kernel/trace/trace.c create_trace_option_files(struct trace_array *tr, struct tracer *tracer); tr 5614 kernel/trace/trace.c static void tracing_set_nop(struct trace_array *tr) tr 5616 kernel/trace/trace.c if (tr->current_trace == &nop_trace) tr 5619 kernel/trace/trace.c tr->current_trace->enabled--; tr 5621 kernel/trace/trace.c if (tr->current_trace->reset) tr 5622 kernel/trace/trace.c tr->current_trace->reset(tr); tr 5624 kernel/trace/trace.c tr->current_trace = &nop_trace; tr 5627 kernel/trace/trace.c static void add_tracer_options(struct trace_array *tr, struct tracer *t) tr 5630 kernel/trace/trace.c if (!tr->dir) tr 5633 kernel/trace/trace.c create_trace_option_files(tr, t); tr 5636 kernel/trace/trace.c static int tracing_set_tracer(struct trace_array *tr, const char *buf) tr 5647 kernel/trace/trace.c ret = __tracing_resize_ring_buffer(tr, trace_buf_size, tr 5662 kernel/trace/trace.c if (t == tr->current_trace) tr 5667 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 5668 kernel/trace/trace.c if (tr->cond_snapshot) tr 5670 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 5683 kernel/trace/trace.c if (!trace_ok_for_array(t, tr)) { tr 5689 kernel/trace/trace.c if (tr->current_trace->ref) { tr 5696 kernel/trace/trace.c tr->current_trace->enabled--; tr 5698 kernel/trace/trace.c if (tr->current_trace->reset) tr 5699 kernel/trace/trace.c tr->current_trace->reset(tr); tr 5702 kernel/trace/trace.c tr->current_trace = &nop_trace; tr 5705 kernel/trace/trace.c had_max_tr = tr->allocated_snapshot; tr 5716 kernel/trace/trace.c free_snapshot(tr); tr 5722 kernel/trace/trace.c ret = tracing_alloc_snapshot_instance(tr); tr 5729 kernel/trace/trace.c ret = tracer_init(t, tr); tr 5734 kernel/trace/trace.c tr->current_trace = t; tr 5735 kernel/trace/trace.c tr->current_trace->enabled++; tr 5736 kernel/trace/trace.c trace_branch_enable(tr); tr 5747 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 5767 kernel/trace/trace.c err = tracing_set_tracer(tr, buf); tr 5817 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 5825 kernel/trace/trace.c if (tr->current_trace->update_thresh) { tr 5826 kernel/trace/trace.c ret = tr->current_trace->update_thresh(tr); tr 5858 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 5862 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 5872 kernel/trace/trace.c __trace_array_put(tr); tr 5877 kernel/trace/trace.c iter->trace = tr->current_trace; tr 5887 kernel/trace/trace.c if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) tr 5891 kernel/trace/trace.c if (trace_clocks[tr->clock_id].in_ns) tr 5894 kernel/trace/trace.c iter->tr = tr; tr 5895 kernel/trace/trace.c iter->trace_buffer = &tr->trace_buffer; tr 5905 kernel/trace/trace.c tr->current_trace->ref++; tr 5912 kernel/trace/trace.c __trace_array_put(tr); tr 5920 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 5924 kernel/trace/trace.c tr->current_trace->ref--; tr 5935 kernel/trace/trace.c trace_array_put(tr); tr 5943 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 5949 kernel/trace/trace.c if (tr->trace_flags & TRACE_ITER_BLOCK) tr 
5988 kernel/trace/trace.c if (!tracer_tracing_is_on(iter->tr) && iter->pos) tr 6254 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 6272 kernel/trace/trace.c size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; tr 6273 kernel/trace/trace.c if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { tr 6289 kernel/trace/trace.c r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); tr 6302 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 6316 kernel/trace/trace.c ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); tr 6329 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 6336 kernel/trace/trace.c size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; tr 6366 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 6369 kernel/trace/trace.c if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) tr 6370 kernel/trace/trace.c tracer_tracing_off(tr); tr 6372 kernel/trace/trace.c tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); tr 6374 kernel/trace/trace.c trace_array_put(tr); tr 6383 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 6400 kernel/trace/trace.c if (!(tr->trace_flags & TRACE_ITER_MARKERS)) tr 6415 kernel/trace/trace.c buffer = tr->trace_buffer.buffer; tr 6434 kernel/trace/trace.c if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { tr 6437 kernel/trace/trace.c tt = event_triggers_call(tr->trace_marker_file, entry, event); tr 6449 kernel/trace/trace.c event_triggers_post_call(tr->trace_marker_file, tt); tr 6464 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 6478 kernel/trace/trace.c if (!(tr->trace_flags & TRACE_ITER_MARKERS)) tr 6495 kernel/trace/trace.c buffer = tr->trace_buffer.buffer; tr 6522 kernel/trace/trace.c struct trace_array *tr = m->private; tr 6528 kernel/trace/trace.c i == tr->clock_id ? "[" : "", trace_clocks[i].name, tr 6529 kernel/trace/trace.c i == tr->clock_id ? 
"]" : ""); tr 6535 kernel/trace/trace.c int tracing_set_clock(struct trace_array *tr, const char *clockstr) tr 6548 kernel/trace/trace.c tr->clock_id = i; tr 6550 kernel/trace/trace.c ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); tr 6556 kernel/trace/trace.c tracing_reset_online_cpus(&tr->trace_buffer); tr 6559 kernel/trace/trace.c if (tr->max_buffer.buffer) tr 6560 kernel/trace/trace.c ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); tr 6561 kernel/trace/trace.c tracing_reset_online_cpus(&tr->max_buffer); tr 6573 kernel/trace/trace.c struct trace_array *tr = m->private; tr 6588 kernel/trace/trace.c ret = tracing_set_clock(tr, clockstr); tr 6599 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 6602 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 6608 kernel/trace/trace.c trace_array_put(tr); tr 6615 kernel/trace/trace.c struct trace_array *tr = m->private; tr 6619 kernel/trace/trace.c if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) tr 6631 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 6634 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 6640 kernel/trace/trace.c trace_array_put(tr); tr 6645 kernel/trace/trace.c int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) tr 6651 kernel/trace/trace.c if (abs && tr->time_stamp_abs_ref++) tr 6655 kernel/trace/trace.c if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { tr 6660 kernel/trace/trace.c if (--tr->time_stamp_abs_ref) tr 6664 kernel/trace/trace.c ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); tr 6667 kernel/trace/trace.c if (tr->max_buffer.buffer) tr 6668 kernel/trace/trace.c ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); tr 6686 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 6691 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 6712 kernel/trace/trace.c iter->tr = tr; tr 6713 kernel/trace/trace.c iter->trace_buffer = &tr->max_buffer; tr 6720 kernel/trace/trace.c trace_array_put(tr); tr 6731 kernel/trace/trace.c struct trace_array *tr = iter->tr; tr 6745 kernel/trace/trace.c if (tr->current_trace->use_max_tr) { tr 6750 kernel/trace/trace.c arch_spin_lock(&tr->max_lock); tr 6751 kernel/trace/trace.c if (tr->cond_snapshot) tr 6753 kernel/trace/trace.c arch_spin_unlock(&tr->max_lock); tr 6763 kernel/trace/trace.c if (tr->allocated_snapshot) tr 6764 kernel/trace/trace.c free_snapshot(tr); tr 6774 kernel/trace/trace.c if (tr->allocated_snapshot) tr 6775 kernel/trace/trace.c ret = resize_buffer_duplicate_size(&tr->max_buffer, tr 6776 kernel/trace/trace.c &tr->trace_buffer, iter->cpu_file); tr 6778 kernel/trace/trace.c ret = tracing_alloc_snapshot_instance(tr); tr 6784 kernel/trace/trace.c update_max_tr(tr, current, smp_processor_id(), NULL); tr 6786 kernel/trace/trace.c update_max_tr_single(tr, current, iter->cpu_file); tr 6790 kernel/trace/trace.c if (tr->allocated_snapshot) { tr 6792 kernel/trace/trace.c tracing_reset_online_cpus(&tr->max_buffer); tr 6794 kernel/trace/trace.c tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); tr 6851 kernel/trace/trace.c info->iter.trace_buffer = &info->iter.tr->max_buffer; tr 6981 kernel/trace/trace.c static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) tr 6985 kernel/trace/trace.c if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { tr 6989 kernel/trace/trace.c tr->n_err_log_entries++; tr 6994 kernel/trace/trace.c err = list_first_entry(&tr->err_log, struct tracing_log_err, list); tr 7053 kernel/trace/trace.c 
void tracing_log_err(struct trace_array *tr, tr 7059 kernel/trace/trace.c if (!tr) tr 7060 kernel/trace/trace.c tr = &global_trace; tr 7063 kernel/trace/trace.c err = get_tracing_log_err(tr); tr 7077 kernel/trace/trace.c list_add_tail(&err->list, &tr->err_log); tr 7081 kernel/trace/trace.c static void clear_tracing_err_log(struct trace_array *tr) tr 7086 kernel/trace/trace.c list_for_each_entry_safe(err, next, &tr->err_log, list) { tr 7091 kernel/trace/trace.c tr->n_err_log_entries = 0; tr 7097 kernel/trace/trace.c struct trace_array *tr = m->private; tr 7101 kernel/trace/trace.c return seq_list_start(&tr->err_log, *pos); tr 7106 kernel/trace/trace.c struct trace_array *tr = m->private; tr 7108 kernel/trace/trace.c return seq_list_next(v, &tr->err_log, pos); tr 7155 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 7158 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 7164 kernel/trace/trace.c clear_tracing_err_log(tr); tr 7170 kernel/trace/trace.c m->private = tr; tr 7172 kernel/trace/trace.c trace_array_put(tr); tr 7187 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 7189 kernel/trace/trace.c trace_array_put(tr); tr 7207 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 7211 kernel/trace/trace.c ret = tracing_check_open_get_tr(tr); tr 7217 kernel/trace/trace.c trace_array_put(tr); tr 7223 kernel/trace/trace.c info->iter.tr = tr; tr 7225 kernel/trace/trace.c info->iter.trace = tr->current_trace; tr 7226 kernel/trace/trace.c info->iter.trace_buffer = &tr->trace_buffer; tr 7233 kernel/trace/trace.c tr->current_trace->ref++; tr 7239 kernel/trace/trace.c trace_array_put(tr); tr 7266 kernel/trace/trace.c if (iter->snapshot && iter->tr->current_trace->use_max_tr) tr 7334 kernel/trace/trace.c iter->tr->current_trace->ref--; tr 7336 kernel/trace/trace.c __trace_array_put(iter->tr); tr 7426 kernel/trace/trace.c if (iter->snapshot && iter->tr->current_trace->use_max_tr) tr 7500 kernel/trace/trace.c ret = wait_on_pipe(iter, iter->tr->buffer_percent); tr 7528 kernel/trace/trace.c struct trace_array *tr = inode->i_private; tr 7529 kernel/trace/trace.c struct trace_buffer *trace_buf = &tr->trace_buffer; tr 7554 kernel/trace/trace.c if (trace_clocks[tr->clock_id].in_ns) { tr 7620 kernel/trace/trace.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 7623 kernel/trace/trace.c tracing_snapshot_instance(tr); tr 7628 kernel/trace/trace.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 7645 kernel/trace/trace.c tracing_snapshot_instance(tr); tr 7671 kernel/trace/trace.c ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, tr 7687 kernel/trace/trace.c ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, tr 7715 kernel/trace/trace.c ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, tr 7723 kernel/trace/trace.c if (!tr) tr 7733 kernel/trace/trace.c return unregister_ftrace_function_probe_func(glob+1, tr, ops); tr 7752 kernel/trace/trace.c ret = tracing_alloc_snapshot_instance(tr); tr 7756 kernel/trace/trace.c ret = register_ftrace_function_probe(glob, tr, ops, count); tr 7775 kernel/trace/trace.c static struct dentry *tracing_get_dentry(struct trace_array *tr) tr 7777 kernel/trace/trace.c if (WARN_ON(!tr->dir)) tr 7781 kernel/trace/trace.c if (tr->flags & TRACE_ARRAY_FL_GLOBAL) tr 7785 kernel/trace/trace.c return tr->dir; tr 7788 kernel/trace/trace.c static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) tr 7792 kernel/trace/trace.c if 
(tr->percpu_dir) tr 7793 kernel/trace/trace.c return tr->percpu_dir; tr 7795 kernel/trace/trace.c d_tracer = tracing_get_dentry(tr); tr 7799 kernel/trace/trace.c tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); tr 7801 kernel/trace/trace.c WARN_ONCE(!tr->percpu_dir, tr 7804 kernel/trace/trace.c return tr->percpu_dir; tr 7819 kernel/trace/trace.c tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) tr 7821 kernel/trace/trace.c struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); tr 7837 kernel/trace/trace.c tr, cpu, &tracing_pipe_fops); tr 7841 kernel/trace/trace.c tr, cpu, &tracing_fops); tr 7844 kernel/trace/trace.c tr, cpu, &tracing_buffers_fops); tr 7847 kernel/trace/trace.c tr, cpu, &tracing_stats_fops); tr 7850 kernel/trace/trace.c tr, cpu, &tracing_entries_fops); tr 7854 kernel/trace/trace.c tr, cpu, &snapshot_fops); tr 7857 kernel/trace/trace.c tr, cpu, &snapshot_raw_fops); tr 7898 kernel/trace/trace.c ret = __set_tracer_option(topt->tr, topt->flags, tr 7956 kernel/trace/trace.c struct trace_array *tr; tr 7960 kernel/trace/trace.c get_tr_index(tr_index, &tr, &index); tr 7962 kernel/trace/trace.c if (tr->trace_flags & (1 << index)) tr 7975 kernel/trace/trace.c struct trace_array *tr; tr 7980 kernel/trace/trace.c get_tr_index(tr_index, &tr, &index); tr 7991 kernel/trace/trace.c ret = set_tracer_flag(tr, 1 << index, val); tr 8026 kernel/trace/trace.c static struct dentry *trace_options_init_dentry(struct trace_array *tr) tr 8030 kernel/trace/trace.c if (tr->options) tr 8031 kernel/trace/trace.c return tr->options; tr 8033 kernel/trace/trace.c d_tracer = tracing_get_dentry(tr); tr 8037 kernel/trace/trace.c tr->options = tracefs_create_dir("options", d_tracer); tr 8038 kernel/trace/trace.c if (!tr->options) { tr 8043 kernel/trace/trace.c return tr->options; tr 8047 kernel/trace/trace.c create_trace_option_file(struct trace_array *tr, tr 8054 kernel/trace/trace.c t_options = trace_options_init_dentry(tr); tr 8060 kernel/trace/trace.c topt->tr = tr; tr 8068 kernel/trace/trace.c create_trace_option_files(struct trace_array *tr, struct tracer *tracer) tr 8089 kernel/trace/trace.c if (!trace_ok_for_array(tracer, tr)) tr 8092 kernel/trace/trace.c for (i = 0; i < tr->nr_topts; i++) { tr 8094 kernel/trace/trace.c if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) tr 8107 kernel/trace/trace.c tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), tr 8114 kernel/trace/trace.c tr->topts = tr_topts; tr 8115 kernel/trace/trace.c tr->topts[tr->nr_topts].tracer = tracer; tr 8116 kernel/trace/trace.c tr->topts[tr->nr_topts].topts = topts; tr 8117 kernel/trace/trace.c tr->nr_topts++; tr 8120 kernel/trace/trace.c create_trace_option_file(tr, &topts[cnt], flags, tr 8129 kernel/trace/trace.c create_trace_option_core_file(struct trace_array *tr, tr 8134 kernel/trace/trace.c t_options = trace_options_init_dentry(tr); tr 8139 kernel/trace/trace.c (void *)&tr->trace_flags_index[index], tr 8143 kernel/trace/trace.c static void create_trace_options_dir(struct trace_array *tr) tr 8146 kernel/trace/trace.c bool top_level = tr == &global_trace; tr 8149 kernel/trace/trace.c t_options = trace_options_init_dentry(tr); tr 8156 kernel/trace/trace.c create_trace_option_core_file(tr, trace_options[i], i); tr 8164 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 8168 kernel/trace/trace.c r = tracer_tracing_is_on(tr); tr 8178 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 8179 kernel/trace/trace.c struct ring_buffer *buffer = 
tr->trace_buffer.buffer; tr 8189 kernel/trace/trace.c if (!!val == tracer_tracing_is_on(tr)) { tr 8192 kernel/trace/trace.c tracer_tracing_on(tr); tr 8193 kernel/trace/trace.c if (tr->current_trace->start) tr 8194 kernel/trace/trace.c tr->current_trace->start(tr); tr 8196 kernel/trace/trace.c tracer_tracing_off(tr); tr 8197 kernel/trace/trace.c if (tr->current_trace->stop) tr 8198 kernel/trace/trace.c tr->current_trace->stop(tr); tr 8220 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 8224 kernel/trace/trace.c r = tr->buffer_percent; tr 8234 kernel/trace/trace.c struct trace_array *tr = filp->private_data; tr 8248 kernel/trace/trace.c tr->buffer_percent = val; tr 8266 kernel/trace/trace.c init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); tr 8269 kernel/trace/trace.c allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) tr 8273 kernel/trace/trace.c rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; tr 8275 kernel/trace/trace.c buf->tr = tr; tr 8289 kernel/trace/trace.c set_buffer_entries(&tr->trace_buffer, tr 8290 kernel/trace/trace.c ring_buffer_size(tr->trace_buffer.buffer, 0)); tr 8295 kernel/trace/trace.c static int allocate_trace_buffers(struct trace_array *tr, int size) tr 8299 kernel/trace/trace.c ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); tr 8304 kernel/trace/trace.c ret = allocate_trace_buffer(tr, &tr->max_buffer, tr 8307 kernel/trace/trace.c ring_buffer_free(tr->trace_buffer.buffer); tr 8308 kernel/trace/trace.c tr->trace_buffer.buffer = NULL; tr 8309 kernel/trace/trace.c free_percpu(tr->trace_buffer.data); tr 8310 kernel/trace/trace.c tr->trace_buffer.data = NULL; tr 8313 kernel/trace/trace.c tr->allocated_snapshot = allocate_snapshot; tr 8347 kernel/trace/trace.c static void free_trace_buffers(struct trace_array *tr) tr 8349 kernel/trace/trace.c if (!tr) tr 8352 kernel/trace/trace.c free_trace_buffer(&tr->trace_buffer); tr 8355 kernel/trace/trace.c free_trace_buffer(&tr->max_buffer); tr 8359 kernel/trace/trace.c static void init_trace_flags_index(struct trace_array *tr) tr 8365 kernel/trace/trace.c tr->trace_flags_index[i] = i; tr 8368 kernel/trace/trace.c static void __update_tracer_options(struct trace_array *tr) tr 8373 kernel/trace/trace.c add_tracer_options(tr, t); tr 8376 kernel/trace/trace.c static void update_tracer_options(struct trace_array *tr) tr 8379 kernel/trace/trace.c __update_tracer_options(tr); tr 8385 kernel/trace/trace.c struct trace_array *tr; tr 8392 kernel/trace/trace.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 8393 kernel/trace/trace.c if (tr->name && strcmp(tr->name, name) == 0) tr 8398 kernel/trace/trace.c tr = kzalloc(sizeof(*tr), GFP_KERNEL); tr 8399 kernel/trace/trace.c if (!tr) tr 8402 kernel/trace/trace.c tr->name = kstrdup(name, GFP_KERNEL); tr 8403 kernel/trace/trace.c if (!tr->name) tr 8406 kernel/trace/trace.c if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) tr 8409 kernel/trace/trace.c tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; tr 8411 kernel/trace/trace.c cpumask_copy(tr->tracing_cpumask, cpu_all_mask); tr 8413 kernel/trace/trace.c raw_spin_lock_init(&tr->start_lock); tr 8415 kernel/trace/trace.c tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; tr 8417 kernel/trace/trace.c tr->current_trace = &nop_trace; tr 8419 kernel/trace/trace.c INIT_LIST_HEAD(&tr->systems); tr 8420 kernel/trace/trace.c INIT_LIST_HEAD(&tr->events); tr 8421 kernel/trace/trace.c INIT_LIST_HEAD(&tr->hist_vars); tr 
8422 kernel/trace/trace.c INIT_LIST_HEAD(&tr->err_log); tr 8424 kernel/trace/trace.c if (allocate_trace_buffers(tr, trace_buf_size) < 0) tr 8427 kernel/trace/trace.c tr->dir = tracefs_create_dir(name, trace_instance_dir); tr 8428 kernel/trace/trace.c if (!tr->dir) tr 8431 kernel/trace/trace.c ret = event_trace_add_tracer(tr->dir, tr); tr 8433 kernel/trace/trace.c tracefs_remove_recursive(tr->dir); tr 8437 kernel/trace/trace.c ftrace_init_trace_array(tr); tr 8439 kernel/trace/trace.c init_tracer_tracefs(tr, tr->dir); tr 8440 kernel/trace/trace.c init_trace_flags_index(tr); tr 8441 kernel/trace/trace.c __update_tracer_options(tr); tr 8443 kernel/trace/trace.c list_add(&tr->list, &ftrace_trace_arrays); tr 8448 kernel/trace/trace.c return tr; tr 8451 kernel/trace/trace.c free_trace_buffers(tr); tr 8452 kernel/trace/trace.c free_cpumask_var(tr->tracing_cpumask); tr 8453 kernel/trace/trace.c kfree(tr->name); tr 8454 kernel/trace/trace.c kfree(tr); tr 8469 kernel/trace/trace.c static int __remove_instance(struct trace_array *tr) tr 8473 kernel/trace/trace.c if (tr->ref || (tr->current_trace && tr->current_trace->ref)) tr 8476 kernel/trace/trace.c list_del(&tr->list); tr 8481 kernel/trace/trace.c set_tracer_flag(tr, 1 << i, 0); tr 8484 kernel/trace/trace.c tracing_set_nop(tr); tr 8485 kernel/trace/trace.c clear_ftrace_function_probes(tr); tr 8486 kernel/trace/trace.c event_trace_del_tracer(tr); tr 8487 kernel/trace/trace.c ftrace_clear_pids(tr); tr 8488 kernel/trace/trace.c ftrace_destroy_function_files(tr); tr 8489 kernel/trace/trace.c tracefs_remove_recursive(tr->dir); tr 8490 kernel/trace/trace.c free_trace_buffers(tr); tr 8492 kernel/trace/trace.c for (i = 0; i < tr->nr_topts; i++) { tr 8493 kernel/trace/trace.c kfree(tr->topts[i].topts); tr 8495 kernel/trace/trace.c kfree(tr->topts); tr 8497 kernel/trace/trace.c free_cpumask_var(tr->tracing_cpumask); tr 8498 kernel/trace/trace.c kfree(tr->name); tr 8499 kernel/trace/trace.c kfree(tr); tr 8500 kernel/trace/trace.c tr = NULL; tr 8505 kernel/trace/trace.c int trace_array_destroy(struct trace_array *tr) tr 8509 kernel/trace/trace.c if (!tr) tr 8515 kernel/trace/trace.c ret = __remove_instance(tr); tr 8526 kernel/trace/trace.c struct trace_array *tr; tr 8533 kernel/trace/trace.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 8534 kernel/trace/trace.c if (tr->name && strcmp(tr->name, name) == 0) { tr 8535 kernel/trace/trace.c ret = __remove_instance(tr); tr 8556 kernel/trace/trace.c init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) tr 8562 kernel/trace/trace.c tr, &show_traces_fops); tr 8565 kernel/trace/trace.c tr, &set_tracer_fops); tr 8568 kernel/trace/trace.c tr, &tracing_cpumask_fops); tr 8571 kernel/trace/trace.c tr, &tracing_iter_fops); tr 8574 kernel/trace/trace.c tr, &tracing_fops); tr 8577 kernel/trace/trace.c tr, &tracing_pipe_fops); tr 8580 kernel/trace/trace.c tr, &tracing_entries_fops); tr 8583 kernel/trace/trace.c tr, &tracing_total_entries_fops); tr 8586 kernel/trace/trace.c tr, &tracing_free_buffer_fops); tr 8589 kernel/trace/trace.c tr, &tracing_mark_fops); tr 8591 kernel/trace/trace.c file = __find_event_file(tr, "ftrace", "print"); tr 8595 kernel/trace/trace.c tr->trace_marker_file = file; tr 8598 kernel/trace/trace.c tr, &tracing_mark_raw_fops); tr 8600 kernel/trace/trace.c trace_create_file("trace_clock", 0644, d_tracer, tr, tr 8604 kernel/trace/trace.c tr, &rb_simple_fops); tr 8606 kernel/trace/trace.c trace_create_file("timestamp_mode", 0444, d_tracer, tr, tr 8609 kernel/trace/trace.c 
tr->buffer_percent = 50; tr 8612 kernel/trace/trace.c tr, &buffer_percent_fops); tr 8614 kernel/trace/trace.c create_trace_options_dir(tr); tr 8618 kernel/trace/trace.c &tr->max_latency, &tracing_max_lat_fops); tr 8621 kernel/trace/trace.c if (ftrace_create_function_files(tr, d_tracer)) tr 8626 kernel/trace/trace.c tr, &snapshot_fops); tr 8630 kernel/trace/trace.c tr, &tracing_err_log_fops); tr 8633 kernel/trace/trace.c tracing_init_tracefs_percpu(tr, cpu); tr 8635 kernel/trace/trace.c ftrace_init_tracefs(tr, d_tracer); tr 8669 kernel/trace/trace.c struct trace_array *tr = &global_trace; tr 8677 kernel/trace/trace.c if (tr->dir) tr 8691 kernel/trace/trace.c tr->dir = debugfs_create_automount("tracing", NULL, tr 8901 kernel/trace/trace.c iter->tr = &global_trace; tr 8902 kernel/trace/trace.c iter->trace = iter->tr->current_trace; tr 8914 kernel/trace/trace.c if (trace_clocks[iter->tr->clock_id].in_ns) tr 8923 kernel/trace/trace.c struct trace_array *tr = &global_trace; tr 8954 kernel/trace/trace.c old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; tr 8957 kernel/trace/trace.c tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; tr 9016 kernel/trace/trace.c tr->trace_flags |= old_userobj; tr 177 kernel/trace/trace.h struct trace_array *tr; tr 196 kernel/trace/trace.h typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); tr 339 kernel/trace/trace.h extern int trace_array_get(struct trace_array *tr); tr 340 kernel/trace/trace.h extern void trace_array_put(struct trace_array *tr); tr 341 kernel/trace/trace.h extern int tracing_check_open_get_tr(struct trace_array *tr); tr 343 kernel/trace/trace.h extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs); tr 344 kernel/trace/trace.h extern int tracing_set_clock(struct trace_array *tr, const char *clockstr); tr 346 kernel/trace/trace.h extern bool trace_clock_in_ns(struct trace_array *tr); tr 354 kernel/trace/trace.h struct trace_array *tr; tr 359 kernel/trace/trace.h tr = list_entry(ftrace_trace_arrays.prev, tr 360 kernel/trace/trace.h typeof(*tr), list); tr 361 kernel/trace/trace.h WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); tr 362 kernel/trace/trace.h return tr; tr 442 kernel/trace/trace.h struct trace_array *tr; tr 468 kernel/trace/trace.h int (*init)(struct trace_array *tr); tr 469 kernel/trace/trace.h void (*reset)(struct trace_array *tr); tr 470 kernel/trace/trace.h void (*start)(struct trace_array *tr); tr 471 kernel/trace/trace.h void (*stop)(struct trace_array *tr); tr 472 kernel/trace/trace.h int (*update_thresh)(struct trace_array *tr); tr 488 kernel/trace/trace.h struct trace_array *tr); tr 493 kernel/trace/trace.h int (*set_flag)(struct trace_array *tr, tr 496 kernel/trace/trace.h int (*flag_changed)(struct trace_array *tr, tr 679 kernel/trace/trace.h int tracer_init(struct tracer *t, struct trace_array *tr); tr 687 kernel/trace/trace.h bool tracer_tracing_is_on(struct trace_array *tr); tr 688 kernel/trace/trace.h void tracer_tracing_on(struct trace_array *tr); tr 689 kernel/trace/trace.h void tracer_tracing_off(struct trace_array *tr); tr 707 kernel/trace/trace.h struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, tr 724 kernel/trace/trace.h unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu); tr 725 kernel/trace/trace.h unsigned long trace_total_entries(struct trace_array *tr); tr 727 kernel/trace/trace.h void trace_function(struct trace_array *tr, tr 731 kernel/trace/trace.h void trace_graph_function(struct trace_array *tr, tr 742 kernel/trace/trace.h void 
set_graph_array(struct trace_array *tr); tr 783 kernel/trace/trace.h void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, tr 785 kernel/trace/trace.h void update_max_tr_single(struct trace_array *tr, tr 790 kernel/trace/trace.h void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, tr 793 kernel/trace/trace.h static inline void __trace_stack(struct trace_array *tr, unsigned long flags, tr 803 kernel/trace/trace.h extern void trace_event_follow_fork(struct trace_array *tr, bool enable); tr 807 kernel/trace/trace.h void ftrace_init_trace_array(struct trace_array *tr); tr 809 kernel/trace/trace.h static inline void ftrace_init_trace_array(struct trace_array *tr) { } tr 821 kernel/trace/trace.h struct trace_array *tr); tr 823 kernel/trace/trace.h struct trace_array *tr); tr 825 kernel/trace/trace.h struct trace_array *tr); tr 827 kernel/trace/trace.h struct trace_array *tr); tr 829 kernel/trace/trace.h struct trace_array *tr); tr 831 kernel/trace/trace.h struct trace_array *tr); tr 833 kernel/trace/trace.h struct trace_array *tr); tr 835 kernel/trace/trace.h struct trace_array *tr); tr 854 kernel/trace/trace.h trace_array_vprintk(struct trace_array *tr, tr 856 kernel/trace/trace.h int trace_array_printk(struct trace_array *tr, tr 927 kernel/trace/trace.h extern int __trace_graph_entry(struct trace_array *tr, tr 930 kernel/trace/trace.h extern void __trace_graph_return(struct trace_array *tr, tr 1053 kernel/trace/trace.h int (*func)(struct trace_array *tr, tr 1059 kernel/trace/trace.h static inline int ftrace_trace_task(struct trace_array *tr) tr 1061 kernel/trace/trace.h return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid); tr 1064 kernel/trace/trace.h int ftrace_create_function_files(struct trace_array *tr, tr 1066 kernel/trace/trace.h void ftrace_destroy_function_files(struct trace_array *tr); tr 1067 kernel/trace/trace.h void ftrace_init_global_array_ops(struct trace_array *tr); tr 1068 kernel/trace/trace.h void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func); tr 1069 kernel/trace/trace.h void ftrace_reset_array_ops(struct trace_array *tr); tr 1070 kernel/trace/trace.h void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); tr 1071 kernel/trace/trace.h void ftrace_init_tracefs_toplevel(struct trace_array *tr, tr 1073 kernel/trace/trace.h void ftrace_clear_pids(struct trace_array *tr); tr 1075 kernel/trace/trace.h void ftrace_pid_follow_fork(struct trace_array *tr, bool enable); tr 1077 kernel/trace/trace.h static inline int ftrace_trace_task(struct trace_array *tr) tr 1083 kernel/trace/trace.h ftrace_create_function_files(struct trace_array *tr, tr 1088 kernel/trace/trace.h static inline void ftrace_destroy_function_files(struct trace_array *tr) { } tr 1090 kernel/trace/trace.h ftrace_init_global_array_ops(struct trace_array *tr) { } tr 1091 kernel/trace/trace.h static inline void ftrace_reset_array_ops(struct trace_array *tr) { } tr 1092 kernel/trace/trace.h static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } tr 1093 kernel/trace/trace.h static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } tr 1094 kernel/trace/trace.h static inline void ftrace_clear_pids(struct trace_array *tr) { } tr 1096 kernel/trace/trace.h static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { } tr 1098 kernel/trace/trace.h #define ftrace_init_array_ops(tr, func) do { } while (0) tr 1106 kernel/trace/trace.h struct trace_array *tr, 
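The kernel/trace/trace.h prototypes referenced just above define the struct tracer callback table (init, reset, start, stop, update_thresh, set_flag, flag_changed) together with the per-instance struct trace_array handle each callback receives; trace_ok_for_array() earlier in the listing additionally gates non-global instances on the tracer's allow_instances flag. As a rough illustration only, a minimal built-in tracer wired to those callbacks might look like the sketch below. All "demo" names are hypothetical, and register_tracer() is assumed to be the usual in-tree registration entry point (declared in kernel/trace/trace.h, not shown in this listing).

/*
 * Illustrative sketch, not part of the reference listing: a minimal tracer
 * using the struct tracer hooks prototyped above. Intended as built-in code
 * under kernel/trace/; register_tracer() is assumed from kernel/trace/trace.h.
 */
#include <linux/init.h>
#include "trace.h"

static struct trace_array *demo_array;

static int demo_tracer_init(struct trace_array *tr)
{
	/* tracer_init() has already reset tr->trace_buffer before calling this */
	demo_array = tr;
	return 0;
}

static void demo_tracer_reset(struct trace_array *tr)
{
	demo_array = NULL;
}

static struct tracer demo_tracer __read_mostly = {
	.name		 = "demo",
	.init		 = demo_tracer_init,
	.reset		 = demo_tracer_reset,
	.allow_instances = true,	/* let trace_ok_for_array() accept instances */
};

static __init int init_demo_tracer(void)
{
	return register_tracer(&demo_tracer);
}
core_initcall(init_demo_tracer);

Registering this way is what makes the name appear in available_tracers (show_traces_fops and the t_start/t_next iterators above) and lets tracing_set_tracer(), referenced around trace.c 5636, switch an instance's current_trace to it.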
tr 1110 kernel/trace/trace.h struct trace_array *tr, tr 1114 kernel/trace/trace.h struct trace_array *tr, tr 1136 kernel/trace/trace.h register_ftrace_function_probe(char *glob, struct trace_array *tr, tr 1139 kernel/trace/trace.h unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, tr 1141 kernel/trace/trace.h extern void clear_ftrace_function_probes(struct trace_array *tr); tr 1160 kernel/trace/trace.h static inline void clear_ftrace_function_probes(struct trace_array *tr) tr 1311 kernel/trace/trace.h extern int enable_branch_tracing(struct trace_array *tr); tr 1313 kernel/trace/trace.h static inline int trace_branch_enable(struct trace_array *tr) tr 1315 kernel/trace/trace.h if (tr->trace_flags & TRACE_ITER_BRANCH) tr 1316 kernel/trace/trace.h return enable_branch_tracing(tr); tr 1325 kernel/trace/trace.h static inline int trace_branch_enable(struct trace_array *tr) tr 1364 kernel/trace/trace.h struct trace_array *tr; tr 1374 kernel/trace/trace.h void trace_buffer_unlock_commit_regs(struct trace_array *tr, tr 1380 kernel/trace/trace.h static inline void trace_buffer_unlock_commit(struct trace_array *tr, tr 1385 kernel/trace/trace.h trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL); tr 1462 kernel/trace/trace.h trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); tr 1494 kernel/trace/trace.h trace_buffer_unlock_commit_regs(file->tr, buffer, event, tr 1572 kernel/trace/trace.h extern int create_event_filter(struct trace_array *tr, tr 1585 kernel/trace/trace.h extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); tr 1586 kernel/trace/trace.h extern int event_trace_del_tracer(struct trace_array *tr); tr 1588 kernel/trace/trace.h extern struct trace_event_file *__find_event_file(struct trace_array *tr, tr 1591 kernel/trace/trace.h extern struct trace_event_file *find_event_file(struct trace_array *tr, tr 1615 kernel/trace/trace.h extern void clear_event_triggers(struct trace_array *tr); tr 1879 kernel/trace/trace.h extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data); tr 1880 kernel/trace/trace.h extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update); tr 1882 kernel/trace/trace.h extern int tracing_snapshot_cond_disable(struct trace_array *tr); tr 1883 kernel/trace/trace.h extern void *tracing_cond_snapshot_data(struct trace_array *tr); tr 1895 kernel/trace/trace.h int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); tr 1905 kernel/trace/trace.h extern void tracing_log_err(struct trace_array *tr, tr 1962 kernel/trace/trace.h void tracing_snapshot_instance(struct trace_array *tr); tr 1963 kernel/trace/trace.h int tracing_alloc_snapshot_instance(struct trace_array *tr); tr 1965 kernel/trace/trace.h static inline void tracing_snapshot_instance(struct trace_array *tr) { } tr 1966 kernel/trace/trace.h static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) tr 34 kernel/trace/trace_branch.c struct trace_array *tr = branch_tracer; tr 53 kernel/trace/trace_branch.c if (unlikely(!tr)) tr 58 kernel/trace/trace_branch.c data = this_cpu_ptr(tr->trace_buffer.data); tr 63 kernel/trace/trace_branch.c buffer = tr->trace_buffer.buffer; tr 102 kernel/trace/trace_branch.c int enable_branch_tracing(struct trace_array *tr) tr 105 kernel/trace/trace_branch.c branch_tracer = tr; tr 130 kernel/trace/trace_branch.c static int branch_trace_init(struct trace_array *tr) tr 132 kernel/trace/trace_branch.c return 
enable_branch_tracing(tr); tr 135 kernel/trace/trace_branch.c static void branch_trace_reset(struct trace_array *tr) tr 62 kernel/trace/trace_events.c #define do_for_each_event_file(tr, file) \ tr 63 kernel/trace/trace_events.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ tr 64 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) tr 66 kernel/trace/trace_events.c #define do_for_each_event_file_safe(tr, file) \ tr 67 kernel/trace/trace_events.c list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ tr 69 kernel/trace/trace_events.c list_for_each_entry_safe(file, ___n, &tr->events, list) tr 232 kernel/trace/trace_events.c struct trace_array *tr = trace_file->tr; tr 236 kernel/trace/trace_events.c pid_list = rcu_dereference_raw(tr->filtered_pids); tr 240 kernel/trace/trace_events.c data = this_cpu_ptr(tr->trace_buffer.data); tr 321 kernel/trace/trace_events.c struct trace_array *tr; tr 325 kernel/trace/trace_events.c do_for_each_event_file(tr, file) { tr 343 kernel/trace/trace_events.c struct trace_array *tr; tr 347 kernel/trace/trace_events.c do_for_each_event_file(tr, file) { tr 366 kernel/trace/trace_events.c struct trace_array *tr = file->tr; tr 438 kernel/trace/trace_events.c if (tr->trace_flags & TRACE_ITER_RECORD_CMD) { tr 444 kernel/trace/trace_events.c if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { tr 492 kernel/trace/trace_events.c static void ftrace_clear_events(struct trace_array *tr) tr 497 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 507 kernel/trace/trace_events.c struct trace_array *tr = data; tr 509 kernel/trace/trace_events.c pid_list = rcu_dereference_raw(tr->filtered_pids); tr 519 kernel/trace/trace_events.c struct trace_array *tr = data; tr 521 kernel/trace/trace_events.c pid_list = rcu_dereference_sched(tr->filtered_pids); tr 525 kernel/trace/trace_events.c void trace_event_follow_fork(struct trace_array *tr, bool enable) tr 529 kernel/trace/trace_events.c tr, INT_MIN); tr 531 kernel/trace/trace_events.c tr, INT_MAX); tr 534 kernel/trace/trace_events.c tr); tr 536 kernel/trace/trace_events.c tr); tr 544 kernel/trace/trace_events.c struct trace_array *tr = data; tr 547 kernel/trace/trace_events.c pid_list = rcu_dereference_sched(tr->filtered_pids); tr 549 kernel/trace/trace_events.c this_cpu_write(tr->trace_buffer.data->ignore_pid, tr 558 kernel/trace/trace_events.c struct trace_array *tr = data; tr 561 kernel/trace/trace_events.c pid_list = rcu_dereference_sched(tr->filtered_pids); tr 563 kernel/trace/trace_events.c this_cpu_write(tr->trace_buffer.data->ignore_pid, tr 570 kernel/trace/trace_events.c struct trace_array *tr = data; tr 574 kernel/trace/trace_events.c if (!this_cpu_read(tr->trace_buffer.data->ignore_pid)) tr 577 kernel/trace/trace_events.c pid_list = rcu_dereference_sched(tr->filtered_pids); tr 579 kernel/trace/trace_events.c this_cpu_write(tr->trace_buffer.data->ignore_pid, tr 586 kernel/trace/trace_events.c struct trace_array *tr = data; tr 590 kernel/trace/trace_events.c if (this_cpu_read(tr->trace_buffer.data->ignore_pid)) tr 593 kernel/trace/trace_events.c pid_list = rcu_dereference_sched(tr->filtered_pids); tr 596 kernel/trace/trace_events.c this_cpu_write(tr->trace_buffer.data->ignore_pid, tr 600 kernel/trace/trace_events.c static void __ftrace_clear_event_pids(struct trace_array *tr) tr 606 kernel/trace/trace_events.c pid_list = rcu_dereference_protected(tr->filtered_pids, tr 611 kernel/trace/trace_events.c unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr); tr 
612 kernel/trace/trace_events.c unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr); tr 614 kernel/trace/trace_events.c unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); tr 615 kernel/trace/trace_events.c unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); tr 617 kernel/trace/trace_events.c unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr); tr 618 kernel/trace/trace_events.c unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr); tr 620 kernel/trace/trace_events.c unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr); tr 621 kernel/trace/trace_events.c unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr); tr 623 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 628 kernel/trace/trace_events.c per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false; tr 630 kernel/trace/trace_events.c rcu_assign_pointer(tr->filtered_pids, NULL); tr 638 kernel/trace/trace_events.c static void ftrace_clear_event_pids(struct trace_array *tr) tr 641 kernel/trace/trace_events.c __ftrace_clear_event_pids(tr); tr 732 kernel/trace/trace_events.c __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, tr 741 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 779 kernel/trace/trace_events.c static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, tr 785 kernel/trace/trace_events.c ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); tr 791 kernel/trace/trace_events.c int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) tr 820 kernel/trace/trace_events.c ret = __ftrace_set_clr_event(tr, match, sub, event, set); tr 844 kernel/trace/trace_events.c struct trace_array *tr = top_trace_array(); tr 846 kernel/trace/trace_events.c if (!tr) tr 849 kernel/trace/trace_events.c return __ftrace_set_clr_event(tr, NULL, system, event, set); tr 862 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 883 kernel/trace/trace_events.c ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); tr 901 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 905 kernel/trace/trace_events.c list_for_each_entry_continue(file, &tr->events, list) { tr 922 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 927 kernel/trace/trace_events.c file = list_entry(&tr->events, struct trace_event_file, list); tr 940 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 944 kernel/trace/trace_events.c list_for_each_entry_continue(file, &tr->events, list) { tr 955 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 960 kernel/trace/trace_events.c file = list_entry(&tr->events, struct trace_event_file, list); tr 989 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 990 kernel/trace/trace_events.c struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids); tr 999 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 1010 kernel/trace/trace_events.c pid_list = rcu_dereference_sched(tr->filtered_pids); tr 1100 kernel/trace/trace_events.c struct trace_array *tr = dir->tr; tr 1106 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 1165 kernel/trace/trace_events.c ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val); tr 1394 kernel/trace/trace_events.c struct trace_array *tr; tr 1403 kernel/trace/trace_events.c 
list_for_each_entry(tr, &ftrace_trace_arrays, list) { tr 1404 kernel/trace/trace_events.c list_for_each_entry(dir, &tr->systems, list) { tr 1426 kernel/trace/trace_events.c if (trace_array_get(tr) < 0) { tr 1433 kernel/trace/trace_events.c trace_array_put(tr); tr 1443 kernel/trace/trace_events.c struct trace_array *tr = inode->i_private; tr 1456 kernel/trace/trace_events.c dir->tr = tr; tr 1466 kernel/trace/trace_events.c trace_array_put(dir->tr); tr 1560 kernel/trace/trace_events.c struct trace_array *tr = data; tr 1567 kernel/trace/trace_events.c pid_list = rcu_dereference_protected(tr->filtered_pids, tr 1570 kernel/trace/trace_events.c this_cpu_write(tr->trace_buffer.data->ignore_pid, tr 1579 kernel/trace/trace_events.c struct trace_array *tr = m->private; tr 1594 kernel/trace/trace_events.c filtered_pids = rcu_dereference_protected(tr->filtered_pids, tr 1601 kernel/trace/trace_events.c rcu_assign_pointer(tr->filtered_pids, pid_list); tr 1603 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 1618 kernel/trace/trace_events.c tr, INT_MAX); tr 1620 kernel/trace/trace_events.c tr, 0); tr 1623 kernel/trace/trace_events.c tr, INT_MAX); tr 1625 kernel/trace/trace_events.c tr, 0); tr 1628 kernel/trace/trace_events.c tr, INT_MAX); tr 1630 kernel/trace/trace_events.c tr, 0); tr 1633 kernel/trace/trace_events.c tr, INT_MAX); tr 1635 kernel/trace/trace_events.c tr, 0); tr 1643 kernel/trace/trace_events.c on_each_cpu(ignore_task_cpu, tr, 1); tr 1782 kernel/trace/trace_events.c struct trace_array *tr = inode->i_private; tr 1784 kernel/trace/trace_events.c trace_array_put(tr); tr 1802 kernel/trace/trace_events.c struct trace_array *tr = inode->i_private; tr 1805 kernel/trace/trace_events.c ret = tracing_check_open_get_tr(tr); tr 1811 kernel/trace/trace_events.c ftrace_clear_events(tr); tr 1815 kernel/trace/trace_events.c trace_array_put(tr); tr 1823 kernel/trace/trace_events.c struct trace_array *tr = inode->i_private; tr 1826 kernel/trace/trace_events.c ret = tracing_check_open_get_tr(tr); tr 1832 kernel/trace/trace_events.c ftrace_clear_event_pids(tr); tr 1836 kernel/trace/trace_events.c trace_array_put(tr); tr 1874 kernel/trace/trace_events.c event_subsystem_dir(struct trace_array *tr, const char *name, tr 1882 kernel/trace/trace_events.c list_for_each_entry(dir, &tr->systems, list) { tr 1918 kernel/trace/trace_events.c dir->tr = tr; tr 1935 kernel/trace/trace_events.c list_add(&dir->list, &tr->systems); tr 1952 kernel/trace/trace_events.c struct trace_array *tr = file->tr; tr 1963 kernel/trace/trace_events.c d_events = event_subsystem_dir(tr, call->class->system, file, parent); tr 2026 kernel/trace/trace_events.c struct trace_array *tr; tr 2028 kernel/trace/trace_events.c do_for_each_event_file_safe(tr, file) { tr 2045 kernel/trace/trace_events.c struct trace_array *tr; tr 2048 kernel/trace/trace_events.c do_for_each_event_file(tr, file) { tr 2053 kernel/trace/trace_events.c tr->clear_trace = true; tr 2245 kernel/trace/trace_events.c struct trace_array *tr) tr 2254 kernel/trace/trace_events.c file->tr = tr; tr 2258 kernel/trace/trace_events.c list_add(&file->list, &tr->events); tr 2265 kernel/trace/trace_events.c __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) tr 2269 kernel/trace/trace_events.c file = trace_create_new_event(call, tr); tr 2273 kernel/trace/trace_events.c return event_create_dir(tr->event_dir, file); tr 2283 kernel/trace/trace_events.c struct trace_array *tr) tr 2287 kernel/trace/trace_events.c file = 
trace_create_new_event(call, tr); tr 2327 kernel/trace/trace_events.c struct trace_array *tr; tr 2334 kernel/trace/trace_events.c do_for_each_event_file(tr, file) { tr 2455 kernel/trace/trace_events.c __trace_add_event_dirs(struct trace_array *tr) tr 2461 kernel/trace/trace_events.c ret = __trace_add_new_event(call, tr); tr 2470 kernel/trace/trace_events.c __find_event_file(struct trace_array *tr, const char *system, const char *event) tr 2476 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 2493 kernel/trace/trace_events.c find_event_file(struct trace_array *tr, const char *system, const char *event) tr 2497 kernel/trace/trace_events.c file = __find_event_file(tr, system, event); tr 2528 kernel/trace/trace_events.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 2545 kernel/trace/trace_events.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 2602 kernel/trace/trace_events.c event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr, tr 2640 kernel/trace/trace_events.c event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr, tr 2693 kernel/trace/trace_events.c event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, tr 2705 kernel/trace/trace_events.c if (!tr) tr 2721 kernel/trace/trace_events.c file = find_event_file(tr, system, event); tr 2733 kernel/trace/trace_events.c ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); tr 2776 kernel/trace/trace_events.c ret = register_ftrace_function_probe(glob, tr, ops, data); tr 2836 kernel/trace/trace_events.c __trace_early_add_event_dirs(struct trace_array *tr) tr 2842 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 2843 kernel/trace/trace_events.c ret = event_create_dir(tr->event_dir, file); tr 2857 kernel/trace/trace_events.c __trace_early_add_events(struct trace_array *tr) tr 2867 kernel/trace/trace_events.c ret = __trace_early_add_new_event(call, tr); tr 2876 kernel/trace/trace_events.c __trace_remove_event_dirs(struct trace_array *tr) tr 2880 kernel/trace/trace_events.c list_for_each_entry_safe(file, next, &tr->events, list) tr 2886 kernel/trace/trace_events.c struct trace_array *tr; tr 2888 kernel/trace/trace_events.c list_for_each_entry(tr, &ftrace_trace_arrays, list) tr 2889 kernel/trace/trace_events.c __trace_add_new_event(call, tr); tr 2909 kernel/trace/trace_events.c create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) tr 2915 kernel/trace/trace_events.c tr, &ftrace_set_event_fops); tr 2928 kernel/trace/trace_events.c tr, &ftrace_tr_enable_fops); tr 2937 kernel/trace/trace_events.c tr, &ftrace_set_event_pid_fops); tr 2954 kernel/trace/trace_events.c tr->event_dir = d_events; tr 2972 kernel/trace/trace_events.c int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) tr 2978 kernel/trace/trace_events.c ret = create_event_toplevel_files(parent, tr); tr 2983 kernel/trace/trace_events.c __trace_add_event_dirs(tr); tr 2995 kernel/trace/trace_events.c early_event_add_tracer(struct dentry *parent, struct trace_array *tr) tr 3001 kernel/trace/trace_events.c ret = create_event_toplevel_files(parent, tr); tr 3006 kernel/trace/trace_events.c __trace_early_add_event_dirs(tr); tr 3016 kernel/trace/trace_events.c int event_trace_del_tracer(struct trace_array *tr) tr 3021 kernel/trace/trace_events.c clear_event_triggers(tr); tr 3024 kernel/trace/trace_events.c __ftrace_clear_event_pids(tr); tr 3027 kernel/trace/trace_events.c __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); tr 3033 
kernel/trace/trace_events.c __trace_remove_event_dirs(tr); tr 3034 kernel/trace/trace_events.c tracefs_remove_recursive(tr->event_dir); tr 3037 kernel/trace/trace_events.c tr->event_dir = NULL; tr 3050 kernel/trace/trace_events.c early_enable_events(struct trace_array *tr, bool disable_first) tr 3065 kernel/trace/trace_events.c ftrace_set_clr_event(tr, token, 0); tr 3067 kernel/trace/trace_events.c ret = ftrace_set_clr_event(tr, token, 1); tr 3080 kernel/trace/trace_events.c struct trace_array *tr = top_trace_array(); tr 3084 kernel/trace/trace_events.c if (!tr) tr 3101 kernel/trace/trace_events.c __trace_early_add_events(tr); tr 3103 kernel/trace/trace_events.c early_enable_events(tr, false); tr 3126 kernel/trace/trace_events.c struct trace_array *tr; tr 3128 kernel/trace/trace_events.c tr = top_trace_array(); tr 3129 kernel/trace/trace_events.c if (!tr) tr 3132 kernel/trace/trace_events.c early_enable_events(tr, true); tr 3141 kernel/trace/trace_events.c struct trace_array *tr; tr 3146 kernel/trace/trace_events.c tr = top_trace_array(); tr 3147 kernel/trace/trace_events.c if (!tr) tr 3155 kernel/trace/trace_events.c tr, &ftrace_avail_fops); tr 3165 kernel/trace/trace_events.c ret = early_event_add_tracer(d_tracer, tr); tr 3247 kernel/trace/trace_events.c struct trace_array *tr; tr 3250 kernel/trace/trace_events.c tr = top_trace_array(); tr 3251 kernel/trace/trace_events.c if (!tr) tr 3256 kernel/trace/trace_events.c list_for_each_entry(file, &tr->events, list) { tr 3299 kernel/trace/trace_events.c list_for_each_entry(dir, &tr->systems, list) { tr 3309 kernel/trace/trace_events.c ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); tr 3318 kernel/trace/trace_events.c ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); tr 3333 kernel/trace/trace_events.c ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); tr 3342 kernel/trace/trace_events.c ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); tr 3405 kernel/trace/trace_events.c event_trace_file.tr = top_trace_array(); tr 3406 kernel/trace/trace_events.c if (WARN_ON(!event_trace_file.tr)) tr 929 kernel/trace/trace_events_filter.c static void append_filter_err(struct trace_array *tr, tr 958 kernel/trace/trace_events_filter.c tracing_log_err(tr, "event filter parse error", tr 963 kernel/trace/trace_events_filter.c tracing_log_err(tr, "event filter parse error", tr 1052 kernel/trace/trace_events_filter.c struct trace_array *tr) tr 1056 kernel/trace/trace_events_filter.c list_for_each_entry(file, &tr->events, list) { tr 1070 kernel/trace/trace_events_filter.c struct trace_array *tr) tr 1074 kernel/trace/trace_events_filter.c list_for_each_entry(file, &tr->events, list) { tr 1591 kernel/trace/trace_events_filter.c struct trace_array *tr, tr 1603 kernel/trace/trace_events_filter.c list_for_each_entry(file, &tr->events, list) { tr 1620 kernel/trace/trace_events_filter.c append_filter_err(tr, pe, filter); tr 1732 kernel/trace/trace_events_filter.c static int create_filter(struct trace_array *tr, tr 1750 kernel/trace/trace_events_filter.c append_filter_err(tr, pe, *filterp); tr 1756 kernel/trace/trace_events_filter.c int create_event_filter(struct trace_array *tr, tr 1761 kernel/trace/trace_events_filter.c return create_filter(tr, call, filter_str, set_str, filterp); tr 1774 kernel/trace/trace_events_filter.c struct trace_array *tr, tr 1782 kernel/trace/trace_events_filter.c err = process_system_preds(dir, tr, pe, filter_str); tr 1788 kernel/trace/trace_events_filter.c append_filter_err(tr, pe, *filterp); tr 1819 
kernel/trace/trace_events_filter.c err = create_filter(file->tr, call, filter_string, true, &filter); tr 1852 kernel/trace/trace_events_filter.c struct trace_array *tr = dir->tr; tr 1865 kernel/trace/trace_events_filter.c filter_free_subsystem_preds(dir, tr); tr 1871 kernel/trace/trace_events_filter.c filter_free_subsystem_filters(dir, tr); tr 1876 kernel/trace/trace_events_filter.c err = create_system_filter(dir, tr, filter_string, &filter); tr 629 kernel/trace/trace_events_hist.c static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos) tr 631 kernel/trace/trace_events_hist.c tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, tr 841 kernel/trace/trace_events_hist.c struct trace_array *tr = iter->tr; tr 861 kernel/trace/trace_events_hist.c if (tr->trace_flags & TRACE_ITER_VERBOSE) tr 926 kernel/trace/trace_events_hist.c buffer = trace_file->tr->trace_buffer.buffer; tr 1537 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 1541 kernel/trace/trace_events_hist.c if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr)) tr 1627 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 1631 kernel/trace/trace_events_hist.c list_for_each_entry(var_data, &tr->hist_vars, list) { tr 1676 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 1679 kernel/trace/trace_events_hist.c list_for_each_entry(var_data, &tr->hist_vars, list) { tr 1731 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 1738 kernel/trace/trace_events_hist.c if (tracing_check_open_get_tr(tr)) tr 1743 kernel/trace/trace_events_hist.c trace_array_put(tr); tr 1748 kernel/trace/trace_events_hist.c list_add(&var_data->list, &tr->hist_vars); tr 1755 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 1769 kernel/trace/trace_events_hist.c trace_array_put(tr); tr 1816 kernel/trace/trace_events_hist.c static struct trace_event_file *find_var_file(struct trace_array *tr, tr 1826 kernel/trace/trace_events_hist.c return find_event_file(tr, system, event_name); tr 1828 kernel/trace/trace_events_hist.c list_for_each_entry(var_data, &tr->hist_vars, list) { tr 1836 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name)); tr 1871 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 1883 kernel/trace/trace_events_hist.c file = find_var_file(tr, system, event_name, var_name); tr 1889 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, tr 1906 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 1918 kernel/trace/trace_events_hist.c file = find_var_file(tr, system, event_name, var_name); tr 2122 kernel/trace/trace_events_hist.c static int parse_assignment(struct trace_array *tr, tr 2175 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str)); tr 2193 kernel/trace/trace_events_hist.c parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) tr 2212 kernel/trace/trace_events_hist.c ret = parse_assignment(tr, str, attrs); tr 2808 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 2821 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name)); tr 2832 kernel/trace/trace_events_hist.c struct trace_array *tr = file->tr; tr 2856 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); tr 2872 
kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); tr 2986 kernel/trace/trace_events_hist.c hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); tr 3041 kernel/trace/trace_events_hist.c static int check_expr_operands(struct trace_array *tr, tr 3070 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); tr 3088 kernel/trace/trace_events_hist.c hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); tr 3133 kernel/trace/trace_events_hist.c ret = check_expr_operands(file->tr, operand1, operand2); tr 3256 kernel/trace/trace_events_hist.c static struct trace_event_file *event_file(struct trace_array *tr, tr 3261 kernel/trace/trace_events_hist.c file = __find_event_file(tr, system, event_name); tr 3318 kernel/trace/trace_events_hist.c struct trace_array *tr = target_hist_data->event_file->tr; tr 3330 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); tr 3334 kernel/trace/trace_events_hist.c file = event_file(tr, subsys_name, event_name); tr 3337 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); tr 3350 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); tr 3411 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); tr 3423 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); tr 3557 kernel/trace/trace_events_hist.c struct trace_array *tr = file->tr; tr 3562 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); tr 3569 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name)); tr 3576 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); tr 3703 kernel/trace/trace_events_hist.c static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) tr 3706 kernel/trace/trace_events_hist.c struct track_data *track_data = tr->cond_snapshot->cond_data; tr 3748 kernel/trace/trace_events_hist.c tracing_snapshot_cond(file->tr, &context); tr 3780 kernel/trace/trace_events_hist.c track_data = tracing_cond_snapshot_data(file->tr); tr 3801 kernel/trace/trace_events_hist.c static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) tr 3889 kernel/trace/trace_events_hist.c track_data = tracing_cond_snapshot_data(file->tr); tr 3891 kernel/trace/trace_events_hist.c tracing_snapshot_cond_disable(file->tr); tr 3909 kernel/trace/trace_events_hist.c struct trace_array *tr = file->tr; tr 3915 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str)); tr 3922 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); tr 3935 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); tr 3943 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); tr 3954 kernel/trace/trace_events_hist.c static int parse_action_params(struct trace_array *tr, char *params, tr 3963 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); tr 3969 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); tr 3976 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); tr 4000 kernel/trace/trace_events_hist.c static int action_parse(struct trace_array *tr, char *str, struct action_data *data, tr 4008 
kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); tr 4015 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); tr 4024 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); tr 4029 kernel/trace/trace_events_hist.c ret = parse_action_params(tr, params, data); tr 4038 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); tr 4050 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); tr 4060 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); tr 4075 kernel/trace/trace_events_hist.c ret = parse_action_params(tr, params, data); tr 4128 kernel/trace/trace_events_hist.c ret = action_parse(hist_data->event_file->tr, str, data, handler); tr 4201 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 4217 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var)); tr 4275 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 4293 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name)); tr 4360 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); tr 4367 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); tr 4385 kernel/trace/trace_events_hist.c struct trace_array *tr = file->tr; tr 4402 kernel/trace/trace_events_hist.c ret = tracing_snapshot_cond_enable(file->tr, track_data, tr 4413 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); tr 4426 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, tr 4449 kernel/trace/trace_events_hist.c static struct action_data *onmatch_parse(struct trace_array *tr, char *str) tr 4461 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); tr 4467 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); tr 4471 kernel/trace/trace_events_hist.c if (IS_ERR(event_file(tr, match_event_system, match_event))) { tr 4472 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); tr 4488 kernel/trace/trace_events_hist.c ret = action_parse(tr, str, data, HANDLER_ONMATCH); tr 4557 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 4564 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); tr 4617 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 4641 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); tr 4738 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 4752 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, tr 4759 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); tr 4931 kernel/trace/trace_events_hist.c struct trace_array *tr = hist_data->event_file->tr; tr 4944 kernel/trace/trace_events_hist.c data = onmatch_parse(tr, action_str); tr 5976 kernel/trace/trace_events_hist.c struct trace_array *tr = file->tr; tr 5984 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); tr 6007 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); tr 6015 kernel/trace/trace_events_hist.c 
hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); tr 6038 kernel/trace/trace_events_hist.c ret = tracing_set_clock(file->tr, hist_data->attrs->clock); tr 6040 kernel/trace/trace_events_hist.c hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); tr 6044 kernel/trace/trace_events_hist.c tracing_set_time_stamp_abs(file->tr, true); tr 6152 kernel/trace/trace_events_hist.c tracing_set_time_stamp_abs(file->tr, false); tr 6199 kernel/trace/trace_events_hist.c tracing_set_time_stamp_abs(file->tr, false); tr 6266 kernel/trace/trace_events_hist.c attrs = parse_hist_trigger_attrs(file->tr, trigger); tr 475 kernel/trace/trace_events_trigger.c clear_event_triggers(struct trace_array *tr) tr 479 kernel/trace/trace_events_trigger.c list_for_each_entry(file, &tr->events, list) { tr 747 kernel/trace/trace_events_trigger.c ret = create_event_filter(file->tr, file->event_call, tr 1068 kernel/trace/trace_events_trigger.c tracing_snapshot_instance(file->tr); tr 1091 kernel/trace/trace_events_trigger.c if (tracing_alloc_snapshot_instance(file->tr) != 0) tr 1355 kernel/trace/trace_events_trigger.c struct trace_array *tr = file->tr; tr 1379 kernel/trace/trace_events_trigger.c event_enable_file = find_event_file(tr, system, event); tr 22 kernel/trace/trace_functions.c static void tracing_start_function_trace(struct trace_array *tr); tr 23 kernel/trace/trace_functions.c static void tracing_stop_function_trace(struct trace_array *tr); tr 37 kernel/trace/trace_functions.c static int allocate_ftrace_ops(struct trace_array *tr) tr 49 kernel/trace/trace_functions.c tr->ops = ops; tr 50 kernel/trace/trace_functions.c ops->private = tr; tr 55 kernel/trace/trace_functions.c int ftrace_create_function_files(struct trace_array *tr, tr 64 kernel/trace/trace_functions.c if (tr->flags & TRACE_ARRAY_FL_GLOBAL) tr 67 kernel/trace/trace_functions.c ret = allocate_ftrace_ops(tr); tr 71 kernel/trace/trace_functions.c ftrace_create_filter_files(tr->ops, parent); tr 76 kernel/trace/trace_functions.c void ftrace_destroy_function_files(struct trace_array *tr) tr 78 kernel/trace/trace_functions.c ftrace_destroy_filter_files(tr->ops); tr 79 kernel/trace/trace_functions.c kfree(tr->ops); tr 80 kernel/trace/trace_functions.c tr->ops = NULL; tr 83 kernel/trace/trace_functions.c static int function_trace_init(struct trace_array *tr) tr 92 kernel/trace/trace_functions.c if (!tr->ops) tr 96 kernel/trace/trace_functions.c if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr 102 kernel/trace/trace_functions.c ftrace_init_array_ops(tr, func); tr 104 kernel/trace/trace_functions.c tr->trace_buffer.cpu = get_cpu(); tr 108 kernel/trace/trace_functions.c tracing_start_function_trace(tr); tr 112 kernel/trace/trace_functions.c static void function_trace_reset(struct trace_array *tr) tr 114 kernel/trace/trace_functions.c tracing_stop_function_trace(tr); tr 116 kernel/trace/trace_functions.c ftrace_reset_array_ops(tr); tr 119 kernel/trace/trace_functions.c static void function_trace_start(struct trace_array *tr) tr 121 kernel/trace/trace_functions.c tracing_reset_online_cpus(&tr->trace_buffer); tr 128 kernel/trace/trace_functions.c struct trace_array *tr = op->private; tr 135 kernel/trace/trace_functions.c if (unlikely(!tr->function_enabled)) tr 146 kernel/trace/trace_functions.c data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 149 kernel/trace/trace_functions.c trace_function(tr, ip, parent_ip, flags, pc); tr 179 kernel/trace/trace_functions.c struct trace_array *tr = op->private; tr 186 kernel/trace/trace_functions.c if (unlikely(!tr->function_enabled)) tr 195 
kernel/trace/trace_functions.c data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 200 kernel/trace/trace_functions.c trace_function(tr, ip, parent_ip, flags, pc); tr 201 kernel/trace/trace_functions.c __trace_stack(tr, flags, STACK_SKIP, pc); tr 220 kernel/trace/trace_functions.c static void tracing_start_function_trace(struct trace_array *tr) tr 222 kernel/trace/trace_functions.c tr->function_enabled = 0; tr 223 kernel/trace/trace_functions.c register_ftrace_function(tr->ops); tr 224 kernel/trace/trace_functions.c tr->function_enabled = 1; tr 227 kernel/trace/trace_functions.c static void tracing_stop_function_trace(struct trace_array *tr) tr 229 kernel/trace/trace_functions.c tr->function_enabled = 0; tr 230 kernel/trace/trace_functions.c unregister_ftrace_function(tr->ops); tr 236 kernel/trace/trace_functions.c func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) tr 245 kernel/trace/trace_functions.c if (tr->current_trace != &function_trace) tr 248 kernel/trace/trace_functions.c unregister_ftrace_function(tr->ops); tr 251 kernel/trace/trace_functions.c tr->ops->func = function_stack_trace_call; tr 252 kernel/trace/trace_functions.c register_ftrace_function(tr->ops); tr 254 kernel/trace/trace_functions.c tr->ops->func = function_trace_call; tr 255 kernel/trace/trace_functions.c register_ftrace_function(tr->ops); tr 283 kernel/trace/trace_functions.c struct trace_array *tr, bool on, tr 328 kernel/trace/trace_functions.c if (on == !!tracer_tracing_is_on(tr)) tr 332 kernel/trace/trace_functions.c tracer_tracing_on(tr); tr 334 kernel/trace/trace_functions.c tracer_tracing_off(tr); tr 344 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 347 kernel/trace/trace_functions.c update_traceon_count(ops, ip, tr, 1, data); tr 352 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 355 kernel/trace/trace_functions.c update_traceon_count(ops, ip, tr, 0, data); tr 360 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 363 kernel/trace/trace_functions.c if (tracer_tracing_is_on(tr)) tr 366 kernel/trace/trace_functions.c tracer_tracing_on(tr); tr 371 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 374 kernel/trace/trace_functions.c if (!tracer_tracing_is_on(tr)) tr 377 kernel/trace/trace_functions.c tracer_tracing_off(tr); tr 402 kernel/trace/trace_functions.c static __always_inline void trace_stack(struct trace_array *tr) tr 410 kernel/trace/trace_functions.c __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc); tr 415 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 418 kernel/trace/trace_functions.c trace_stack(tr); tr 423 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 436 kernel/trace/trace_functions.c trace_stack(tr); tr 455 kernel/trace/trace_functions.c trace_stack(tr); tr 483 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 493 kernel/trace/trace_functions.c struct trace_array *tr, struct ftrace_probe_ops *ops, tr 559 kernel/trace/trace_functions.c ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr, tr 575 kernel/trace/trace_functions.c ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr, tr 637 kernel/trace/trace_functions.c ftrace_trace_probe_callback(struct trace_array *tr, tr 651 kernel/trace/trace_functions.c return unregister_ftrace_function_probe_func(glob+1, tr, ops); tr 
670 kernel/trace/trace_functions.c ret = register_ftrace_function_probe(glob, tr, ops, count); tr 676 kernel/trace/trace_functions.c ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash, tr 681 kernel/trace/trace_functions.c if (!tr) tr 690 kernel/trace/trace_functions.c return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, tr 695 kernel/trace/trace_functions.c ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash, tr 700 kernel/trace/trace_functions.c if (!tr) tr 705 kernel/trace/trace_functions.c return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, tr 710 kernel/trace/trace_functions.c ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash, tr 715 kernel/trace/trace_functions.c if (!tr) tr 721 kernel/trace/trace_functions.c return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, tr 726 kernel/trace/trace_functions.c ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash, tr 731 kernel/trace/trace_functions.c if (!tr) tr 737 kernel/trace/trace_functions.c return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, tr 94 kernel/trace/trace_functions_graph.c print_graph_duration(struct trace_array *tr, unsigned long long duration, tr 97 kernel/trace/trace_functions_graph.c int __trace_graph_entry(struct trace_array *tr, tr 104 kernel/trace/trace_functions_graph.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 129 kernel/trace/trace_functions_graph.c struct trace_array *tr = graph_array; tr 156 kernel/trace/trace_functions_graph.c if (!ftrace_trace_task(tr)) tr 174 kernel/trace/trace_functions_graph.c data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 178 kernel/trace/trace_functions_graph.c ret = __trace_graph_entry(tr, trace, flags, pc); tr 190 kernel/trace/trace_functions_graph.c __trace_graph_function(struct trace_array *tr, tr 205 kernel/trace/trace_functions_graph.c __trace_graph_entry(tr, &ent, flags, pc); tr 206 kernel/trace/trace_functions_graph.c __trace_graph_return(tr, &ret, flags, pc); tr 210 kernel/trace/trace_functions_graph.c trace_graph_function(struct trace_array *tr, tr 214 kernel/trace/trace_functions_graph.c __trace_graph_function(tr, ip, flags, pc); tr 217 kernel/trace/trace_functions_graph.c void __trace_graph_return(struct trace_array *tr, tr 224 kernel/trace/trace_functions_graph.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 239 kernel/trace/trace_functions_graph.c struct trace_array *tr = graph_array; tr 255 kernel/trace/trace_functions_graph.c data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 259 kernel/trace/trace_functions_graph.c __trace_graph_return(tr, trace, flags, pc); tr 265 kernel/trace/trace_functions_graph.c void set_graph_array(struct trace_array *tr) tr 267 kernel/trace/trace_functions_graph.c graph_array = tr; tr 300 kernel/trace/trace_functions_graph.c static int graph_trace_init(struct trace_array *tr) tr 304 kernel/trace/trace_functions_graph.c set_graph_array(tr); tr 316 kernel/trace/trace_functions_graph.c static void graph_trace_reset(struct trace_array *tr) tr 325 kernel/trace/trace_functions_graph.c static int graph_trace_update_thresh(struct trace_array *tr) tr 327 kernel/trace/trace_functions_graph.c graph_trace_reset(tr); tr 328 kernel/trace/trace_functions_graph.c return graph_trace_init(tr); tr 516 kernel/trace/trace_functions_graph.c struct trace_array *tr = iter->tr; tr 524 kernel/trace/trace_functions_graph.c if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { tr 544 kernel/trace/trace_functions_graph.c if 
(tr->trace_flags & TRACE_ITER_LATENCY_FMT) tr 549 kernel/trace/trace_functions_graph.c print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START); tr 556 kernel/trace/trace_functions_graph.c print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END); tr 594 kernel/trace/trace_functions_graph.c print_graph_duration(struct trace_array *tr, unsigned long long duration, tr 598 kernel/trace/trace_functions_graph.c !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) tr 632 kernel/trace/trace_functions_graph.c struct trace_array *tr = iter->tr; tr 662 kernel/trace/trace_functions_graph.c print_graph_duration(tr, duration, s, flags); tr 683 kernel/trace/trace_functions_graph.c struct trace_array *tr = iter->tr; tr 700 kernel/trace/trace_functions_graph.c print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL); tr 724 kernel/trace/trace_functions_graph.c struct trace_array *tr = iter->tr; tr 734 kernel/trace/trace_functions_graph.c if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) tr 756 kernel/trace/trace_functions_graph.c if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) tr 907 kernel/trace/trace_functions_graph.c struct trace_array *tr = iter->tr; tr 940 kernel/trace/trace_functions_graph.c print_graph_duration(tr, duration, s, flags); tr 973 kernel/trace/trace_functions_graph.c struct trace_array *tr = iter->tr; tr 974 kernel/trace/trace_functions_graph.c unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); tr 987 kernel/trace/trace_functions_graph.c print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL); tr 1135 kernel/trace/trace_functions_graph.c static void __print_graph_headers_flags(struct trace_array *tr, tr 1138 kernel/trace/trace_functions_graph.c int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT; tr 1184 kernel/trace/trace_functions_graph.c struct trace_array *tr = iter->tr; tr 1186 kernel/trace/trace_functions_graph.c if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) tr 1189 kernel/trace/trace_functions_graph.c if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) { tr 1197 kernel/trace/trace_functions_graph.c __print_graph_headers_flags(tr, s, flags); tr 1253 kernel/trace/trace_functions_graph.c func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) tr 105 kernel/trace/trace_hwlat.c struct trace_array *tr = hwlat_trace; tr 107 kernel/trace/trace_hwlat.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 169 kernel/trace/trace_hwlat.c struct trace_array *tr = hwlat_trace; tr 257 kernel/trace/trace_hwlat.c if (sample > tr->max_latency) tr 258 kernel/trace/trace_hwlat.c tr->max_latency = sample; tr 259 kernel/trace/trace_hwlat.c if (outer_sample > tr->max_latency) tr 260 kernel/trace/trace_hwlat.c tr->max_latency = outer_sample; tr 351 kernel/trace/trace_hwlat.c static int start_kthread(struct trace_array *tr) tr 560 kernel/trace/trace_hwlat.c static void hwlat_tracer_start(struct trace_array *tr) tr 564 kernel/trace/trace_hwlat.c err = start_kthread(tr); tr 569 kernel/trace/trace_hwlat.c static void hwlat_tracer_stop(struct trace_array *tr) tr 576 kernel/trace/trace_hwlat.c static int hwlat_tracer_init(struct trace_array *tr) tr 582 kernel/trace/trace_hwlat.c hwlat_trace = tr; tr 586 kernel/trace/trace_hwlat.c tr->max_latency = 0; tr 593 kernel/trace/trace_hwlat.c if (tracer_tracing_is_on(tr)) tr 594 kernel/trace/trace_hwlat.c hwlat_tracer_start(tr); tr 601 kernel/trace/trace_hwlat.c static void hwlat_tracer_reset(struct trace_array *tr) tr 40 kernel/trace/trace_irqsoff.c static void stop_irqsoff_tracer(struct trace_array *tr, int graph); tr 41 
kernel/trace/trace_irqsoff.c static int start_irqsoff_tracer(struct trace_array *tr, int graph); tr 65 kernel/trace/trace_irqsoff.c static int irqsoff_display_graph(struct trace_array *tr, int set); tr 66 kernel/trace/trace_irqsoff.c # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH) tr 68 kernel/trace/trace_irqsoff.c static inline int irqsoff_display_graph(struct trace_array *tr, int set) tr 72 kernel/trace/trace_irqsoff.c # define is_graph(tr) false tr 99 kernel/trace/trace_irqsoff.c static int func_prolog_dec(struct trace_array *tr, tr 125 kernel/trace/trace_irqsoff.c *data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 143 kernel/trace/trace_irqsoff.c struct trace_array *tr = irqsoff_trace; tr 147 kernel/trace/trace_irqsoff.c if (!func_prolog_dec(tr, &data, &flags)) tr 150 kernel/trace/trace_irqsoff.c trace_function(tr, ip, parent_ip, flags, preempt_count()); tr 157 kernel/trace/trace_irqsoff.c static int irqsoff_display_graph(struct trace_array *tr, int set) tr 161 kernel/trace/trace_irqsoff.c if (!(is_graph(tr) ^ set)) tr 169 kernel/trace/trace_irqsoff.c tr->max_latency = 0; tr 177 kernel/trace/trace_irqsoff.c struct trace_array *tr = irqsoff_trace; tr 195 kernel/trace/trace_irqsoff.c if (!func_prolog_dec(tr, &data, &flags)) tr 199 kernel/trace/trace_irqsoff.c ret = __trace_graph_entry(tr, trace, flags, pc); tr 207 kernel/trace/trace_irqsoff.c struct trace_array *tr = irqsoff_trace; tr 214 kernel/trace/trace_irqsoff.c if (!func_prolog_dec(tr, &data, &flags)) tr 218 kernel/trace/trace_irqsoff.c __trace_graph_return(tr, trace, flags, pc); tr 229 kernel/trace/trace_irqsoff.c if (is_graph(iter->tr)) tr 251 kernel/trace/trace_irqsoff.c if (is_graph(iter->tr)) tr 259 kernel/trace/trace_irqsoff.c struct trace_array *tr = irqsoff_trace; tr 261 kernel/trace/trace_irqsoff.c if (is_graph(tr)) tr 268 kernel/trace/trace_irqsoff.c __trace_function(struct trace_array *tr, tr 272 kernel/trace/trace_irqsoff.c if (is_graph(tr)) tr 273 kernel/trace/trace_irqsoff.c trace_graph_function(tr, ip, parent_ip, flags, pc); tr 275 kernel/trace/trace_irqsoff.c trace_function(tr, ip, parent_ip, flags, pc); tr 305 kernel/trace/trace_irqsoff.c static bool report_latency(struct trace_array *tr, u64 delta) tr 311 kernel/trace/trace_irqsoff.c if (delta <= tr->max_latency) tr 318 kernel/trace/trace_irqsoff.c check_critical_timing(struct trace_array *tr, tr 335 kernel/trace/trace_irqsoff.c if (!report_latency(tr, delta)) tr 341 kernel/trace/trace_irqsoff.c if (!report_latency(tr, delta)) tr 344 kernel/trace/trace_irqsoff.c __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); tr 346 kernel/trace/trace_irqsoff.c __trace_stack(tr, flags, 5, pc); tr 354 kernel/trace/trace_irqsoff.c tr->max_latency = delta; tr 355 kernel/trace/trace_irqsoff.c update_max_tr_single(tr, current, cpu); tr 366 kernel/trace/trace_irqsoff.c __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); tr 373 kernel/trace/trace_irqsoff.c struct trace_array *tr = irqsoff_trace; tr 385 kernel/trace/trace_irqsoff.c data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 398 kernel/trace/trace_irqsoff.c __trace_function(tr, ip, parent_ip, flags, pc); tr 409 kernel/trace/trace_irqsoff.c struct trace_array *tr = irqsoff_trace; tr 423 kernel/trace/trace_irqsoff.c data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 432 kernel/trace/trace_irqsoff.c __trace_function(tr, ip, parent_ip, flags, pc); tr 433 kernel/trace/trace_irqsoff.c check_critical_timing(tr, data, parent_ip ? 
: ip, cpu); tr 462 kernel/trace/trace_irqsoff.c static int register_irqsoff_function(struct trace_array *tr, int graph, int set) tr 467 kernel/trace/trace_irqsoff.c if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION))) tr 473 kernel/trace/trace_irqsoff.c ret = register_ftrace_function(tr->ops); tr 481 kernel/trace/trace_irqsoff.c static void unregister_irqsoff_function(struct trace_array *tr, int graph) tr 489 kernel/trace/trace_irqsoff.c unregister_ftrace_function(tr->ops); tr 494 kernel/trace/trace_irqsoff.c static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set) tr 500 kernel/trace/trace_irqsoff.c register_irqsoff_function(tr, is_graph(tr), 1); tr 502 kernel/trace/trace_irqsoff.c unregister_irqsoff_function(tr, is_graph(tr)); tr 506 kernel/trace/trace_irqsoff.c static int register_irqsoff_function(struct trace_array *tr, int graph, int set) tr 510 kernel/trace/trace_irqsoff.c static void unregister_irqsoff_function(struct trace_array *tr, int graph) { } tr 511 kernel/trace/trace_irqsoff.c static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set) tr 517 kernel/trace/trace_irqsoff.c static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) tr 519 kernel/trace/trace_irqsoff.c struct tracer *tracer = tr->current_trace; tr 521 kernel/trace/trace_irqsoff.c if (irqsoff_function_set(tr, mask, set)) tr 526 kernel/trace/trace_irqsoff.c return irqsoff_display_graph(tr, set); tr 532 kernel/trace/trace_irqsoff.c static int start_irqsoff_tracer(struct trace_array *tr, int graph) tr 536 kernel/trace/trace_irqsoff.c ret = register_irqsoff_function(tr, graph, 0); tr 546 kernel/trace/trace_irqsoff.c static void stop_irqsoff_tracer(struct trace_array *tr, int graph) tr 550 kernel/trace/trace_irqsoff.c unregister_irqsoff_function(tr, graph); tr 555 kernel/trace/trace_irqsoff.c static int __irqsoff_tracer_init(struct trace_array *tr) tr 560 kernel/trace/trace_irqsoff.c save_flags = tr->trace_flags; tr 563 kernel/trace/trace_irqsoff.c set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); tr 564 kernel/trace/trace_irqsoff.c set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1); tr 566 kernel/trace/trace_irqsoff.c tr->max_latency = 0; tr 567 kernel/trace/trace_irqsoff.c irqsoff_trace = tr; tr 571 kernel/trace/trace_irqsoff.c ftrace_init_array_ops(tr, irqsoff_tracer_call); tr 574 kernel/trace/trace_irqsoff.c if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr 575 kernel/trace/trace_irqsoff.c is_graph(tr)))) tr 582 kernel/trace/trace_irqsoff.c static void __irqsoff_tracer_reset(struct trace_array *tr) tr 587 kernel/trace/trace_irqsoff.c stop_irqsoff_tracer(tr, is_graph(tr)); tr 589 kernel/trace/trace_irqsoff.c set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); tr 590 kernel/trace/trace_irqsoff.c set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); tr 591 kernel/trace/trace_irqsoff.c ftrace_reset_array_ops(tr); tr 596 kernel/trace/trace_irqsoff.c static void irqsoff_tracer_start(struct trace_array *tr) tr 601 kernel/trace/trace_irqsoff.c static void irqsoff_tracer_stop(struct trace_array *tr) tr 628 kernel/trace/trace_irqsoff.c static int irqsoff_tracer_init(struct trace_array *tr) tr 632 kernel/trace/trace_irqsoff.c return __irqsoff_tracer_init(tr); tr 635 kernel/trace/trace_irqsoff.c static void irqsoff_tracer_reset(struct trace_array *tr) tr 637 kernel/trace/trace_irqsoff.c __irqsoff_tracer_reset(tr); tr 678 kernel/trace/trace_irqsoff.c static int preemptoff_tracer_init(struct trace_array *tr) tr 682 
kernel/trace/trace_irqsoff.c return __irqsoff_tracer_init(tr); tr 685 kernel/trace/trace_irqsoff.c static void preemptoff_tracer_reset(struct trace_array *tr) tr 687 kernel/trace/trace_irqsoff.c __irqsoff_tracer_reset(tr); tr 713 kernel/trace/trace_irqsoff.c static int preemptirqsoff_tracer_init(struct trace_array *tr) tr 717 kernel/trace/trace_irqsoff.c return __irqsoff_tracer_init(tr); tr 720 kernel/trace/trace_irqsoff.c static void preemptirqsoff_tracer_reset(struct trace_array *tr) tr 722 kernel/trace/trace_irqsoff.c __irqsoff_tracer_reset(tr); tr 25 kernel/trace/trace_kdb.c struct trace_array *tr; tr 29 kernel/trace/trace_kdb.c tr = iter.tr; tr 31 kernel/trace/trace_kdb.c old_userobj = tr->trace_flags; tr 34 kernel/trace/trace_kdb.c tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; tr 82 kernel/trace/trace_kdb.c tr->trace_flags = old_userobj; tr 1653 kernel/trace/trace_kprobe.c struct trace_array *tr = top_trace_array(); tr 1660 kernel/trace/trace_kprobe.c list_for_each_entry(file, &tr->events, list) tr 1732 kernel/trace/trace_kprobe.c find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) tr 1736 kernel/trace/trace_kprobe.c list_for_each_entry(file, &tr->events, list) tr 30 kernel/trace/trace_mmiotrace.c static void mmio_reset_data(struct trace_array *tr) tr 35 kernel/trace/trace_mmiotrace.c tracing_reset_online_cpus(&tr->trace_buffer); tr 38 kernel/trace/trace_mmiotrace.c static int mmio_trace_init(struct trace_array *tr) tr 41 kernel/trace/trace_mmiotrace.c mmio_trace_array = tr; tr 43 kernel/trace/trace_mmiotrace.c mmio_reset_data(tr); tr 48 kernel/trace/trace_mmiotrace.c static void mmio_trace_reset(struct trace_array *tr) tr 53 kernel/trace/trace_mmiotrace.c mmio_reset_data(tr); tr 57 kernel/trace/trace_mmiotrace.c static void mmio_trace_start(struct trace_array *tr) tr 60 kernel/trace/trace_mmiotrace.c mmio_reset_data(tr); tr 295 kernel/trace/trace_mmiotrace.c static void __trace_mmiotrace_rw(struct trace_array *tr, tr 300 kernel/trace/trace_mmiotrace.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 315 kernel/trace/trace_mmiotrace.c trace_buffer_unlock_commit(tr, buffer, event, 0, pc); tr 320 kernel/trace/trace_mmiotrace.c struct trace_array *tr = mmio_trace_array; tr 321 kernel/trace/trace_mmiotrace.c struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); tr 322 kernel/trace/trace_mmiotrace.c __trace_mmiotrace_rw(tr, data, rw); tr 325 kernel/trace/trace_mmiotrace.c static void __trace_mmiotrace_map(struct trace_array *tr, tr 330 kernel/trace/trace_mmiotrace.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 345 kernel/trace/trace_mmiotrace.c trace_buffer_unlock_commit(tr, buffer, event, 0, pc); tr 350 kernel/trace/trace_mmiotrace.c struct trace_array *tr = mmio_trace_array; tr 354 kernel/trace/trace_mmiotrace.c data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); tr 355 kernel/trace/trace_mmiotrace.c __trace_mmiotrace_map(tr, data, map); tr 37 kernel/trace/trace_nop.c static void start_nop_trace(struct trace_array *tr) tr 42 kernel/trace/trace_nop.c static void stop_nop_trace(struct trace_array *tr) tr 47 kernel/trace/trace_nop.c static int nop_trace_init(struct trace_array *tr) tr 49 kernel/trace/trace_nop.c ctx_trace = tr; tr 50 kernel/trace/trace_nop.c start_nop_trace(tr); tr 54 kernel/trace/trace_nop.c static void nop_trace_reset(struct trace_array *tr) tr 56 kernel/trace/trace_nop.c stop_nop_trace(tr); tr 64 kernel/trace/trace_nop.c static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 
bit, int set) tr 523 kernel/trace/trace_output.c struct trace_array *tr = iter->tr; tr 524 kernel/trace/trace_output.c unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE; tr 567 kernel/trace/trace_output.c struct trace_array *tr = iter->tr; tr 578 kernel/trace/trace_output.c if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { tr 589 kernel/trace/trace_output.c if (tr->trace_flags & TRACE_ITER_IRQ_INFO) tr 605 kernel/trace/trace_output.c struct trace_array *tr = iter->tr; tr 613 kernel/trace/trace_output.c unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE); tr 1086 kernel/trace/trace_output.c struct trace_array *tr = iter->tr; tr 1096 kernel/trace/trace_output.c if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) { tr 36 kernel/trace/trace_sched_wakeup.c static void wakeup_reset(struct trace_array *tr); tr 37 kernel/trace/trace_sched_wakeup.c static void __wakeup_reset(struct trace_array *tr); tr 38 kernel/trace/trace_sched_wakeup.c static int start_func_tracer(struct trace_array *tr, int graph); tr 39 kernel/trace/trace_sched_wakeup.c static void stop_func_tracer(struct trace_array *tr, int graph); tr 44 kernel/trace/trace_sched_wakeup.c # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH) tr 46 kernel/trace/trace_sched_wakeup.c # define is_graph(tr) false tr 68 kernel/trace/trace_sched_wakeup.c func_prolog_preempt_disable(struct trace_array *tr, tr 85 kernel/trace/trace_sched_wakeup.c *data = per_cpu_ptr(tr->trace_buffer.data, cpu); tr 102 kernel/trace/trace_sched_wakeup.c static int wakeup_display_graph(struct trace_array *tr, int set) tr 104 kernel/trace/trace_sched_wakeup.c if (!(is_graph(tr) ^ set)) tr 107 kernel/trace/trace_sched_wakeup.c stop_func_tracer(tr, !set); tr 110 kernel/trace/trace_sched_wakeup.c tr->max_latency = 0; tr 112 kernel/trace/trace_sched_wakeup.c return start_func_tracer(tr, set); tr 117 kernel/trace/trace_sched_wakeup.c struct trace_array *tr = wakeup_trace; tr 134 kernel/trace/trace_sched_wakeup.c if (!func_prolog_preempt_disable(tr, &data, &pc)) tr 138 kernel/trace/trace_sched_wakeup.c ret = __trace_graph_entry(tr, trace, flags, pc); tr 147 kernel/trace/trace_sched_wakeup.c struct trace_array *tr = wakeup_trace; tr 154 kernel/trace/trace_sched_wakeup.c if (!func_prolog_preempt_disable(tr, &data, &pc)) tr 158 kernel/trace/trace_sched_wakeup.c __trace_graph_return(tr, trace, flags, pc); tr 172 kernel/trace/trace_sched_wakeup.c if (is_graph(iter->tr)) tr 195 kernel/trace/trace_sched_wakeup.c if (is_graph(iter->tr)) tr 217 kernel/trace/trace_sched_wakeup.c struct trace_array *tr = wakeup_trace; tr 222 kernel/trace/trace_sched_wakeup.c if (!func_prolog_preempt_disable(tr, &data, &pc)) tr 226 kernel/trace/trace_sched_wakeup.c trace_function(tr, ip, parent_ip, flags, pc); tr 233 kernel/trace/trace_sched_wakeup.c static int register_wakeup_function(struct trace_array *tr, int graph, int set) tr 238 kernel/trace/trace_sched_wakeup.c if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION))) tr 244 kernel/trace/trace_sched_wakeup.c ret = register_ftrace_function(tr->ops); tr 252 kernel/trace/trace_sched_wakeup.c static void unregister_wakeup_function(struct trace_array *tr, int graph) tr 260 kernel/trace/trace_sched_wakeup.c unregister_ftrace_function(tr->ops); tr 265 kernel/trace/trace_sched_wakeup.c static int wakeup_function_set(struct trace_array *tr, u32 mask, int set) tr 271 kernel/trace/trace_sched_wakeup.c register_wakeup_function(tr, is_graph(tr), 1); tr 273 kernel/trace/trace_sched_wakeup.c 
unregister_wakeup_function(tr, is_graph(tr)); tr 277 kernel/trace/trace_sched_wakeup.c static int register_wakeup_function(struct trace_array *tr, int graph, int set) tr 281 kernel/trace/trace_sched_wakeup.c static void unregister_wakeup_function(struct trace_array *tr, int graph) { } tr 282 kernel/trace/trace_sched_wakeup.c static int wakeup_function_set(struct trace_array *tr, u32 mask, int set) tr 304 kernel/trace/trace_sched_wakeup.c __trace_function(struct trace_array *tr, tr 308 kernel/trace/trace_sched_wakeup.c if (is_graph(tr)) tr 309 kernel/trace/trace_sched_wakeup.c trace_graph_function(tr, ip, parent_ip, flags, pc); tr 311 kernel/trace/trace_sched_wakeup.c trace_function(tr, ip, parent_ip, flags, pc); tr 314 kernel/trace/trace_sched_wakeup.c static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) tr 316 kernel/trace/trace_sched_wakeup.c struct tracer *tracer = tr->current_trace; tr 318 kernel/trace/trace_sched_wakeup.c if (wakeup_function_set(tr, mask, set)) tr 323 kernel/trace/trace_sched_wakeup.c return wakeup_display_graph(tr, set); tr 329 kernel/trace/trace_sched_wakeup.c static int start_func_tracer(struct trace_array *tr, int graph) tr 333 kernel/trace/trace_sched_wakeup.c ret = register_wakeup_function(tr, graph, 0); tr 343 kernel/trace/trace_sched_wakeup.c static void stop_func_tracer(struct trace_array *tr, int graph) tr 347 kernel/trace/trace_sched_wakeup.c unregister_wakeup_function(tr, graph); tr 353 kernel/trace/trace_sched_wakeup.c static bool report_latency(struct trace_array *tr, u64 delta) tr 359 kernel/trace/trace_sched_wakeup.c if (delta <= tr->max_latency) tr 375 kernel/trace/trace_sched_wakeup.c tracing_sched_switch_trace(struct trace_array *tr, tr 381 kernel/trace/trace_sched_wakeup.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 399 kernel/trace/trace_sched_wakeup.c trace_buffer_unlock_commit(tr, buffer, event, flags, pc); tr 403 kernel/trace/trace_sched_wakeup.c tracing_sched_wakeup_trace(struct trace_array *tr, tr 411 kernel/trace/trace_sched_wakeup.c struct ring_buffer *buffer = tr->trace_buffer.buffer; tr 427 kernel/trace/trace_sched_wakeup.c trace_buffer_unlock_commit(tr, buffer, event, flags, pc); tr 500 kernel/trace/trace_sched_wakeup.c static void __wakeup_reset(struct trace_array *tr) tr 512 kernel/trace/trace_sched_wakeup.c static void wakeup_reset(struct trace_array *tr) tr 516 kernel/trace/trace_sched_wakeup.c tracing_reset_online_cpus(&tr->trace_buffer); tr 520 kernel/trace/trace_sched_wakeup.c __wakeup_reset(tr); tr 604 kernel/trace/trace_sched_wakeup.c static void start_wakeup_tracer(struct trace_array *tr) tr 636 kernel/trace/trace_sched_wakeup.c wakeup_reset(tr); tr 647 kernel/trace/trace_sched_wakeup.c if (start_func_tracer(tr, is_graph(tr))) tr 659 kernel/trace/trace_sched_wakeup.c static void stop_wakeup_tracer(struct trace_array *tr) tr 662 kernel/trace/trace_sched_wakeup.c stop_func_tracer(tr, is_graph(tr)); tr 671 kernel/trace/trace_sched_wakeup.c static int __wakeup_tracer_init(struct trace_array *tr) tr 673 kernel/trace/trace_sched_wakeup.c save_flags = tr->trace_flags; tr 676 kernel/trace/trace_sched_wakeup.c set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); tr 677 kernel/trace/trace_sched_wakeup.c set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1); tr 679 kernel/trace/trace_sched_wakeup.c tr->max_latency = 0; tr 680 kernel/trace/trace_sched_wakeup.c wakeup_trace = tr; tr 681 kernel/trace/trace_sched_wakeup.c ftrace_init_array_ops(tr, wakeup_tracer_call); tr 682 kernel/trace/trace_sched_wakeup.c 
start_wakeup_tracer(tr); tr 688 kernel/trace/trace_sched_wakeup.c static int wakeup_tracer_init(struct trace_array *tr) tr 695 kernel/trace/trace_sched_wakeup.c return __wakeup_tracer_init(tr); tr 698 kernel/trace/trace_sched_wakeup.c static int wakeup_rt_tracer_init(struct trace_array *tr) tr 705 kernel/trace/trace_sched_wakeup.c return __wakeup_tracer_init(tr); tr 708 kernel/trace/trace_sched_wakeup.c static int wakeup_dl_tracer_init(struct trace_array *tr) tr 715 kernel/trace/trace_sched_wakeup.c return __wakeup_tracer_init(tr); tr 718 kernel/trace/trace_sched_wakeup.c static void wakeup_tracer_reset(struct trace_array *tr) tr 723 kernel/trace/trace_sched_wakeup.c stop_wakeup_tracer(tr); tr 725 kernel/trace/trace_sched_wakeup.c wakeup_reset(tr); tr 727 kernel/trace/trace_sched_wakeup.c set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); tr 728 kernel/trace/trace_sched_wakeup.c set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); tr 729 kernel/trace/trace_sched_wakeup.c ftrace_reset_array_ops(tr); tr 733 kernel/trace/trace_sched_wakeup.c static void wakeup_tracer_start(struct trace_array *tr) tr 735 kernel/trace/trace_sched_wakeup.c wakeup_reset(tr); tr 739 kernel/trace/trace_sched_wakeup.c static void wakeup_tracer_stop(struct trace_array *tr) tr 70 kernel/trace/trace_selftest.c arch_spin_lock(&buf->tr->max_lock); tr 88 kernel/trace/trace_selftest.c arch_spin_unlock(&buf->tr->max_lock); tr 185 kernel/trace/trace_selftest.c static int trace_selftest_ops(struct trace_array *tr, int cnt) tr 222 kernel/trace/trace_selftest.c ftrace_init_array_ops(tr, trace_selftest_test_global_func); tr 223 kernel/trace/trace_selftest.c register_ftrace_function(tr->ops); tr 304 kernel/trace/trace_selftest.c unregister_ftrace_function(tr->ops); tr 305 kernel/trace/trace_selftest.c ftrace_reset_array_ops(tr); tr 326 kernel/trace/trace_selftest.c struct trace_array *tr, tr 355 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); tr 365 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, &count); tr 386 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, &count); tr 393 kernel/trace/trace_selftest.c trace->reset(tr); tr 400 kernel/trace/trace_selftest.c ret = trace_selftest_ops(tr, 1); tr 401 kernel/trace/trace_selftest.c trace->reset(tr); tr 411 kernel/trace/trace_selftest.c ret = trace_selftest_ops(tr, 2); tr 536 kernel/trace/trace_selftest.c # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) tr 653 kernel/trace/trace_selftest.c trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) tr 672 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); tr 685 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, &count); tr 688 kernel/trace/trace_selftest.c trace->reset(tr); tr 697 kernel/trace/trace_selftest.c ret = trace_selftest_startup_dynamic_tracing(trace, tr, tr 755 kernel/trace/trace_selftest.c struct trace_array *tr) tr 771 kernel/trace/trace_selftest.c tracing_reset_online_cpus(&tr->trace_buffer); tr 772 kernel/trace/trace_selftest.c set_graph_array(tr); tr 793 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, &count); tr 821 kernel/trace/trace_selftest.c trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) tr 823 kernel/trace/trace_selftest.c unsigned long save_max = tr->max_latency; tr 828 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); tr 835 kernel/trace/trace_selftest.c tr->max_latency = 0; tr 847 
kernel/trace/trace_selftest.c trace->stop(tr); tr 851 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, NULL); tr 853 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->max_buffer, &count); tr 854 kernel/trace/trace_selftest.c trace->reset(tr); tr 862 kernel/trace/trace_selftest.c tr->max_latency = save_max; tr 870 kernel/trace/trace_selftest.c trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) tr 872 kernel/trace/trace_selftest.c unsigned long save_max = tr->max_latency; tr 890 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); tr 897 kernel/trace/trace_selftest.c tr->max_latency = 0; tr 909 kernel/trace/trace_selftest.c trace->stop(tr); tr 913 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, NULL); tr 915 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->max_buffer, &count); tr 916 kernel/trace/trace_selftest.c trace->reset(tr); tr 924 kernel/trace/trace_selftest.c tr->max_latency = save_max; tr 932 kernel/trace/trace_selftest.c trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr) tr 934 kernel/trace/trace_selftest.c unsigned long save_max = tr->max_latency; tr 952 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); tr 959 kernel/trace/trace_selftest.c tr->max_latency = 0; tr 975 kernel/trace/trace_selftest.c trace->stop(tr); tr 979 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, NULL); tr 983 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->max_buffer, &count); tr 994 kernel/trace/trace_selftest.c tr->max_latency = 0; tr 996 kernel/trace/trace_selftest.c trace->start(tr); tr 1005 kernel/trace/trace_selftest.c trace->stop(tr); tr 1009 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, NULL); tr 1013 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->max_buffer, &count); tr 1024 kernel/trace/trace_selftest.c trace->reset(tr); tr 1025 kernel/trace/trace_selftest.c tr->max_latency = save_max; tr 1033 kernel/trace/trace_selftest.c trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) tr 1085 kernel/trace/trace_selftest.c trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) tr 1087 kernel/trace/trace_selftest.c unsigned long save_max = tr->max_latency; tr 1108 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); tr 1115 kernel/trace/trace_selftest.c tr->max_latency = 0; tr 1139 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, NULL); tr 1141 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->max_buffer, &count); tr 1144 kernel/trace/trace_selftest.c trace->reset(tr); tr 1147 kernel/trace/trace_selftest.c tr->max_latency = save_max; tr 1163 kernel/trace/trace_selftest.c trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) tr 1169 kernel/trace/trace_selftest.c ret = tracer_init(trace, tr); tr 1180 kernel/trace/trace_selftest.c ret = trace_test_buffer(&tr->trace_buffer, &count); tr 1181 kernel/trace/trace_selftest.c trace->reset(tr); tr 125 kernel/trace/trace_syscalls.c struct trace_array *tr = iter->tr; tr 152 kernel/trace/trace_syscalls.c if (tr->trace_flags & TRACE_ITER_VERBOSE) tr 310 kernel/trace/trace_syscalls.c struct trace_array *tr = data; tr 327 kernel/trace/trace_syscalls.c trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); tr 343 kernel/trace/trace_syscalls.c buffer = tr->trace_buffer.buffer; tr 360 kernel/trace/trace_syscalls.c struct trace_array *tr = data; 
tr 375 kernel/trace/trace_syscalls.c trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]); tr 389 kernel/trace/trace_syscalls.c buffer = tr->trace_buffer.buffer; tr 407 kernel/trace/trace_syscalls.c struct trace_array *tr = file->tr; tr 415 kernel/trace/trace_syscalls.c if (!tr->sys_refcount_enter) tr 416 kernel/trace/trace_syscalls.c ret = register_trace_sys_enter(ftrace_syscall_enter, tr); tr 418 kernel/trace/trace_syscalls.c rcu_assign_pointer(tr->enter_syscall_files[num], file); tr 419 kernel/trace/trace_syscalls.c tr->sys_refcount_enter++; tr 428 kernel/trace/trace_syscalls.c struct trace_array *tr = file->tr; tr 435 kernel/trace/trace_syscalls.c tr->sys_refcount_enter--; tr 436 kernel/trace/trace_syscalls.c RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL); tr 437 kernel/trace/trace_syscalls.c if (!tr->sys_refcount_enter) tr 438 kernel/trace/trace_syscalls.c unregister_trace_sys_enter(ftrace_syscall_enter, tr); tr 445 kernel/trace/trace_syscalls.c struct trace_array *tr = file->tr; tr 453 kernel/trace/trace_syscalls.c if (!tr->sys_refcount_exit) tr 454 kernel/trace/trace_syscalls.c ret = register_trace_sys_exit(ftrace_syscall_exit, tr); tr 456 kernel/trace/trace_syscalls.c rcu_assign_pointer(tr->exit_syscall_files[num], file); tr 457 kernel/trace/trace_syscalls.c tr->sys_refcount_exit++; tr 466 kernel/trace/trace_syscalls.c struct trace_array *tr = file->tr; tr 473 kernel/trace/trace_syscalls.c tr->sys_refcount_exit--; tr 474 kernel/trace/trace_syscalls.c RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL); tr 475 kernel/trace/trace_syscalls.c if (!tr->sys_refcount_exit) tr 476 kernel/trace/trace_syscalls.c unregister_trace_sys_exit(ftrace_syscall_exit, tr); tr 768 security/selinux/ss/policydb.c struct role_trans *tr, *ltr = NULL; tr 815 security/selinux/ss/policydb.c for (tr = p->role_tr; tr; tr = tr->next) { tr 818 security/selinux/ss/policydb.c ltr = tr; tr 2257 security/selinux/ss/policydb.c struct role_trans *tr, *ltr; tr 2414 security/selinux/ss/policydb.c tr = kzalloc(sizeof(*tr), GFP_KERNEL); tr 2415 security/selinux/ss/policydb.c if (!tr) tr 2418 security/selinux/ss/policydb.c ltr->next = tr; tr 2420 security/selinux/ss/policydb.c p->role_tr = tr; tr 2426 security/selinux/ss/policydb.c tr->role = le32_to_cpu(buf[0]); tr 2427 security/selinux/ss/policydb.c tr->type = le32_to_cpu(buf[1]); tr 2428 security/selinux/ss/policydb.c tr->new_role = le32_to_cpu(buf[2]); tr 2433 security/selinux/ss/policydb.c tr->tclass = le32_to_cpu(buf[0]); tr 2435 security/selinux/ss/policydb.c tr->tclass = p->process_class; tr 2438 security/selinux/ss/policydb.c if (!policydb_role_isvalid(p, tr->role) || tr 2439 security/selinux/ss/policydb.c !policydb_type_isvalid(p, tr->type) || tr 2440 security/selinux/ss/policydb.c !policydb_class_isvalid(p, tr->tclass) || tr 2441 security/selinux/ss/policydb.c !policydb_role_isvalid(p, tr->new_role)) tr 2443 security/selinux/ss/policydb.c ltr = tr; tr 2651 security/selinux/ss/policydb.c struct role_trans *tr; tr 2657 security/selinux/ss/policydb.c for (tr = r; tr; tr = tr->next) tr 2663 security/selinux/ss/policydb.c for (tr = r; tr; tr = tr->next) { tr 2664 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(tr->role); tr 2665 security/selinux/ss/policydb.c buf[1] = cpu_to_le32(tr->type); tr 2666 security/selinux/ss/policydb.c buf[2] = cpu_to_le32(tr->new_role); tr 2671 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(tr->tclass); tr 561 sound/pci/au88x0/au88x0_core.c int dirplay, int sl, unsigned int tr, int thsource) tr 575 
sound/pci/au88x0/au88x0_core.c if ((tr & 0x10000) && (tr != 0x10000)) { tr 576 sound/pci/au88x0/au88x0_core.c tr = 0; tr 579 sound/pci/au88x0/au88x0_core.c if ((((short)tr) < 0) && (tr != 0x8000)) { tr 580 sound/pci/au88x0/au88x0_core.c tr = 0; tr 583 sound/pci/au88x0/au88x0_core.c tr = 1; tr 589 sound/pci/au88x0/au88x0_core.c tr = 0; /*ebx = 0 */ tr 596 sound/pci/au88x0/au88x0_core.c tr = 1; tr 604 sound/pci/au88x0/au88x0_core.c if (tr) tr 610 sound/pci/au88x0/au88x0_core.c if (tr) tr 623 sound/pci/au88x0/au88x0_core.c (tr << 0x11) | (dirplay << 0x10) | (ebp << 0x8) | esp10); tr 152 tools/arch/x86/include/uapi/asm/kvm.h struct kvm_segment tr, ldt; tr 279 tools/perf/builtin-sched.c struct thread_runtime tr; tr 930 tools/perf/builtin-sched.c struct thread_runtime *tr; tr 932 tools/perf/builtin-sched.c tr = thread__priv(thread); tr 933 tools/perf/builtin-sched.c if (tr == NULL) { tr 934 tools/perf/builtin-sched.c tr = thread__init_runtime(thread); tr 935 tools/perf/builtin-sched.c if (tr == NULL) tr 939 tools/perf/builtin-sched.c return tr; tr 1528 tools/perf/builtin-sched.c struct thread_runtime *tr; tr 1568 tools/perf/builtin-sched.c tr = thread__get_runtime(sched_in); tr 1569 tools/perf/builtin-sched.c if (tr == NULL) { tr 1579 tools/perf/builtin-sched.c if (!tr->shortname[0]) { tr 1585 tools/perf/builtin-sched.c tr->shortname[0] = '.'; tr 1586 tools/perf/builtin-sched.c tr->shortname[1] = ' '; tr 1588 tools/perf/builtin-sched.c tr->shortname[0] = sched->next_shortname1; tr 1589 tools/perf/builtin-sched.c tr->shortname[1] = sched->next_shortname2; tr 1641 tools/perf/builtin-sched.c if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) { tr 1648 tools/perf/builtin-sched.c tr->shortname, thread__comm_str(sched_in), sched_in->tid); tr 1649 tools/perf/builtin-sched.c tr->comm_changed = false; tr 1759 tools/perf/builtin-sched.c struct thread_runtime *tr; tr 1772 tools/perf/builtin-sched.c tr = thread__get_runtime(thread); tr 1773 tools/perf/builtin-sched.c if (tr == NULL) { tr 1778 tools/perf/builtin-sched.c tr->comm_changed = true; tr 2003 tools/perf/builtin-sched.c struct thread_runtime *tr = thread__priv(thread); tr 2032 tools/perf/builtin-sched.c wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt; tr 2035 tools/perf/builtin-sched.c print_sched_time(tr->dt_delay, 6); tr 2036 tools/perf/builtin-sched.c print_sched_time(tr->dt_run, 6); tr 2197 tools/perf/builtin-sched.c init_stats(&itr->tr.run_stats); tr 2404 tools/perf/builtin-sched.c struct thread_runtime *tr = NULL; tr 2412 tools/perf/builtin-sched.c tr = thread__get_runtime(thread); tr 2413 tools/perf/builtin-sched.c if (tr == NULL) tr 2416 tools/perf/builtin-sched.c if (tr->ready_to_run == 0) tr 2417 tools/perf/builtin-sched.c tr->ready_to_run = sample->time; tr 2488 tools/perf/builtin-sched.c struct thread_runtime *tr = NULL; tr 2496 tools/perf/builtin-sched.c tr = thread__get_runtime(thread); tr 2497 tools/perf/builtin-sched.c if (tr == NULL) tr 2500 tools/perf/builtin-sched.c tr->migrations++; tr 2518 tools/perf/builtin-sched.c struct thread_runtime *tr = NULL; tr 2540 tools/perf/builtin-sched.c tr = thread__get_runtime(thread); tr 2541 tools/perf/builtin-sched.c if (tr == NULL) { tr 2574 tools/perf/builtin-sched.c timehist_update_runtime_stats(tr, t, tprev); tr 2577 tools/perf/builtin-sched.c struct idle_thread_runtime *itr = (void *)tr; tr 2618 tools/perf/builtin-sched.c if (tr) { tr 2620 tools/perf/builtin-sched.c tr->last_time = sample->time; tr 2623 tools/perf/builtin-sched.c tr->last_state = state; tr 
2626 tools/perf/builtin-sched.c tr->ready_to_run = 0; tr 2883 tools/perf/builtin-sched.c print_sched_time(itr->tr.total_run_time, 6); tr 117 tools/spi/spidev_test.c struct spi_ioc_transfer tr = { tr 127 tools/spi/spidev_test.c tr.tx_nbits = 4; tr 129 tools/spi/spidev_test.c tr.tx_nbits = 2; tr 131 tools/spi/spidev_test.c tr.rx_nbits = 4; tr 133 tools/spi/spidev_test.c tr.rx_nbits = 2; tr 136 tools/spi/spidev_test.c tr.rx_buf = 0; tr 138 tools/spi/spidev_test.c tr.tx_buf = 0; tr 141 tools/spi/spidev_test.c ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr); tr 182 tools/testing/selftests/kvm/include/x86_64/processor.h uint16_t tr; tr 185 tools/testing/selftests/kvm/include/x86_64/processor.h : /* output */ [tr]"=rm"(tr)); tr 186 tools/testing/selftests/kvm/include/x86_64/processor.h return tr; tr 204 tools/testing/selftests/kvm/lib/x86_64/processor.c segment_dump(stream, &sregs->tr, indent + 2); tr 633 tools/testing/selftests/kvm/lib/x86_64/processor.c kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot); tr 3001 virt/kvm/kvm_main.c struct kvm_translation tr; tr 3004 virt/kvm/kvm_main.c if (copy_from_user(&tr, argp, sizeof(tr))) tr 3006 virt/kvm/kvm_main.c r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); tr 3010 virt/kvm/kvm_main.c if (copy_to_user(argp, &tr, sizeof(tr)))
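The final entries above index the KVM_TRANSLATE ioctl path in virt/kvm/kvm_main.c, where the kernel copies a struct kvm_translation in from userspace, passes it to kvm_arch_vcpu_ioctl_translate(), and copies the filled-in result back. As a rough illustration of the userspace side of that exchange, here is a minimal sketch (not taken from the sources indexed above); it assumes a bare VM with no memory or registers configured, so what the translation reports depends entirely on the architecture and the vcpu's paging state.

/* Minimal sketch: create a bare VM and vcpu, then ask KVM to translate
 * a guest-virtual address via KVM_TRANSLATE.  The kernel-side handler
 * shown in the listing copies this struct in, calls
 * kvm_arch_vcpu_ioctl_translate(), and copies it back out.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_translation tr = { .linear_address = 0x1000 };

	if (kvm < 0 || vm < 0 || vcpu < 0)
		return 1;

	if (ioctl(vcpu, KVM_TRANSLATE, &tr) < 0) {
		perror("KVM_TRANSLATE");
		return 1;
	}

	printf("gva 0x%llx -> gpa 0x%llx (valid=%d writeable=%d)\n",
	       (unsigned long long)tr.linear_address,
	       (unsigned long long)tr.physical_address,
	       tr.valid, tr.writeable);
	return 0;
}

The in/out split of struct kvm_translation (linear_address in; physical_address, valid, writeable, usermode out) comes from the uapi header; the tools/testing/selftests/kvm and tools/arch/x86 entries in the listing carry copies of the same definitions.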