ti 113 arch/alpha/include/asm/elf.h struct thread_info *ti);
ti 289 arch/alpha/kernel/process.c dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
ti 324 arch/alpha/kernel/process.c dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
ti 330 arch/alpha/kernel/process.c dest[32] = ti->pcb.unique;
ti 216 arch/arm/include/asm/assembler.h .macro inc_preempt_count, ti, tmp
ti 222 arch/arm/include/asm/assembler.h .macro dec_preempt_count, ti, tmp
ti 228 arch/arm/include/asm/assembler.h .macro dec_preempt_count_ti, ti, tmp
ti 233 arch/arm/include/asm/assembler.h .macro inc_preempt_count, ti, tmp
ti 236 arch/arm/include/asm/assembler.h .macro dec_preempt_count, ti, tmp
ti 239 arch/arm/include/asm/assembler.h .macro dec_preempt_count_ti, ti, tmp
ti 78 arch/arm/kernel/kgdb.c struct thread_info *ti;
ti 90 arch/arm/kernel/kgdb.c ti = task_thread_info(task);
ti 91 arch/arm/kernel/kgdb.c gdb_regs[_R4] = ti->cpu_context.r4;
ti 92 arch/arm/kernel/kgdb.c gdb_regs[_R5] = ti->cpu_context.r5;
ti 93 arch/arm/kernel/kgdb.c gdb_regs[_R6] = ti->cpu_context.r6;
ti 94 arch/arm/kernel/kgdb.c gdb_regs[_R7] = ti->cpu_context.r7;
ti 95 arch/arm/kernel/kgdb.c gdb_regs[_R8] = ti->cpu_context.r8;
ti 96 arch/arm/kernel/kgdb.c gdb_regs[_R9] = ti->cpu_context.r9;
ti 97 arch/arm/kernel/kgdb.c gdb_regs[_R10] = ti->cpu_context.sl;
ti 98 arch/arm/kernel/kgdb.c gdb_regs[_FP] = ti->cpu_context.fp;
ti 99 arch/arm/kernel/kgdb.c gdb_regs[_SPT] = ti->cpu_context.sp;
ti 100 arch/arm/kernel/kgdb.c gdb_regs[_PC] = ti->cpu_context.pc;
ti 451 arch/arm/vfp/vfpmodule.c struct thread_info *ti = current_thread_info();
ti 457 arch/arm/vfp/vfpmodule.c vfp_save_state(&ti->vfpstate, fpexc);
ti 461 arch/arm/vfp/vfpmodule.c } else if (vfp_current_hw_state[ti->cpu]) {
ti 464 arch/arm/vfp/vfpmodule.c vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
ti 470 arch/arm/vfp/vfpmodule.c vfp_current_hw_state[ti->cpu] = NULL;
ti 60 arch/arm64/include/asm/preempt.h struct thread_info *ti = current_thread_info();
ti 61 arch/arm64/include/asm/preempt.h u64 pc = READ_ONCE(ti->preempt_count);
ti 64 arch/arm64/include/asm/preempt.h WRITE_ONCE(ti->preempt.count, --pc);
ti 73 arch/arm64/include/asm/preempt.h return !pc || !READ_ONCE(ti->preempt_count);
ti 435 arch/arm64/kernel/debug-monitors.c struct thread_info *ti = task_thread_info(task);
ti 437 arch/arm64/kernel/debug-monitors.c if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
ti 31 arch/arm64/kvm/fpsimd.c struct thread_info *ti = &current->thread_info;
ti 38 arch/arm64/kvm/fpsimd.c ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
ti 46 arch/arm64/kvm/fpsimd.c vcpu->arch.host_thread_info = kern_hyp_va(ti);
ti 64 arch/c6x/include/asm/thread_info.h struct thread_info *ti;
ti 66 arch/c6x/include/asm/thread_info.h : "=b" (ti)
ti 68 arch/c6x/include/asm/thread_info.h return ti;
ti 71 arch/c6x/include/asm/thread_info.h #define get_thread_info(ti) get_task_struct((ti)->task)
ti 72 arch/c6x/include/asm/thread_info.h #define put_thread_info(ti) put_task_struct((ti)->task)
ti 8 arch/csky/kernel/syscall.c struct thread_info *ti = task_thread_info(current);
ti 12 arch/csky/kernel/syscall.c ti->tp_value = addr;
ti 52 arch/h8300/include/asm/thread_info.h struct thread_info *ti;
ti 56 arch/h8300/include/asm/thread_info.h : "=&r"(ti)
ti 58 arch/h8300/include/asm/thread_info.h return ti;
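A note on the arch/arm64/include/asm/preempt.h lines above: arm64 packs the preempt count and an inverted need-resched flag into one 64-bit thread_info word so both can be checked with a single load. A standalone model of that layout (illustrative only; the field names mirror struct thread_info, the program is not kernel code, and the union layout assumes little-endian as on arm64):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the arm64 trick: count and an inverted need-resched flag
     * share one 64-bit word, so "count reached zero and a reschedule is
     * wanted" is a single test of the whole word for zero. */
    struct ti_model {
        union {
            uint64_t preempt_count;     /* whole word, read at once */
            struct {
                uint32_t count;         /* low half: nesting count */
                uint32_t need_resched;  /* high half: 0 == resched wanted */
            } preempt;
        };
    };

    int main(void)
    {
        struct ti_model ti = { .preempt.count = 1 }; /* resched wanted */

        uint64_t pc = ti.preempt_count;
        ti.preempt.count = (uint32_t)--pc; /* write only the count half */
        printf("should reschedule: %d\n", !pc || !ti.preempt_count);
        return 0;
    }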
ti 56 arch/hexagon/kernel/process.c struct thread_info *ti = task_thread_info(p);
ti 61 arch/hexagon/kernel/process.c childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
ti 64 arch/hexagon/kernel/process.c ti->regs = childregs;
ti 1810 arch/ia64/kernel/mca.c struct thread_info *ti;
ti 1812 arch/ia64/kernel/mca.c ti = task_thread_info(p);
ti 1813 arch/ia64/kernel/mca.c ti->flags = _TIF_MCA_INIT;
ti 1814 arch/ia64/kernel/mca.c ti->preempt_count = 1;
ti 1815 arch/ia64/kernel/mca.c ti->task = p;
ti 1816 arch/ia64/kernel/mca.c ti->cpu = cpu;
ti 1817 arch/ia64/kernel/mca.c p->stack = ti;
ti 68 arch/ia64/kernel/time.c struct thread_info *ti = task_thread_info(tsk);
ti 71 arch/ia64/kernel/time.c if (ti->utime)
ti 72 arch/ia64/kernel/time.c account_user_time(tsk, cycle_to_nsec(ti->utime));
ti 74 arch/ia64/kernel/time.c if (ti->gtime)
ti 75 arch/ia64/kernel/time.c account_guest_time(tsk, cycle_to_nsec(ti->gtime));
ti 77 arch/ia64/kernel/time.c if (ti->idle_time)
ti 78 arch/ia64/kernel/time.c account_idle_time(cycle_to_nsec(ti->idle_time));
ti 80 arch/ia64/kernel/time.c if (ti->stime) {
ti 81 arch/ia64/kernel/time.c delta = cycle_to_nsec(ti->stime);
ti 85 arch/ia64/kernel/time.c if (ti->hardirq_time) {
ti 86 arch/ia64/kernel/time.c delta = cycle_to_nsec(ti->hardirq_time);
ti 90 arch/ia64/kernel/time.c if (ti->softirq_time) {
ti 91 arch/ia64/kernel/time.c delta = cycle_to_nsec(ti->softirq_time);
ti 95 arch/ia64/kernel/time.c ti->utime = 0;
ti 96 arch/ia64/kernel/time.c ti->gtime = 0;
ti 97 arch/ia64/kernel/time.c ti->idle_time = 0;
ti 98 arch/ia64/kernel/time.c ti->stime = 0;
ti 99 arch/ia64/kernel/time.c ti->hardirq_time = 0;
ti 100 arch/ia64/kernel/time.c ti->softirq_time = 0;
ti 123 arch/ia64/kernel/time.c struct thread_info *ti = task_thread_info(tsk);
ti 129 arch/ia64/kernel/time.c delta_stime = now - ti->ac_stamp;
ti 130 arch/ia64/kernel/time.c ti->ac_stamp = now;
ti 137 arch/ia64/kernel/time.c struct thread_info *ti = task_thread_info(tsk);
ti 141 arch/ia64/kernel/time.c ti->gtime += stime;
ti 143 arch/ia64/kernel/time.c ti->hardirq_time += stime;
ti 145 arch/ia64/kernel/time.c ti->softirq_time += stime;
ti 147 arch/ia64/kernel/time.c ti->stime += stime;
ti 153 arch/ia64/kernel/time.c struct thread_info *ti = task_thread_info(tsk);
ti 155 arch/ia64/kernel/time.c ti->idle_time += vtime_delta(tsk);
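The arch/ia64/kernel/time.c lines above show the arch-assisted cputime pattern: raw cycle counts accumulate in thread_info buckets and get converted and flushed to the scheduler's accounting in one place. A standalone sketch of that accumulate-then-flush shape (cycle_to_nsec's 400 MHz conversion and the account_*() sinks are stand-ins, not the kernel functions):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's accounting sinks. */
    static void account_user_time(uint64_t ns)   { printf("user   %lu ns\n", (unsigned long)ns); }
    static void account_idle_time(uint64_t ns)   { printf("idle   %lu ns\n", (unsigned long)ns); }
    static void account_system_time(uint64_t ns) { printf("system %lu ns\n", (unsigned long)ns); }

    /* Assumed 400 MHz clock for this model: one cycle is 2.5 ns. */
    static uint64_t cycle_to_nsec(uint64_t cyc) { return cyc * 10 / 4; }

    struct ti_times { uint64_t utime, stime, idle_time; };

    /* Flush pattern from the listing: convert each non-zero bucket,
     * hand it to the accounting core, then zero the buckets. */
    static void flush_account(struct ti_times *ti)
    {
        if (ti->utime)     account_user_time(cycle_to_nsec(ti->utime));
        if (ti->idle_time) account_idle_time(cycle_to_nsec(ti->idle_time));
        if (ti->stime)     account_system_time(cycle_to_nsec(ti->stime));
        ti->utime = ti->stime = ti->idle_time = 0;
    }

    int main(void)
    {
        struct ti_times t = { .utime = 4000, .stime = 800 };
        flush_account(&t);
        return 0;
    }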
ti 48 arch/m68k/include/asm/thread_info.h struct thread_info *ti;
ti 52 arch/m68k/include/asm/thread_info.h : "=&d"(ti)
ti 55 arch/m68k/include/asm/thread_info.h return ti;
ti 61 arch/microblaze/kernel/process.c struct thread_info *ti = task_thread_info(p);
ti 67 arch/microblaze/kernel/process.c memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
ti 68 arch/microblaze/kernel/process.c ti->cpu_context.r1 = (unsigned long)childregs;
ti 69 arch/microblaze/kernel/process.c ti->cpu_context.r20 = (unsigned long)usp; /* fn */
ti 70 arch/microblaze/kernel/process.c ti->cpu_context.r19 = (unsigned long)arg;
ti 74 arch/microblaze/kernel/process.c ti->cpu_context.msr = childregs->msr & ~MSR_IE;
ti 76 arch/microblaze/kernel/process.c ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
ti 83 arch/microblaze/kernel/process.c memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
ti 84 arch/microblaze/kernel/process.c ti->cpu_context.r1 = (unsigned long)childregs;
ti 86 arch/microblaze/kernel/process.c ti->cpu_context.msr = (unsigned long)childregs->msr;
ti 106 arch/microblaze/kernel/process.c ti->cpu_context.msr = (childregs->msr|MSR_VM);
ti 107 arch/microblaze/kernel/process.c ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
ti 108 arch/microblaze/kernel/process.c ti->cpu_context.msr &= ~MSR_IE;
ti 110 arch/microblaze/kernel/process.c ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
ti 69 arch/mips/kernel/mips-mt-fpaff.c struct thread_info *ti;
ti 119 arch/mips/kernel/mips-mt-fpaff.c ti = task_thread_info(p);
ti 120 arch/mips/kernel/mips-mt-fpaff.c if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
ti 126 arch/mips/kernel/mips-mt-fpaff.c clear_ti_thread_flag(ti, TIF_FPUBOUND);
ti 124 arch/mips/kernel/process.c struct thread_info *ti = task_thread_info(p);
ti 139 arch/mips/kernel/process.c ti->addr_limit = KERNEL_DS;
ti 160 arch/mips/kernel/process.c ti->addr_limit = USER_DS;
ti 182 arch/mips/kernel/process.c ti->tp_value = tls;
ti 87 arch/mips/kernel/syscall.c struct thread_info *ti = task_thread_info(current);
ti 89 arch/mips/kernel/syscall.c ti->tp_value = addr;
ti 630 arch/mips/kernel/traps.c struct thread_info *ti = task_thread_info(current);
ti 656 arch/mips/kernel/traps.c regs->regs[rt] = ti->tp_value;
ti 39 arch/mips/lib/libgcc.h ti_type ti;
ti 37 arch/mips/lib/multi3.c aa.ti = a;
ti 38 arch/mips/lib/multi3.c bb.ti = b;
ti 50 arch/mips/lib/multi3.c return res.ti;
ti 82 arch/openrisc/include/asm/thread_info.h #define get_thread_info(ti) get_task_struct((ti)->task)
ti 83 arch/openrisc/include/asm/thread_info.h #define put_thread_info(ti) put_task_struct((ti)->task)
ti 143 arch/powerpc/include/asm/thread_info.h struct thread_info *ti = current_thread_info();
ti 144 arch/powerpc/include/asm/thread_info.h return (ti->local_flags & flags) != 0;
ti 1599 arch/powerpc/kernel/process.c struct thread_info *ti = task_thread_info(p);
ti 1619 arch/powerpc/kernel/process.c ti->flags |= _TIF_RESTOREALL;
ti 2102 arch/powerpc/kernel/process.c struct thread_info *ti = current_thread_info();
ti 2123 arch/powerpc/kernel/process.c ti->local_flags |= _TLF_RUNLATCH;
ti 2129 arch/powerpc/kernel/process.c struct thread_info *ti = current_thread_info();
ti 2131 arch/powerpc/kernel/process.c ti->local_flags &= ~_TLF_RUNLATCH;
ti 123 arch/powerpc/kernel/syscalls.c struct thread_info *ti;
ti 132 arch/powerpc/kernel/syscalls.c ti = current_thread_info();
ti 133 arch/powerpc/kernel/syscalls.c ti->flags |= _TIF_RESTOREALL;
ti 267 arch/powerpc/mm/book3s64/slb.c static bool preload_hit(struct thread_info *ti, unsigned long esid)
ti 271 arch/powerpc/mm/book3s64/slb.c for (i = 0; i < ti->slb_preload_nr; i++) {
ti 274 arch/powerpc/mm/book3s64/slb.c idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
ti 275 arch/powerpc/mm/book3s64/slb.c if (esid == ti->slb_preload_esid[idx])
ti 281 arch/powerpc/mm/book3s64/slb.c static bool preload_add(struct thread_info *ti, unsigned long ea)
ti 294 arch/powerpc/mm/book3s64/slb.c if (preload_hit(ti, esid))
ti 297 arch/powerpc/mm/book3s64/slb.c idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
ti 298 arch/powerpc/mm/book3s64/slb.c ti->slb_preload_esid[idx] = esid;
ti 299 arch/powerpc/mm/book3s64/slb.c if (ti->slb_preload_nr == SLB_PRELOAD_NR)
ti 300 arch/powerpc/mm/book3s64/slb.c ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
ti 302 arch/powerpc/mm/book3s64/slb.c ti->slb_preload_nr++;
ti 307 arch/powerpc/mm/book3s64/slb.c static void preload_age(struct thread_info *ti)
ti 309 arch/powerpc/mm/book3s64/slb.c if (!ti->slb_preload_nr)
ti 311 arch/powerpc/mm/book3s64/slb.c ti->slb_preload_nr--;
ti 312 arch/powerpc/mm/book3s64/slb.c ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
ti 317 arch/powerpc/mm/book3s64/slb.c struct thread_info *ti = current_thread_info();
ti 327 arch/powerpc/mm/book3s64/slb.c if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
ti 349 arch/powerpc/mm/book3s64/slb.c if (preload_add(ti, exec))
ti 355 arch/powerpc/mm/book3s64/slb.c if (preload_add(ti, mm->mmap_base))
ti 367 arch/powerpc/mm/book3s64/slb.c struct thread_info *ti = current_thread_info();
ti 374 arch/powerpc/mm/book3s64/slb.c if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
ti 381 arch/powerpc/mm/book3s64/slb.c if (preload_add(ti, start))
ti 387 arch/powerpc/mm/book3s64/slb.c if (preload_add(ti, sp))
ti 393 arch/powerpc/mm/book3s64/slb.c if (preload_add(ti, heap))
ti 407 arch/powerpc/mm/book3s64/slb.c struct thread_info *ti = task_thread_info(tsk);
ti 487 arch/powerpc/mm/book3s64/slb.c preload_age(ti);
ti 488 arch/powerpc/mm/book3s64/slb.c preload_add(ti, pc);
ti 491 arch/powerpc/mm/book3s64/slb.c for (i = 0; i < ti->slb_preload_nr; i++) {
ti 495 arch/powerpc/mm/book3s64/slb.c idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
ti 496 arch/powerpc/mm/book3s64/slb.c ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;
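The arch/powerpc/mm/book3s64/slb.c lines above implement a small FIFO of recently used ESIDs held in thread_info: an esid array, a tail index, and a count. A self-contained model of that hit/add/age logic (SLB_PRELOAD_NR kept at the kernel's 16 here, everything else reduced to plain C):

    #include <stdbool.h>
    #include <stdio.h>

    #define SLB_PRELOAD_NR 16 /* power of two keeps the % cheap */

    struct preload {
        unsigned long esid[SLB_PRELOAD_NR];
        unsigned char tail, nr;
    };

    static bool preload_hit(struct preload *p, unsigned long esid)
    {
        for (unsigned char i = 0; i < p->nr; i++)
            if (p->esid[(p->tail + i) % SLB_PRELOAD_NR] == esid)
                return true;
        return false;
    }

    /* Add at the head; if full, advance tail so the oldest entry falls off. */
    static bool preload_add(struct preload *p, unsigned long esid)
    {
        if (preload_hit(p, esid))
            return false;
        p->esid[(p->tail + p->nr) % SLB_PRELOAD_NR] = esid;
        if (p->nr == SLB_PRELOAD_NR)
            p->tail = (p->tail + 1) % SLB_PRELOAD_NR;
        else
            p->nr++;
        return true;
    }

    /* Drop the oldest entry, as preload_age() does on context switch. */
    static void preload_age(struct preload *p)
    {
        if (!p->nr)
            return;
        p->nr--;
        p->tail = (p->tail + 1) % SLB_PRELOAD_NR;
    }

    int main(void)
    {
        struct preload p = { { 0 }, 0, 0 };

        preload_add(&p, 0x10);
        preload_add(&p, 0x20);
        preload_age(&p); /* ages out 0x10 */
        printf("0x10 cached: %d, 0x20 cached: %d\n",
               preload_hit(&p, 0x10), preload_hit(&p, 0x20));
        return 0;
    }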
ti 101 arch/s390/kernel/cache.c static inline unsigned long ecag(int ai, int li, int ti)
ti 103 arch/s390/kernel/cache.c return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
ti 109 arch/s390/kernel/cache.c int ti, num_sets;
ti 112 arch/s390/kernel/cache.c ti = CACHE_TI_INSTRUCTION;
ti 114 arch/s390/kernel/cache.c ti = CACHE_TI_UNIFIED;
ti 117 arch/s390/kernel/cache.c this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
ti 118 arch/s390/kernel/cache.c this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
ti 119 arch/s390/kernel/cache.c this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
ti 72 arch/sh/include/asm/thread_info.h struct thread_info *ti;
ti 74 arch/sh/include/asm/thread_info.h __asm__ __volatile__ ("getcon cr17, %0" : "=r" (ti));
ti 76 arch/sh/include/asm/thread_info.h __asm__ __volatile__ ("stc r7_bank, %0" : "=r" (ti));
ti 83 arch/sh/include/asm/thread_info.h : "=&r" (ti), "=r" (__dummy)
ti 88 arch/sh/include/asm/thread_info.h return ti;
ti 163 arch/sh/include/asm/thread_info.h struct thread_info *ti = current_thread_info();
ti 164 arch/sh/include/asm/thread_info.h ti->flags = (ti->flags & (~0 >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
ti 170 arch/sh/include/asm/thread_info.h struct thread_info *ti = current_thread_info();
ti 171 arch/sh/include/asm/thread_info.h return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
ti 121 arch/sh/kernel/process_32.c struct thread_info *ti = task_thread_info(p);
ti 149 arch/sh/kernel/process_32.c ti->addr_limit = KERNEL_DS;
ti 150 arch/sh/kernel/process_32.c ti->status &= ~TS_USEDFPU;
ti 158 arch/sh/kernel/process_32.c ti->addr_limit = USER_DS;
ti 138 arch/sparc/include/asm/thread_info_64.h #define __thread_flag_byte_ptr(ti) \
ti 139 arch/sparc/include/asm/thread_info_64.h ((unsigned char *)(&((ti)->flags)))
ti 307 arch/sparc/kernel/process_32.c struct thread_info *ti = task_thread_info(p);
ti 338 arch/sparc/kernel/process_32.c ti->ksp = (unsigned long) new_stack;
ti 347 arch/sparc/kernel/process_32.c ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
ti 351 arch/sparc/kernel/process_32.c ti->kpsr = psr | PSR_PIL;
ti 352 arch/sparc/kernel/process_32.c ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows);
ti 359 arch/sparc/kernel/process_32.c ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
ti 360 arch/sparc/kernel/process_32.c ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
ti 361 arch/sparc/kernel/process_32.c ti->kwim = current->thread.fork_kwim;
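In the arch/s390/kernel/cache.c lines above, ecag() packs the attribute indication, cache level, and cache type into one query operand with ai << 4 | li << 1 | ti. A tiny sketch of that bit packing (the shifts come from the listing; the CACHE_TI_* numeric values used here are an assumption for illustration):

    #include <stdio.h>

    enum { CACHE_TI_UNIFIED = 0, CACHE_TI_INSTRUCTION = 1 }; /* assumed values */

    /* Operand layout per the shifts in the listing:
     * bit 0 = type, bits 1-3 = level, bits 4 and up = attribute. */
    static unsigned long ecag_operand(int ai, int li, int ti)
    {
        return (unsigned long)(ai << 4 | li << 1 | ti);
    }

    int main(void)
    {
        printf("L1 icache query operand: %#lx\n",
               ecag_operand(1, 0, CACHE_TI_INSTRUCTION));
        return 0;
    }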
("" : "=r" (p) : "0" (&ti)); ti 51 arch/um/include/asm/thread_info.h ti = (struct thread_info *) (((unsigned long)p) & ~mask); ti 52 arch/um/include/asm/thread_info.h return ti; ti 540 arch/um/kernel/irq.c struct thread_info *ti; ti 563 arch/um/kernel/irq.c ti = current_thread_info(); ti 564 arch/um/kernel/irq.c nested = (ti->real_thread != NULL); ti 569 arch/um/kernel/irq.c task = cpu_tasks[ti->cpu].task; ti 572 arch/um/kernel/irq.c *ti = *tti; ti 573 arch/um/kernel/irq.c ti->real_thread = tti; ti 574 arch/um/kernel/irq.c task->stack = ti; ti 584 arch/um/kernel/irq.c struct thread_info *ti, *to; ti 587 arch/um/kernel/irq.c ti = current_thread_info(); ti 591 arch/um/kernel/irq.c to = ti->real_thread; ti 593 arch/um/kernel/irq.c ti->real_thread = NULL; ti 594 arch/um/kernel/irq.c *to = *ti; ti 73 arch/x86/entry/common.c struct thread_info *ti = current_thread_info(); ti 80 arch/x86/entry/common.c work = READ_ONCE(ti->flags); ti 183 arch/x86/entry/common.c struct thread_info *ti = current_thread_info(); ti 191 arch/x86/entry/common.c cached_flags = READ_ONCE(ti->flags); ti 197 arch/x86/entry/common.c cached_flags = READ_ONCE(ti->flags); ti 215 arch/x86/entry/common.c ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED); ti 255 arch/x86/entry/common.c struct thread_info *ti = current_thread_info(); ti 256 arch/x86/entry/common.c u32 cached_flags = READ_ONCE(ti->flags); ti 280 arch/x86/entry/common.c struct thread_info *ti; ti 284 arch/x86/entry/common.c ti = current_thread_info(); ti 285 arch/x86/entry/common.c if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) ti 313 arch/x86/entry/common.c struct thread_info *ti = current_thread_info(); ti 317 arch/x86/entry/common.c ti->status |= TS_COMPAT; ti 320 arch/x86/entry/common.c if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) { ti 158 arch/x86/kernel/cpu/bugs.c struct thread_info *ti = current_thread_info(); ti 173 arch/x86/kernel/cpu/bugs.c hostval |= ssbd_tif_to_spec_ctrl(ti->flags); ti 177 arch/x86/kernel/cpu/bugs.c hostval |= stibp_tif_to_spec_ctrl(ti->flags); ti 201 arch/x86/kernel/cpu/bugs.c hostval = ssbd_tif_to_spec_ctrl(ti->flags); ti 435 arch/x86/xen/time.c struct pvclock_vsyscall_time_info *ti; ti 438 arch/x86/xen/time.c ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL); ti 439 arch/x86/xen/time.c if (!ti) ti 442 arch/x86/xen/time.c t.addr.v = &ti->pvti; ti 447 arch/x86/xen/time.c free_page((unsigned long)ti); ti 456 arch/x86/xen/time.c if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) { ti 461 arch/x86/xen/time.c free_page((unsigned long)ti); ti 467 arch/x86/xen/time.c xen_clock = ti; ti 86 arch/xtensa/include/asm/thread_info.h struct thread_info *ti; ti 88 arch/xtensa/include/asm/thread_info.h "xor %0, a1, %0" : "=&r" (ti) : ); ti 89 arch/xtensa/include/asm/thread_info.h return ti; ti 67 arch/xtensa/kernel/process.c void coprocessor_release_all(struct thread_info *ti) ti 78 arch/xtensa/kernel/process.c cpenable = ti->cpenable; ti 81 arch/xtensa/kernel/process.c if (coprocessor_owner[i] == ti) { ti 87 arch/xtensa/kernel/process.c ti->cpenable = cpenable; ti 88 arch/xtensa/kernel/process.c if (ti == current_thread_info()) ti 94 arch/xtensa/kernel/process.c void coprocessor_flush_all(struct thread_info *ti) ti 102 arch/xtensa/kernel/process.c cpenable = ti->cpenable; ti 106 arch/xtensa/kernel/process.c if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) ti 107 arch/xtensa/kernel/process.c coprocessor_flush(ti, i); ti 143 arch/xtensa/kernel/process.c struct thread_info *ti = current_thread_info(); ti 144 
ti 86 arch/xtensa/include/asm/thread_info.h struct thread_info *ti;
ti 88 arch/xtensa/include/asm/thread_info.h "xor %0, a1, %0" : "=&r" (ti) : );
ti 89 arch/xtensa/include/asm/thread_info.h return ti;
ti 67 arch/xtensa/kernel/process.c void coprocessor_release_all(struct thread_info *ti)
ti 78 arch/xtensa/kernel/process.c cpenable = ti->cpenable;
ti 81 arch/xtensa/kernel/process.c if (coprocessor_owner[i] == ti) {
ti 87 arch/xtensa/kernel/process.c ti->cpenable = cpenable;
ti 88 arch/xtensa/kernel/process.c if (ti == current_thread_info())
ti 94 arch/xtensa/kernel/process.c void coprocessor_flush_all(struct thread_info *ti)
ti 102 arch/xtensa/kernel/process.c cpenable = ti->cpenable;
ti 106 arch/xtensa/kernel/process.c if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
ti 107 arch/xtensa/kernel/process.c coprocessor_flush(ti, i);
ti 143 arch/xtensa/kernel/process.c struct thread_info *ti = current_thread_info();
ti 144 arch/xtensa/kernel/process.c coprocessor_flush_all(ti);
ti 145 arch/xtensa/kernel/process.c coprocessor_release_all(ti);
ti 212 arch/xtensa/kernel/process.c struct thread_info *ti;
ti 286 arch/xtensa/kernel/process.c ti = task_thread_info(p);
ti 287 arch/xtensa/kernel/process.c ti->cpenable = 0;
ti 124 arch/xtensa/kernel/ptrace.c struct thread_info *ti = task_thread_info(target);
ti 131 arch/xtensa/kernel/ptrace.c newregs->user = ti->xtregs_user;
ti 135 arch/xtensa/kernel/ptrace.c coprocessor_flush_all(ti);
ti 136 arch/xtensa/kernel/ptrace.c newregs->cp0 = ti->xtregs_cp.cp0;
ti 137 arch/xtensa/kernel/ptrace.c newregs->cp1 = ti->xtregs_cp.cp1;
ti 138 arch/xtensa/kernel/ptrace.c newregs->cp2 = ti->xtregs_cp.cp2;
ti 139 arch/xtensa/kernel/ptrace.c newregs->cp3 = ti->xtregs_cp.cp3;
ti 140 arch/xtensa/kernel/ptrace.c newregs->cp4 = ti->xtregs_cp.cp4;
ti 141 arch/xtensa/kernel/ptrace.c newregs->cp5 = ti->xtregs_cp.cp5;
ti 142 arch/xtensa/kernel/ptrace.c newregs->cp6 = ti->xtregs_cp.cp6;
ti 143 arch/xtensa/kernel/ptrace.c newregs->cp7 = ti->xtregs_cp.cp7;
ti 158 arch/xtensa/kernel/ptrace.c struct thread_info *ti = task_thread_info(target);
ti 170 arch/xtensa/kernel/ptrace.c ti->xtregs_user = newregs->user;
ti 174 arch/xtensa/kernel/ptrace.c coprocessor_flush_all(ti);
ti 175 arch/xtensa/kernel/ptrace.c coprocessor_release_all(ti);
ti 176 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp0 = newregs->cp0;
ti 177 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp1 = newregs->cp1;
ti 178 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp2 = newregs->cp2;
ti 179 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp3 = newregs->cp3;
ti 180 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp4 = newregs->cp4;
ti 181 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp5 = newregs->cp5;
ti 182 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp6 = newregs->cp6;
ti 183 arch/xtensa/kernel/ptrace.c ti->xtregs_cp.cp7 = newregs->cp7;
ti 137 arch/xtensa/kernel/signal.c struct thread_info *ti = current_thread_info();
ti 157 arch/xtensa/kernel/signal.c coprocessor_flush_all(ti);
ti 158 arch/xtensa/kernel/signal.c coprocessor_release_all(ti);
ti 159 arch/xtensa/kernel/signal.c err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
ti 164 arch/xtensa/kernel/signal.c err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
ti 176 arch/xtensa/kernel/signal.c struct thread_info *ti = current_thread_info();
ti 222 arch/xtensa/kernel/signal.c coprocessor_release_all(ti);
ti 223 arch/xtensa/kernel/signal.c err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
ti 226 arch/xtensa/kernel/signal.c err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
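The xtensa coprocessor_flush_all()/coprocessor_release_all() lines above walk a per-coprocessor owner table against the thread's cpenable bitmask. A compact model of that walk (owner table, flush, and the enable mask reduced to plain C; XCHAL_CP_MAX is borrowed as a name only):

    #include <stdio.h>

    #define XCHAL_CP_MAX 8

    static const void *coprocessor_owner[XCHAL_CP_MAX];

    static void coprocessor_flush(const void *ti, int cp)
    {
        printf("flush cp%d state for ti %p\n", cp, (void *)ti);
    }

    /* Flush every coprocessor this thread both enabled and owns. */
    static void flush_all(const void *ti, unsigned cpenable)
    {
        for (int i = 0; i < XCHAL_CP_MAX; i++, cpenable >>= 1)
            if ((cpenable & 1) && coprocessor_owner[i] == ti)
                coprocessor_flush(ti, i);
    }

    /* Drop ownership and the matching cpenable bits; return the new mask. */
    static unsigned release_all(const void *ti, unsigned cpenable)
    {
        for (int i = 0; i < XCHAL_CP_MAX; i++)
            if (coprocessor_owner[i] == ti) {
                coprocessor_owner[i] = NULL;
                cpenable &= ~(1u << i);
            }
        return cpenable;
    }

    int main(void)
    {
        int thread;

        coprocessor_owner[2] = &thread;
        flush_all(&thread, 1u << 2);
        printf("cpenable after release: %#x\n", release_all(&thread, 1u << 2));
        return 0;
    }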
ti 1583 drivers/block/pktcdvd.c static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
ti 1588 drivers/block/pktcdvd.c init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
ti 1600 drivers/block/pktcdvd.c cgc.buflen = be16_to_cpu(ti->track_information_length) +
ti 1601 drivers/block/pktcdvd.c sizeof(ti->track_information_length);
ti 1614 drivers/block/pktcdvd.c track_information ti;
ti 1623 drivers/block/pktcdvd.c ret = pkt_get_track_info(pd, last_track, 1, &ti);
ti 1628 drivers/block/pktcdvd.c if (ti.blank) {
ti 1630 drivers/block/pktcdvd.c ret = pkt_get_track_info(pd, last_track, 1, &ti);
ti 1636 drivers/block/pktcdvd.c if (ti.lra_v) {
ti 1637 drivers/block/pktcdvd.c *last_written = be32_to_cpu(ti.last_rec_address);
ti 1640 drivers/block/pktcdvd.c *last_written = be32_to_cpu(ti.track_start) +
ti 1641 drivers/block/pktcdvd.c be32_to_cpu(ti.track_size);
ti 1642 drivers/block/pktcdvd.c if (ti.free_blocks)
ti 1643 drivers/block/pktcdvd.c *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
ti 1738 drivers/block/pktcdvd.c static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
ti 1749 drivers/block/pktcdvd.c if (!ti->packet || !ti->fp)
ti 1755 drivers/block/pktcdvd.c if (ti->rt == 0 && ti->blank == 0)
ti 1758 drivers/block/pktcdvd.c if (ti->rt == 0 && ti->blank == 1)
ti 1761 drivers/block/pktcdvd.c if (ti->rt == 1 && ti->blank == 0)
ti 1764 drivers/block/pktcdvd.c pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
ti 1819 drivers/block/pktcdvd.c track_information ti;
ti 1829 drivers/block/pktcdvd.c memset(&ti, 0, sizeof(track_information));
ti 1843 drivers/block/pktcdvd.c ret = pkt_get_track_info(pd, track, 1, &ti);
ti 1849 drivers/block/pktcdvd.c if (!pkt_writable_track(pd, &ti)) {
ti 1858 drivers/block/pktcdvd.c pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
ti 1867 drivers/block/pktcdvd.c pd->settings.fp = ti.fp;
ti 1868 drivers/block/pktcdvd.c pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
ti 1870 drivers/block/pktcdvd.c if (ti.nwa_v) {
ti 1871 drivers/block/pktcdvd.c pd->nwa = be32_to_cpu(ti.next_writable);
ti 1880 drivers/block/pktcdvd.c if (ti.lra_v) {
ti 1881 drivers/block/pktcdvd.c pd->lra = be32_to_cpu(ti.last_rec_address);
ti 1893 drivers/block/pktcdvd.c pd->settings.track_mode = ti.track_mode;
ti 1898 drivers/block/pktcdvd.c switch (ti.data_mode) {
ti 2711 drivers/cdrom/cdrom.c struct cdrom_ti ti;
ti 2718 drivers/cdrom/cdrom.c if (copy_from_user(&ti, argp, sizeof(ti)))
ti 2724 drivers/cdrom/cdrom.c return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti);
ti 2805 drivers/cdrom/cdrom.c __u16 track, __u8 type, track_information *ti)
ti 2811 drivers/cdrom/cdrom.c init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
ti 2823 drivers/cdrom/cdrom.c buflen = be16_to_cpu(ti->track_information_length) +
ti 2824 drivers/cdrom/cdrom.c sizeof(ti->track_information_length);
ti 2844 drivers/cdrom/cdrom.c track_information ti;
ti 2858 drivers/cdrom/cdrom.c ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
ti 2859 drivers/cdrom/cdrom.c if (ti_size < (int)offsetof(typeof(ti), track_start))
ti 2863 drivers/cdrom/cdrom.c if (ti.blank) {
ti 2867 drivers/cdrom/cdrom.c ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
ti 2870 drivers/cdrom/cdrom.c if (ti_size < (int)(offsetof(typeof(ti), track_size)
ti 2871 drivers/cdrom/cdrom.c + sizeof(ti.track_size)))
ti 2875 drivers/cdrom/cdrom.c if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address)
ti 2876 drivers/cdrom/cdrom.c + sizeof(ti.last_rec_address))) {
ti 2877 drivers/cdrom/cdrom.c *last_written = be32_to_cpu(ti.last_rec_address);
ti 2880 drivers/cdrom/cdrom.c *last_written = be32_to_cpu(ti.track_start) +
ti 2881 drivers/cdrom/cdrom.c be32_to_cpu(ti.track_size);
ti 2882 drivers/cdrom/cdrom.c if (ti.free_blocks)
ti 2883 drivers/cdrom/cdrom.c *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
ti 2909 drivers/cdrom/cdrom.c track_information ti;
ti 2923 drivers/cdrom/cdrom.c ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
ti 2924 drivers/cdrom/cdrom.c if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start))
ti 2928 drivers/cdrom/cdrom.c if (ti.blank) {
ti 2932 drivers/cdrom/cdrom.c ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
ti 2938 drivers/cdrom/cdrom.c if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable)
ti 2939 drivers/cdrom/cdrom.c + sizeof(ti.next_writable)) {
ti 2940 drivers/cdrom/cdrom.c *next_writable = be32_to_cpu(ti.next_writable);
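Both pktcdvd.c and cdrom.c above derive the last written sector from READ TRACK INFORMATION fields: use last_rec_address if its valid bit is set, otherwise fall back to track_start + track_size minus the free blocks. A sketch of just that arithmetic on a little-endian host (the struct is trimmed to the fields the listing touches; the +7 fallback adjustment mirrors the listing):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h> /* ntohl()/htonl() for the drive's big-endian fields */

    struct track_info_model {
        uint32_t track_start, track_size, free_blocks, last_rec_address;
        unsigned lra_v : 1; /* last_rec_address valid bit */
    };

    static uint32_t last_written(const struct track_info_model *ti)
    {
        if (ti->lra_v)
            return ntohl(ti->last_rec_address);
        /* Fallback from the listing: end of track minus unused blocks. */
        return ntohl(ti->track_start) + ntohl(ti->track_size)
             - (ntohl(ti->free_blocks) + 7);
    }

    int main(void)
    {
        struct track_info_model t = {
            .track_start = htonl(1000), .track_size = htonl(500),
            .free_blocks = htonl(100), .lra_v = 0,
        };

        printf("last written sector: %u\n", last_written(&t)); /* 1393 */
        return 0;
    }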
ti 59 drivers/clocksource/timer-ti-32k.c struct ti_32k *ti = to_ti_32k(cs);
ti 61 drivers/clocksource/timer-ti-32k.c return (u64)readl_relaxed(ti->counter);
ti 35 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c struct amdgpu_task_info ti;
ti 37 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c memset(&ti, 0, sizeof(struct amdgpu_task_info));
ti 45 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
ti 50 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c ti.process_name, ti.tgid, ti.task_name, ti.pid);
ti 39 drivers/gpu/drm/gma500/mdfld_tmd_vid.c struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
ti 47 drivers/gpu/drm/gma500/mdfld_tmd_vid.c mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
ti 48 drivers/gpu/drm/gma500/mdfld_tmd_vid.c mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
ti 50 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ((ti->hsync_offset_hi << 8) | \
ti 51 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ti->hsync_offset_lo);
ti 53 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ((ti->hsync_pulse_width_hi << 8) | \
ti 54 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ti->hsync_pulse_width_lo);
ti 55 drivers/gpu/drm/gma500/mdfld_tmd_vid.c mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
ti 56 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ti->hblank_lo);
ti 58 drivers/gpu/drm/gma500/mdfld_tmd_vid.c mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
ti 59 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ti->vsync_offset_lo);
ti 61 drivers/gpu/drm/gma500/mdfld_tmd_vid.c mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
ti 62 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ti->vsync_pulse_width_lo);
ti 64 drivers/gpu/drm/gma500/mdfld_tmd_vid.c ((ti->vblank_hi << 8) | ti->vblank_lo);
ti 65 drivers/gpu/drm/gma500/mdfld_tmd_vid.c mode->clock = ti->pixel_clock * 10;
ti 223 drivers/gpu/drm/gma500/mid_bios.c struct gct_r10_timing_info *ti;
ti 244 drivers/gpu/drm/gma500/mid_bios.c ti = &gct[vbt.primary_panel_idx].DTD;
ti 245 drivers/gpu/drm/gma500/mid_bios.c dp_ti->pixel_clock = ti->pixel_clock;
ti 246 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hactive_hi = ti->hactive_hi;
ti 247 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hactive_lo = ti->hactive_lo;
ti 248 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hblank_hi = ti->hblank_hi;
ti 249 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hblank_lo = ti->hblank_lo;
ti 250 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
ti 251 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
ti 252 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
ti 253 drivers/gpu/drm/gma500/mid_bios.c dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
ti 254 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vactive_hi = ti->vactive_hi;
ti 255 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vactive_lo = ti->vactive_lo;
ti 256 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vblank_hi = ti->vblank_hi;
ti 257 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vblank_lo = ti->vblank_lo;
ti 258 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
ti 259 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
ti 260 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
ti 261 drivers/gpu/drm/gma500/mid_bios.c dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
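The gma500 lines above rebuild full display timings from split hi/lo byte fields of the firmware DTD. A sketch of the reconstruction for the horizontal side (field widths follow the << 8 shifts shown; note oaktrail_lvds.c below shifts its 4-bit vsync fields by 4 instead; the sample values are invented):

    #include <stdint.h>
    #include <stdio.h>

    struct dtd_model {
        uint8_t hactive_lo, hactive_hi;
        uint8_t hblank_lo, hblank_hi;
        uint8_t hsync_offset_lo, hsync_offset_hi;
        uint8_t hsync_pulse_width_lo, hsync_pulse_width_hi;
        uint16_t pixel_clock; /* in 10 kHz units, as the *10 suggests */
    };

    int main(void)
    {
        struct dtd_model ti = {
            .hactive_lo = 0x00, .hactive_hi = 0x04, /* 1024 */
            .hblank_lo  = 0x40, .hblank_hi  = 0x01, /* 320  */
            .hsync_offset_lo = 24, .hsync_pulse_width_lo = 136,
            .pixel_clock = 6500,                    /* 65 MHz */
        };

        int hdisplay    = (ti.hactive_hi << 8) | ti.hactive_lo;
        int hsync_start = hdisplay + ((ti.hsync_offset_hi << 8) | ti.hsync_offset_lo);
        int hsync_end   = hsync_start + ((ti.hsync_pulse_width_hi << 8) | ti.hsync_pulse_width_lo);
        int htotal      = hdisplay + ((ti.hblank_hi << 8) | ti.hblank_lo);

        printf("hdisplay %d, clock %d kHz, hsync %d-%d, htotal %d\n",
               hdisplay, ti.pixel_clock * 10, hsync_start, hsync_end, htotal);
        return 0;
    }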
ti 216 drivers/gpu/drm/gma500/oaktrail_lvds.c struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
ti 226 drivers/gpu/drm/gma500/oaktrail_lvds.c mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
ti 227 drivers/gpu/drm/gma500/oaktrail_lvds.c mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
ti 229 drivers/gpu/drm/gma500/oaktrail_lvds.c ((ti->hsync_offset_hi << 8) | \
ti 230 drivers/gpu/drm/gma500/oaktrail_lvds.c ti->hsync_offset_lo);
ti 232 drivers/gpu/drm/gma500/oaktrail_lvds.c ((ti->hsync_pulse_width_hi << 8) | \
ti 233 drivers/gpu/drm/gma500/oaktrail_lvds.c ti->hsync_pulse_width_lo);
ti 234 drivers/gpu/drm/gma500/oaktrail_lvds.c mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
ti 235 drivers/gpu/drm/gma500/oaktrail_lvds.c ti->hblank_lo);
ti 237 drivers/gpu/drm/gma500/oaktrail_lvds.c mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
ti 238 drivers/gpu/drm/gma500/oaktrail_lvds.c ti->vsync_offset_lo);
ti 240 drivers/gpu/drm/gma500/oaktrail_lvds.c mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
ti 241 drivers/gpu/drm/gma500/oaktrail_lvds.c ti->vsync_pulse_width_lo);
ti 243 drivers/gpu/drm/gma500/oaktrail_lvds.c ((ti->vblank_hi << 8) | ti->vblank_lo);
ti 244 drivers/gpu/drm/gma500/oaktrail_lvds.c mode->clock = ti->pixel_clock * 10;
ti 346 drivers/ide/ide-cd_ioctl.c struct cdrom_ti *ti = arg;
ti 352 drivers/ide/ide-cd_ioctl.c stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
ti 356 drivers/ide/ide-cd_ioctl.c stat = ide_cd_get_toc_entry(drive, ti->cdti_trk1, &last_toc);
ti 360 drivers/ide/ide-cd_ioctl.c if (ti->cdti_trk1 != CDROM_LEADOUT)
ti 285 drivers/infiniband/hw/qib/qib_file_ops.c const struct qib_tid_info *ti)
ti 303 drivers/infiniband/hw/qib/qib_file_ops.c cnt = ti->tidcnt;
ti 345 drivers/infiniband/hw/qib/qib_file_ops.c vaddr = ti->tidvaddr;
ti 445 drivers/infiniband/hw/qib/qib_file_ops.c (unsigned long) ti->tidlist,
ti 450 drivers/infiniband/hw/qib/qib_file_ops.c if (copy_to_user(u64_to_user_ptr(ti->tidmap),
ti 484 drivers/infiniband/hw/qib/qib_file_ops.c const struct qib_tid_info *ti)
ti 497 drivers/infiniband/hw/qib/qib_file_ops.c if (copy_from_user(tidmap, u64_to_user_ptr(ti->tidmap),
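From here the listing is dominated by device-mapper targets (dm-cache, dm-clone, dm-crypt, dm-delay, dm-dust, dm-era, dm-flakey, dm-integrity), which all hang their state off ti->private and report constructor failures through ti->error. A skeletal model of that shape, with struct dm_target reduced to a stub so the flow is visible outside the kernel (this is a shape demo, not the kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stub of struct dm_target: just the fields the listing touches. */
    struct dm_target {
        void *private;
        const char *error;
    };

    struct example_c { unsigned long long start; };

    static int example_ctr(struct dm_target *ti, int argc, char **argv)
    {
        struct example_c *ec;

        if (argc != 1) {
            ti->error = "Invalid argument count"; /* reported, not printed */
            return -1;
        }
        ec = calloc(1, sizeof(*ec));
        if (!ec) {
            ti->error = "Cannot allocate context";
            return -1;
        }
        ec->start = strtoull(argv[0], NULL, 0);
        ti->private = ec; /* every later callback recovers state from here */
        return 0;
    }

    static void example_dtr(struct dm_target *ti)
    {
        free(ti->private);
    }

    int main(void)
    {
        struct dm_target ti = { 0 };
        char *argv[] = { "2048" };

        if (example_ctr(&ti, 1, argv)) {
            fprintf(stderr, "ctr failed: %s\n", ti.error);
            return 1;
        }
        printf("start sector %llu\n", ((struct example_c *)ti.private)->start);
        example_dtr(&ti);
        return 0;
    }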
ti 374 drivers/md/dm-cache-target.c struct dm_target *ti;
ti 715 drivers/md/dm-cache-target.c dm_table_event(cache->ti->table);
ti 950 drivers/md/dm-cache-target.c return dm_device_name(dm_table_get_md(cache->ti->table));
ti 961 drivers/md/dm-cache-target.c dm_table_event(cache->ti->table);
ti 2011 drivers/md/dm-cache-target.c dm_put_device(cache->ti, cache->metadata_dev);
ti 2014 drivers/md/dm-cache-target.c dm_put_device(cache->ti, cache->origin_dev);
ti 2017 drivers/md/dm-cache-target.c dm_put_device(cache->ti, cache->cache_dev);
ti 2031 drivers/md/dm-cache-target.c static void cache_dtr(struct dm_target *ti)
ti 2033 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 2075 drivers/md/dm-cache-target.c struct dm_target *ti;
ti 2097 drivers/md/dm-cache-target.c dm_put_device(ca->ti, ca->metadata_dev);
ti 2100 drivers/md/dm-cache-target.c dm_put_device(ca->ti, ca->cache_dev);
ti 2103 drivers/md/dm-cache-target.c dm_put_device(ca->ti, ca->origin_dev);
ti 2128 drivers/md/dm-cache-target.c r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ti 2151 drivers/md/dm-cache-target.c r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ti 2170 drivers/md/dm-cache-target.c r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ti 2178 drivers/md/dm-cache-target.c if (ca->ti->len > ca->origin_sectors) {
ti 2466 drivers/md/dm-cache-target.c char **error = &ca->ti->error;
ti 2468 drivers/md/dm-cache-target.c struct dm_target *ti = ca->ti;
ti 2477 drivers/md/dm-cache-target.c cache->ti = ca->ti;
ti 2478 drivers/md/dm-cache-target.c ti->private = cache;
ti 2479 drivers/md/dm-cache-target.c ti->num_flush_bios = 2;
ti 2480 drivers/md/dm-cache-target.c ti->flush_supported = true;
ti 2482 drivers/md/dm-cache-target.c ti->num_discard_bios = 1;
ti 2483 drivers/md/dm-cache-target.c ti->discards_supported = true;
ti 2485 drivers/md/dm-cache-target.c ti->per_io_data_size = sizeof(struct per_bio_data);
ti 2496 drivers/md/dm-cache-target.c dm_table_add_target_callbacks(ti->table, &cache->callbacks);
ti 2509 drivers/md/dm-cache-target.c if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
ti 2686 drivers/md/dm-cache-target.c static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti 2694 drivers/md/dm-cache-target.c ti->error = "Error allocating memory for cache";
ti 2697 drivers/md/dm-cache-target.c ca->ti = ti;
ti 2699 drivers/md/dm-cache-target.c r = parse_cache_args(ca, argc, argv, &ti->error);
ti 2713 drivers/md/dm-cache-target.c ti->private = cache;
ti 2721 drivers/md/dm-cache-target.c static int cache_map(struct dm_target *ti, struct bio *bio)
ti 2723 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 2753 drivers/md/dm-cache-target.c static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
ti 2755 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 2863 drivers/md/dm-cache-target.c static void cache_postsuspend(struct dm_target *ti)
ti 2865 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 3036 drivers/md/dm-cache-target.c static int cache_preresume(struct dm_target *ti)
ti 3039 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 3098 drivers/md/dm-cache-target.c static void cache_resume(struct dm_target *ti)
ti 3100 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 3151 drivers/md/dm-cache-target.c static void cache_status(struct dm_target *ti, status_type_t type,
ti 3160 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 3172 drivers/md/dm-cache-target.c if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
ti 3399 drivers/md/dm-cache-target.c static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
ti 3402 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 3422 drivers/md/dm-cache-target.c static int cache_iterate_devices(struct dm_target *ti,
ti 3426 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 3428 drivers/md/dm-cache-target.c r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
ti 3430 drivers/md/dm-cache-target.c r = fn(ti, cache->origin_dev, 0, ti->len, data);
ti 3493 drivers/md/dm-cache-target.c static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
ti 3495 drivers/md/dm-cache-target.c struct cache *cache = ti->private;
ti 70 drivers/md/dm-clone-target.c struct dm_target *ti;
ti 166 drivers/md/dm-clone-target.c return dm_table_device_name(clone->ti->table);
ti 197 drivers/md/dm-clone-target.c dm_table_event(clone->ti->table);
ti 815 drivers/md/dm-clone-target.c tail_size = clone->ti->len & (region_size - 1);
ti 1175 drivers/md/dm-clone-target.c dm_table_event(clone->ti->table);
ti 1325 drivers/md/dm-clone-target.c static int clone_map(struct dm_target *ti, struct bio *bio)
ti 1327 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 1347 drivers/md/dm-clone-target.c bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
ti 1384 drivers/md/dm-clone-target.c static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
ti 1386 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 1433 drivers/md/dm-clone-target.c static void clone_status(struct dm_target *ti, status_type_t type,
ti 1443 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 1453 drivers/md/dm-clone-target.c if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
ti 1560 drivers/md/dm-clone-target.c struct dm_target *ti = clone->ti;
ti 1572 drivers/md/dm-clone-target.c r = dm_read_arg_group(&args, as, &argc, &ti->error);
ti 1585 drivers/md/dm-clone-target.c ti->error = "Invalid feature argument";
ti 1599 drivers/md/dm-clone-target.c struct dm_target *ti = clone->ti;
ti 1615 drivers/md/dm-clone-target.c r = dm_read_arg_group(&args, as, &argc, &ti->error);
ti 1620 drivers/md/dm-clone-target.c ti->error = "Number of core arguments must be even";
ti 1630 drivers/md/dm-clone-target.c ti->error = "Invalid value for argument `hydration_threshold'";
ti 1636 drivers/md/dm-clone-target.c ti->error = "Invalid value for argument `hydration_batch_size'";
ti 1641 drivers/md/dm-clone-target.c ti->error = "Invalid core argument";
ti 1701 drivers/md/dm-clone-target.c r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ti 1721 drivers/md/dm-clone-target.c r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ti 1729 drivers/md/dm-clone-target.c if (dest_dev_size < clone->ti->len) {
ti 1730 drivers/md/dm-clone-target.c dm_put_device(clone->ti, clone->dest_dev);
ti 1743 drivers/md/dm-clone-target.c r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
ti 1751 drivers/md/dm-clone-target.c if (source_dev_size < clone->ti->len) {
ti 1752 drivers/md/dm-clone-target.c dm_put_device(clone->ti, clone->source_dev);
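dm-clone splits the target into power-of-two-sized regions; the ti->len & (region_size - 1) computation above and the dm_sector_div_up(ti->len, region_size) in the constructor below are the usual mask-and-round-up pair. A sketch of that arithmetic (values invented):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Round-up division, in the spirit of dm_sector_div_up(). */
    static sector_t sector_div_up(sector_t n, sector_t d)
    {
        return (n + d - 1) / d;
    }

    int main(void)
    {
        sector_t len = 1000000, region_size = 8; /* region size: power of two */
        sector_t nr_regions = sector_div_up(len, region_size);
        sector_t tail_size  = len & (region_size - 1); /* partial last region */

        printf("%llu regions, tail of %llu sectors\n", nr_regions, tail_size);
        return 0;
    }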
ti 1789 drivers/md/dm-clone-target.c static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti 1797 drivers/md/dm-clone-target.c ti->error = "Invalid number of arguments";
ti 1806 drivers/md/dm-clone-target.c ti->error = "Failed to allocate clone structure";
ti 1810 drivers/md/dm-clone-target.c clone->ti = ti;
ti 1817 drivers/md/dm-clone-target.c r = parse_metadata_dev(clone, &as, &ti->error);
ti 1821 drivers/md/dm-clone-target.c r = parse_dest_dev(clone, &as, &ti->error);
ti 1825 drivers/md/dm-clone-target.c r = parse_source_dev(clone, &as, &ti->error);
ti 1829 drivers/md/dm-clone-target.c r = parse_region_size(clone, &as, &ti->error);
ti 1834 drivers/md/dm-clone-target.c nr_regions = dm_sector_div_up(ti->len, clone->region_size);
ti 1838 drivers/md/dm-clone-target.c ti->error = "Too many regions. Consider increasing the region size";
ti 1845 drivers/md/dm-clone-target.c r = validate_nr_regions(clone->nr_regions, &ti->error);
ti 1849 drivers/md/dm-clone-target.c r = dm_set_target_max_io_len(ti, clone->region_size);
ti 1851 drivers/md/dm-clone-target.c ti->error = "Failed to set max io len";
ti 1864 drivers/md/dm-clone-target.c clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
ti 1867 drivers/md/dm-clone-target.c ti->error = "Failed to load metadata";
ti 1875 drivers/md/dm-clone-target.c ti->error = "Unable to get write access to metadata, please check/repair metadata";
ti 1885 drivers/md/dm-clone-target.c ti->error = "Failed to allocate hydration hash table";
ti 1902 drivers/md/dm-clone-target.c ti->error = "Failed to allocate workqueue";
ti 1919 drivers/md/dm-clone-target.c ti->error = "Failed to create dm_clone_region_hydration memory pool";
ti 1924 drivers/md/dm-clone-target.c r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
ti 1930 drivers/md/dm-clone-target.c dm_table_add_target_callbacks(ti->table, &clone->callbacks);
ti 1933 drivers/md/dm-clone-target.c ti->num_flush_bios = 1;
ti 1934 drivers/md/dm-clone-target.c ti->flush_supported = true;
ti 1937 drivers/md/dm-clone-target.c ti->discards_supported = true;
ti 1938 drivers/md/dm-clone-target.c ti->num_discard_bios = 1;
ti 1940 drivers/md/dm-clone-target.c ti->private = clone;
ti 1955 drivers/md/dm-clone-target.c dm_put_device(ti, clone->source_dev);
ti 1957 drivers/md/dm-clone-target.c dm_put_device(ti, clone->dest_dev);
ti 1959 drivers/md/dm-clone-target.c dm_put_device(ti, clone->metadata_dev);
ti 1966 drivers/md/dm-clone-target.c static void clone_dtr(struct dm_target *ti)
ti 1969 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 1983 drivers/md/dm-clone-target.c dm_put_device(ti, clone->source_dev);
ti 1984 drivers/md/dm-clone-target.c dm_put_device(ti, clone->dest_dev);
ti 1985 drivers/md/dm-clone-target.c dm_put_device(ti, clone->metadata_dev);
ti 1992 drivers/md/dm-clone-target.c static void clone_postsuspend(struct dm_target *ti)
ti 1994 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 2030 drivers/md/dm-clone-target.c static void clone_resume(struct dm_target *ti)
ti 2032 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 2096 drivers/md/dm-clone-target.c static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
ti 2098 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 2115 drivers/md/dm-clone-target.c static int clone_iterate_devices(struct dm_target *ti,
ti 2119 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 2123 drivers/md/dm-clone-target.c ret = fn(ti, source_dev, 0, ti->len, data);
ti 2125 drivers/md/dm-clone-target.c ret = fn(ti, dest_dev, 0, ti->len, data);
ti 2160 drivers/md/dm-clone-target.c static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
ti 2163 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
ti 90 drivers/md/dm-crypt.c int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
ti 331 drivers/md/dm-crypt.c static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
ti 347 drivers/md/dm-crypt.c ti->error = "cypher blocksize is not a power of 2";
ti 352 drivers/md/dm-crypt.c ti->error = "cypher blocksize is > 512";
ti 398 drivers/md/dm-crypt.c static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
ti 404 drivers/md/dm-crypt.c ti->error = "Unsupported sector size for LMK";
ti 410 drivers/md/dm-crypt.c ti->error = "Error initializing LMK hash";
ti 423 drivers/md/dm-crypt.c ti->error = "Error kmallocing seed storage in LMK";
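The dm-crypt block above begins with a table of IV-generation operations; the ctr member at line 90 is one slot of it, and each crypt_iv_*_ctr() validates its own constraints and reports through ti->error. A bare-bones model of such an ops table (names and the power-of-two check echo the listing; the types are stubs):

    #include <stdio.h>

    struct dm_target_model { const char *error; };

    struct iv_ops {
        const char *name;
        /* One constructor slot, shaped like the ctr pointer in the listing. */
        int (*ctr)(struct dm_target_model *ti, const char *opts);
    };

    static int benbi_like_ctr(struct dm_target_model *ti, const char *opts)
    {
        int bs = 512; /* pretend cipher block size */

        (void)opts;
        if (bs & (bs - 1)) { /* must be a power of 2 */
            ti->error = "cypher blocksize is not a power of 2";
            return -1;
        }
        return 0;
    }

    static const struct iv_ops iv_table[] = {
        { "benbi", benbi_like_ctr },
    };

    int main(void)
    {
        struct dm_target_model ti = { 0 };

        if (iv_table[0].ctr(&ti, NULL))
            fprintf(stderr, "%s\n", ti.error);
        else
            printf("IV mode %s ready\n", iv_table[0].name);
        return 0;
    }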
ti 555 drivers/md/dm-crypt.c static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
ti 561 drivers/md/dm-crypt.c ti->error = "Unsupported sector size for TCW";
ti 566 drivers/md/dm-crypt.c ti->error = "Wrong key size for TCW";
ti 572 drivers/md/dm-crypt.c ti->error = "Error initializing CRC32 in TCW";
ti 580 drivers/md/dm-crypt.c ti->error = "Error allocating seed storage in TCW";
ti 700 drivers/md/dm-crypt.c static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
ti 704 drivers/md/dm-crypt.c ti->error = "AEAD transforms not supported for EBOIV";
ti 709 drivers/md/dm-crypt.c ti->error = "Block size of EBOIV cipher does "
ti 845 drivers/md/dm-crypt.c static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
ti 849 drivers/md/dm-crypt.c struct mapped_device *md = dm_table_get_md(ti->table);
ti 853 drivers/md/dm-crypt.c ti->error = "Integrity profile not supported.";
ti 859 drivers/md/dm-crypt.c ti->error = "Integrity profile tag size mismatch.";
ti 863 drivers/md/dm-crypt.c ti->error = "Integrity profile sector size mismatch.";
ti 873 drivers/md/dm-crypt.c ti->error = "Integrity AEAD auth tag size is not supported.";
ti 881 drivers/md/dm-crypt.c ti->error = "Not enough space for integrity tag in the profile.";
ti 887 drivers/md/dm-crypt.c ti->error = "Integrity profile not supported.";
ti 2114 drivers/md/dm-crypt.c static void crypt_dtr(struct dm_target *ti)
ti 2116 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2118 drivers/md/dm-crypt.c ti->private = NULL;
ti 2146 drivers/md/dm-crypt.c dm_put_device(ti, cc->dev);
ti 2165 drivers/md/dm-crypt.c static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
ti 2167 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2221 drivers/md/dm-crypt.c ti->error = "Invalid IV mode";
ti 2267 drivers/md/dm-crypt.c static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
ti 2270 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2301 drivers/md/dm-crypt.c ti->error = "Invalid AEAD cipher spec";
ti 2311 drivers/md/dm-crypt.c ti->error = "Digest algorithm missing for ESSIV mode";
ti 2317 drivers/md/dm-crypt.c ti->error = "Cannot allocate cipher string";
ti 2328 drivers/md/dm-crypt.c ti->error = "Error allocating crypto tfm";
ti 2340 drivers/md/dm-crypt.c static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
ti 2343 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2350 drivers/md/dm-crypt.c ti->error = "Bad cipher specification";
ti 2366 drivers/md/dm-crypt.c ti->error = "Bad cipher key count specification";
ti 2385 drivers/md/dm-crypt.c ti->error = "IV mechanism required";
ti 2395 drivers/md/dm-crypt.c ti->error = "Digest algorithm missing for ESSIV mode";
ti 2413 drivers/md/dm-crypt.c ti->error = "Error allocating crypto tfm";
ti 2421 drivers/md/dm-crypt.c ti->error = "Cannot allocate cipher strings";
ti 2425 drivers/md/dm-crypt.c static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
ti 2427 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2433 drivers/md/dm-crypt.c ti->error = "Cannot allocate cipher strings";
ti 2438 drivers/md/dm-crypt.c ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
ti 2440 drivers/md/dm-crypt.c ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
&ivopts); ti 2445 drivers/md/dm-crypt.c ret = crypt_ctr_ivmode(ti, ivmode); ti 2452 drivers/md/dm-crypt.c ti->error = "Error decoding and setting key"; ti 2458 drivers/md/dm-crypt.c ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); ti 2460 drivers/md/dm-crypt.c ti->error = "Error creating IV"; ti 2469 drivers/md/dm-crypt.c ti->error = "Error initialising IV"; ti 2481 drivers/md/dm-crypt.c static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv) ti 2483 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private; ti 2497 drivers/md/dm-crypt.c ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); ti 2504 drivers/md/dm-crypt.c ti->error = "Not enough feature arguments"; ti 2509 drivers/md/dm-crypt.c ti->num_discard_bios = 1; ti 2518 drivers/md/dm-crypt.c ti->error = "Invalid integrity arguments"; ti 2526 drivers/md/dm-crypt.c ti->error = "Unknown integrity profile"; ti 2537 drivers/md/dm-crypt.c ti->error = "Invalid feature value for sector_size"; ti 2540 drivers/md/dm-crypt.c if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) { ti 2541 drivers/md/dm-crypt.c ti->error = "Device size is not multiple of sector_size feature"; ti 2548 drivers/md/dm-crypt.c ti->error = "Invalid feature arguments"; ti 2560 drivers/md/dm-crypt.c static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 2563 drivers/md/dm-crypt.c const char *devname = dm_table_device_name(ti->table); ti 2572 drivers/md/dm-crypt.c ti->error = "Not enough arguments"; ti 2578 drivers/md/dm-crypt.c ti->error = "Cannot parse key size"; ti 2584 drivers/md/dm-crypt.c ti->error = "Cannot allocate encryption context"; ti 2591 drivers/md/dm-crypt.c ti->private = cc; ti 2604 drivers/md/dm-crypt.c ret = crypt_ctr_optional(ti, argc - 5, &argv[5]); ti 2609 drivers/md/dm-crypt.c ret = crypt_ctr_cipher(ti, argv[0], argv[1]); ti 2646 drivers/md/dm-crypt.c ti->error = "Cannot allocate crypt request mempool"; ti 2650 drivers/md/dm-crypt.c cc->per_bio_data_size = ti->per_io_data_size = ti 2656 drivers/md/dm-crypt.c ti->error = "Cannot allocate page mempool"; ti 2662 drivers/md/dm-crypt.c ti->error = "Cannot allocate crypt bioset"; ti 2671 drivers/md/dm-crypt.c ti->error = "Invalid iv_offset sector"; ti 2676 drivers/md/dm-crypt.c ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev); ti 2678 drivers/md/dm-crypt.c ti->error = "Device lookup failed"; ti 2684 drivers/md/dm-crypt.c ti->error = "Invalid device sector"; ti 2690 drivers/md/dm-crypt.c ret = crypt_integrity_ctr(cc, ti); ti 2701 drivers/md/dm-crypt.c ti->error = "Cannot allocate integrity tags mempool"; ti 2711 drivers/md/dm-crypt.c ti->error = "Couldn't create kcryptd io queue"; ti 2723 drivers/md/dm-crypt.c ti->error = "Couldn't create kcryptd queue"; ti 2734 drivers/md/dm-crypt.c ti->error = "Couldn't spawn write thread"; ti 2739 drivers/md/dm-crypt.c ti->num_flush_bios = 1; ti 2744 drivers/md/dm-crypt.c crypt_dtr(ti); ti 2748 drivers/md/dm-crypt.c static int crypt_map(struct dm_target *ti, struct bio *bio) ti 2751 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private; ti 2763 drivers/md/dm-crypt.c dm_target_offset(ti, bio->bi_iter.bi_sector); ti 2785 drivers/md/dm-crypt.c crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); ti 2814 drivers/md/dm-crypt.c static void crypt_status(struct dm_target *ti, status_type_t type, ti 2817 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private; ti 2841 drivers/md/dm-crypt.c num_feature_args += !!ti->num_discard_bios; ti 2850 drivers/md/dm-crypt.c if 
ti 2868 drivers/md/dm-crypt.c static void crypt_postsuspend(struct dm_target *ti)
ti 2870 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2875 drivers/md/dm-crypt.c static int crypt_preresume(struct dm_target *ti)
ti 2877 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2887 drivers/md/dm-crypt.c static void crypt_resume(struct dm_target *ti)
ti 2889 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2898 drivers/md/dm-crypt.c static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
ti 2901 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2939 drivers/md/dm-crypt.c static int crypt_iterate_devices(struct dm_target *ti,
ti 2942 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 2944 drivers/md/dm-crypt.c return fn(ti, cc->dev, cc->start, ti->len, data);
ti 2947 drivers/md/dm-crypt.c static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
ti 2949 drivers/md/dm-crypt.c struct crypt_config *cc = ti->private;
ti 120 drivers/md/dm-delay.c static void delay_dtr(struct dm_target *ti)
ti 122 drivers/md/dm-delay.c struct delay_c *dc = ti->private;
ti 128 drivers/md/dm-delay.c dm_put_device(ti, dc->read.dev);
ti 130 drivers/md/dm-delay.c dm_put_device(ti, dc->write.dev);
ti 132 drivers/md/dm-delay.c dm_put_device(ti, dc->flush.dev);
ti 139 drivers/md/dm-delay.c static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv)
ti 146 drivers/md/dm-delay.c ti->error = "Invalid device sector";
ti 152 drivers/md/dm-delay.c ti->error = "Invalid delay";
ti 156 drivers/md/dm-delay.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
ti 158 drivers/md/dm-delay.c ti->error = "Device lookup failed";
ti 173 drivers/md/dm-delay.c static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti 179 drivers/md/dm-delay.c ti->error = "Requires exactly 3, 6 or 9 arguments";
ti 185 drivers/md/dm-delay.c ti->error = "Cannot allocate context";
ti 189 drivers/md/dm-delay.c ti->private = dc;
ti 197 drivers/md/dm-delay.c ret = delay_class_ctr(ti, &dc->read, argv);
ti 202 drivers/md/dm-delay.c ret = delay_class_ctr(ti, &dc->write, argv);
ti 205 drivers/md/dm-delay.c ret = delay_class_ctr(ti, &dc->flush, argv);
ti 211 drivers/md/dm-delay.c ret = delay_class_ctr(ti, &dc->write, argv + 3);
ti 215 drivers/md/dm-delay.c ret = delay_class_ctr(ti, &dc->flush, argv + 3);
ti 221 drivers/md/dm-delay.c ret = delay_class_ctr(ti, &dc->flush, argv + 6);
ti 233 drivers/md/dm-delay.c ti->num_flush_bios = 1;
ti 234 drivers/md/dm-delay.c ti->num_discard_bios = 1;
ti 235 drivers/md/dm-delay.c ti->per_io_data_size = sizeof(struct dm_delay_info);
ti 239 drivers/md/dm-delay.c delay_dtr(ti);
ti 266 drivers/md/dm-delay.c static void delay_presuspend(struct dm_target *ti)
ti 268 drivers/md/dm-delay.c struct delay_c *dc = ti->private;
ti 275 drivers/md/dm-delay.c static void delay_resume(struct dm_target *ti)
ti 277 drivers/md/dm-delay.c struct delay_c *dc = ti->private;
ti 282 drivers/md/dm-delay.c static int delay_map(struct dm_target *ti, struct bio *bio)
ti 284 drivers/md/dm-delay.c struct delay_c *dc = ti->private;
ti 299 drivers/md/dm-delay.c bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
ti 307 drivers/md/dm-delay.c static void delay_status(struct dm_target *ti, status_type_t type,
ti 310 drivers/md/dm-delay.c struct delay_c *dc = ti->private;
ti 332 drivers/md/dm-delay.c static int delay_iterate_devices(struct dm_target *ti,
ti 335 drivers/md/dm-delay.c struct delay_c *dc = ti->private;
ti 338 drivers/md/dm-delay.c ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
ti 341 drivers/md/dm-delay.c ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
ti 344 drivers/md/dm-delay.c ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);
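delay_iterate_devices() above shows the iterate_devices contract: call fn once per underlying device and stop on the first non-zero return. Modeled outside the kernel (the dm types are stubs; the three-device layout follows dm-delay's read/write/flush classes):

    #include <stdio.h>

    struct dm_dev_model { const char *name; };
    struct target_model {
        struct dm_dev_model read, write, flush;
        unsigned long long len;
    };

    typedef int (*devs_fn)(struct target_model *ti, struct dm_dev_model *dev,
                           unsigned long long start, unsigned long long len,
                           void *data);

    /* Same shape as delay_iterate_devices(): visit each device, bail on error. */
    static int iterate_devices(struct target_model *ti, devs_fn fn, void *data)
    {
        int ret;

        if ((ret = fn(ti, &ti->read, 0, ti->len, data)))
            return ret;
        if ((ret = fn(ti, &ti->write, 0, ti->len, data)))
            return ret;
        return fn(ti, &ti->flush, 0, ti->len, data);
    }

    static int print_dev(struct target_model *ti, struct dm_dev_model *dev,
                         unsigned long long start, unsigned long long len,
                         void *data)
    {
        (void)ti; (void)data;
        printf("%s: %llu+%llu\n", dev->name, start, len);
        return 0;
    }

    int main(void)
    {
        struct target_model t = { { "read" }, { "write" }, { "flush" }, 4096 };

        return iterate_devices(&t, print_dev, NULL);
    }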
ti 209 drivers/md/dm-dust.c static int dust_map(struct dm_target *ti, struct bio *bio)
ti 211 drivers/md/dm-dust.c struct dust_device *dd = ti->private;
ti 215 drivers/md/dm-dust.c bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
ti 279 drivers/md/dm-dust.c static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti 287 drivers/md/dm-dust.c sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);
ti 290 drivers/md/dm-dust.c ti->error = "Invalid argument count";
ti 295 drivers/md/dm-dust.c ti->error = "Invalid block size parameter";
ti 300 drivers/md/dm-dust.c ti->error = "Block size must be at least 512";
ti 305 drivers/md/dm-dust.c ti->error = "Block size must be a power of 2";
ti 310 drivers/md/dm-dust.c ti->error = "Block size is too large";
ti 317 drivers/md/dm-dust.c ti->error = "Invalid device offset sector";
ti 323 drivers/md/dm-dust.c ti->error = "Cannot allocate context";
ti 327 drivers/md/dm-dust.c if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
ti 328 drivers/md/dm-dust.c ti->error = "Device lookup failed";
ti 354 drivers/md/dm-dust.c BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);
ti 356 drivers/md/dm-dust.c ti->num_discard_bios = 1;
ti 357 drivers/md/dm-dust.c ti->num_flush_bios = 1;
ti 358 drivers/md/dm-dust.c ti->private = dd;
ti 363 drivers/md/dm-dust.c static void dust_dtr(struct dm_target *ti)
ti 365 drivers/md/dm-dust.c struct dust_device *dd = ti->private;
ti 368 drivers/md/dm-dust.c dm_put_device(ti, dd->dev);
ti 372 drivers/md/dm-dust.c static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
ti 375 drivers/md/dm-dust.c struct dust_device *dd = ti->private;
ti 442 drivers/md/dm-dust.c static void dust_status(struct dm_target *ti, status_type_t type,
ti 445 drivers/md/dm-dust.c struct dust_device *dd = ti->private;
ti 462 drivers/md/dm-dust.c static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
ti 464 drivers/md/dm-dust.c struct dust_device *dd = ti->private;
ti 473 drivers/md/dm-dust.c ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
ti 479 drivers/md/dm-dust.c static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
ti 482 drivers/md/dm-dust.c struct dust_device *dd = ti->private;
ti 484 drivers/md/dm-dust.c return fn(ti, dd->dev, dd->start, ti->len, data);
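dm-dust's constructor above runs the usual chain of block-size checks: at least 512 bytes, a power of two, and no larger than the device. Collected into one standalone validator (error strings copied from the listing; the sector math assumes 512-byte sectors):

    #include <stdbool.h>
    #include <stdio.h>

    static bool valid_block_size(unsigned bs, unsigned long long max_sectors,
                                 const char **error)
    {
        if (bs < 512) {
            *error = "Block size must be at least 512";
            return false;
        }
        if (bs & (bs - 1)) {
            *error = "Block size must be a power of 2";
            return false;
        }
        if (bs / 512 > max_sectors) { /* sectors are 512 bytes */
            *error = "Block size is too large";
            return false;
        }
        return true;
    }

    int main(void)
    {
        const char *err = NULL;

        if (!valid_block_size(3000, 1 << 20, &err))
            fprintf(stderr, "%s\n", err); /* not a power of 2 */
        return 0;
    }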
ti 1139 drivers/md/dm-era-target.c struct dm_target *ti;
ti 1399 drivers/md/dm-era-target.c dm_put_device(era->ti, era->origin_dev);
ti 1402 drivers/md/dm-era-target.c dm_put_device(era->ti, era->metadata_dev);
ti 1409 drivers/md/dm-era-target.c return dm_sector_div_up(era->ti->len, era->sectors_per_block);
ti 1423 drivers/md/dm-era-target.c static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti 1431 drivers/md/dm-era-target.c ti->error = "Invalid argument count";
ti 1437 drivers/md/dm-era-target.c ti->error = "Error allocating era structure";
ti 1441 drivers/md/dm-era-target.c era->ti = ti;
ti 1443 drivers/md/dm-era-target.c r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
ti 1445 drivers/md/dm-era-target.c ti->error = "Error opening metadata device";
ti 1450 drivers/md/dm-era-target.c r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
ti 1452 drivers/md/dm-era-target.c ti->error = "Error opening data device";
ti 1459 drivers/md/dm-era-target.c ti->error = "Error parsing block size";
ti 1464 drivers/md/dm-era-target.c r = dm_set_target_max_io_len(ti, era->sectors_per_block);
ti 1466 drivers/md/dm-era-target.c ti->error = "could not set max io len";
ti 1472 drivers/md/dm-era-target.c ti->error = "Invalid block size";
ti 1483 drivers/md/dm-era-target.c ti->error = "Error reading metadata";
ti 1493 drivers/md/dm-era-target.c ti->error = "couldn't resize metadata";
ti 1500 drivers/md/dm-era-target.c ti->error = "could not create workqueue for metadata object";
ti 1512 drivers/md/dm-era-target.c ti->private = era;
ti 1513 drivers/md/dm-era-target.c ti->num_flush_bios = 1;
ti 1514 drivers/md/dm-era-target.c ti->flush_supported = true;
ti 1516 drivers/md/dm-era-target.c ti->num_discard_bios = 1;
ti 1518 drivers/md/dm-era-target.c dm_table_add_target_callbacks(ti->table, &era->callbacks);
ti 1523 drivers/md/dm-era-target.c static void era_dtr(struct dm_target *ti)
ti 1525 drivers/md/dm-era-target.c era_destroy(ti->private);
ti 1528 drivers/md/dm-era-target.c static int era_map(struct dm_target *ti, struct bio *bio)
ti 1530 drivers/md/dm-era-target.c struct era *era = ti->private;
ti 1553 drivers/md/dm-era-target.c static void era_postsuspend(struct dm_target *ti)
ti 1556 drivers/md/dm-era-target.c struct era *era = ti->private;
ti 1567 drivers/md/dm-era-target.c static int era_preresume(struct dm_target *ti)
ti 1570 drivers/md/dm-era-target.c struct era *era = ti->private;
ti 1598 drivers/md/dm-era-target.c static void era_status(struct dm_target *ti, status_type_t type,
ti 1602 drivers/md/dm-era-target.c struct era *era = ti->private;
ti 1639 drivers/md/dm-era-target.c static int era_message(struct dm_target *ti, unsigned argc, char **argv,
ti 1642 drivers/md/dm-era-target.c struct era *era = ti->private;
ti 1667 drivers/md/dm-era-target.c static int era_iterate_devices(struct dm_target *ti,
ti 1670 drivers/md/dm-era-target.c struct era *era = ti->private;
ti 1671 drivers/md/dm-era-target.c return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
ti 1674 drivers/md/dm-era-target.c static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
ti 1676 drivers/md/dm-era-target.c struct era *era = ti->private;
ti 191 drivers/md/dm-exception-store.c int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
ti 202 drivers/md/dm-exception-store.c ti->error = "Insufficient exception store arguments";
ti 208 drivers/md/dm-exception-store.c ti->error = "Exception store allocation failed";
ti 218 drivers/md/dm-exception-store.c ti->error = "Exception store type is not P or N";
ti 224 drivers/md/dm-exception-store.c ti->error = "Exception store type not recognised";
ti 232 drivers/md/dm-exception-store.c r = set_chunk_size(tmp_store, argv[1], &ti->error);
ti 238 drivers/md/dm-exception-store.c ti->error = "Exception store type constructor failed";
ti 187 drivers/md/dm-exception-store.h int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
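Ahead of the dm-flakey block that follows: its parse_features() consumes a group of optional arguments and rejects duplicates and conflicting combinations. A reduced model of that loop (dm_shift_arg/dm_read_arg_group replaced by plain pointer walking; error strings copied from the listing):

    #include <stdio.h>
    #include <strings.h>

    struct flakey_model { unsigned drop_writes : 1, error_writes : 1; };

    static int parse_features(int argc, char **argv, struct flakey_model *fc,
                              const char **error)
    {
        while (argc--) {
            const char *arg = *argv++;

            if (!strcasecmp(arg, "drop_writes")) {
                if (fc->drop_writes) {
                    *error = "Feature drop_writes duplicated";
                    return -1;
                }
                if (fc->error_writes) {
                    *error = "Feature drop_writes conflicts with feature error_writes";
                    return -1;
                }
                fc->drop_writes = 1;
            } else if (!strcasecmp(arg, "error_writes")) {
                if (fc->error_writes) {
                    *error = "Feature error_writes duplicated";
                    return -1;
                }
                if (fc->drop_writes) {
                    *error = "Feature error_writes conflicts with feature drop_writes";
                    return -1;
                }
                fc->error_writes = 1;
            } else {
                *error = "Unrecognised flakey feature requested";
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct flakey_model fc = { 0 };
        const char *err = NULL;
        char *args[] = { "drop_writes", "error_writes" };

        if (parse_features(2, args, &fc, &err))
            fprintf(stderr, "%s\n", err); /* conflict reported */
        return 0;
    }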
drop_writes conflicts with feature error_writes"; ti 98 drivers/md/dm-flakey.c ti->error = "Feature error_writes duplicated"; ti 102 drivers/md/dm-flakey.c ti->error = "Feature error_writes conflicts with feature drop_writes"; ti 114 drivers/md/dm-flakey.c ti->error = "Feature corrupt_bio_byte requires parameters"; ti 118 drivers/md/dm-flakey.c r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); ti 132 drivers/md/dm-flakey.c ti->error = "Invalid corrupt bio direction (r or w)"; ti 140 drivers/md/dm-flakey.c r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error); ti 148 drivers/md/dm-flakey.c r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error); ti 156 drivers/md/dm-flakey.c ti->error = "Unrecognised flakey feature requested"; ti 161 drivers/md/dm-flakey.c ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set"; ti 165 drivers/md/dm-flakey.c ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set"; ti 184 drivers/md/dm-flakey.c static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 202 drivers/md/dm-flakey.c ti->error = "Invalid argument count"; ti 208 drivers/md/dm-flakey.c ti->error = "Cannot allocate context"; ti 217 drivers/md/dm-flakey.c ti->error = "Invalid device sector"; ti 222 drivers/md/dm-flakey.c r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error); ti 226 drivers/md/dm-flakey.c r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error); ti 231 drivers/md/dm-flakey.c ti->error = "Total (up + down) interval is zero"; ti 237 drivers/md/dm-flakey.c ti->error = "Interval overflow"; ti 242 drivers/md/dm-flakey.c r = parse_features(&as, fc, ti); ti 246 drivers/md/dm-flakey.c r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev); ti 248 drivers/md/dm-flakey.c ti->error = "Device lookup failed"; ti 252 drivers/md/dm-flakey.c ti->num_flush_bios = 1; ti 253 drivers/md/dm-flakey.c ti->num_discard_bios = 1; ti 254 drivers/md/dm-flakey.c ti->per_io_data_size = sizeof(struct per_bio_data); ti 255 drivers/md/dm-flakey.c ti->private = fc; ti 263 drivers/md/dm-flakey.c static void flakey_dtr(struct dm_target *ti) ti 265 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 267 drivers/md/dm-flakey.c dm_put_device(ti, fc->dev); ti 271 drivers/md/dm-flakey.c static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) ti 273 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 275 drivers/md/dm-flakey.c return fc->start + dm_target_offset(ti, bi_sector); ti 278 drivers/md/dm-flakey.c static void flakey_map_bio(struct dm_target *ti, struct bio *bio) ti 280 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 285 drivers/md/dm-flakey.c flakey_map_sector(ti, bio->bi_iter.bi_sector); ti 318 drivers/md/dm-flakey.c static int flakey_map(struct dm_target *ti, struct bio *bio) ti 320 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 376 drivers/md/dm-flakey.c flakey_map_bio(ti, bio); ti 381 drivers/md/dm-flakey.c static int flakey_end_io(struct dm_target *ti, struct bio *bio, ti 384 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 411 drivers/md/dm-flakey.c static void flakey_status(struct dm_target *ti, status_type_t type, ti 415 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 447 drivers/md/dm-flakey.c static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) ti 449 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 457 drivers/md/dm-flakey.c 
ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT) ti 463 drivers/md/dm-flakey.c static int flakey_report_zones(struct dm_target *ti, sector_t sector, ti 466 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 470 drivers/md/dm-flakey.c ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector), ti 476 drivers/md/dm-flakey.c dm_remap_zone_report(ti, fc->start, zones, nr_zones); ti 481 drivers/md/dm-flakey.c static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) ti 483 drivers/md/dm-flakey.c struct flakey_c *fc = ti->private; ti 485 drivers/md/dm-flakey.c return fn(ti, fc->dev, fc->start, ti->len, data); ti 209 drivers/md/dm-integrity.c struct dm_target *ti; ti 369 drivers/md/dm-integrity.c static void dm_integrity_dtr(struct dm_target *ti); ti 1613 drivers/md/dm-integrity.c static int dm_integrity_map(struct dm_target *ti, struct bio *bio) ti 1615 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private; ti 1629 drivers/md/dm-integrity.c dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); ti 2301 drivers/md/dm-integrity.c if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) ti 2362 drivers/md/dm-integrity.c if (unlikely(dm_suspended(ic->ti))) ti 2781 drivers/md/dm-integrity.c static void dm_integrity_postsuspend(struct dm_target *ti) ti 2783 drivers/md/dm-integrity.c struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; ti 2823 drivers/md/dm-integrity.c static void dm_integrity_resume(struct dm_target *ti) ti 2825 drivers/md/dm-integrity.c struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; ti 2916 drivers/md/dm-integrity.c static void dm_integrity_status(struct dm_target *ti, status_type_t type, ti 2919 drivers/md/dm-integrity.c struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; ti 2985 drivers/md/dm-integrity.c static int dm_integrity_iterate_devices(struct dm_target *ti, ti 2988 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private; ti 2991 drivers/md/dm-integrity.c return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); ti 2993 drivers/md/dm-integrity.c return fn(ti, ic->dev, 0, ti->len, data); ti 2996 drivers/md/dm-integrity.c static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 2998 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private; ti 3130 drivers/md/dm-integrity.c static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) ti 3132 drivers/md/dm-integrity.c struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); ti 3568 drivers/md/dm-integrity.c static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti 3590 drivers/md/dm-integrity.c ti->error = "Invalid argument count"; ti 3596 drivers/md/dm-integrity.c ti->error = "Cannot allocate integrity context"; ti 3599 drivers/md/dm-integrity.c ti->private = ic; ti 3600 drivers/md/dm-integrity.c ti->per_io_data_size = sizeof(struct dm_integrity_io); ti 3601 drivers/md/dm-integrity.c ic->ti = ti; ti 3612 drivers/md/dm-integrity.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); ti 3614 drivers/md/dm-integrity.c ti->error = "Device lookup failed"; ti 3619 drivers/md/dm-integrity.c ti->error = "Invalid starting offset"; ti 3627 drivers/md/dm-integrity.c ti->error = "Invalid tag size"; ti 3637 drivers/md/dm-integrity.c ti->error = "Invalid mode (expecting J, B, D, R)"; ti 3651 drivers/md/dm-integrity.c r = dm_read_arg_group(_args, &as, 
&extra_args, &ti->error); ti 3662 drivers/md/dm-integrity.c ti->error = "Not enough feature arguments"; ti 3677 drivers/md/dm-integrity.c dm_put_device(ti, ic->meta_dev); ti 3680 drivers/md/dm-integrity.c r = dm_get_device(ti, strchr(opt_string, ':') + 1, ti 3681 drivers/md/dm-integrity.c dm_table_get_mode(ti->table), &ic->meta_dev); ti 3683 drivers/md/dm-integrity.c ti->error = "Device lookup failed"; ti 3691 drivers/md/dm-integrity.c ti->error = "Invalid block_size argument"; ti 3700 drivers/md/dm-integrity.c ti->error = "Invalid bitmap_flush_interval argument"; ti 3704 drivers/md/dm-integrity.c r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, ti 3709 drivers/md/dm-integrity.c r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, ti 3714 drivers/md/dm-integrity.c r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, ti 3722 drivers/md/dm-integrity.c ti->error = "Invalid argument"; ti 3742 drivers/md/dm-integrity.c r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, ti 3747 drivers/md/dm-integrity.c r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, ti 3754 drivers/md/dm-integrity.c ti->error = "Unknown tag size"; ti 3761 drivers/md/dm-integrity.c ti->error = "Too big tag size"; ti 3772 drivers/md/dm-integrity.c ti->error = "Bitmap mode can be only used with internal hash"; ti 3784 drivers/md/dm-integrity.c ti->error = "Cannot allocate dm io"; ti 3790 drivers/md/dm-integrity.c ti->error = "Cannot allocate mempool"; ti 3797 drivers/md/dm-integrity.c ti->error = "Cannot allocate workqueue"; ti 3808 drivers/md/dm-integrity.c ti->error = "Cannot allocate workqueue"; ti 3816 drivers/md/dm-integrity.c ti->error = "Cannot allocate workqueue"; ti 3823 drivers/md/dm-integrity.c ti->error = "Cannot allocate workqueue"; ti 3832 drivers/md/dm-integrity.c ti->error = "Cannot allocate workqueue"; ti 3842 drivers/md/dm-integrity.c ti->error = "Cannot allocate superblock area"; ti 3848 drivers/md/dm-integrity.c ti->error = "Error reading superblock"; ti 3856 drivers/md/dm-integrity.c ti->error = "The device is not initialized"; ti 3863 drivers/md/dm-integrity.c ti->error = "Could not initialize superblock"; ti 3872 drivers/md/dm-integrity.c ti->error = "Unknown version"; ti 3877 drivers/md/dm-integrity.c ti->error = "Tag size doesn't match the information in superblock"; ti 3882 drivers/md/dm-integrity.c ti->error = "Block size doesn't match the information in superblock"; ti 3887 drivers/md/dm-integrity.c ti->error = "Corrupted superblock, journal_sections is 0"; ti 3895 drivers/md/dm-integrity.c ti->error = "Invalid interleave_sectors in the superblock"; ti 3901 drivers/md/dm-integrity.c ti->error = "Invalid interleave_sectors in the superblock"; ti 3909 drivers/md/dm-integrity.c ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors"; ti 3914 drivers/md/dm-integrity.c ti->error = "Journal mac mismatch"; ti 3927 drivers/md/dm-integrity.c ti->error = "The device is too small"; ti 3954 drivers/md/dm-integrity.c if (ti->len > ic->provided_data_sectors) { ti 3956 drivers/md/dm-integrity.c ti->error = "Not enough provided sectors for requested mapping size"; ti 3992 drivers/md/dm-integrity.c ti->error = "Cannot allocate workqueue"; ti 3999 drivers/md/dm-integrity.c ti->error = "Cannot allocate buffer for recalculating"; ti 4006 drivers/md/dm-integrity.c ti->error = "Cannot allocate tags for recalculating"; ti 4016 drivers/md/dm-integrity.c ti->error = "Cannot initialize dm-bufio"; ti 4023 
drivers/md/dm-integrity.c r = create_journal(ic, &ti->error); ti 4073 drivers/md/dm-integrity.c ti->error = "Error initializing journal"; ti 4078 drivers/md/dm-integrity.c ti->error = "Error initializing superblock"; ti 4085 drivers/md/dm-integrity.c r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); ti 4093 drivers/md/dm-integrity.c DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len); ti 4094 drivers/md/dm-integrity.c if (!ti->max_io_len || ti->max_io_len > max_io_len) { ti 4095 drivers/md/dm-integrity.c r = dm_set_target_max_io_len(ti, max_io_len); ti 4102 drivers/md/dm-integrity.c dm_integrity_set(ti, ic); ti 4104 drivers/md/dm-integrity.c ti->num_flush_bios = 1; ti 4105 drivers/md/dm-integrity.c ti->flush_supported = true; ti 4110 drivers/md/dm-integrity.c dm_integrity_dtr(ti); ti 4114 drivers/md/dm-integrity.c static void dm_integrity_dtr(struct dm_target *ti) ti 4116 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private; ti 4142 drivers/md/dm-integrity.c dm_put_device(ti, ic->dev); ti 4144 drivers/md/dm-integrity.c dm_put_device(ti, ic->meta_dev); ti 1156 drivers/md/dm-ioctl.c struct dm_target *ti = dm_table_get_target(table, i); ti 1168 drivers/md/dm-ioctl.c spec->sector_start = ti->begin; ti 1169 drivers/md/dm-ioctl.c spec->length = ti->len; ti 1170 drivers/md/dm-ioctl.c strncpy(spec->target_type, ti->type->name, ti 1181 drivers/md/dm-ioctl.c if (ti->type->status) { ti 1184 drivers/md/dm-ioctl.c ti->type->status(ti, type, status_flags, outptr, remaining); ti 1578 drivers/md/dm-ioctl.c struct dm_target *ti; ti 1619 drivers/md/dm-ioctl.c ti = dm_table_find_target(table, tmsg->sector); ti 1620 drivers/md/dm-ioctl.c if (!ti) { ti 1623 drivers/md/dm-ioctl.c } else if (ti->type->message) ti 1624 drivers/md/dm-ioctl.c r = ti->type->message(ti, argc, argv, result, maxlen); ti 29 drivers/md/dm-linear.c static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 37 drivers/md/dm-linear.c ti->error = "Invalid argument count"; ti 43 drivers/md/dm-linear.c ti->error = "Cannot allocate linear context"; ti 49 drivers/md/dm-linear.c ti->error = "Invalid device sector"; ti 54 drivers/md/dm-linear.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev); ti 56 drivers/md/dm-linear.c ti->error = "Device lookup failed"; ti 60 drivers/md/dm-linear.c ti->num_flush_bios = 1; ti 61 drivers/md/dm-linear.c ti->num_discard_bios = 1; ti 62 drivers/md/dm-linear.c ti->num_secure_erase_bios = 1; ti 63 drivers/md/dm-linear.c ti->num_write_same_bios = 1; ti 64 drivers/md/dm-linear.c ti->num_write_zeroes_bios = 1; ti 65 drivers/md/dm-linear.c ti->private = lc; ti 73 drivers/md/dm-linear.c static void linear_dtr(struct dm_target *ti) ti 75 drivers/md/dm-linear.c struct linear_c *lc = (struct linear_c *) ti->private; ti 77 drivers/md/dm-linear.c dm_put_device(ti, lc->dev); ti 81 drivers/md/dm-linear.c static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector) ti 83 drivers/md/dm-linear.c struct linear_c *lc = ti->private; ti 85 drivers/md/dm-linear.c return lc->start + dm_target_offset(ti, bi_sector); ti 88 drivers/md/dm-linear.c static void linear_map_bio(struct dm_target *ti, struct bio *bio) ti 90 drivers/md/dm-linear.c struct linear_c *lc = ti->private; ti 95 drivers/md/dm-linear.c linear_map_sector(ti, bio->bi_iter.bi_sector); ti 98 drivers/md/dm-linear.c static int linear_map(struct dm_target *ti, struct bio *bio) ti 100 drivers/md/dm-linear.c linear_map_bio(ti, bio); ti 105 drivers/md/dm-linear.c static 
void linear_status(struct dm_target *ti, status_type_t type, ti 108 drivers/md/dm-linear.c struct linear_c *lc = (struct linear_c *) ti->private; ti 122 drivers/md/dm-linear.c static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) ti 124 drivers/md/dm-linear.c struct linear_c *lc = (struct linear_c *) ti->private; ti 133 drivers/md/dm-linear.c ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) ti 139 drivers/md/dm-linear.c static int linear_report_zones(struct dm_target *ti, sector_t sector, ti 142 drivers/md/dm-linear.c struct linear_c *lc = (struct linear_c *) ti->private; ti 146 drivers/md/dm-linear.c ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector), ti 152 drivers/md/dm-linear.c dm_remap_zone_report(ti, lc->start, zones, nr_zones); ti 157 drivers/md/dm-linear.c static int linear_iterate_devices(struct dm_target *ti, ti 160 drivers/md/dm-linear.c struct linear_c *lc = ti->private; ti 162 drivers/md/dm-linear.c return fn(ti, lc->dev, lc->start, ti->len, data); ti 166 drivers/md/dm-linear.c static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, ti 170 drivers/md/dm-linear.c struct linear_c *lc = ti->private; ti 175 drivers/md/dm-linear.c dev_sector = linear_map_sector(ti, sector); ti 182 drivers/md/dm-linear.c static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, ti 185 drivers/md/dm-linear.c struct linear_c *lc = ti->private; ti 190 drivers/md/dm-linear.c dev_sector = linear_map_sector(ti, sector); ti 196 drivers/md/dm-linear.c static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, ti 199 drivers/md/dm-linear.c struct linear_c *lc = ti->private; ti 204 drivers/md/dm-linear.c dev_sector = linear_map_sector(ti, sector); ti 37 drivers/md/dm-log-userspace-base.c struct dm_target *ti; ti 125 drivers/md/dm-log-userspace-base.c static int build_constructor_string(struct dm_target *ti, ti 148 drivers/md/dm-log-userspace-base.c str_size = sprintf(str, "%llu", (unsigned long long)ti->len); ti 166 drivers/md/dm-log-userspace-base.c dm_table_event(lc->ti->table); ti 190 drivers/md/dm-log-userspace-base.c static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, ti 216 drivers/md/dm-log-userspace-base.c lc->ti = ti; ti 239 drivers/md/dm-log-userspace-base.c str_size = build_constructor_string(ti, argc, argv, &ctr_str); ti 285 drivers/md/dm-log-userspace-base.c lc->region_count = dm_sector_div_up(ti->len, lc->region_size); ti 293 drivers/md/dm-log-userspace-base.c r = dm_get_device(ti, devices_rdata, ti 294 drivers/md/dm-log-userspace-base.c dm_table_get_mode(ti->table), &lc->log_dev); ti 342 drivers/md/dm-log-userspace-base.c dm_put_device(lc->ti, lc->log_dev); ti 637 drivers/md/dm-log-userspace-base.c dm_table_event(lc->ti->table); ti 528 drivers/md/dm-log-writes.c static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 539 drivers/md/dm-log-writes.c ti->error = "Invalid argument count"; ti 545 drivers/md/dm-log-writes.c ti->error = "Cannot allocate context"; ti 557 drivers/md/dm-log-writes.c ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev); ti 559 drivers/md/dm-log-writes.c ti->error = "Device lookup failed"; ti 564 drivers/md/dm-log-writes.c ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), ti 567 drivers/md/dm-log-writes.c ti->error = "Log device lookup failed"; ti 568 drivers/md/dm-log-writes.c dm_put_device(ti, lc->dev); ti 577 drivers/md/dm-log-writes.c ti->error = "Couldn't alloc kthread"; ti 
578 drivers/md/dm-log-writes.c dm_put_device(ti, lc->dev); ti 579 drivers/md/dm-log-writes.c dm_put_device(ti, lc->logdev); ti 593 drivers/md/dm-log-writes.c ti->num_flush_bios = 1; ti 594 drivers/md/dm-log-writes.c ti->flush_supported = true; ti 595 drivers/md/dm-log-writes.c ti->num_discard_bios = 1; ti 596 drivers/md/dm-log-writes.c ti->discards_supported = true; ti 597 drivers/md/dm-log-writes.c ti->per_io_data_size = sizeof(struct per_bio_data); ti 598 drivers/md/dm-log-writes.c ti->private = lc; ti 633 drivers/md/dm-log-writes.c static void log_writes_dtr(struct dm_target *ti) ti 635 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 653 drivers/md/dm-log-writes.c dm_put_device(ti, lc->dev); ti 654 drivers/md/dm-log-writes.c dm_put_device(ti, lc->logdev); ti 658 drivers/md/dm-log-writes.c static void normal_map_bio(struct dm_target *ti, struct bio *bio) ti 660 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 665 drivers/md/dm-log-writes.c static int log_writes_map(struct dm_target *ti, struct bio *bio) ti 667 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 786 drivers/md/dm-log-writes.c normal_map_bio(ti, bio); ti 790 drivers/md/dm-log-writes.c static int normal_end_io(struct dm_target *ti, struct bio *bio, ti 793 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 819 drivers/md/dm-log-writes.c static void log_writes_status(struct dm_target *ti, status_type_t type, ti 824 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 840 drivers/md/dm-log-writes.c static int log_writes_prepare_ioctl(struct dm_target *ti, ti 843 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 850 drivers/md/dm-log-writes.c if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) ti 855 drivers/md/dm-log-writes.c static int log_writes_iterate_devices(struct dm_target *ti, ti 859 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 861 drivers/md/dm-log-writes.c return fn(ti, lc->dev, 0, ti->len, data); ti 868 drivers/md/dm-log-writes.c static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv, ti 872 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 887 drivers/md/dm-log-writes.c static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 889 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 948 drivers/md/dm-log-writes.c static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, ti 951 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 961 drivers/md/dm-log-writes.c static size_t log_writes_dax_copy_from_iter(struct dm_target *ti, ti 965 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 985 drivers/md/dm-log-writes.c static size_t log_writes_dax_copy_to_iter(struct dm_target *ti, ti 989 drivers/md/dm-log-writes.c struct log_writes_c *lc = ti->private; ti 148 drivers/md/dm-log.c struct dm_target *ti, ti 149 drivers/md/dm-log.c int (*flush_callback_fn)(struct dm_target *ti), ti 167 drivers/md/dm-log.c if (type->ctr(log, ti, argc, argv)) { ti 218 drivers/md/dm-log.c struct dm_target *ti; ti 346 drivers/md/dm-log.c static int _check_region_size(struct dm_target *ti, uint32_t region_size) ti 348 drivers/md/dm-log.c if (region_size < 2 || region_size > ti->len) ti 363 drivers/md/dm-log.c static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, ti 394 drivers/md/dm-log.c !_check_region_size(ti, region_size)) { ti 399 
drivers/md/dm-log.c region_count = dm_sector_div_up(ti->len, region_size); ti 407 drivers/md/dm-log.c lc->ti = ti; ti 516 drivers/md/dm-log.c static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti, ti 519 drivers/md/dm-log.c return create_log_context(log, ti, argc, argv, NULL); ti 542 drivers/md/dm-log.c static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti, ti 553 drivers/md/dm-log.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev); ti 557 drivers/md/dm-log.c r = create_log_context(log, ti, argc - 1, argv + 1, dev); ti 559 drivers/md/dm-log.c dm_put_device(ti, dev); ti 570 drivers/md/dm-log.c dm_put_device(lc->ti, lc->log_dev); ti 582 drivers/md/dm-log.c dm_table_event(lc->ti->table); ti 690 drivers/md/dm-log.c log->flush_callback_fn(lc->ti)) { ti 90 drivers/md/dm-mpath.c struct dm_target *ti; ti 157 drivers/md/dm-mpath.c static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) ti 163 drivers/md/dm-mpath.c dm_put_device(ti, pgpath->path.dev); ti 169 drivers/md/dm-mpath.c struct dm_target *ti) ti 178 drivers/md/dm-mpath.c free_pgpaths(&pg->pgpaths, ti); ti 182 drivers/md/dm-mpath.c static struct multipath *alloc_multipath(struct dm_target *ti) ti 196 drivers/md/dm-mpath.c m->ti = ti; ti 197 drivers/md/dm-mpath.c ti->private = m; ti 203 drivers/md/dm-mpath.c static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) ti 216 drivers/md/dm-mpath.c dm_table_set_type(ti->table, m->queue_mode); ti 237 drivers/md/dm-mpath.c free_priority_group(pg, m->ti); ti 439 drivers/md/dm-mpath.c struct mapped_device *md = dm_table_get_md((m)->ti->table); \ ti 445 drivers/md/dm-mpath.c dm_noflush_suspending((m)->ti)); \ ti 461 drivers/md/dm-mpath.c dm_noflush_suspending(m->ti)); ti 483 drivers/md/dm-mpath.c static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, ti 487 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 632 drivers/md/dm-mpath.c static int multipath_map_bio(struct dm_target *ti, struct bio *bio) ti 634 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 644 drivers/md/dm-mpath.c dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); ti 715 drivers/md/dm-mpath.c dm_table_run_md_queue_async(m->ti->table); ti 731 drivers/md/dm-mpath.c dm_table_event(m->ti->table); ti 745 drivers/md/dm-mpath.c struct dm_target *ti) ti 757 drivers/md/dm-mpath.c ti->error = "unknown path selector type"; ti 761 drivers/md/dm-mpath.c r = dm_read_arg_group(_args, as, &ps_argc, &ti->error); ti 770 drivers/md/dm-mpath.c ti->error = "path selector constructor failed"; ti 837 drivers/md/dm-mpath.c struct dm_target *ti) ti 841 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 847 drivers/md/dm-mpath.c ti->error = "no device given"; ti 855 drivers/md/dm-mpath.c r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), ti 858 drivers/md/dm-mpath.c ti->error = "error getting device"; ti 866 drivers/md/dm-mpath.c r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error); ti 869 drivers/md/dm-mpath.c dm_put_device(ti, p->path.dev); ti 874 drivers/md/dm-mpath.c r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); ti 876 drivers/md/dm-mpath.c dm_put_device(ti, p->path.dev); ti 897 drivers/md/dm-mpath.c struct dm_target *ti = m->ti; ti 901 drivers/md/dm-mpath.c ti->error = "not enough priority group arguments"; ti 907 drivers/md/dm-mpath.c ti->error = "couldn't allocate priority group"; ti 912 drivers/md/dm-mpath.c r = parse_path_selector(as, pg, ti); ti 919 
drivers/md/dm-mpath.c r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error); ti 923 drivers/md/dm-mpath.c r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error); ti 933 drivers/md/dm-mpath.c ti->error = "not enough path parameters"; ti 941 drivers/md/dm-mpath.c pgpath = parse_path(&path_args, &pg->ps, ti); ti 955 drivers/md/dm-mpath.c free_priority_group(pg, ti); ti 963 drivers/md/dm-mpath.c struct dm_target *ti = m->ti; ti 969 drivers/md/dm-mpath.c if (dm_read_arg_group(_args, as, &hw_argc, &ti->error)) ti 993 drivers/md/dm-mpath.c ti->error = "memory allocation failed"; ti 1014 drivers/md/dm-mpath.c struct dm_target *ti = m->ti; ti 1023 drivers/md/dm-mpath.c r = dm_read_arg_group(_args, as, &argc, &ti->error); ti 1046 drivers/md/dm-mpath.c r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); ti 1053 drivers/md/dm-mpath.c r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error); ti 1068 drivers/md/dm-mpath.c ti->error = "Unknown 'queue_mode' requested"; ti 1075 drivers/md/dm-mpath.c ti->error = "Unrecognised multipath feature request"; ti 1082 drivers/md/dm-mpath.c static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) ti 1099 drivers/md/dm-mpath.c m = alloc_multipath(ti); ti 1101 drivers/md/dm-mpath.c ti->error = "can't allocate multipath"; ti 1109 drivers/md/dm-mpath.c r = alloc_multipath_stage2(ti, m); ti 1117 drivers/md/dm-mpath.c r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error); ti 1121 drivers/md/dm-mpath.c r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error); ti 1127 drivers/md/dm-mpath.c ti->error = "invalid initial priority group"; ti 1154 drivers/md/dm-mpath.c ti->error = "priority group count mismatch"; ti 1159 drivers/md/dm-mpath.c ti->num_flush_bios = 1; ti 1160 drivers/md/dm-mpath.c ti->num_discard_bios = 1; ti 1161 drivers/md/dm-mpath.c ti->num_write_same_bios = 1; ti 1162 drivers/md/dm-mpath.c ti->num_write_zeroes_bios = 1; ti 1164 drivers/md/dm-mpath.c ti->per_io_data_size = multipath_per_bio_data_size(); ti 1166 drivers/md/dm-mpath.c ti->per_io_data_size = sizeof(struct dm_mpath_io); ti 1209 drivers/md/dm-mpath.c static void multipath_dtr(struct dm_target *ti) ti 1211 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1241 drivers/md/dm-mpath.c dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, ti 1284 drivers/md/dm-mpath.c dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, ti 1292 drivers/md/dm-mpath.c dm_table_run_md_queue_async(m->ti->table); ti 1521 drivers/md/dm-mpath.c static int multipath_end_io(struct dm_target *ti, struct request *clone, ti 1540 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1569 drivers/md/dm-mpath.c static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, ti 1572 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1619 drivers/md/dm-mpath.c static void multipath_presuspend(struct dm_target *ti) ti 1621 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1626 drivers/md/dm-mpath.c static void multipath_postsuspend(struct dm_target *ti) ti 1628 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1638 drivers/md/dm-mpath.c static void multipath_resume(struct dm_target *ti) ti 1640 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1665 drivers/md/dm-mpath.c static void multipath_status(struct dm_target *ti, status_type_t type, ti 1670 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1787 drivers/md/dm-mpath.c static int multipath_message(struct dm_target *ti, unsigned argc, char **argv, ti 1792 
drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1797 drivers/md/dm-mpath.c if (dm_suspended(ti)) { ti 1835 drivers/md/dm-mpath.c r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev); ti 1844 drivers/md/dm-mpath.c dm_put_device(ti, dev); ti 1851 drivers/md/dm-mpath.c static int multipath_prepare_ioctl(struct dm_target *ti, ti 1854 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1885 drivers/md/dm-mpath.c dm_table_run_md_queue_async(m->ti->table); ti 1892 drivers/md/dm-mpath.c if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT) ti 1897 drivers/md/dm-mpath.c static int multipath_iterate_devices(struct dm_target *ti, ti 1900 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 1907 drivers/md/dm-mpath.c ret = fn(ti, p->path.dev, ti->begin, ti->len, data); ti 1932 drivers/md/dm-mpath.c static int multipath_busy(struct dm_target *ti) ti 1935 drivers/md/dm-mpath.c struct multipath *m = ti->private; ti 226 drivers/md/dm-raid.c struct dm_target *ti; ti 509 drivers/md/dm-raid.c rs->ti->error = "Invalid flags combination"; ti 697 drivers/md/dm-raid.c struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); ti 731 drivers/md/dm-raid.c static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type, ti 738 drivers/md/dm-raid.c ti->error = "Insufficient number of devices"; ti 744 drivers/md/dm-raid.c ti->error = "Cannot allocate raid context"; ti 753 drivers/md/dm-raid.c rs->ti = ti; ti 786 drivers/md/dm-raid.c dm_put_device(rs->ti, rs->journal_dev.dev); ti 791 drivers/md/dm-raid.c dm_put_device(rs->ti, rs->dev[i].meta_dev); ti 794 drivers/md/dm-raid.c dm_put_device(rs->ti, rs->dev[i].data_dev); ti 848 drivers/md/dm-raid.c r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), ti 851 drivers/md/dm-raid.c rs->ti->error = "RAID metadata device lookup failure"; ti 857 drivers/md/dm-raid.c rs->ti->error = "Failed to allocate superblock page"; ti 869 drivers/md/dm-raid.c rs->ti->error = "Drive designated for rebuild not specified"; ti 874 drivers/md/dm-raid.c rs->ti->error = "No data device supplied with metadata device"; ti 881 drivers/md/dm-raid.c r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), ti 884 drivers/md/dm-raid.c rs->ti->error = "RAID device lookup failure"; ti 917 drivers/md/dm-raid.c rs->ti->error = "Unable to rebuild drive while array is not in-sync"; ti 936 drivers/md/dm-raid.c unsigned long min_region_size = rs->ti->len / (1 << 21); ti 958 drivers/md/dm-raid.c if (region_size > rs->ti->len) { ti 959 drivers/md/dm-raid.c rs->ti->error = "Supplied region size is too large"; ti 966 drivers/md/dm-raid.c rs->ti->error = "Supplied region size is too small"; ti 971 drivers/md/dm-raid.c rs->ti->error = "Region size is not a power of 2"; ti 976 drivers/md/dm-raid.c rs->ti->error = "Region size is smaller than the chunk size"; ti 1134 drivers/md/dm-raid.c rs->ti->error = "Bad numerical argument given for chunk_size"; ti 1147 drivers/md/dm-raid.c rs->ti->error = "Chunk size must be a power of 2"; ti 1150 drivers/md/dm-raid.c rs->ti->error = "Chunk size value is too small"; ti 1184 drivers/md/dm-raid.c rs->ti->error = "Not enough raid parameters given"; ti 1190 drivers/md/dm-raid.c rs->ti->error = "Only one 'nosync' argument allowed"; ti 1197 drivers/md/dm-raid.c rs->ti->error = "Only one 'sync' argument allowed"; ti 1204 drivers/md/dm-raid.c rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed"; ti 1213 drivers/md/dm-raid.c rs->ti->error = "Wrong number of raid 
parameters given"; ti 1223 drivers/md/dm-raid.c rs->ti->error = "Only one 'raid10_format' argument pair allowed"; ti 1227 drivers/md/dm-raid.c rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; ti 1232 drivers/md/dm-raid.c rs->ti->error = "Invalid 'raid10_format' value given"; ti 1244 drivers/md/dm-raid.c rs->ti->error = "Only one raid4/5/6 set journaling device allowed"; ti 1248 drivers/md/dm-raid.c rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type"; ti 1251 drivers/md/dm-raid.c r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), ti 1254 drivers/md/dm-raid.c rs->ti->error = "raid4/5/6 journal device lookup failure"; ti 1263 drivers/md/dm-raid.c rs->ti->error = "No space for raid4/5/6 journal"; ti 1276 drivers/md/dm-raid.c rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'"; ti 1280 drivers/md/dm-raid.c rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed"; ti 1285 drivers/md/dm-raid.c rs->ti->error = "Invalid 'journal_mode' argument"; ti 1296 drivers/md/dm-raid.c rs->ti->error = "Bad numerical argument given in raid params"; ti 1307 drivers/md/dm-raid.c rs->ti->error = "Invalid rebuild index given"; ti 1312 drivers/md/dm-raid.c rs->ti->error = "rebuild for this index already given"; ti 1323 drivers/md/dm-raid.c rs->ti->error = "write_mostly option is only valid for RAID1"; ti 1328 drivers/md/dm-raid.c rs->ti->error = "Invalid write_mostly index given"; ti 1337 drivers/md/dm-raid.c rs->ti->error = "max_write_behind option is only valid for RAID1"; ti 1342 drivers/md/dm-raid.c rs->ti->error = "Only one max_write_behind argument pair allowed"; ti 1351 drivers/md/dm-raid.c rs->ti->error = "Max write-behind limit out of range"; ti 1358 drivers/md/dm-raid.c rs->ti->error = "Only one daemon_sleep argument pair allowed"; ti 1362 drivers/md/dm-raid.c rs->ti->error = "daemon sleep period out of range"; ti 1369 drivers/md/dm-raid.c rs->ti->error = "Only one data_offset argument pair allowed"; ti 1375 drivers/md/dm-raid.c rs->ti->error = "Bogus data_offset value"; ti 1382 drivers/md/dm-raid.c rs->ti->error = "Only one delta_disks argument pair allowed"; ti 1387 drivers/md/dm-raid.c rs->ti->error = "Too many delta_disk requested"; ti 1394 drivers/md/dm-raid.c rs->ti->error = "Only one stripe_cache argument pair allowed"; ti 1399 drivers/md/dm-raid.c rs->ti->error = "Inappropriate argument: stripe_cache"; ti 1404 drivers/md/dm-raid.c rs->ti->error = "Bogus stripe cache entries value"; ti 1410 drivers/md/dm-raid.c rs->ti->error = "Only one min_recovery_rate argument pair allowed"; ti 1415 drivers/md/dm-raid.c rs->ti->error = "min_recovery_rate out of range"; ti 1421 drivers/md/dm-raid.c rs->ti->error = "Only one max_recovery_rate argument pair allowed"; ti 1426 drivers/md/dm-raid.c rs->ti->error = "max_recovery_rate out of range"; ti 1432 drivers/md/dm-raid.c rs->ti->error = "Only one region_size argument pair allowed"; ti 1440 drivers/md/dm-raid.c rs->ti->error = "Only one raid10_copies argument pair allowed"; ti 1445 drivers/md/dm-raid.c rs->ti->error = "Bad value for 'raid10_copies'"; ti 1452 drivers/md/dm-raid.c rs->ti->error = "Unable to parse RAID parameter"; ti 1459 drivers/md/dm-raid.c rs->ti->error = "sync and nosync are mutually exclusive"; ti 1466 drivers/md/dm-raid.c rs->ti->error = "sync/nosync and rebuild are mutually exclusive"; ti 1471 drivers/md/dm-raid.c rs->ti->error = "Can't set all raid1 devices to write_mostly"; ti 1477 drivers/md/dm-raid.c rs->ti->error = "Bogus recovery rates"; 
ti 1489 drivers/md/dm-raid.c if (dm_set_target_max_io_len(rs->ti, max_io_len)) ti 1494 drivers/md/dm-raid.c rs->ti->error = "Not enough devices to satisfy specification"; ti 1500 drivers/md/dm-raid.c rs->ti->error = "Error getting raid10 format"; ti 1506 drivers/md/dm-raid.c rs->ti->error = "Failed to recognize new raid10 layout"; ti 1513 drivers/md/dm-raid.c rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible"; ti 1538 drivers/md/dm-raid.c rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size"; ti 1550 drivers/md/dm-raid.c rs->ti->error = "Cannot change stripe_cache size on inactive RAID set"; ti 1558 drivers/md/dm-raid.c rs->ti->error = "Failed to set raid4/5/6 stripe cache size"; ti 1609 drivers/md/dm-raid.c rs->ti->error = "Component device(s) too small"; ti 1624 drivers/md/dm-raid.c sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len; ti 1640 drivers/md/dm-raid.c rs->ti->error = "Bogus raid10 data copies or delta disks"; ti 1668 drivers/md/dm-raid.c rs->ti->error = "Target length not divisible by number of data devices"; ti 1720 drivers/md/dm-raid.c dm_table_event(rs->ti->table); ti 1742 drivers/md/dm-raid.c rs->ti->error = "Can't takeover degraded raid set"; ti 1747 drivers/md/dm-raid.c rs->ti->error = "Can't takeover reshaping raid set"; ti 1885 drivers/md/dm-raid.c rs->ti->error = "takeover not possible"; ti 2032 drivers/md/dm-raid.c rs->ti->error = "Reshape not supported"; ti 2034 drivers/md/dm-raid.c rs->ti->error = "Can't reshape degraded raid set"; ti 2036 drivers/md/dm-raid.c rs->ti->error = "Convert request on recovering raid set prohibited"; ti 2038 drivers/md/dm-raid.c rs->ti->error = "raid set already reshaping!"; ti 2040 drivers/md/dm-raid.c rs->ti->error = "Reshaping only supported for raid1/4/5/6/10"; ti 2420 drivers/md/dm-raid.c rs->ti->error = ti 2430 drivers/md/dm-raid.c rs->ti->error = "Cannot change device positions in raid set"; ti 2468 drivers/md/dm-raid.c rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; ti 2473 drivers/md/dm-raid.c rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; ti 2520 drivers/md/dm-raid.c static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) ti 2591 drivers/md/dm-raid.c rs->ti->error = "Unable to assemble array: Invalid superblocks"; ti 2596 drivers/md/dm-raid.c rs->ti->error = "Insufficient redundancy to activate array"; ti 2685 drivers/md/dm-raid.c rs->ti->error = data_offset ? 
"No space for forward reshape" : ti 2787 drivers/md/dm-raid.c rs->ti->error = "Can't reshape raid10 mirror groups"; ti 2814 drivers/md/dm-raid.c rs->ti->error = "Called with bogus raid type"; ti 2963 drivers/md/dm-raid.c struct dm_target *ti = rs->ti; ti 2989 drivers/md/dm-raid.c ti->num_discard_bios = 1; ti 3005 drivers/md/dm-raid.c static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 3024 drivers/md/dm-raid.c ti->error = "No arguments"; ti 3030 drivers/md/dm-raid.c ti->error = "Unrecognised raid_type"; ti 3035 drivers/md/dm-raid.c if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error)) ti 3042 drivers/md/dm-raid.c if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error)) ti 3046 drivers/md/dm-raid.c ti->error = "Invalid number of supplied raid devices"; ti 3050 drivers/md/dm-raid.c rs = raid_set_alloc(ti, rt, num_raid_devs); ti 3083 drivers/md/dm-raid.c r = analyse_superblocks(ti, rs); ti 3089 drivers/md/dm-raid.c ti->error = "Invalid rdev size"; ti 3100 drivers/md/dm-raid.c ti->private = rs; ti 3101 drivers/md/dm-raid.c ti->num_flush_bios = 1; ti 3116 drivers/md/dm-raid.c ti->error = "'nosync' not allowed for new raid6 set"; ti 3134 drivers/md/dm-raid.c ti->error = "Can't resize a reshaping raid set"; ti 3141 drivers/md/dm-raid.c ti->error = "Can't takeover a reshaping raid set"; ti 3148 drivers/md/dm-raid.c ti->error = "Can't takeover a journaled raid4/5/6 set"; ti 3181 drivers/md/dm-raid.c ti->error = "Can't reshape a journaled raid4/5/6 set"; ti 3231 drivers/md/dm-raid.c ti->error = "Failed to run raid array"; ti 3239 drivers/md/dm-raid.c ti->error = "Failed to start raid array"; ti 3245 drivers/md/dm-raid.c dm_table_add_target_callbacks(ti->table, &rs->callbacks); ti 3251 drivers/md/dm-raid.c ti->error = "Failed to set raid4/5/6 journal mode"; ti 3279 drivers/md/dm-raid.c ti->error = "Reshape check failed"; ti 3302 drivers/md/dm-raid.c static void raid_dtr(struct dm_target *ti) ti 3304 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3311 drivers/md/dm-raid.c static int raid_map(struct dm_target *ti, struct bio *bio) ti 3313 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3502 drivers/md/dm-raid.c static void raid_status(struct dm_target *ti, status_type_t type, ti 3505 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3669 drivers/md/dm-raid.c static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, ti 3672 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3720 drivers/md/dm-raid.c static int raid_iterate_devices(struct dm_target *ti, ti 3723 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3729 drivers/md/dm-raid.c r = fn(ti, ti 3738 drivers/md/dm-raid.c static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 3740 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3756 drivers/md/dm-raid.c static void raid_postsuspend(struct dm_target *ti) ti 3758 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3908 drivers/md/dm-raid.c rs->ti->error = "pers->check_reshape() failed"; ti 3919 drivers/md/dm-raid.c rs->ti->error = "pers->start_reshape() failed"; ti 3934 drivers/md/dm-raid.c static int raid_preresume(struct dm_target *ti) ti 3937 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 3990 drivers/md/dm-raid.c static void raid_resume(struct dm_target *ti) ti 3992 drivers/md/dm-raid.c struct raid_set *rs = ti->private; ti 54 drivers/md/dm-raid1.c struct dm_target *ti; ti 254 drivers/md/dm-raid1.c static int mirror_flush(struct 
dm_target *ti) ti 256 drivers/md/dm-raid1.c struct mirror_set *ms = ti->private; ti 347 drivers/md/dm-raid1.c from.count = ms->ti->len & (region_size - 1); ti 406 drivers/md/dm-raid1.c dm_table_event(ms->ti->table); ti 455 drivers/md/dm-raid1.c return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); ti 486 drivers/md/dm-raid1.c if (dm_noflush_suspending(ms->ti)) ti 844 drivers/md/dm-raid1.c dm_table_event(ms->ti->table); ti 878 drivers/md/dm-raid1.c struct dm_target *ti, ti 885 drivers/md/dm-raid1.c ti->error = "Cannot allocate mirror context"; ti 895 drivers/md/dm-raid1.c ms->ti = ti; ti 897 drivers/md/dm-raid1.c ms->nr_regions = dm_sector_div_up(ti->len, region_size); ti 906 drivers/md/dm-raid1.c ti->error = "Error creating dm_io client"; ti 913 drivers/md/dm-raid1.c ms->ti->begin, MAX_RECOVERY, ti 916 drivers/md/dm-raid1.c ti->error = "Error creating dirty region hash"; ti 925 drivers/md/dm-raid1.c static void free_context(struct mirror_set *ms, struct dm_target *ti, ti 929 drivers/md/dm-raid1.c dm_put_device(ti, ms->mirror[m].dev); ti 936 drivers/md/dm-raid1.c static int get_mirror(struct mirror_set *ms, struct dm_target *ti, ti 945 drivers/md/dm-raid1.c ti->error = "Invalid offset"; ti 949 drivers/md/dm-raid1.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), ti 952 drivers/md/dm-raid1.c ti->error = "Device lookup failure"; ti 967 drivers/md/dm-raid1.c static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, ti 976 drivers/md/dm-raid1.c ti->error = "Insufficient mirror log arguments"; ti 981 drivers/md/dm-raid1.c ti->error = "Invalid mirror log argument count"; ti 988 drivers/md/dm-raid1.c ti->error = "Insufficient mirror log arguments"; ti 992 drivers/md/dm-raid1.c dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count, ti 995 drivers/md/dm-raid1.c ti->error = "Error creating mirror dirty log"; ti 1006 drivers/md/dm-raid1.c struct dm_target *ti = ms->ti; ti 1016 drivers/md/dm-raid1.c ti->error = "Invalid number of features"; ti 1025 drivers/md/dm-raid1.c ti->error = "Not enough arguments to support feature count"; ti 1035 drivers/md/dm-raid1.c ti->error = "Unrecognised feature requested"; ti 1044 drivers/md/dm-raid1.c ti->error = "keep_log feature requires the handle_errors feature"; ti 1063 drivers/md/dm-raid1.c static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 1071 drivers/md/dm-raid1.c dl = create_dirty_log(ti, argc, argv, &args_used); ti 1080 drivers/md/dm-raid1.c ti->error = "Invalid number of mirrors"; ti 1088 drivers/md/dm-raid1.c ti->error = "Too few mirror arguments"; ti 1093 drivers/md/dm-raid1.c ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); ti 1101 drivers/md/dm-raid1.c r = get_mirror(ms, ti, m, argv); ti 1103 drivers/md/dm-raid1.c free_context(ms, ti, m); ti 1110 drivers/md/dm-raid1.c ti->private = ms; ti 1112 drivers/md/dm-raid1.c r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh)); ti 1116 drivers/md/dm-raid1.c ti->num_flush_bios = 1; ti 1117 drivers/md/dm-raid1.c ti->num_discard_bios = 1; ti 1118 drivers/md/dm-raid1.c ti->per_io_data_size = sizeof(struct dm_raid1_bio_record); ti 1148 drivers/md/dm-raid1.c ti->error = "Too many mirror arguments"; ti 1165 drivers/md/dm-raid1.c free_context(ms, ti, ms->nr_mirrors); ti 1169 drivers/md/dm-raid1.c static void mirror_dtr(struct dm_target *ti) ti 1171 drivers/md/dm-raid1.c struct mirror_set *ms = (struct mirror_set *) ti->private; ti 1178 drivers/md/dm-raid1.c free_context(ms, ti, ms->nr_mirrors); ti 1184 
drivers/md/dm-raid1.c static int mirror_map(struct dm_target *ti, struct bio *bio) ti 1188 drivers/md/dm-raid1.c struct mirror_set *ms = ti->private; ti 1233 drivers/md/dm-raid1.c static int mirror_end_io(struct dm_target *ti, struct bio *bio, ti 1237 drivers/md/dm-raid1.c struct mirror_set *ms = (struct mirror_set *) ti->private; ti 1300 drivers/md/dm-raid1.c static void mirror_presuspend(struct dm_target *ti) ti 1302 drivers/md/dm-raid1.c struct mirror_set *ms = (struct mirror_set *) ti->private; ti 1346 drivers/md/dm-raid1.c static void mirror_postsuspend(struct dm_target *ti) ti 1348 drivers/md/dm-raid1.c struct mirror_set *ms = ti->private; ti 1356 drivers/md/dm-raid1.c static void mirror_resume(struct dm_target *ti) ti 1358 drivers/md/dm-raid1.c struct mirror_set *ms = ti->private; ti 1393 drivers/md/dm-raid1.c static void mirror_status(struct dm_target *ti, status_type_t type, ti 1398 drivers/md/dm-raid1.c struct mirror_set *ms = (struct mirror_set *) ti->private; ti 1441 drivers/md/dm-raid1.c static int mirror_iterate_devices(struct dm_target *ti, ti 1444 drivers/md/dm-raid1.c struct mirror_set *ms = ti->private; ti 1449 drivers/md/dm-raid1.c ret = fn(ti, ms->mirror[i].dev, ti 1450 drivers/md/dm-raid1.c ms->mirror[i].offset, ti->len, data); ti 20 drivers/md/dm-rq.c struct dm_target *ti; ti 171 drivers/md/dm-rq.c tio->ti->type->release_clone_rq(clone, NULL); ti 204 drivers/md/dm-rq.c tio->ti->type->release_clone_rq(tio->clone, NULL); ti 217 drivers/md/dm-rq.c if (tio->ti) { ti 218 drivers/md/dm-rq.c rq_end_io = tio->ti->type->rq_end_io; ti 221 drivers/md/dm-rq.c r = rq_end_io(tio->ti, clone, error, &tio->info); ti 364 drivers/md/dm-rq.c tio->ti = NULL; ti 387 drivers/md/dm-rq.c struct dm_target *ti = tio->ti; ti 393 drivers/md/dm-rq.c r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); ti 401 drivers/md/dm-rq.c ti->type->release_clone_rq(clone, &tio->info); ti 412 drivers/md/dm-rq.c tio->ti->type->release_clone_rq(clone, &tio->info); ti 497 drivers/md/dm-rq.c struct dm_target *ti = md->immutable_target; ti 499 drivers/md/dm-rq.c if (unlikely(!ti)) { ti 503 drivers/md/dm-rq.c ti = dm_table_find_target(map, 0); ti 507 drivers/md/dm-rq.c if (ti->type->busy && ti->type->busy(ti)) ti 518 drivers/md/dm-rq.c tio->ti = ti; ti 30 drivers/md/dm-snap.c #define dm_target_is_snapshot_merge(ti) \ ti 31 drivers/md/dm-snap.c ((ti)->type->name == dm_snapshot_merge_target_name) ti 54 drivers/md/dm-snap.c struct dm_target *ti; ti 326 drivers/md/dm-snap.c struct dm_target *ti; ti 458 drivers/md/dm-snap.c if (dm_target_is_snapshot_merge(s->ti) && snap_merge) ti 493 drivers/md/dm-snap.c snap->ti->error = "Snapshot cow pairing for exception " ti 508 drivers/md/dm-snap.c if (!dm_target_is_snapshot_merge(snap->ti)) ti 515 drivers/md/dm-snap.c snap->ti->error = "A snapshot is already merging."; ti 521 drivers/md/dm-snap.c snap->ti->error = "Snapshot exception store does not " ti 1177 drivers/md/dm-snap.c struct dm_target *ti) ti 1193 drivers/md/dm-snap.c r = dm_read_arg_group(_args, as, &argc, &ti->error); ti 1208 drivers/md/dm-snap.c ti->error = "Unrecognised feature requested"; ti 1219 drivers/md/dm-snap.c ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow"; ti 1230 drivers/md/dm-snap.c static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 1242 drivers/md/dm-snap.c ti->error = "requires 4 or more arguments"; ti 1247 drivers/md/dm-snap.c if (dm_target_is_snapshot_merge(ti)) { ti 1254 drivers/md/dm-snap.c ti->error = "Cannot allocate private snapshot 
structure"; ti 1262 drivers/md/dm-snap.c r = parse_snapshot_features(&as, s, ti); ti 1270 drivers/md/dm-snap.c r = dm_get_device(ti, origin_path, origin_mode, &s->origin); ti 1272 drivers/md/dm-snap.c ti->error = "Cannot get origin device"; ti 1283 drivers/md/dm-snap.c ti->error = "COW device cannot be the same as origin device"; ti 1288 drivers/md/dm-snap.c r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); ti 1290 drivers/md/dm-snap.c ti->error = "Cannot get COW device"; ti 1294 drivers/md/dm-snap.c r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); ti 1296 drivers/md/dm-snap.c ti->error = "Couldn't create exception store"; ti 1304 drivers/md/dm-snap.c s->ti = ti; ti 1324 drivers/md/dm-snap.c ti->error = "Unable to allocate hash table space"; ti 1334 drivers/md/dm-snap.c ti->error = "Could not create kcopyd client"; ti 1340 drivers/md/dm-snap.c ti->error = "Could not allocate mempool for pending exceptions"; ti 1349 drivers/md/dm-snap.c ti->private = s; ti 1350 drivers/md/dm-snap.c ti->num_flush_bios = num_flush_bios; ti 1352 drivers/md/dm-snap.c ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1); ti 1353 drivers/md/dm-snap.c ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk); ti 1359 drivers/md/dm-snap.c ti->error = "Snapshot origin struct allocation failed"; ti 1380 drivers/md/dm-snap.c ti->error = "Failed to read snapshot metadata"; ti 1388 drivers/md/dm-snap.c ti->error = "Chunk size not set"; ti 1392 drivers/md/dm-snap.c r = dm_set_target_max_io_len(ti, s->store->chunk_size); ti 1410 drivers/md/dm-snap.c dm_put_device(ti, s->cow); ti 1412 drivers/md/dm-snap.c dm_put_device(ti, s->origin); ti 1452 drivers/md/dm-snap.c snap_dest->ti->max_io_len = snap_dest->store->chunk_size; ti 1462 drivers/md/dm-snap.c static void snapshot_dtr(struct dm_target *ti) ti 1467 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 1481 drivers/md/dm-snap.c if (dm_target_is_snapshot_merge(ti)) ti 1507 drivers/md/dm-snap.c dm_put_device(ti, s->cow); ti 1509 drivers/md/dm-snap.c dm_put_device(ti, s->origin); ti 1626 drivers/md/dm-snap.c dm_table_event(s->ti->table); ti 1941 drivers/md/dm-snap.c static int snapshot_map(struct dm_target *ti, struct bio *bio) ti 1944 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 2109 drivers/md/dm-snap.c static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) ti 2112 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 2175 drivers/md/dm-snap.c static int snapshot_end_io(struct dm_target *ti, struct bio *bio, ti 2178 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 2186 drivers/md/dm-snap.c static void snapshot_merge_presuspend(struct dm_target *ti) ti 2188 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 2193 drivers/md/dm-snap.c static int snapshot_preresume(struct dm_target *ti) ti 2196 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 2207 drivers/md/dm-snap.c } else if (!dm_suspended(snap_src->ti)) { ti 2219 drivers/md/dm-snap.c static void snapshot_resume(struct dm_target *ti) ti 2221 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 2231 drivers/md/dm-snap.c origin_md = dm_table_get_md(o->ti->table); ti 2235 drivers/md/dm-snap.c origin_md = dm_table_get_md(snap_merging->ti->table); ti 2237 drivers/md/dm-snap.c if (origin_md == dm_table_get_md(ti->table)) ti 2293 drivers/md/dm-snap.c static void snapshot_merge_resume(struct dm_target *ti) ti 2295 drivers/md/dm-snap.c struct dm_snapshot *s = ti->private; ti 2300 
drivers/md/dm-snap.c snapshot_resume(ti); ti 2305 drivers/md/dm-snap.c ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); ti 2310 drivers/md/dm-snap.c static void snapshot_status(struct dm_target *ti, status_type_t type, ti 2314 drivers/md/dm-snap.c struct dm_snapshot *snap = ti->private; ti 2370 drivers/md/dm-snap.c static int snapshot_iterate_devices(struct dm_target *ti, ti 2373 drivers/md/dm-snap.c struct dm_snapshot *snap = ti->private; ti 2376 drivers/md/dm-snap.c r = fn(ti, snap->origin, 0, ti->len, data); ti 2379 drivers/md/dm-snap.c r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data); ti 2384 drivers/md/dm-snap.c static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 2386 drivers/md/dm-snap.c struct dm_snapshot *snap = ti->private; ti 2437 drivers/md/dm-snap.c if (dm_target_is_snapshot_merge(snap->ti)) ti 2441 drivers/md/dm-snap.c if (sector >= dm_table_get_size(snap->ti->table)) ti 2590 drivers/md/dm-snap.c for (n = 0; n < size; n += merging_snap->ti->max_io_len) ti 2608 drivers/md/dm-snap.c static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 2614 drivers/md/dm-snap.c ti->error = "origin: incorrect number of arguments"; ti 2620 drivers/md/dm-snap.c ti->error = "Cannot allocate private origin structure"; ti 2625 drivers/md/dm-snap.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev); ti 2627 drivers/md/dm-snap.c ti->error = "Cannot get target device"; ti 2631 drivers/md/dm-snap.c o->ti = ti; ti 2632 drivers/md/dm-snap.c ti->private = o; ti 2633 drivers/md/dm-snap.c ti->num_flush_bios = 1; ti 2643 drivers/md/dm-snap.c static void origin_dtr(struct dm_target *ti) ti 2645 drivers/md/dm-snap.c struct dm_origin *o = ti->private; ti 2647 drivers/md/dm-snap.c dm_put_device(ti, o->dev); ti 2651 drivers/md/dm-snap.c static int origin_map(struct dm_target *ti, struct bio *bio) ti 2653 drivers/md/dm-snap.c struct dm_origin *o = ti->private; ti 2678 drivers/md/dm-snap.c static void origin_resume(struct dm_target *ti) ti 2680 drivers/md/dm-snap.c struct dm_origin *o = ti->private; ti 2689 drivers/md/dm-snap.c static void origin_postsuspend(struct dm_target *ti) ti 2691 drivers/md/dm-snap.c struct dm_origin *o = ti->private; ti 2698 drivers/md/dm-snap.c static void origin_status(struct dm_target *ti, status_type_t type, ti 2701 drivers/md/dm-snap.c struct dm_origin *o = ti->private; ti 2714 drivers/md/dm-snap.c static int origin_iterate_devices(struct dm_target *ti, ti 2717 drivers/md/dm-snap.c struct dm_origin *o = ti->private; ti 2719 drivers/md/dm-snap.c return fn(ti, o->dev, 0, ti->len, data); ti 39 drivers/md/dm-stripe.c struct dm_target *ti; ti 55 drivers/md/dm-stripe.c dm_table_event(sc->ti->table); ti 74 drivers/md/dm-stripe.c static int get_stripe(struct dm_target *ti, struct stripe_c *sc, ti 84 drivers/md/dm-stripe.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), ti 98 drivers/md/dm-stripe.c static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 108 drivers/md/dm-stripe.c ti->error = "Not enough arguments"; ti 113 drivers/md/dm-stripe.c ti->error = "Invalid stripe count"; ti 118 drivers/md/dm-stripe.c ti->error = "Invalid chunk_size"; ti 122 drivers/md/dm-stripe.c width = ti->len; ti 124 drivers/md/dm-stripe.c ti->error = "Target length not divisible by " ti 131 drivers/md/dm-stripe.c ti->error = "Target length not divisible by " ti 140 drivers/md/dm-stripe.c ti->error = "Not enough destinations " ti 147 drivers/md/dm-stripe.c ti->error = "Memory 
allocation for striped context " ti 155 drivers/md/dm-stripe.c sc->ti = ti; ti 164 drivers/md/dm-stripe.c r = dm_set_target_max_io_len(ti, chunk_size); ti 170 drivers/md/dm-stripe.c ti->num_flush_bios = stripes; ti 171 drivers/md/dm-stripe.c ti->num_discard_bios = stripes; ti 172 drivers/md/dm-stripe.c ti->num_secure_erase_bios = stripes; ti 173 drivers/md/dm-stripe.c ti->num_write_same_bios = stripes; ti 174 drivers/md/dm-stripe.c ti->num_write_zeroes_bios = stripes; ti 188 drivers/md/dm-stripe.c r = get_stripe(ti, sc, i, argv); ti 190 drivers/md/dm-stripe.c ti->error = "Couldn't parse stripe destination"; ti 192 drivers/md/dm-stripe.c dm_put_device(ti, sc->stripe[i].dev); ti 199 drivers/md/dm-stripe.c ti->private = sc; ti 204 drivers/md/dm-stripe.c static void stripe_dtr(struct dm_target *ti) ti 207 drivers/md/dm-stripe.c struct stripe_c *sc = (struct stripe_c *) ti->private; ti 210 drivers/md/dm-stripe.c dm_put_device(ti, sc->stripe[i].dev); ti 219 drivers/md/dm-stripe.c sector_t chunk = dm_target_offset(sc->ti, sector); ti 286 drivers/md/dm-stripe.c static int stripe_map(struct dm_target *ti, struct bio *bio) ti 288 drivers/md/dm-stripe.c struct stripe_c *sc = ti->private; ti 317 drivers/md/dm-stripe.c static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, ti 321 drivers/md/dm-stripe.c struct stripe_c *sc = ti->private; ti 338 drivers/md/dm-stripe.c static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, ti 342 drivers/md/dm-stripe.c struct stripe_c *sc = ti->private; ti 357 drivers/md/dm-stripe.c static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, ti 361 drivers/md/dm-stripe.c struct stripe_c *sc = ti->private; ti 395 drivers/md/dm-stripe.c static void stripe_status(struct dm_target *ti, status_type_t type, ti 398 drivers/md/dm-stripe.c struct stripe_c *sc = (struct stripe_c *) ti->private; ti 425 drivers/md/dm-stripe.c static int stripe_end_io(struct dm_target *ti, struct bio *bio, ti 430 drivers/md/dm-stripe.c struct stripe_c *sc = ti->private; ti 461 drivers/md/dm-stripe.c static int stripe_iterate_devices(struct dm_target *ti, ti 464 drivers/md/dm-stripe.c struct stripe_c *sc = ti->private; ti 469 drivers/md/dm-stripe.c ret = fn(ti, sc->stripe[i].dev, ti 477 drivers/md/dm-stripe.c static void stripe_io_hints(struct dm_target *ti, ti 480 drivers/md/dm-stripe.c struct stripe_c *sc = ti->private; ti 39 drivers/md/dm-switch.c struct dm_target *ti; ti 59 drivers/md/dm-switch.c static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths, ti 68 drivers/md/dm-switch.c sctx->ti = ti; ti 71 drivers/md/dm-switch.c ti->private = sctx; ti 76 drivers/md/dm-switch.c static int alloc_region_table(struct dm_target *ti, unsigned nr_paths) ti 78 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 79 drivers/md/dm-switch.c sector_t nr_regions = ti->len; ti 102 drivers/md/dm-switch.c ti->error = "Region table too large"; ti 112 drivers/md/dm-switch.c ti->error = "Region table too large"; ti 119 drivers/md/dm-switch.c ti->error = "Cannot allocate region table"; ti 204 drivers/md/dm-switch.c static int parse_path(struct dm_arg_set *as, struct dm_target *ti) ti 206 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 210 drivers/md/dm-switch.c r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), ti 213 drivers/md/dm-switch.c ti->error = "Device lookup failed"; ti 218 drivers/md/dm-switch.c ti->error = "Invalid device starting offset"; ti 219 drivers/md/dm-switch.c 
dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); ti 233 drivers/md/dm-switch.c static void switch_dtr(struct dm_target *ti) ti 235 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 238 drivers/md/dm-switch.c dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev); ti 252 drivers/md/dm-switch.c static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv) ti 268 drivers/md/dm-switch.c r = dm_read_arg(_args, &as, &nr_paths, &ti->error); ti 272 drivers/md/dm-switch.c r = dm_read_arg(_args + 1, &as, &region_size, &ti->error); ti 276 drivers/md/dm-switch.c r = dm_read_arg_group(_args + 2, &as, &nr_optional_args, &ti->error); ti 282 drivers/md/dm-switch.c ti->error = "Incorrect number of path arguments"; ti 286 drivers/md/dm-switch.c sctx = alloc_switch_ctx(ti, nr_paths, region_size); ti 288 drivers/md/dm-switch.c ti->error = "Cannot allocate redirection context"; ti 292 drivers/md/dm-switch.c r = dm_set_target_max_io_len(ti, region_size); ti 297 drivers/md/dm-switch.c r = parse_path(&as, ti); ti 302 drivers/md/dm-switch.c r = alloc_region_table(ti, nr_paths); ti 309 drivers/md/dm-switch.c ti->num_discard_bios = 1; ti 314 drivers/md/dm-switch.c switch_dtr(ti); ti 319 drivers/md/dm-switch.c static int switch_map(struct dm_target *ti, struct bio *bio) ti 321 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 322 drivers/md/dm-switch.c sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); ti 469 drivers/md/dm-switch.c static int switch_message(struct dm_target *ti, unsigned argc, char **argv, ti 474 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 489 drivers/md/dm-switch.c static void switch_status(struct dm_target *ti, status_type_t type, ti 492 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 515 drivers/md/dm-switch.c static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) ti 517 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 527 drivers/md/dm-switch.c if (ti->len + sctx->path_list[path_nr].start != ti 533 drivers/md/dm-switch.c static int switch_iterate_devices(struct dm_target *ti, ti 536 drivers/md/dm-switch.c struct switch_ctx *sctx = ti->private; ti 541 drivers/md/dm-switch.c r = fn(ti, sctx->path_list[path_nr].dmdev, ti 542 drivers/md/dm-switch.c sctx->path_list[path_nr].start, ti->len, data); ti 279 drivers/md/dm-table.c static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, ti 300 drivers/md/dm-table.c dm_device_name(ti->table->md), bdevname(bdev, b), ti 313 drivers/md/dm-table.c dm_device_name(ti->table->md), bdevname(bdev, b), ti 329 drivers/md/dm-table.c dm_device_name(ti->table->md), ti 346 drivers/md/dm-table.c dm_device_name(ti->table->md), ti 359 drivers/md/dm-table.c dm_device_name(ti->table->md), ti 368 drivers/md/dm-table.c dm_device_name(ti->table->md), ti 426 drivers/md/dm-table.c int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, ti 432 drivers/md/dm-table.c struct dm_table *t = ti->table; ti 467 drivers/md/dm-table.c static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, ti 477 drivers/md/dm-table.c dm_device_name(ti->table->md), bdevname(bdev, b)); ti 485 drivers/md/dm-table.c dm_device_name(ti->table->md), bdevname(bdev, b), ti 499 drivers/md/dm-table.c void dm_put_device(struct dm_target *ti, struct dm_dev *d) ti 502 drivers/md/dm-table.c struct list_head *devices = &ti->table->devices; ti 513 drivers/md/dm-table.c dm_device_name(ti->table->md), d->name); ti 517
drivers/md/dm-table.c dm_put_table_device(ti->table->md, d); ti 527 drivers/md/dm-table.c static int adjoin(struct dm_table *table, struct dm_target *ti) ti 532 drivers/md/dm-table.c return !ti->begin; ti 535 drivers/md/dm-table.c return (ti->begin == (prev->begin + prev->len)); ti 662 drivers/md/dm-table.c struct dm_target *uninitialized_var(ti); ti 670 drivers/md/dm-table.c ti = dm_table_get_target(table, i); ti 675 drivers/md/dm-table.c if (ti->type->iterate_devices) ti 676 drivers/md/dm-table.c ti->type->iterate_devices(ti, dm_set_device_limits, ti 683 drivers/md/dm-table.c if (remaining < ti->len && ti 689 drivers/md/dm-table.c (unsigned short) ((next_target_start + ti->len) & ti 699 drivers/md/dm-table.c (unsigned long long) ti->begin, ti 700 drivers/md/dm-table.c (unsigned long long) ti->len, ti 882 drivers/md/dm-table.c int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, ti 892 drivers/md/dm-table.c static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev, ti 901 drivers/md/dm-table.c struct dm_target *ti; ti 906 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 908 drivers/md/dm-table.c if (!ti->type->direct_access) ti 911 drivers/md/dm-table.c if (!ti->type->iterate_devices || ti 912 drivers/md/dm-table.c !ti->type->iterate_devices(ti, iterate_fn, blocksize)) ti 926 drivers/md/dm-table.c static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev, ti 1082 drivers/md/dm-table.c struct dm_target *ti; ti 1086 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1087 drivers/md/dm-table.c if (dm_target_is_wildcard(ti->type)) ti 1088 drivers/md/dm-table.c return ti; ti 1109 drivers/md/dm-table.c struct dm_target *ti; ti 1119 drivers/md/dm-table.c ti = t->targets + i; ti 1120 drivers/md/dm-table.c per_io_data_size = max(per_io_data_size, ti->per_io_data_size); ti 1121 drivers/md/dm-table.c min_pool_size = max(min_pool_size, ti->num_flush_bios); ti 1208 drivers/md/dm-table.c struct dm_target *ti = dm_table_get_target(t, i); ti 1209 drivers/md/dm-table.c if (!dm_target_passes_integrity(ti->type)) ti 1383 drivers/md/dm-table.c static int count_device(struct dm_target *ti, struct dm_dev *dev, ti 1401 drivers/md/dm-table.c struct dm_target *ti; ti 1405 drivers/md/dm-table.c ti = dm_table_get_target(table, i); ti 1407 drivers/md/dm-table.c if (!ti->type->iterate_devices) ti 1411 drivers/md/dm-table.c ti->type->iterate_devices(ti, count_device, &num_devices); ti 1419 drivers/md/dm-table.c static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev, ti 1431 drivers/md/dm-table.c struct dm_target *ti; ti 1435 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1438 drivers/md/dm-table.c !dm_target_supports_zoned_hm(ti->type)) ti 1441 drivers/md/dm-table.c if (!ti->type->iterate_devices || ti 1442 drivers/md/dm-table.c !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model)) ti 1449 drivers/md/dm-table.c static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, ti 1461 drivers/md/dm-table.c struct dm_target *ti; ti 1465 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1467 drivers/md/dm-table.c if (!ti->type->iterate_devices || ti 1468 drivers/md/dm-table.c !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors)) ti 1507 drivers/md/dm-table.c struct dm_target *ti; ti 1518 drivers/md/dm-table.c ti = dm_table_get_target(table, i); ti 1520 drivers/md/dm-table.c if (!ti->type->iterate_devices) ti 1526 drivers/md/dm-table.c ti->type->iterate_devices(ti, 
dm_set_device_limits, ti 1539 drivers/md/dm-table.c if (ti->type->io_hints) ti 1540 drivers/md/dm-table.c ti->type->io_hints(ti, &ti_limits); ti 1546 drivers/md/dm-table.c if (ti->type->iterate_devices(ti, device_area_is_invalid, ti 1560 drivers/md/dm-table.c (unsigned long long) ti->begin, ti 1561 drivers/md/dm-table.c (unsigned long long) ti->len); ti 1631 drivers/md/dm-table.c static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, ti 1642 drivers/md/dm-table.c struct dm_target *ti; ti 1652 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1654 drivers/md/dm-table.c if (!ti->num_flush_bios) ti 1657 drivers/md/dm-table.c if (ti->flush_supported) ti 1660 drivers/md/dm-table.c if (ti->type->iterate_devices && ti 1661 drivers/md/dm-table.c ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) ti 1668 drivers/md/dm-table.c static int device_dax_write_cache_enabled(struct dm_target *ti, ti 1684 drivers/md/dm-table.c struct dm_target *ti; ti 1688 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1690 drivers/md/dm-table.c if (ti->type->iterate_devices && ti 1691 drivers/md/dm-table.c ti->type->iterate_devices(ti, ti 1699 drivers/md/dm-table.c static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, ti 1707 drivers/md/dm-table.c static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, ti 1718 drivers/md/dm-table.c struct dm_target *ti; ti 1722 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1724 drivers/md/dm-table.c if (!ti->type->iterate_devices || ti 1725 drivers/md/dm-table.c !ti->type->iterate_devices(ti, func, NULL)) ti 1732 drivers/md/dm-table.c static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev, ti 1746 drivers/md/dm-table.c static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, ti 1756 drivers/md/dm-table.c struct dm_target *ti; ti 1760 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1762 drivers/md/dm-table.c if (!ti->num_write_same_bios) ti 1765 drivers/md/dm-table.c if (!ti->type->iterate_devices || ti 1766 drivers/md/dm-table.c ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) ti 1773 drivers/md/dm-table.c static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, ti 1783 drivers/md/dm-table.c struct dm_target *ti; ti 1787 drivers/md/dm-table.c ti = dm_table_get_target(t, i++); ti 1789 drivers/md/dm-table.c if (!ti->num_write_zeroes_bios) ti 1792 drivers/md/dm-table.c if (!ti->type->iterate_devices || ti 1793 drivers/md/dm-table.c ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL)) ti 1800 drivers/md/dm-table.c static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, ti 1810 drivers/md/dm-table.c struct dm_target *ti; ti 1814 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1816 drivers/md/dm-table.c if (!ti->num_discard_bios) ti 1824 drivers/md/dm-table.c if (!ti->discards_supported && ti 1825 drivers/md/dm-table.c (!ti->type->iterate_devices || ti 1826 drivers/md/dm-table.c ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) ti 1833 drivers/md/dm-table.c static int device_not_secure_erase_capable(struct dm_target *ti, ti 1844 drivers/md/dm-table.c struct dm_target *ti; ti 1848 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1850 drivers/md/dm-table.c if (!ti->num_secure_erase_bios) ti 1853 drivers/md/dm-table.c if (!ti->type->iterate_devices || ti 1854 drivers/md/dm-table.c ti->type->iterate_devices(ti, 
device_not_secure_erase_capable, NULL)) ti 1861 drivers/md/dm-table.c static int device_requires_stable_pages(struct dm_target *ti, ti 1877 drivers/md/dm-table.c struct dm_target *ti; ti 1881 drivers/md/dm-table.c ti = dm_table_get_target(t, i); ti 1883 drivers/md/dm-table.c if (ti->type->iterate_devices && ti 1884 drivers/md/dm-table.c ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) ti 2004 drivers/md/dm-table.c struct dm_target *ti = t->targets; ti 2011 drivers/md/dm-table.c if (ti->type->presuspend) ti 2012 drivers/md/dm-table.c ti->type->presuspend(ti); ti 2015 drivers/md/dm-table.c if (ti->type->presuspend_undo) ti 2016 drivers/md/dm-table.c ti->type->presuspend_undo(ti); ti 2019 drivers/md/dm-table.c if (ti->type->postsuspend) ti 2020 drivers/md/dm-table.c ti->type->postsuspend(ti); ti 2023 drivers/md/dm-table.c ti++; ti 2058 drivers/md/dm-table.c struct dm_target *ti = t->targets + i; ti 2060 drivers/md/dm-table.c if (!ti->type->preresume) ti 2063 drivers/md/dm-table.c r = ti->type->preresume(ti); ti 2066 drivers/md/dm-table.c dm_device_name(t->md), ti->type->name, r); ti 2072 drivers/md/dm-table.c struct dm_target *ti = t->targets + i; ti 2074 drivers/md/dm-table.c if (ti->type->resume) ti 2075 drivers/md/dm-table.c ti->type->resume(ti); ti 132 drivers/md/dm-target.c static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, ti 144 drivers/md/dm-target.c static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, ti 231 drivers/md/dm-thin.c struct dm_target *ti; /* Only set if a pool target is bound */ ti 313 drivers/md/dm-thin.c dm_table_event(pool->ti->table); ti 323 drivers/md/dm-thin.c struct dm_target *ti; ti 1523 drivers/md/dm-thin.c dm_table_event(pool->ti->table); ti 2524 drivers/md/dm-thin.c struct pool_c *pt = pool->ti->private; ti 2538 drivers/md/dm-thin.c struct pool_c *pt = pool->ti->private; ti 2717 drivers/md/dm-thin.c static int thin_bio_map(struct dm_target *ti, struct bio *bio) ti 2720 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 2878 drivers/md/dm-thin.c static int bind_control_target(struct pool *pool, struct dm_target *ti) ti 2880 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 2895 drivers/md/dm-thin.c pool->ti = ti; ti 2904 drivers/md/dm-thin.c static void unbind_control_target(struct pool *pool, struct dm_target *ti) ti 2906 drivers/md/dm-thin.c if (pool->ti == ti) ti 2907 drivers/md/dm-thin.c pool->ti = NULL; ti 3137 drivers/md/dm-thin.c static void pool_dtr(struct dm_target *ti) ti 3139 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3143 drivers/md/dm-thin.c unbind_control_target(pt->pool, ti); ti 3145 drivers/md/dm-thin.c dm_put_device(ti, pt->metadata_dev); ti 3146 drivers/md/dm-thin.c dm_put_device(ti, pt->data_dev); ti 3154 drivers/md/dm-thin.c struct dm_target *ti) ti 3170 drivers/md/dm-thin.c r = dm_read_arg_group(_args, as, &argc, &ti->error); ti 3194 drivers/md/dm-thin.c ti->error = "Unrecognised pool feature requested"; ti 3210 drivers/md/dm-thin.c dm_table_event(pool->ti->table); ti 3300 drivers/md/dm-thin.c static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) ti 3319 drivers/md/dm-thin.c ti->error = "Invalid argument count"; ti 3329 drivers/md/dm-thin.c ti->error = "Error setting metadata or data device"; ti 3340 drivers/md/dm-thin.c r = parse_pool_features(&as, &pf, ti); ti 3345 drivers/md/dm-thin.c r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); ti 3347 drivers/md/dm-thin.c ti->error = "Error opening metadata block device"; ti 3352 
drivers/md/dm-thin.c r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); ti 3354 drivers/md/dm-thin.c ti->error = "Error getting data device"; ti 3362 drivers/md/dm-thin.c ti->error = "Invalid block size"; ti 3368 drivers/md/dm-thin.c ti->error = "Invalid low water mark"; ti 3379 drivers/md/dm-thin.c pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, ti 3380 drivers/md/dm-thin.c block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); ti 3393 drivers/md/dm-thin.c ti->error = "Discard support cannot be disabled once enabled"; ti 3399 drivers/md/dm-thin.c pt->ti = ti; ti 3405 drivers/md/dm-thin.c ti->num_flush_bios = 1; ti 3413 drivers/md/dm-thin.c ti->num_discard_bios = 1; ti 3420 drivers/md/dm-thin.c ti->discards_supported = true; ti 3422 drivers/md/dm-thin.c ti->private = pt; ti 3432 drivers/md/dm-thin.c dm_table_add_target_callbacks(ti->table, &pt->callbacks); ti 3443 drivers/md/dm-thin.c dm_put_device(ti, data_dev); ti 3445 drivers/md/dm-thin.c dm_put_device(ti, metadata_dev); ti 3452 drivers/md/dm-thin.c static int pool_map(struct dm_target *ti, struct bio *bio) ti 3455 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3470 drivers/md/dm-thin.c static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) ti 3473 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3475 drivers/md/dm-thin.c sector_t data_size = ti->len; ti 3518 drivers/md/dm-thin.c static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) ti 3521 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3580 drivers/md/dm-thin.c static int pool_preresume(struct dm_target *ti) ti 3584 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3590 drivers/md/dm-thin.c r = bind_control_target(pool, ti); ti 3597 drivers/md/dm-thin.c r = maybe_resize_data_dev(ti, &need_commit1); ti 3601 drivers/md/dm-thin.c r = maybe_resize_metadata_dev(ti, &need_commit2); ti 3635 drivers/md/dm-thin.c static void pool_resume(struct dm_target *ti) ti 3637 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3656 drivers/md/dm-thin.c static void pool_presuspend(struct dm_target *ti) ti 3658 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3669 drivers/md/dm-thin.c static void pool_presuspend_undo(struct dm_target *ti) ti 3671 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3682 drivers/md/dm-thin.c static void pool_postsuspend(struct dm_target *ti) ti 3684 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3857 drivers/md/dm-thin.c static int pool_message(struct dm_target *ti, unsigned argc, char **argv, ti 3861 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3927 drivers/md/dm-thin.c static void pool_status(struct dm_target *ti, status_type_t type, ti 3941 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 3952 drivers/md/dm-thin.c if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) ti 4053 drivers/md/dm-thin.c static int pool_iterate_devices(struct dm_target *ti, ti 4056 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 4058 drivers/md/dm-thin.c return fn(ti, pt->data_dev, 0, ti->len, data); ti 4061 drivers/md/dm-thin.c static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 4063 drivers/md/dm-thin.c struct pool_c *pt = ti->private; ti 4155 drivers/md/dm-thin.c static void thin_dtr(struct dm_target *ti) ti 4157 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 4172 drivers/md/dm-thin.c dm_put_device(ti, tc->pool_dev); ti 4174 drivers/md/dm-thin.c 
dm_put_device(ti, tc->origin_dev); ti 4192 drivers/md/dm-thin.c static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) ti 4203 drivers/md/dm-thin.c ti->error = "Invalid argument count"; ti 4208 drivers/md/dm-thin.c tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL); ti 4210 drivers/md/dm-thin.c ti->error = "Out of memory"; ti 4214 drivers/md/dm-thin.c tc->thin_md = dm_table_get_md(ti->table); ti 4223 drivers/md/dm-thin.c ti->error = "Error setting origin device"; ti 4228 drivers/md/dm-thin.c r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); ti 4230 drivers/md/dm-thin.c ti->error = "Error opening origin device"; ti 4236 drivers/md/dm-thin.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev); ti 4238 drivers/md/dm-thin.c ti->error = "Error opening pool device"; ti 4244 drivers/md/dm-thin.c ti->error = "Invalid device id"; ti 4251 drivers/md/dm-thin.c ti->error = "Couldn't get pool mapped device"; ti 4258 drivers/md/dm-thin.c ti->error = "Couldn't find pool object"; ti 4265 drivers/md/dm-thin.c ti->error = "Couldn't open thin device, Pool is in fail mode"; ti 4272 drivers/md/dm-thin.c ti->error = "Couldn't open thin internal device"; ti 4276 drivers/md/dm-thin.c r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); ti 4280 drivers/md/dm-thin.c ti->num_flush_bios = 1; ti 4281 drivers/md/dm-thin.c ti->flush_supported = true; ti 4282 drivers/md/dm-thin.c ti->per_io_data_size = sizeof(struct dm_thin_endio_hook); ti 4286 drivers/md/dm-thin.c ti->discards_supported = true; ti 4287 drivers/md/dm-thin.c ti->num_discard_bios = 1; ti 4296 drivers/md/dm-thin.c ti->error = "Unable to activate thin device while pool is suspended"; ti 4323 drivers/md/dm-thin.c dm_put_device(ti, tc->pool_dev); ti 4326 drivers/md/dm-thin.c dm_put_device(ti, tc->origin_dev); ti 4335 drivers/md/dm-thin.c static int thin_map(struct dm_target *ti, struct bio *bio) ti 4337 drivers/md/dm-thin.c bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); ti 4339 drivers/md/dm-thin.c return thin_bio_map(ti, bio); ti 4342 drivers/md/dm-thin.c static int thin_endio(struct dm_target *ti, struct bio *bio, ti 4381 drivers/md/dm-thin.c static void thin_presuspend(struct dm_target *ti) ti 4383 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 4385 drivers/md/dm-thin.c if (dm_noflush_suspending(ti)) ti 4389 drivers/md/dm-thin.c static void thin_postsuspend(struct dm_target *ti) ti 4391 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 4400 drivers/md/dm-thin.c static int thin_preresume(struct dm_target *ti) ti 4402 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 4413 drivers/md/dm-thin.c static void thin_status(struct dm_target *ti, status_type_t type, ti 4420 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 4468 drivers/md/dm-thin.c static int thin_iterate_devices(struct dm_target *ti, ti 4472 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 4479 drivers/md/dm-thin.c if (!pool->ti) ti 4482 drivers/md/dm-thin.c blocks = pool->ti->len; ti 4485 drivers/md/dm-thin.c return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); ti 4490 drivers/md/dm-thin.c static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 4492 drivers/md/dm-thin.c struct thin_c *tc = ti->private; ti 59 drivers/md/dm-uevent.c struct dm_target *ti, ti 75 drivers/md/dm-uevent.c if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) { ti 170 drivers/md/dm-uevent.c void dm_path_uevent(enum dm_uevent_type event_type, struct 
dm_target *ti, ti 173 drivers/md/dm-uevent.c struct mapped_device *md = dm_table_get_md(ti->table); ti 181 drivers/md/dm-uevent.c event = dm_build_path_uevent(md, ti, ti 22 drivers/md/dm-uevent.h struct dm_target *ti, const char *path, ti 39 drivers/md/dm-uevent.h struct dm_target *ti, const char *path, ti 27 drivers/md/dm-unstripe.c static void cleanup_unstripe(struct unstripe_c *uc, struct dm_target *ti) ti 30 drivers/md/dm-unstripe.c dm_put_device(ti, uc->dev); ti 38 drivers/md/dm-unstripe.c static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 46 drivers/md/dm-unstripe.c ti->error = "Invalid number of arguments"; ti 52 drivers/md/dm-unstripe.c ti->error = "Memory allocation for unstriped context failed"; ti 57 drivers/md/dm-unstripe.c ti->error = "Invalid stripe count"; ti 62 drivers/md/dm-unstripe.c ti->error = "Invalid chunk_size"; ti 67 drivers/md/dm-unstripe.c ti->error = "Invalid stripe number"; ti 72 drivers/md/dm-unstripe.c ti->error = "Please provide stripe between [0, # of stripes]"; ti 76 drivers/md/dm-unstripe.c if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &uc->dev)) { ti 77 drivers/md/dm-unstripe.c ti->error = "Couldn't get striped device"; ti 82 drivers/md/dm-unstripe.c ti->error = "Invalid striped device offset"; ti 91 drivers/md/dm-unstripe.c tmp_len = ti->len; ti 93 drivers/md/dm-unstripe.c ti->error = "Target length not divisible by chunk size"; ti 97 drivers/md/dm-unstripe.c if (dm_set_target_max_io_len(ti, uc->chunk_size)) { ti 98 drivers/md/dm-unstripe.c ti->error = "Failed to set max io len"; ti 102 drivers/md/dm-unstripe.c ti->private = uc; ti 105 drivers/md/dm-unstripe.c cleanup_unstripe(uc, ti); ti 109 drivers/md/dm-unstripe.c static void unstripe_dtr(struct dm_target *ti) ti 111 drivers/md/dm-unstripe.c struct unstripe_c *uc = ti->private; ti 113 drivers/md/dm-unstripe.c cleanup_unstripe(uc, ti); ti 116 drivers/md/dm-unstripe.c static sector_t map_to_core(struct dm_target *ti, struct bio *bio) ti 118 drivers/md/dm-unstripe.c struct unstripe_c *uc = ti->private; ti 134 drivers/md/dm-unstripe.c static int unstripe_map(struct dm_target *ti, struct bio *bio) ti 136 drivers/md/dm-unstripe.c struct unstripe_c *uc = ti->private; ti 139 drivers/md/dm-unstripe.c bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start; ti 144 drivers/md/dm-unstripe.c static void unstripe_status(struct dm_target *ti, status_type_t type, ti 147 drivers/md/dm-unstripe.c struct unstripe_c *uc = ti->private; ti 162 drivers/md/dm-unstripe.c static int unstripe_iterate_devices(struct dm_target *ti, ti 165 drivers/md/dm-unstripe.c struct unstripe_c *uc = ti->private; ti 167 drivers/md/dm-unstripe.c return fn(ti, uc->dev, uc->physical_start, ti->len, data); ti 170 drivers/md/dm-unstripe.c static void unstripe_io_hints(struct dm_target *ti, ti 173 drivers/md/dm-unstripe.c struct unstripe_c *uc = ti->private; ti 563 drivers/md/dm-verity-fec.c dm_put_device(v->ti, f->dev); ti 596 drivers/md/dm-verity-fec.c struct dm_target *ti = v->ti; ti 603 drivers/md/dm-verity-fec.c ti->error = "FEC feature arguments require a value"; ti 611 drivers/md/dm-verity-fec.c r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev); ti 613 drivers/md/dm-verity-fec.c ti->error = "FEC device lookup failed"; ti 621 drivers/md/dm-verity-fec.c ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS; ti 630 drivers/md/dm-verity-fec.c ti->error = "Invalid " DM_VERITY_OPT_FEC_START; ti 639 drivers/md/dm-verity-fec.c ti->error = "Invalid " DM_VERITY_OPT_FEC_ROOTS; ti 645 
drivers/md/dm-verity-fec.c ti->error = "Unrecognized verity FEC feature request"; ti 661 drivers/md/dm-verity-fec.c v->ti->error = "Cannot allocate FEC structure"; ti 676 drivers/md/dm-verity-fec.c struct dm_target *ti = v->ti; ti 707 drivers/md/dm-verity-fec.c ti->error = "Block sizes must match to use FEC"; ti 712 drivers/md/dm-verity-fec.c ti->error = "Missing " DM_VERITY_OPT_FEC_ROOTS; ti 718 drivers/md/dm-verity-fec.c ti->error = "Missing " DM_VERITY_OPT_FEC_BLOCKS; ti 731 drivers/md/dm-verity-fec.c ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS; ti 741 drivers/md/dm-verity-fec.c ti->error = "Hash device is too small for " ti 750 drivers/md/dm-verity-fec.c ti->error = "Cannot initialize FEC bufio client"; ti 756 drivers/md/dm-verity-fec.c ti->error = "FEC device is too small"; ti 764 drivers/md/dm-verity-fec.c ti->error = "Cannot initialize FEC data bufio client"; ti 769 drivers/md/dm-verity-fec.c ti->error = "Data device is too small"; ti 777 drivers/md/dm-verity-fec.c ti->error = "Cannot allocate RS pool"; ti 785 drivers/md/dm-verity-fec.c ti->error = "Cannot create FEC buffer cache"; ti 794 drivers/md/dm-verity-fec.c ti->error = "Cannot allocate FEC buffer prealloc pool"; ti 800 drivers/md/dm-verity-fec.c ti->error = "Cannot allocate FEC buffer extra pool"; ti 808 drivers/md/dm-verity-fec.c ti->error = "Cannot allocate FEC output pool"; ti 813 drivers/md/dm-verity-fec.c ti->per_io_data_size += sizeof(struct dm_verity_fec_io); ti 81 drivers/md/dm-verity-target.c return v->data_start + dm_target_offset(v->ti, bi_sector); ti 218 drivers/md/dm-verity-target.c struct mapped_device *md = dm_table_get_md(v->ti->table); ti 371 drivers/md/dm-verity-target.c struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); ti 418 drivers/md/dm-verity-target.c struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); ti 459 drivers/md/dm-verity-target.c struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); ti 542 drivers/md/dm-verity-target.c struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); ti 633 drivers/md/dm-verity-target.c static int verity_map(struct dm_target *ti, struct bio *bio) ti 635 drivers/md/dm-verity-target.c struct dm_verity *v = ti->private; ti 656 drivers/md/dm-verity-target.c io = dm_per_bio_data(bio, ti->per_io_data_size); ti 678 drivers/md/dm-verity-target.c static void verity_status(struct dm_target *ti, status_type_t type, ti 681 drivers/md/dm-verity-target.c struct dm_verity *v = ti->private; ti 747 drivers/md/dm-verity-target.c static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) ti 749 drivers/md/dm-verity-target.c struct dm_verity *v = ti->private; ti 754 drivers/md/dm-verity-target.c ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT) ti 759 drivers/md/dm-verity-target.c static int verity_iterate_devices(struct dm_target *ti, ti 762 drivers/md/dm-verity-target.c struct dm_verity *v = ti->private; ti 764 drivers/md/dm-verity-target.c return fn(ti, v->data_dev, v->data_start, ti->len, data); ti 767 drivers/md/dm-verity-target.c static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 769 drivers/md/dm-verity-target.c struct dm_verity *v = ti->private; ti 780 drivers/md/dm-verity-target.c static void verity_dtr(struct dm_target *ti) ti 782 drivers/md/dm-verity-target.c struct dm_verity *v = ti->private; ti 801 drivers/md/dm-verity-target.c dm_put_device(ti, v->hash_dev); ti 804 drivers/md/dm-verity-target.c dm_put_device(ti, 
v->data_dev); ti 815 drivers/md/dm-verity-target.c struct dm_target *ti = v->ti; ti 819 drivers/md/dm-verity-target.c ti->error = "device too large to use check_at_most_once"; ti 827 drivers/md/dm-verity-target.c ti->error = "failed to allocate bitset for check_at_most_once"; ti 870 drivers/md/dm-verity-target.c struct dm_target *ti = v->ti; ti 877 drivers/md/dm-verity-target.c r = dm_read_arg_group(_args, as, &argc, &ti->error); ti 899 drivers/md/dm-verity-target.c ti->error = "Cannot allocate zero digest"; ti 925 drivers/md/dm-verity-target.c ti->error = "Unrecognized verity feature request"; ti 946 drivers/md/dm-verity-target.c static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti 961 drivers/md/dm-verity-target.c ti->error = "Cannot allocate verity structure"; ti 964 drivers/md/dm-verity-target.c ti->private = v; ti 965 drivers/md/dm-verity-target.c v->ti = ti; ti 971 drivers/md/dm-verity-target.c if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) { ti 972 drivers/md/dm-verity-target.c ti->error = "Device must be readonly"; ti 978 drivers/md/dm-verity-target.c ti->error = "Not enough arguments"; ti 985 drivers/md/dm-verity-target.c ti->error = "Invalid version"; ti 991 drivers/md/dm-verity-target.c r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev); ti 993 drivers/md/dm-verity-target.c ti->error = "Data device lookup failed"; ti 997 drivers/md/dm-verity-target.c r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev); ti 999 drivers/md/dm-verity-target.c ti->error = "Hash device lookup failed"; ti 1007 drivers/md/dm-verity-target.c ti->error = "Invalid data device block size"; ti 1017 drivers/md/dm-verity-target.c ti->error = "Invalid hash device block size"; ti 1026 drivers/md/dm-verity-target.c ti->error = "Invalid data blocks"; ti 1032 drivers/md/dm-verity-target.c if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) { ti 1033 drivers/md/dm-verity-target.c ti->error = "Data device is too small"; ti 1041 drivers/md/dm-verity-target.c ti->error = "Invalid hash start"; ti 1049 drivers/md/dm-verity-target.c ti->error = "Cannot allocate algorithm name"; ti 1056 drivers/md/dm-verity-target.c ti->error = "Cannot initialize hash function"; ti 1072 drivers/md/dm-verity-target.c ti->error = "Digest size too big"; ti 1081 drivers/md/dm-verity-target.c ti->error = "Cannot allocate root digest"; ti 1087 drivers/md/dm-verity-target.c ti->error = "Invalid root digest"; ti 1097 drivers/md/dm-verity-target.c ti->error = "Cannot allocate salt"; ti 1103 drivers/md/dm-verity-target.c ti->error = "Invalid salt"; ti 1128 drivers/md/dm-verity-target.c ti->error = "Root hash verification failed"; ti 1142 drivers/md/dm-verity-target.c ti->error = "Too many tree levels"; ti 1154 drivers/md/dm-verity-target.c ti->error = "Hash device offset overflow"; ti 1166 drivers/md/dm-verity-target.c ti->error = "Cannot initialize dm-bufio"; ti 1173 drivers/md/dm-verity-target.c ti->error = "Hash device is too small"; ti 1181 drivers/md/dm-verity-target.c ti->error = "Cannot allocate workqueue"; ti 1186 drivers/md/dm-verity-target.c ti->per_io_data_size = sizeof(struct dm_verity_io) + ti 1193 drivers/md/dm-verity-target.c ti->per_io_data_size = roundup(ti->per_io_data_size, ti 1203 drivers/md/dm-verity-target.c verity_dtr(ti); ti 73 drivers/md/dm-verity-verify-sig.c struct dm_target *ti = v->ti; ti 78 drivers/md/dm-verity-verify-sig.c ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified"); ti 87 drivers/md/dm-verity-verify-sig.c ti->error = 
DM_VERITY_VERIFY_ERR("Invalid key specified"); ti 36 drivers/md/dm-verity.h struct dm_target *ti; ti 136 drivers/md/dm-writecache.c struct dm_target *ti; ti 830 drivers/md/dm-writecache.c static void writecache_suspend(struct dm_target *ti) ti 832 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 899 drivers/md/dm-writecache.c static void writecache_resume(struct dm_target *ti) ti 901 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 1011 drivers/md/dm-writecache.c if (dm_suspended(wc->ti)) { ti 1050 drivers/md/dm-writecache.c static int writecache_message(struct dm_target *ti, unsigned argc, char **argv, ti 1054 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 1149 drivers/md/dm-writecache.c static int writecache_map(struct dm_target *ti, struct bio *bio) ti 1152 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 1172 drivers/md/dm-writecache.c bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); ti 1294 drivers/md/dm-writecache.c static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status) ti 1296 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 1307 drivers/md/dm-writecache.c static int writecache_iterate_devices(struct dm_target *ti, ti 1310 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 1312 drivers/md/dm-writecache.c return fn(ti, wc->dev, 0, ti->len, data); ti 1315 drivers/md/dm-writecache.c static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 1317 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 1635 drivers/md/dm-writecache.c likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) { ti 1817 drivers/md/dm-writecache.c static void writecache_dtr(struct dm_target *ti) ti 1819 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 1838 drivers/md/dm-writecache.c dm_put_device(ti, wc->dev); ti 1841 drivers/md/dm-writecache.c dm_put_device(ti, wc->ssd_dev); ti 1865 drivers/md/dm-writecache.c static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) ti 1888 drivers/md/dm-writecache.c ti->error = "Cannot allocate writecache structure"; ti 1892 drivers/md/dm-writecache.c ti->private = wc; ti 1893 drivers/md/dm-writecache.c wc->ti = ti; ti 1908 drivers/md/dm-writecache.c ti->error = "Unable to allocate dm-io client"; ti 1916 drivers/md/dm-writecache.c ti->error = "Could not allocate writeback workqueue"; ti 1928 drivers/md/dm-writecache.c ti->error = "Couldn't spawn endio thread"; ti 1953 drivers/md/dm-writecache.c ti->error = "Persistent memory or DAX not supported on this system"; ti 1965 drivers/md/dm-writecache.c ti->error = "Could not allocate bio set"; ti 1971 drivers/md/dm-writecache.c ti->error = "Could not allocate mempool"; ti 1982 drivers/md/dm-writecache.c r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev); ti 1984 drivers/md/dm-writecache.c ti->error = "Origin data device lookup failed"; ti 1995 drivers/md/dm-writecache.c r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev); ti 1997 drivers/md/dm-writecache.c ti->error = "Cache data device lookup failed"; ti 2012 drivers/md/dm-writecache.c ti->error = "Invalid block size"; ti 2018 drivers/md/dm-writecache.c ti->error = "Block size is smaller than device logical block size"; ti 2030 drivers/md/dm-writecache.c r = dm_read_arg_group(_args, &as, &opt_params, &ti->error); ti 2091 drivers/md/dm-writecache.c ti->error = "Invalid optional 
argument"; ti 2098 drivers/md/dm-writecache.c ti->error = "High watermark must be greater than or equal to low watermark"; ti 2105 drivers/md/dm-writecache.c ti->error = "Unable to map persistent memory for cache"; ti 2119 drivers/md/dm-writecache.c ti->error = "Couldn't spawn flush thread"; ti 2127 drivers/md/dm-writecache.c ti->error = "Invalid device size"; ti 2136 drivers/md/dm-writecache.c ti->error = "Invalid device size"; ti 2143 drivers/md/dm-writecache.c ti->error = "Unable to allocate memory for metadata"; ti 2150 drivers/md/dm-writecache.c ti->error = "Unable to allocate dm-kcopyd client"; ti 2161 drivers/md/dm-writecache.c ti->error = "Unable to allocate dirty bitmap"; ti 2167 drivers/md/dm-writecache.c ti->error = "Unable to read first block of metadata"; ti 2174 drivers/md/dm-writecache.c ti->error = "Hardware memory error when reading superblock"; ti 2180 drivers/md/dm-writecache.c ti->error = "Unable to initialize device"; ti 2185 drivers/md/dm-writecache.c ti->error = "Hardware memory error when reading superblock"; ti 2191 drivers/md/dm-writecache.c ti->error = "Invalid magic in the superblock"; ti 2197 drivers/md/dm-writecache.c ti->error = "Invalid version in the superblock"; ti 2203 drivers/md/dm-writecache.c ti->error = "Block size does not match superblock"; ti 2213 drivers/md/dm-writecache.c ti->error = "Overflow in size calculation"; ti 2226 drivers/md/dm-writecache.c ti->error = "Memory area is too small"; ti 2245 drivers/md/dm-writecache.c ti->error = "Cannot allocate memory"; ti 2249 drivers/md/dm-writecache.c ti->num_flush_bios = 1; ti 2250 drivers/md/dm-writecache.c ti->flush_supported = true; ti 2251 drivers/md/dm-writecache.c ti->num_discard_bios = 1; ti 2260 drivers/md/dm-writecache.c ti->error = "Bad arguments"; ti 2262 drivers/md/dm-writecache.c writecache_dtr(ti); ti 2266 drivers/md/dm-writecache.c static void writecache_status(struct dm_target *ti, status_type_t type, ti 2269 drivers/md/dm-writecache.c struct dm_writecache *wc = ti->private; ti 18 drivers/md/dm-zero.c static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti 21 drivers/md/dm-zero.c ti->error = "No arguments required"; ti 28 drivers/md/dm-zero.c ti->num_discard_bios = 1; ti 36 drivers/md/dm-zero.c static int zero_map(struct dm_target *ti, struct bio *bio) ti 618 drivers/md/dm-zoned-target.c static int dmz_map(struct dm_target *ti, struct bio *bio) ti 620 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 682 drivers/md/dm-zoned-target.c static int dmz_get_zoned_device(struct dm_target *ti, char *path) ti 684 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 691 drivers/md/dm-zoned-target.c ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev); ti 693 drivers/md/dm-zoned-target.c ti->error = "Get target device failed"; ti 708 drivers/md/dm-zoned-target.c ti->error = "Not a zoned block device"; ti 717 drivers/md/dm-zoned-target.c if (ti->begin || ti 718 drivers/md/dm-zoned-target.c ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) { ti 719 drivers/md/dm-zoned-target.c ti->error = "Partial mapping not supported"; ti 736 drivers/md/dm-zoned-target.c dm_put_device(ti, dmz->ddev); ti 745 drivers/md/dm-zoned-target.c static void dmz_put_zoned_device(struct dm_target *ti) ti 747 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 749 drivers/md/dm-zoned-target.c dm_put_device(ti, dmz->ddev); ti 757 drivers/md/dm-zoned-target.c static int dmz_ctr(struct dm_target *ti, unsigned int argc, char 
**argv) ti 765 drivers/md/dm-zoned-target.c ti->error = "Invalid argument count"; ti 772 drivers/md/dm-zoned-target.c ti->error = "Unable to allocate the zoned target descriptor"; ti 775 drivers/md/dm-zoned-target.c ti->private = dmz; ti 778 drivers/md/dm-zoned-target.c ret = dmz_get_zoned_device(ti, argv[0]); ti 788 drivers/md/dm-zoned-target.c ti->error = "Metadata initialization failed"; ti 793 drivers/md/dm-zoned-target.c ti->max_io_len = dev->zone_nr_sectors << 9; ti 794 drivers/md/dm-zoned-target.c ti->num_flush_bios = 1; ti 795 drivers/md/dm-zoned-target.c ti->num_discard_bios = 1; ti 796 drivers/md/dm-zoned-target.c ti->num_write_zeroes_bios = 1; ti 797 drivers/md/dm-zoned-target.c ti->per_io_data_size = sizeof(struct dmz_bioctx); ti 798 drivers/md/dm-zoned-target.c ti->flush_supported = true; ti 799 drivers/md/dm-zoned-target.c ti->discards_supported = true; ti 802 drivers/md/dm-zoned-target.c ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift; ti 807 drivers/md/dm-zoned-target.c ti->error = "Create BIO set failed"; ti 817 drivers/md/dm-zoned-target.c ti->error = "Create chunk workqueue failed"; ti 829 drivers/md/dm-zoned-target.c ti->error = "Create flush workqueue failed"; ti 838 drivers/md/dm-zoned-target.c ti->error = "Zone reclaim initialization failed"; ti 843 drivers/md/dm-zoned-target.c (unsigned long long)ti->len, ti 844 drivers/md/dm-zoned-target.c (unsigned long long)dmz_sect2blk(ti->len)); ti 857 drivers/md/dm-zoned-target.c dmz_put_zoned_device(ti); ti 867 drivers/md/dm-zoned-target.c static void dmz_dtr(struct dm_target *ti) ti 869 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 885 drivers/md/dm-zoned-target.c dmz_put_zoned_device(ti); ti 895 drivers/md/dm-zoned-target.c static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits) ti 897 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 923 drivers/md/dm-zoned-target.c static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) ti 925 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 938 drivers/md/dm-zoned-target.c static void dmz_suspend(struct dm_target *ti) ti 940 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 950 drivers/md/dm-zoned-target.c static void dmz_resume(struct dm_target *ti) ti 952 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 958 drivers/md/dm-zoned-target.c static int dmz_iterate_devices(struct dm_target *ti, ti 961 drivers/md/dm-zoned-target.c struct dmz_target *dmz = ti->private; ti 965 drivers/md/dm-zoned-target.c return fn(ti, dmz->ddev, 0, capacity, data); ti 80 drivers/md/dm.c struct dm_target *ti; ti 597 drivers/md/dm.c static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, ti 616 drivers/md/dm.c tio->ti = ti; ti 979 drivers/md/dm.c dm_endio_fn endio = tio->ti->type->end_io; ti 994 drivers/md/dm.c int r = endio(tio->ti, bio, &error); ti 1018 drivers/md/dm.c static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) ti 1020 drivers/md/dm.c sector_t target_offset = dm_target_offset(ti, sector); ti 1022 drivers/md/dm.c return ti->len - target_offset; ti 1025 drivers/md/dm.c static sector_t max_io_len(sector_t sector, struct dm_target *ti) ti 1027 drivers/md/dm.c sector_t len = max_io_len_target_boundary(sector, ti); ti 1033 drivers/md/dm.c if (ti->max_io_len) { ti 1034 drivers/md/dm.c offset = dm_target_offset(ti, sector); ti 1035 drivers/md/dm.c if (unlikely(ti->max_io_len & 
(ti->max_io_len - 1))) ti 1036 drivers/md/dm.c max_len = sector_div(offset, ti->max_io_len); ti 1038 drivers/md/dm.c max_len = offset & (ti->max_io_len - 1); ti 1039 drivers/md/dm.c max_len = ti->max_io_len - max_len; ti 1048 drivers/md/dm.c int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) ti 1053 drivers/md/dm.c ti->error = "Maximum size of target IO is too large"; ti 1057 drivers/md/dm.c ti->max_io_len = (uint32_t) len; ti 1068 drivers/md/dm.c struct dm_target *ti; ti 1074 drivers/md/dm.c ti = dm_table_find_target(map, sector); ti 1075 drivers/md/dm.c if (!ti) ti 1078 drivers/md/dm.c return ti; ti 1086 drivers/md/dm.c struct dm_target *ti; ti 1090 drivers/md/dm.c ti = dm_dax_get_live_target(md, sector, &srcu_idx); ti 1092 drivers/md/dm.c if (!ti) ti 1094 drivers/md/dm.c if (!ti->type->direct_access) ti 1096 drivers/md/dm.c len = max_io_len(sector, ti) / PAGE_SECTORS; ti 1100 drivers/md/dm.c ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); ti 1132 drivers/md/dm.c struct dm_target *ti; ti 1136 drivers/md/dm.c ti = dm_dax_get_live_target(md, sector, &srcu_idx); ti 1138 drivers/md/dm.c if (!ti) ti 1140 drivers/md/dm.c if (!ti->type->dax_copy_from_iter) { ti 1144 drivers/md/dm.c ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); ti 1156 drivers/md/dm.c struct dm_target *ti; ti 1160 drivers/md/dm.c ti = dm_dax_get_live_target(md, sector, &srcu_idx); ti 1162 drivers/md/dm.c if (!ti) ti 1164 drivers/md/dm.c if (!ti->type->dax_copy_to_iter) { ti 1168 drivers/md/dm.c ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i); ti 1223 drivers/md/dm.c void dm_remap_zone_report(struct dm_target *ti, sector_t start, ti 1239 drivers/md/dm.c if (zone->start >= start + ti->len) { ti 1244 drivers/md/dm.c zone->start = zone->start + ti->begin - start; ti 1253 drivers/md/dm.c zone->wp = zone->wp + ti->begin - start; ti 1270 drivers/md/dm.c struct dm_target *ti = tio->ti; ti 1283 drivers/md/dm.c r = ti->type->map(ti, clone); ti 1331 drivers/md/dm.c if (unlikely(!dm_target_has_integrity(tio->ti->type) && ti 1332 drivers/md/dm.c !dm_target_passes_integrity(tio->ti->type))) { ti 1335 drivers/md/dm.c tio->ti->type->name); ti 1354 drivers/md/dm.c struct dm_target *ti, unsigned num_bios) ti 1363 drivers/md/dm.c tio = alloc_tio(ci, ti, 0, GFP_NOIO); ti 1375 drivers/md/dm.c tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); ti 1407 drivers/md/dm.c static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, ti 1414 drivers/md/dm.c alloc_multiple_bios(&blist, ci, ti, num_bios); ti 1425 drivers/md/dm.c struct dm_target *ti; ti 1437 drivers/md/dm.c while ((ti = dm_table_get_target(ci->map, target_nr++))) ti 1438 drivers/md/dm.c __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); ti 1445 drivers/md/dm.c static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, ti 1452 drivers/md/dm.c tio = alloc_tio(ci, ti, 0, GFP_NOIO); ti 1464 drivers/md/dm.c typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); ti 1466 drivers/md/dm.c static unsigned get_num_discard_bios(struct dm_target *ti) ti 1468 drivers/md/dm.c return ti->num_discard_bios; ti 1471 drivers/md/dm.c static unsigned get_num_secure_erase_bios(struct dm_target *ti) ti 1473 drivers/md/dm.c return ti->num_secure_erase_bios; ti 1476 drivers/md/dm.c static unsigned get_num_write_same_bios(struct dm_target *ti) ti 1478 drivers/md/dm.c return ti->num_write_same_bios; ti 1481 drivers/md/dm.c static unsigned get_num_write_zeroes_bios(struct dm_target *ti) ti 1483 drivers/md/dm.c return ti->num_write_zeroes_bios; ti 1486 drivers/md/dm.c static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, ti 1500 drivers/md/dm.c len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); ti 1502 drivers/md/dm.c __send_duplicate_bios(ci, ti, num_bios, &len); ti 1510 drivers/md/dm.c static int __send_discard(struct clone_info *ci, struct dm_target *ti) ti 1512 drivers/md/dm.c return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); ti 1515 drivers/md/dm.c static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) ti 1517 drivers/md/dm.c return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); ti 1520 drivers/md/dm.c static int __send_write_same(struct clone_info *ci, struct dm_target *ti) ti 1522 drivers/md/dm.c return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); ti 1525 drivers/md/dm.c static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) ti 1527 drivers/md/dm.c return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); ti 1546 drivers/md/dm.c static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, ti 1552 drivers/md/dm.c *result = __send_discard(ci, ti); ti 1554 drivers/md/dm.c *result = __send_secure_erase(ci, ti); ti 1556 drivers/md/dm.c *result = __send_write_same(ci, ti); ti 1558 drivers/md/dm.c *result = __send_write_zeroes(ci, ti); ti 1570 drivers/md/dm.c struct dm_target *ti; ti 1574 drivers/md/dm.c ti = dm_table_find_target(ci->map, ci->sector); ti 1575 drivers/md/dm.c if (!ti) ti 1578 drivers/md/dm.c if (__process_abnormal_io(ci, ti, &r)) ti 1581 drivers/md/dm.c len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); ti 1583 drivers/md/dm.c r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); ti 1682 drivers/md/dm.c struct bio *bio, struct dm_target *ti) ti 1709 drivers/md/dm.c if (__process_abnormal_io(&ci, ti, &error)) ti 1712 drivers/md/dm.c tio = alloc_tio(&ci, ti, 0, GFP_NOIO); ti 1721 drivers/md/dm.c static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) ti 1726 drivers/md/dm.c len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); ti 1742 drivers/md/dm.c struct dm_target *ti = md->immutable_target; ti 1749 drivers/md/dm.c if (!ti) { ti 
1750 drivers/md/dm.c ti = dm_table_find_target(map, bio->bi_iter.bi_sector); ti 1751 drivers/md/dm.c if (unlikely(!ti)) { ti 1766 drivers/md/dm.c dm_queue_split(md, ti, &bio); ti 1770 drivers/md/dm.c return __process_bio(md, map, bio, ti); ti 2975 drivers/md/dm.c int dm_suspended(struct dm_target *ti) ti 2977 drivers/md/dm.c return dm_suspended_md(dm_table_get_md(ti->table)); ti 2981 drivers/md/dm.c int dm_noflush_suspending(struct dm_target *ti) ti 2983 drivers/md/dm.c return __noflush_suspending(dm_table_get_md(ti->table)); ti 3059 drivers/md/dm.c struct dm_target *ti; ti 3069 drivers/md/dm.c ti = dm_table_get_target(table, 0); ti 3072 drivers/md/dm.c if (!ti->type->iterate_devices) ti 3075 drivers/md/dm.c ret = ti->type->iterate_devices(ti, fn, data); ti 3084 drivers/md/dm.c static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, ti 77 drivers/md/dm.h int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, ti 2383 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct udp_tunnel_info *ti) ti 2391 drivers/net/ethernet/amd/xgbe/xgbe-drv.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 2398 drivers/net/ethernet/amd/xgbe/xgbe-drv.c ti->sa_family, be16_to_cpu(ti->port)); ti 2414 drivers/net/ethernet/amd/xgbe/xgbe-drv.c vdata->sa_family = ti->sa_family; ti 2415 drivers/net/ethernet/amd/xgbe/xgbe-drv.c vdata->port = ti->port; ti 2428 drivers/net/ethernet/amd/xgbe/xgbe-drv.c struct udp_tunnel_info *ti) ti 2436 drivers/net/ethernet/amd/xgbe/xgbe-drv.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 2441 drivers/net/ethernet/amd/xgbe/xgbe-drv.c ti->sa_family, be16_to_cpu(ti->port)); ti 2445 drivers/net/ethernet/amd/xgbe/xgbe-drv.c if (vdata->sa_family != ti->sa_family) ti 2448 drivers/net/ethernet/amd/xgbe/xgbe-drv.c if (vdata->port != ti->port) ti 10242 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct udp_tunnel_info *ti) ti 10245 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c u16 t_port = ntohs(ti->port); ti 10247 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c switch (ti->type) { ti 10260 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct udp_tunnel_info *ti) ti 10263 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c u16 t_port = ntohs(ti->port); ti 10265 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c switch (ti->type) { ti 11185 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct udp_tunnel_info *ti) ti 11189 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) ti 11195 drivers/net/ethernet/broadcom/bnxt/bnxt.c switch (ti->type) { ti 11197 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) ti 11202 drivers/net/ethernet/broadcom/bnxt/bnxt.c bp->vxlan_port = ti->port; ti 11208 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bp->nge_port_cnt && bp->nge_port != ti->port) ti 11213 drivers/net/ethernet/broadcom/bnxt/bnxt.c bp->nge_port = ti->port; ti 11225 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct udp_tunnel_info *ti) ti 11229 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) ti 11235 drivers/net/ethernet/broadcom/bnxt/bnxt.c switch (ti->type) { ti 11237 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) ti 11247 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!bp->nge_port_cnt || bp->nge_port != ti->port) ti 2796 drivers/net/ethernet/cavium/liquidio/lio_main.c struct udp_tunnel_info *ti) ti 2798 drivers/net/ethernet/cavium/liquidio/lio_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 2803 
drivers/net/ethernet/cavium/liquidio/lio_main.c htons(ti->port), ti 2808 drivers/net/ethernet/cavium/liquidio/lio_main.c struct udp_tunnel_info *ti) ti 2810 drivers/net/ethernet/cavium/liquidio/lio_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 2815 drivers/net/ethernet/cavium/liquidio/lio_main.c htons(ti->port), ti 1840 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c struct udp_tunnel_info *ti) ti 1842 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 1847 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c htons(ti->port), ti 1852 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c struct udp_tunnel_info *ti) ti 1854 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 1859 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c htons(ti->port), ti 1115 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c struct tid_info *ti; ti 1137 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c ti = &(T3C_DATA(tdev))->tid_maps; ti 1138 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c for (tid = 0; tid < ti->ntids; tid++) { ti 1139 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c te = lookup_tid(ti, tid); ti 3211 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct udp_tunnel_info *ti) ti 3222 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c switch (ti->type) { ti 3225 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->vxlan_port != ti->port) ti 3237 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->geneve_port != ti->port) ti 3272 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct udp_tunnel_info *ti) ti 3283 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c switch (ti->type) { ti 3291 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->vxlan_port == ti->port) { ti 3300 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c be16_to_cpu(ti->port)); ti 3304 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->vxlan_port = ti->port; ti 3308 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); ti 3312 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->geneve_port == ti->port) { ti 3321 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c be16_to_cpu(ti->port)); ti 3325 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->geneve_port = ti->port; ti 3329 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); ti 3352 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c be16_to_cpu(ti->port)); ti 3353 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c cxgb_del_udp_tunnel(netdev, ti); ti 181 drivers/net/ethernet/cisco/enic/enic_main.c struct udp_tunnel_info *ti) ti 184 drivers/net/ethernet/cisco/enic/enic_main.c __be16 port = ti->port; ti 189 drivers/net/ethernet/cisco/enic/enic_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) { ti 194 drivers/net/ethernet/cisco/enic/enic_main.c switch (ti->sa_family) { ti 235 drivers/net/ethernet/cisco/enic/enic_main.c (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family); ti 241 drivers/net/ethernet/cisco/enic/enic_main.c ntohs(port), ti->sa_family, ti->type); ti 247 drivers/net/ethernet/cisco/enic/enic_main.c struct udp_tunnel_info *ti) ti 254 drivers/net/ethernet/cisco/enic/enic_main.c if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) || ti 255 drivers/net/ethernet/cisco/enic/enic_main.c ti->type != UDP_TUNNEL_TYPE_VXLAN) { ti 257 drivers/net/ethernet/cisco/enic/enic_main.c ntohs(ti->port), ti->sa_family, ti->type); ti 265 drivers/net/ethernet/cisco/enic/enic_main.c ntohs(ti->port)); 
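
A recurring shape in the network-driver entries above and below: here ti is a struct udp_tunnel_info *, handed to the .ndo_udp_tunnel_add/.ndo_udp_tunnel_del callbacks. Each handler filters on ti->type, compares or stores the big-endian ti->port, and byte-swaps it before touching hardware. Below is a minimal sketch of that pattern, assuming a hypothetical single-port adapter; my_adapter, my_hw_set_vxlan_port and the my_udp_tunnel_* names are illustrative stand-ins, not taken from any driver in this listing.

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

struct my_adapter {
        __be16 vxlan_port;      /* currently offloaded VXLAN port, 0 if none */
};

/* Hypothetical stand-in for the device-specific register write. */
static void my_hw_set_vxlan_port(struct my_adapter *adapter, u16 port)
{
}

static void my_udp_tunnel_add(struct net_device *netdev,
                              struct udp_tunnel_info *ti)
{
        struct my_adapter *adapter = netdev_priv(netdev);

        /* Most handlers in this listing only offload VXLAN. */
        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
                return;

        /* Single-slot hardware: refuse a second, different port. */
        if (adapter->vxlan_port && adapter->vxlan_port != ti->port)
                return;

        adapter->vxlan_port = ti->port;
        /* ti->port is __be16; the device usually wants host byte order. */
        my_hw_set_vxlan_port(adapter, be16_to_cpu(ti->port));
}

static void my_udp_tunnel_del(struct net_device *netdev,
                              struct udp_tunnel_info *ti)
{
        struct my_adapter *adapter = netdev_priv(netdev);

        if (ti->type != UDP_TUNNEL_TYPE_VXLAN ||
            adapter->vxlan_port != ti->port)
                return;

        adapter->vxlan_port = 0;
        my_hw_set_vxlan_port(adapter, 0);
}

Both callbacks return void and are wired up through struct net_device_ops, which is why the real handlers in this listing (bnxt, cxgb4, ixgbe, mlx5e, ...) can only log and bail out on ports they cannot offload.
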
ti 272 drivers/net/ethernet/cisco/enic/enic_main.c ntohs(ti->port), ti->sa_family); ti 5167 drivers/net/ethernet/emulex/benet/be_main.c struct udp_tunnel_info *ti, ti 5173 drivers/net/ethernet/emulex/benet/be_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 5181 drivers/net/ethernet/emulex/benet/be_main.c cmd_work->info.vxlan_port = ti->port; ti 5187 drivers/net/ethernet/emulex/benet/be_main.c struct udp_tunnel_info *ti) ti 5189 drivers/net/ethernet/emulex/benet/be_main.c be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port); ti 5193 drivers/net/ethernet/emulex/benet/be_main.c struct udp_tunnel_info *ti) ti 5195 drivers/net/ethernet/emulex/benet/be_main.c be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port); ti 435 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct udp_tunnel_info *ti) ti 440 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c if ((port->port == ti->port) && ti 441 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c (port->sa_family == ti->sa_family)) { ti 451 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct udp_tunnel_info *ti) ti 458 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c port = fm10k_remove_tunnel_port(ports, ti); ti 463 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c port->port = ti->port; ti 464 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c port->sa_family = ti->sa_family; ti 480 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct udp_tunnel_info *ti) ti 488 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c switch (ti->type) { ti 490 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c fm10k_insert_tunnel_port(&interface->vxlan_port, ti); ti 493 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c fm10k_insert_tunnel_port(&interface->geneve_port, ti); ti 512 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c struct udp_tunnel_info *ti) ti 520 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c switch (ti->type) { ti 522 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti); ti 525 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c port = fm10k_remove_tunnel_port(&interface->geneve_port, ti); ti 1759 drivers/net/ethernet/intel/fm10k/fm10k_pci.c unsigned int ri = 0, ti = 0; ti 1771 drivers/net/ethernet/intel/fm10k/fm10k_pci.c ti++; ti 1777 drivers/net/ethernet/intel/fm10k/fm10k_pci.c "%s-tx-%u", dev->name, ti++); ti 5890 drivers/net/ethernet/intel/i40e/i40e_common.c u32 ti; ti 5901 drivers/net/ethernet/intel/i40e/i40e_common.c ti = le32_to_cpu(filters[i].element.tenant_id); ti 5902 drivers/net/ethernet/intel/i40e/i40e_common.c filters[i].element.tenant_id = cpu_to_le32(ti << 8); ti 5984 drivers/net/ethernet/intel/i40e/i40e_common.c u32 ti; ti 5995 drivers/net/ethernet/intel/i40e/i40e_common.c ti = le32_to_cpu(filters[i].element.tenant_id); ti 5996 drivers/net/ethernet/intel/i40e/i40e_common.c filters[i].element.tenant_id = cpu_to_le32(ti << 8); ti 12162 drivers/net/ethernet/intel/i40e/i40e_main.c struct udp_tunnel_info *ti) ti 12167 drivers/net/ethernet/intel/i40e/i40e_main.c u16 port = ntohs(ti->port); ti 12188 drivers/net/ethernet/intel/i40e/i40e_main.c switch (ti->type) { ti 12214 drivers/net/ethernet/intel/i40e/i40e_main.c struct udp_tunnel_info *ti) ti 12219 drivers/net/ethernet/intel/i40e/i40e_main.c u16 port = ntohs(ti->port); ti 12228 drivers/net/ethernet/intel/i40e/i40e_main.c switch (ti->type) { ti 3228 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int ri = 0, ti = 0; ti 3238 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ti++; ti 3244 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c "%s-tx-%u", 
netdev->name, ti++); ti 9816 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct udp_tunnel_info *ti) ti 9820 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c __be16 port = ti->port; ti 9824 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ti->sa_family != AF_INET) ti 9827 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c switch (ti->type) { ti 9877 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct udp_tunnel_info *ti) ti 9882 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN && ti 9883 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ti->type != UDP_TUNNEL_TYPE_GENEVE) ti 9886 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (ti->sa_family != AF_INET) ti 9889 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c switch (ti->type) { ti 9894 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (adapter->vxlan_port != ti->port) { ti 9896 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ntohs(ti->port)); ti 9906 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (adapter->geneve_port != ti->port) { ti 9908 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ntohs(ti->port)); ti 1535 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int ri = 0, ti = 0; ti 1545 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ti++; ti 1551 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c "%s-tx-%u", netdev->name, ti++); ti 2673 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct udp_tunnel_info *ti) ti 2676 drivers/net/ethernet/mellanox/mlx4/en_netdev.c __be16 port = ti->port; ti 2679 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 2682 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if (ti->sa_family != AF_INET) ti 2700 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct udp_tunnel_info *ti) ti 2703 drivers/net/ethernet/mellanox/mlx4/en_netdev.c __be16 port = ti->port; ti 2706 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 2709 drivers/net/ethernet/mellanox/mlx4/en_netdev.c if (ti->sa_family != AF_INET) ti 1194 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); ti 1195 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); ti 4239 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) ti 4243 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 4249 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); ti 4252 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) ti 4256 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 4262 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); ti 3611 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct udp_tunnel_info *ti) ti 3616 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 3619 drivers/net/ethernet/netronome/nfp/nfp_net_common.c idx = nfp_net_find_vxlan_idx(nn, ti->port); ti 3624 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_set_vxlan_port(nn, idx, ti->port); ti 3628 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct udp_tunnel_info *ti) ti 3633 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (ti->type != 
UDP_TUNNEL_TYPE_VXLAN) ti 3636 drivers/net/ethernet/netronome/nfp/nfp_net_common.c idx = nfp_net_find_vxlan_idx(nn, ti->port); ti 549 drivers/net/ethernet/qlogic/qede/qede.h void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti); ti 550 drivers/net/ethernet/qlogic/qede/qede.h void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti); ti 981 drivers/net/ethernet/qlogic/qede/qede_filter.c void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) ti 985 drivers/net/ethernet/qlogic/qede/qede_filter.c u16 t_port = ntohs(ti->port); ti 990 drivers/net/ethernet/qlogic/qede/qede_filter.c switch (ti->type) { ti 1045 drivers/net/ethernet/qlogic/qede/qede_filter.c struct udp_tunnel_info *ti) ti 1049 drivers/net/ethernet/qlogic/qede/qede_filter.c u16 t_port = ntohs(ti->port); ti 1053 drivers/net/ethernet/qlogic/qede/qede_filter.c switch (ti->type) { ti 475 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct udp_tunnel_info *ti) ti 480 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 490 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c ahw->vxlan_port = ntohs(ti->port); ti 494 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c if (ahw->vxlan_port == ntohs(ti->port)) ti 500 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct udp_tunnel_info *ti) ti 505 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c if (ti->type != UDP_TUNNEL_TYPE_VXLAN) ti 509 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c (ahw->vxlan_port != ntohs(ti->port))) ti 2430 drivers/net/ethernet/sfc/efx.c static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) ti 2436 drivers/net/ethernet/sfc/efx.c efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); ti 2441 drivers/net/ethernet/sfc/efx.c tnl.port = ti->port; ti 2447 drivers/net/ethernet/sfc/efx.c static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) ti 2453 drivers/net/ethernet/sfc/efx.c efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); ti 2458 drivers/net/ethernet/sfc/efx.c tnl.port = ti->port; ti 257 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c unsigned int i, ti, ri; ti 285 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, ti 289 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c if (!pdata->per_channel_irq && (ti || ri)) { ti 140 drivers/net/vmxnet3/vmxnet3_defs.h u32 ti:1; /* VLAN Tag Insertion */ ti 152 drivers/net/vmxnet3/vmxnet3_defs.h u32 ti:1; /* VLAN Tag Insertion */ ti 1106 drivers/net/vmxnet3/vmxnet3_drv.c gdesc->txd.ti = 1; ti 115 drivers/scsi/sr_ioctl.c static int sr_fake_playtrkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti) ti 128 drivers/scsi/sr_ioctl.c if (ti->cdti_trk1 == ntracks) ti 129 drivers/scsi/sr_ioctl.c ti->cdti_trk1 = CDROM_LEADOUT; ti 130 drivers/scsi/sr_ioctl.c else if (ti->cdti_trk1 != CDROM_LEADOUT) ti 131 drivers/scsi/sr_ioctl.c ti->cdti_trk1 ++; ti 133 drivers/scsi/sr_ioctl.c trk0_te.cdte_track = ti->cdti_trk0; ti 135 drivers/scsi/sr_ioctl.c trk1_te.cdte_track = ti->cdti_trk1; ti 159 drivers/scsi/sr_ioctl.c struct cdrom_ti *ti) ti 169 drivers/scsi/sr_ioctl.c cgc.cmd[4] = ti->cdti_trk0; ti 170 drivers/scsi/sr_ioctl.c cgc.cmd[5] = ti->cdti_ind0; ti 171 drivers/scsi/sr_ioctl.c cgc.cmd[7] = ti->cdti_trk1; ti 172 drivers/scsi/sr_ioctl.c cgc.cmd[8] = ti->cdti_ind1; ti 177 drivers/scsi/sr_ioctl.c result = sr_fake_playtrkind(cdi, ti); ti 649 fs/afs/dir.c struct inode *inode = NULL, *ti; ti 719 fs/afs/dir.c ti = ilookup5_nowait(dir->i_sb, 
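
All of the udp_tunnel entries above follow one callback shape: the stack hands the driver a struct udp_tunnel_info, the driver filters on ti->type (most offload only UDP_TUNNEL_TYPE_VXLAN), and ti->port travels in network byte order, so it is converted with ntohs()/be16_to_cpu() only where the hardware is programmed. A minimal sketch of that shape, assuming a hypothetical foo_adapter private struct and foo_hw_set_vxlan_port() helper (neither is a real driver API):

/* needs <linux/netdevice.h> and <net/udp_tunnel.h> */
static void foo_udp_tunnel_add(struct net_device *netdev,
			       struct udp_tunnel_info *ti)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	/* most drivers listed above offload only VXLAN; ignore the rest */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	/* ti->port is __be16: compare in wire order, convert only for hw */
	if (adapter->vxlan_port == ti->port)
		return;

	adapter->vxlan_port = ti->port;
	foo_hw_set_vxlan_port(adapter, be16_to_cpu(ti->port)); /* hypothetical */
}

Such a handler would be wired up as .ndo_udp_tunnel_add in net_device_ops; the two include/linux/netdevice.h entries later in this index are exactly those hook declarations.
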
ti 257 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c unsigned int i, ti, ri;
ti 285 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
ti 289 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c if (!pdata->per_channel_irq && (ti || ri)) {
ti 140 drivers/net/vmxnet3/vmxnet3_defs.h u32 ti:1; /* VLAN Tag Insertion */
ti 152 drivers/net/vmxnet3/vmxnet3_defs.h u32 ti:1; /* VLAN Tag Insertion */
ti 1106 drivers/net/vmxnet3/vmxnet3_drv.c gdesc->txd.ti = 1;
ti 115 drivers/scsi/sr_ioctl.c static int sr_fake_playtrkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti)
ti 128 drivers/scsi/sr_ioctl.c if (ti->cdti_trk1 == ntracks)
ti 129 drivers/scsi/sr_ioctl.c ti->cdti_trk1 = CDROM_LEADOUT;
ti 130 drivers/scsi/sr_ioctl.c else if (ti->cdti_trk1 != CDROM_LEADOUT)
ti 131 drivers/scsi/sr_ioctl.c ti->cdti_trk1 ++;
ti 133 drivers/scsi/sr_ioctl.c trk0_te.cdte_track = ti->cdti_trk0;
ti 135 drivers/scsi/sr_ioctl.c trk1_te.cdte_track = ti->cdti_trk1;
ti 159 drivers/scsi/sr_ioctl.c struct cdrom_ti *ti)
ti 169 drivers/scsi/sr_ioctl.c cgc.cmd[4] = ti->cdti_trk0;
ti 170 drivers/scsi/sr_ioctl.c cgc.cmd[5] = ti->cdti_ind0;
ti 171 drivers/scsi/sr_ioctl.c cgc.cmd[7] = ti->cdti_trk1;
ti 172 drivers/scsi/sr_ioctl.c cgc.cmd[8] = ti->cdti_ind1;
ti 177 drivers/scsi/sr_ioctl.c result = sr_fake_playtrkind(cdi, ti);
ti 649 fs/afs/dir.c struct inode *inode = NULL, *ti;
ti 719 fs/afs/dir.c ti = ilookup5_nowait(dir->i_sb, iget_data.fid.vnode,
ti 721 fs/afs/dir.c if (!IS_ERR_OR_NULL(ti)) {
ti 722 fs/afs/dir.c vnode = AFS_FS_I(ti);
ti 724 fs/afs/dir.c cookie->inodes[i] = ti;
ti 820 fs/afs/dir.c ti = afs_iget(dir->i_sb, key, &iget_data, scb, cbi, dvnode);
ti 821 fs/afs/dir.c if (!IS_ERR(ti))
ti 822 fs/afs/dir.c afs_cache_permit(AFS_FS_I(ti), key,
ti 827 fs/afs/dir.c inode = ti;
ti 829 fs/afs/dir.c if (!IS_ERR(ti))
ti 830 fs/afs/dir.c iput(ti);
ti 1357 fs/jfs/jfs_dmap.c int rc, ti, i, k, m, n, agperlev;
ti 1431 fs/jfs/jfs_dmap.c ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
ti 1439 fs/jfs/jfs_dmap.c for (i = 0; i < bmp->db_agwidth; i++, ti++) {
ti 1442 fs/jfs/jfs_dmap.c if (l2nb > dcp->stree[ti])
ti 1450 fs/jfs/jfs_dmap.c for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
ti 1452 fs/jfs/jfs_dmap.c ti = m + n;
ti 1475 fs/jfs/jfs_dmap.c ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;
ti 2502 fs/jfs/jfs_dmap.c int rc, leafno, ti;
ti 2523 fs/jfs/jfs_dmap.c ti = leafno + le32_to_cpu(dcp->leafidx);
ti 2528 fs/jfs/jfs_dmap.c oldval = dcp->stree[ti];
ti 2554 fs/jfs/jfs_dmap.c oldval = dcp->stree[ti];
ti 2595 fs/jfs/jfs_dmap.c if (dcp->stree[ti] == NOFREE)
ti 2961 fs/jfs/jfs_dmap.c int ti, n = 0, k, x = 0;
ti 2973 fs/jfs/jfs_dmap.c for (k = le32_to_cpu(tp->dmt_height), ti = 1;
ti 2974 fs/jfs/jfs_dmap.c k > 0; k--, ti = ((ti + n) << 2) + 1) {
ti 2978 fs/jfs/jfs_dmap.c for (x = ti, n = 0; n < 4; n++) {
ti 181 fs/jfs/jfs_dtree.c static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
ti 4347 fs/jfs/jfs_dtree.c static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
ti 4359 fs/jfs/jfs_dtree.c tsi = stbl[ti];
ti 50 fs/nilfs2/file.c struct nilfs_transaction_info ti;
ti 93 fs/nilfs2/file.c ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
ti 90 fs/nilfs2/inode.c struct nilfs_transaction_info ti;
ti 93 fs/nilfs2/inode.c err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
ti 726 fs/nilfs2/inode.c struct nilfs_transaction_info ti;
ti 737 fs/nilfs2/inode.c nilfs_transaction_begin(sb, &ti, 0); /* never fails */
ti 781 fs/nilfs2/inode.c struct nilfs_transaction_info ti;
ti 792 fs/nilfs2/inode.c nilfs_transaction_begin(sb, &ti, 0); /* never fails */
ti 818 fs/nilfs2/inode.c struct nilfs_transaction_info ti;
ti 827 fs/nilfs2/inode.c err = nilfs_transaction_begin(sb, &ti, 0);
ti 977 fs/nilfs2/inode.c struct nilfs_transaction_info ti;
ti 990 fs/nilfs2/inode.c nilfs_transaction_begin(inode->i_sb, &ti, 0);
ti 131 fs/nilfs2/ioctl.c struct nilfs_transaction_info ti;
ti 155 fs/nilfs2/ioctl.c ret = nilfs_transaction_begin(inode->i_sb, &ti, 0);
ti 206 fs/nilfs2/ioctl.c struct nilfs_transaction_info ti;
ti 223 fs/nilfs2/ioctl.c nilfs_transaction_begin(inode->i_sb, &ti, 0);
ti 261 fs/nilfs2/ioctl.c struct nilfs_transaction_info ti;
ti 276 fs/nilfs2/ioctl.c nilfs_transaction_begin(inode->i_sb, &ti, 0);
ti 1219 fs/nilfs2/ioctl.c struct nilfs_transaction_info ti;
ti 1265 fs/nilfs2/ioctl.c nilfs_transaction_begin(inode->i_sb, &ti, 0);
ti 73 fs/nilfs2/mdt.c struct nilfs_transaction_info ti;
ti 77 fs/nilfs2/mdt.c nilfs_transaction_begin(sb, &ti, 0);
ti 79 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 82 fs/nilfs2/namei.c err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
ti 106 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 109 fs/nilfs2/namei.c err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
ti 130 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 139 fs/nilfs2/namei.c err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
ti 180 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 183 fs/nilfs2/namei.c err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
ti 207 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 210 fs/nilfs2/namei.c err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
ti 293 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 296 fs/nilfs2/namei.c err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
ti 315 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 318 fs/nilfs2/namei.c err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
ti 351 fs/nilfs2/namei.c struct nilfs_transaction_info ti;
ti 357 fs/nilfs2/namei.c err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
ti 167 fs/nilfs2/nilfs.h struct nilfs_transaction_info *ti = current->journal_info;
ti 169 fs/nilfs2/nilfs.h ti->ti_flags |= flag;
ti 174 fs/nilfs2/nilfs.h struct nilfs_transaction_info *ti = current->journal_info;
ti 176 fs/nilfs2/nilfs.h if (ti == NULL || ti->ti_magic != NILFS_TI_MAGIC)
ti 178 fs/nilfs2/nilfs.h return !!(ti->ti_flags & flag);
ti 147 fs/nilfs2/segment.c struct nilfs_transaction_info *ti)
ti 164 fs/nilfs2/segment.c if (!ti) {
ti 165 fs/nilfs2/segment.c ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
ti 166 fs/nilfs2/segment.c if (!ti)
ti 168 fs/nilfs2/segment.c ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
ti 170 fs/nilfs2/segment.c ti->ti_flags = 0;
ti 172 fs/nilfs2/segment.c ti->ti_count = 0;
ti 173 fs/nilfs2/segment.c ti->ti_save = save;
ti 174 fs/nilfs2/segment.c ti->ti_magic = NILFS_TI_MAGIC;
ti 175 fs/nilfs2/segment.c current->journal_info = ti;
ti 207 fs/nilfs2/segment.c struct nilfs_transaction_info *ti,
ti 211 fs/nilfs2/segment.c int ret = nilfs_prepare_segment_lock(sb, ti);
ti 242 fs/nilfs2/segment.c ti = current->journal_info;
ti 243 fs/nilfs2/segment.c current->journal_info = ti->ti_save;
ti 244 fs/nilfs2/segment.c if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
ti 245 fs/nilfs2/segment.c kmem_cache_free(nilfs_transaction_cachep, ti);
ti 263 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info;
ti 267 fs/nilfs2/segment.c BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
ti 268 fs/nilfs2/segment.c ti->ti_flags |= NILFS_TI_COMMIT;
ti 269 fs/nilfs2/segment.c if (ti->ti_count > 0) {
ti 270 fs/nilfs2/segment.c ti->ti_count--;
ti 271 fs/nilfs2/segment.c trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
ti 272 fs/nilfs2/segment.c ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
ti 278 fs/nilfs2/segment.c if (ti->ti_flags & NILFS_TI_COMMIT)
ti 284 fs/nilfs2/segment.c trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
ti 285 fs/nilfs2/segment.c ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
ti 287 fs/nilfs2/segment.c current->journal_info = ti->ti_save;
ti 289 fs/nilfs2/segment.c if (ti->ti_flags & NILFS_TI_SYNC)
ti 291 fs/nilfs2/segment.c if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
ti 292 fs/nilfs2/segment.c kmem_cache_free(nilfs_transaction_cachep, ti);
ti 299 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info;
ti 302 fs/nilfs2/segment.c BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
ti 303 fs/nilfs2/segment.c if (ti->ti_count > 0) {
ti 304 fs/nilfs2/segment.c ti->ti_count--;
ti 305 fs/nilfs2/segment.c trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
ti 306 fs/nilfs2/segment.c ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
ti 311 fs/nilfs2/segment.c trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
ti 312 fs/nilfs2/segment.c ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
ti 314 fs/nilfs2/segment.c current->journal_info = ti->ti_save;
ti 315 fs/nilfs2/segment.c if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
ti 316 fs/nilfs2/segment.c kmem_cache_free(nilfs_transaction_cachep, ti);
ti 334 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info;
ti 336 fs/nilfs2/segment.c ti->ti_flags |= NILFS_TI_WRITER;
ti 338 fs/nilfs2/segment.c ti->ti_flags &= ~NILFS_TI_WRITER;
ti 344 fs/nilfs2/segment.c struct nilfs_transaction_info *ti,
ti 352 fs/nilfs2/segment.c ti->ti_flags = NILFS_TI_WRITER;
ti 353 fs/nilfs2/segment.c ti->ti_count = 0;
ti 354 fs/nilfs2/segment.c ti->ti_save = cur_ti;
ti 355 fs/nilfs2/segment.c ti->ti_magic = NILFS_TI_MAGIC;
ti 356 fs/nilfs2/segment.c current->journal_info = ti;
ti 359 fs/nilfs2/segment.c trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
ti 360 fs/nilfs2/segment.c ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
ti 372 fs/nilfs2/segment.c ti->ti_flags |= NILFS_TI_GC;
ti 374 fs/nilfs2/segment.c trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
ti 375 fs/nilfs2/segment.c ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
ti 380 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info;
ti 383 fs/nilfs2/segment.c BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
ti 384 fs/nilfs2/segment.c BUG_ON(ti->ti_count > 0);
ti 387 fs/nilfs2/segment.c current->journal_info = ti->ti_save;
ti 389 fs/nilfs2/segment.c trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
ti 390 fs/nilfs2/segment.c ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
ti 2234 fs/nilfs2/segment.c struct nilfs_transaction_info *ti;
ti 2241 fs/nilfs2/segment.c BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
ti 2273 fs/nilfs2/segment.c struct nilfs_transaction_info ti;
ti 2279 fs/nilfs2/segment.c nilfs_transaction_lock(sb, &ti, 0);
ti 2423 fs/nilfs2/segment.c struct nilfs_transaction_info ti;
ti 2429 fs/nilfs2/segment.c nilfs_transaction_lock(sb, &ti, 1);
ti 2477 fs/nilfs2/segment.c struct nilfs_transaction_info ti;
ti 2479 fs/nilfs2/segment.c nilfs_transaction_lock(sci->sc_super, &ti, 0);
ti 2691 fs/nilfs2/segment.c struct nilfs_transaction_info ti;
ti 2693 fs/nilfs2/segment.c nilfs_transaction_lock(sci->sc_super, &ti, 0);
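
The nilfs2 call sites above share a single calling convention: a caller-owned struct nilfs_transaction_info on the stack, passed by address to nilfs_transaction_begin() (the third argument enables the free-space vacancy check), and then paired with nilfs_transaction_commit() or nilfs_transaction_abort(), which restore current->journal_info from ti_save. A condensed sketch of that pattern; do_the_actual_update() is a hypothetical placeholder for the filesystem work:

/* sketch of the nilfs2 begin/commit/abort pattern seen in namei.c and inode.c */
static int example_op(struct inode *dir)
{
	struct nilfs_transaction_info ti;	/* lives on the caller's stack */
	int err;

	/* third argument: 1 requests the disk-space vacancy check */
	err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
	if (err)
		return err;

	err = do_the_actual_update(dir);	/* hypothetical placeholder */
	if (!err)
		err = nilfs_transaction_commit(dir->i_sb);
	else
		nilfs_transaction_abort(dir->i_sb);
	return err;
}

Passing NULL instead of &ti makes nilfs_prepare_segment_lock() allocate the record from nilfs_transaction_cachep and tag it NILFS_TI_DYNAMIC_ALLOC, which is why commit and abort both check that flag before kmem_cache_free(), as the segment.c entries show.
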
ti 2452 fs/unicode/mkutf8data.c unsigned int ti = si % tc;
ti 2457 fs/unicode/mkutf8data.c if (ti)
ti 2458 fs/unicode/mkutf8data.c mapping[i++] = tb + ti;
ti 2667 fs/unicode/mkutf8data.c unsigned int ti;
ti 2674 fs/unicode/mkutf8data.c ti = si % TC;
ti 2689 fs/unicode/mkutf8data.c if (ti)
ti 2690 fs/unicode/mkutf8data.c h += utf8encode((char *)h, ti + TB);
ti 280 fs/unicode/utf8-norm.c unsigned int ti;
ti 287 fs/unicode/utf8-norm.c ti = si % TC;
ti 302 fs/unicode/utf8-norm.c if (ti)
ti 303 fs/unicode/utf8-norm.c h += utf8encode3((char *)h, ti + TB);
ti 51 include/linux/device-mapper.h typedef void (*dm_dtr_fn) (struct dm_target *ti);
ti 60 include/linux/device-mapper.h typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
ti 61 include/linux/device-mapper.h typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
ti 76 include/linux/device-mapper.h typedef int (*dm_endio_fn) (struct dm_target *ti,
ti 78 include/linux/device-mapper.h typedef int (*dm_request_endio_fn) (struct dm_target *ti,
ti 82 include/linux/device-mapper.h typedef void (*dm_presuspend_fn) (struct dm_target *ti);
ti 83 include/linux/device-mapper.h typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
ti 84 include/linux/device-mapper.h typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
ti 85 include/linux/device-mapper.h typedef int (*dm_preresume_fn) (struct dm_target *ti);
ti 86 include/linux/device-mapper.h typedef void (*dm_resume_fn) (struct dm_target *ti);
ti 88 include/linux/device-mapper.h typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
ti 91 include/linux/device-mapper.h typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
ti 94 include/linux/device-mapper.h typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
ti 96 include/linux/device-mapper.h typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
ti 110 include/linux/device-mapper.h typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
ti 120 include/linux/device-mapper.h typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
ti 124 include/linux/device-mapper.h typedef void (*dm_io_hints_fn) (struct dm_target *ti,
ti 132 include/linux/device-mapper.h typedef int (*dm_busy_fn) (struct dm_target *ti);
ti 139 include/linux/device-mapper.h typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
ti 141 include/linux/device-mapper.h typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
ti 160 include/linux/device-mapper.h int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
ti 162 include/linux/device-mapper.h void dm_put_device(struct dm_target *ti, struct dm_dev *d);
ti 422 include/linux/device-mapper.h int dm_suspended(struct dm_target *ti);
ti 423 include/linux/device-mapper.h int dm_noflush_suspending(struct dm_target *ti);
ti 425 include/linux/device-mapper.h void dm_remap_zone_report(struct dm_target *ti, sector_t start,
ti 487 include/linux/device-mapper.h int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
ti 604 include/linux/device-mapper.h #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
ti 24 include/linux/dm-dirty-log.h int (*flush_callback_fn)(struct dm_target *ti);
ti 35 include/linux/dm-dirty-log.h int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
ti 140 include/linux/dm-dirty-log.h struct dm_target *ti,
ti 141 include/linux/dm-dirty-log.h int (*flush_callback_fn)(struct dm_target *ti),
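
The device-mapper typedefs above are the slots of struct target_type; a target implements ctr/dtr/map against the struct dm_target *ti it is handed. A minimal pass-through sketch in the style of dm-linear, using only calls listed above (dm_get_device(), dm_put_device(), dm_target_offset()); the foo naming is hypothetical:

/* needs <linux/device-mapper.h> and <linux/module.h> */
struct foo_ctx {
	struct dm_dev *dev;
};

static int foo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct foo_ctx *fc;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}
	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;
	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &fc->dev)) {
		ti->error = "Device lookup failed";
		kfree(fc);
		return -EINVAL;
	}
	ti->private = fc;
	return 0;
}

static void foo_dtr(struct dm_target *ti)
{
	struct foo_ctx *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

static int foo_map(struct dm_target *ti, struct bio *bio)
{
	struct foo_ctx *fc = ti->private;

	/* dm_target_offset() rebases the sector relative to ti->begin */
	bio_set_dev(bio, fc->dev->bdev);
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}

A real module would wrap these in a struct target_type and register it with dm_register_target(); that part is omitted here for brevity.
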
ti 1419 include/linux/netdevice.h struct udp_tunnel_info *ti);
ti 1421 include/linux/netdevice.h struct udp_tunnel_info *ti);
ti 546 include/linux/phy.h int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
ti 53 include/linux/thread_info.h static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
ti 55 include/linux/thread_info.h set_bit(flag, (unsigned long *)&ti->flags);
ti 58 include/linux/thread_info.h static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
ti 60 include/linux/thread_info.h clear_bit(flag, (unsigned long *)&ti->flags);
ti 63 include/linux/thread_info.h static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
ti 67 include/linux/thread_info.h set_ti_thread_flag(ti, flag);
ti 69 include/linux/thread_info.h clear_ti_thread_flag(ti, flag);
ti 72 include/linux/thread_info.h static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
ti 74 include/linux/thread_info.h return test_and_set_bit(flag, (unsigned long *)&ti->flags);
ti 77 include/linux/thread_info.h static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
ti 79 include/linux/thread_info.h return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
ti 82 include/linux/thread_info.h static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
ti 84 include/linux/thread_info.h return test_bit(flag, (unsigned long *)&ti->flags);
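
The thread_info helpers above are thin wrappers over the atomic bitops, treating ti->flags as an unsigned long bitmap. Typical use mirrors the arm64 debug-monitors.c entry near the top of this index; in the sketch below, arch_enable_single_step() is a hypothetical stand-in for whatever arch-specific work should run only on the 0 -> 1 transition:

/* needs <linux/thread_info.h> and <linux/sched.h> */
static void example_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	/* test_and_set returns the old bit, so this body runs exactly once */
	if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
		arch_enable_single_step(task);	/* hypothetical arch hook */
}
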
ti 87 include/sound/timer.h void (*private_free) (struct snd_timer_instance *ti);
ti 121 include/sound/timer.h int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id);
ti 68 include/trace/events/nilfs2.h struct nilfs_transaction_info *ti,
ti 73 include/trace/events/nilfs2.h TP_ARGS(sb, ti, count, flags, state),
ti 77 include/trace/events/nilfs2.h __field(void *, ti)
ti 85 include/trace/events/nilfs2.h __entry->ti = ti;
ti 93 include/trace/events/nilfs2.h __entry->ti,
ti 707 kernel/irq/timings.c static int __init irq_timings_test_next_index(struct timings_intervals *ti)
ti 713 kernel/irq/timings.c count = ti->count - 1;
ti 725 kernel/irq/timings.c index = irq_timings_interval_index(ti->intervals[i]);
ti 741 kernel/irq/timings.c i = irq_timings_interval_index(ti->intervals[ti->count - 1]);
ti 769 kernel/irq/timings.c static int __init irq_timings_test_irqs(struct timings_intervals *ti)
ti 789 kernel/irq/timings.c for (i = 0; i < ti->count; i++) {
ti 791 kernel/irq/timings.c index = irq_timings_interval_index(ti->intervals[i]);
ti 793 kernel/irq/timings.c i, ti->intervals[i], index);
ti 795 kernel/irq/timings.c __irq_timings_store(irq, irqs, ti->intervals[i]);
ti 802 kernel/irq/timings.c if (irqs->count != ti->count) {
ti 18 kernel/locking/mutex.h #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
ti 370 kernel/sched/core.c struct thread_info *ti = task_thread_info(p);
ti 371 kernel/sched/core.c return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
ti 382 kernel/sched/core.c struct thread_info *ti = task_thread_info(p);
ti 383 kernel/sched/core.c typeof(ti->flags) old, val = READ_ONCE(ti->flags);
ti 390 kernel/sched/core.c old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
ti 24 lib/lzo/lzo1x_compress.c size_t ti, void *wrkmem, signed char *state_offset,
ti 37 lib/lzo/lzo1x_compress.c ip += ti < 4 ? 4 - ti : 0;
ti 115 lib/lzo/lzo1x_compress.c ii -= ti;
ti 116 lib/lzo/lzo1x_compress.c ti = 0;
ti 301 lib/lzo/lzo1x_compress.c return in_end - (ii - ti);
ti 93 net/ipv4/udp_tunnel.c struct udp_tunnel_info ti;
ti 99 net/ipv4/udp_tunnel.c ti.type = type;
ti 100 net/ipv4/udp_tunnel.c ti.sa_family = sk->sk_family;
ti 101 net/ipv4/udp_tunnel.c ti.port = inet_sk(sk)->inet_sport;
ti 103 net/ipv4/udp_tunnel.c dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
ti 111 net/ipv4/udp_tunnel.c struct udp_tunnel_info ti;
ti 117 net/ipv4/udp_tunnel.c ti.type = type;
ti 118 net/ipv4/udp_tunnel.c ti.sa_family = sk->sk_family;
ti 119 net/ipv4/udp_tunnel.c ti.port = inet_sk(sk)->inet_sport;
ti 121 net/ipv4/udp_tunnel.c dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti);
ti 130 net/ipv4/udp_tunnel.c struct udp_tunnel_info ti;
ti 133 net/ipv4/udp_tunnel.c ti.type = type;
ti 134 net/ipv4/udp_tunnel.c ti.sa_family = sk->sk_family;
ti 135 net/ipv4/udp_tunnel.c ti.port = inet_sk(sk)->inet_sport;
ti 143 net/ipv4/udp_tunnel.c dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
ti 154 net/ipv4/udp_tunnel.c struct udp_tunnel_info ti;
ti 157 net/ipv4/udp_tunnel.c ti.type = type;
ti 158 net/ipv4/udp_tunnel.c ti.sa_family = sk->sk_family;
ti 159 net/ipv4/udp_tunnel.c ti.port = inet_sk(sk)->inet_sport;
ti 167 net/ipv4/udp_tunnel.c dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti);
ti 482 net/l2tp/l2tp_netlink.c int ti = cb->args[0];
ti 487 net/l2tp/l2tp_netlink.c tunnel = l2tp_tunnel_get_nth(net, ti);
ti 499 net/l2tp/l2tp_netlink.c ti++;
ti 503 net/l2tp/l2tp_netlink.c cb->args[0] = ti;
ti 823 net/l2tp/l2tp_netlink.c int ti = cb->args[0];
ti 828 net/l2tp/l2tp_netlink.c tunnel = l2tp_tunnel_get_nth(net, ti);
ti 835 net/l2tp/l2tp_netlink.c ti++;
ti 855 net/l2tp/l2tp_netlink.c cb->args[0] = ti;
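
Both l2tp_netlink.c dump handlers above use the standard netlink dump resume protocol: cb->args[] persists across the successive recvmsg() passes of a single dump, so the index ti is reloaded on entry and written back once the skb fills. Schematically, with foo_get_nth()/foo_fill()/foo_put() as hypothetical stand-ins for the per-subsystem lookup, fill, and refcount-drop helpers:

static int foo_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int ti = cb->args[0];		/* resume where the last pass stopped */
	struct foo_obj *obj;

	for (;;) {
		obj = foo_get_nth(sock_net(skb->sk), ti);	/* hypothetical */
		if (!obj)
			break;			/* no more objects: dump done */
		if (foo_fill(skb, cb, obj) < 0) {
			foo_put(obj);
			break;			/* skb full: resume at ti later */
		}
		foo_put(obj);
		ti++;
	}
	cb->args[0] = ti;
	return skb->len;	/* non-zero means "call me again" */
}
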
ti 1752 net/netfilter/ipvs/ip_vs_sync.c struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
ti 1855 net/netfilter/ipvs/ip_vs_sync.c ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
ti 1857 net/netfilter/ipvs/ip_vs_sync.c if (!ti)
ti 1861 net/netfilter/ipvs/ip_vs_sync.c tinfo = &ti[id];
ti 1889 net/netfilter/ipvs/ip_vs_sync.c ipvs->master_tinfo = ti;
ti 1891 net/netfilter/ipvs/ip_vs_sync.c ipvs->backup_tinfo = ti;
ti 1907 net/netfilter/ipvs/ip_vs_sync.c if (ti) {
ti 1908 net/netfilter/ipvs/ip_vs_sync.c for (tinfo = ti + id; tinfo >= ti; tinfo--) {
ti 1920 net/netfilter/ipvs/ip_vs_sync.c if (ti) {
ti 1921 net/netfilter/ipvs/ip_vs_sync.c for (tinfo = ti + id; tinfo >= ti; tinfo--) {
ti 1926 net/netfilter/ipvs/ip_vs_sync.c kfree(ti);
ti 1945 net/netfilter/ipvs/ip_vs_sync.c struct ip_vs_sync_thread_data *ti, *tinfo;
ti 1956 net/netfilter/ipvs/ip_vs_sync.c ti = ipvs->master_tinfo;
ti 1975 net/netfilter/ipvs/ip_vs_sync.c tinfo = &ti[id];
ti 1990 net/netfilter/ipvs/ip_vs_sync.c ti = ipvs->backup_tinfo;
ti 1997 net/netfilter/ipvs/ip_vs_sync.c tinfo = &ti[id];
ti 2012 net/netfilter/ipvs/ip_vs_sync.c for (tinfo = ti + id; tinfo >= ti; tinfo--) {
ti 2017 net/netfilter/ipvs/ip_vs_sync.c kfree(ti);
ti 1379 net/openvswitch/datapath.c struct table_instance *ti;
ti 1397 net/openvswitch/datapath.c ti = rcu_dereference(dp->table.ti);
ti 1404 net/openvswitch/datapath.c flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
ti 135 net/openvswitch/flow_table.c static void __table_instance_destroy(struct table_instance *ti)
ti 137 net/openvswitch/flow_table.c kvfree(ti->buckets);
ti 138 net/openvswitch/flow_table.c kfree(ti);
ti 143 net/openvswitch/flow_table.c struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
ti 146 net/openvswitch/flow_table.c if (!ti)
ti 149 net/openvswitch/flow_table.c ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
ti 151 net/openvswitch/flow_table.c if (!ti->buckets) {
ti 152 net/openvswitch/flow_table.c kfree(ti);
ti 157 net/openvswitch/flow_table.c INIT_HLIST_HEAD(&ti->buckets[i]);
ti 159 net/openvswitch/flow_table.c ti->n_buckets = new_size;
ti 160 net/openvswitch/flow_table.c ti->node_ver = 0;
ti 161 net/openvswitch/flow_table.c ti->keep_flows = false;
ti 162 net/openvswitch/flow_table.c get_random_bytes(&ti->hash_seed, sizeof(u32));
ti 164 net/openvswitch/flow_table.c return ti;
ti 169 net/openvswitch/flow_table.c struct table_instance *ti, *ufid_ti;
ti 171 net/openvswitch/flow_table.c ti = table_instance_alloc(TBL_MIN_BUCKETS);
ti 173 net/openvswitch/flow_table.c if (!ti)
ti 180 net/openvswitch/flow_table.c rcu_assign_pointer(table->ti, ti);
ti 189 net/openvswitch/flow_table.c __table_instance_destroy(ti);
ti 195 net/openvswitch/flow_table.c struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
ti 197 net/openvswitch/flow_table.c __table_instance_destroy(ti);
ti 200 net/openvswitch/flow_table.c static void table_instance_destroy(struct table_instance *ti,
ti 206 net/openvswitch/flow_table.c if (!ti)
ti 210 net/openvswitch/flow_table.c if (ti->keep_flows)
ti 213 net/openvswitch/flow_table.c for (i = 0; i < ti->n_buckets; i++) {
ti 215 net/openvswitch/flow_table.c struct hlist_head *head = &ti->buckets[i];
ti 217 net/openvswitch/flow_table.c int ver = ti->node_ver;
ti 230 net/openvswitch/flow_table.c call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
ti 233 net/openvswitch/flow_table.c __table_instance_destroy(ti);
ti 243 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_raw(table->ti);
ti 246 net/openvswitch/flow_table.c table_instance_destroy(ti, ufid_ti, false);
ti 249 net/openvswitch/flow_table.c struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
ti 257 net/openvswitch/flow_table.c ver = ti->node_ver;
ti 258 net/openvswitch/flow_table.c while (*bucket < ti->n_buckets) {
ti 260 net/openvswitch/flow_table.c head = &ti->buckets[*bucket];
ti 276 net/openvswitch/flow_table.c static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
ti 278 net/openvswitch/flow_table.c hash = jhash_1word(hash, ti->hash_seed);
ti 279 net/openvswitch/flow_table.c return &ti->buckets[hash & (ti->n_buckets - 1)];
ti 282 net/openvswitch/flow_table.c static void table_instance_insert(struct table_instance *ti,
ti 287 net/openvswitch/flow_table.c head = find_bucket(ti, flow->flow_table.hash);
ti 288 net/openvswitch/flow_table.c hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
ti 291 net/openvswitch/flow_table.c static void ufid_table_instance_insert(struct table_instance *ti,
ti 296 net/openvswitch/flow_table.c head = find_bucket(ti, flow->ufid_table.hash);
ti 297 net/openvswitch/flow_table.c hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
ti 327 net/openvswitch/flow_table.c static struct table_instance *table_instance_rehash(struct table_instance *ti,
ti 336 net/openvswitch/flow_table.c flow_table_copy_flows(ti, new_ti, ufid);
ti 353 net/openvswitch/flow_table.c old_ti = ovsl_dereference(flow_table->ti);
ti 356 net/openvswitch/flow_table.c rcu_assign_pointer(flow_table->ti, new_ti);
ti 426 net/openvswitch/flow_table.c static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
ti 437 net/openvswitch/flow_table.c head = find_bucket(ti, hash);
ti 438 net/openvswitch/flow_table.c hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
ti 450 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
ti 457 net/openvswitch/flow_table.c flow = masked_flow_lookup(ti, key, mask);
ti 475 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
ti 481 net/openvswitch/flow_table.c flow = masked_flow_lookup(ti, match->key, mask);
ti 514 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
ti 520 net/openvswitch/flow_table.c head = find_bucket(ti, hash);
ti 521 net/openvswitch/flow_table.c hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
ti 540 net/openvswitch/flow_table.c static struct table_instance *table_instance_expand(struct table_instance *ti,
ti 543 net/openvswitch/flow_table.c return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
ti 567 net/openvswitch/flow_table.c struct table_instance *ti = ovsl_dereference(table->ti);
ti 571 net/openvswitch/flow_table.c hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
ti 648 net/openvswitch/flow_table.c struct table_instance *ti;
ti 651 net/openvswitch/flow_table.c ti = ovsl_dereference(table->ti);
ti 652 net/openvswitch/flow_table.c table_instance_insert(ti, flow);
ti 656 net/openvswitch/flow_table.c if (table->count > ti->n_buckets)
ti 657 net/openvswitch/flow_table.c new_ti = table_instance_expand(ti, false);
ti 659 net/openvswitch/flow_table.c new_ti = table_instance_rehash(ti, ti->n_buckets, false);
ti 662 net/openvswitch/flow_table.c rcu_assign_pointer(table->ti, new_ti);
ti 663 net/openvswitch/flow_table.c call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
ti 671 net/openvswitch/flow_table.c struct table_instance *ti;
ti 674 net/openvswitch/flow_table.c ti = ovsl_dereference(table->ufid_ti);
ti 675 net/openvswitch/flow_table.c ufid_table_instance_insert(ti, flow);
ti 679 net/openvswitch/flow_table.c if (table->ufid_count > ti->n_buckets) {
ti 682 net/openvswitch/flow_table.c new_ti = table_instance_expand(ti, true);
ti 685 net/openvswitch/flow_table.c call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
ti 35 net/openvswitch/flow_table.h struct table_instance __rcu *ti;
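
The flow_table.c entries above reduce to the classic RCU replace-and-reclaim shape: readers fetch table->ti under RCU, writers build a new table_instance, publish it with rcu_assign_pointer(), and reclaim the old one through call_rcu() once every pre-existing reader has finished. Stripped to its skeleton, with the foo_* types as hypothetical stand-ins:

/* needs <linux/rcupdate.h>, <linux/slab.h>, <linux/mm.h> */
struct foo_inst {
	struct hlist_head *buckets;
	unsigned int n_buckets;
	struct rcu_head rcu;
};

struct foo_table {
	struct foo_inst __rcu *ti;
};

static void foo_inst_free_rcu(struct rcu_head *rcu)
{
	struct foo_inst *old = container_of(rcu, struct foo_inst, rcu);

	kvfree(old->buckets);
	kfree(old);
}

/* writer side; the caller is assumed to hold the table's update lock */
static void foo_table_replace(struct foo_table *tbl, struct foo_inst *new_ti)
{
	struct foo_inst *old = rcu_dereference_protected(tbl->ti, true);

	/* copy entries from old to new_ti here, then publish atomically ... */
	rcu_assign_pointer(tbl->ti, new_ti);
	/* ... and free the old instance only after a grace period */
	call_rcu(&old->rcu, foo_inst_free_rcu);
}

The per-instance hash_seed fed into jhash_1word() (entry 278 above) also means each rehash redistributes the flows with a fresh random seed, which limits bucket-collision attacks across table generations.
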
ti 3607 net/xfrm/xfrm_policy.c int ti = 0;
ti 3620 net/xfrm/xfrm_policy.c if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
ti 3625 net/xfrm/xfrm_policy.c tpp[ti++] = &pols[pi]->xfrm_vec[i];
ti 3627 net/xfrm/xfrm_policy.c xfrm_nr = ti;
ti 733 scripts/asn1_compiler.c const struct type *const *ti = _ti;
ti 734 scripts/asn1_compiler.c const struct type *type = *ti;
ti 668 sound/aoa/codecs/onyx.c struct transfer_info *ti,
ti 684 sound/aoa/codecs/onyx.c switch (ti->tag) {
ti 665 sound/aoa/codecs/tas.c struct transfer_info *ti,
ti 54 sound/aoa/codecs/toonie.c struct transfer_info *ti,
ti 101 sound/aoa/soundbus/i2sbus/pcm.c struct transfer_info *ti = cii->codec->transfers;
ti 104 sound/aoa/soundbus/i2sbus/pcm.c while (ti->formats && ti->rates) {
ti 105 sound/aoa/soundbus/i2sbus/pcm.c v = *ti;
ti 106 sound/aoa/soundbus/i2sbus/pcm.c if (ti->transfer_in == in
ti 107 sound/aoa/soundbus/i2sbus/pcm.c && cii->codec->usable(cii, ti, &v)) {
ti 117 sound/aoa/soundbus/i2sbus/pcm.c ti++;
ti 100 sound/aoa/soundbus/soundbus.h struct transfer_info *ti,
ti 468 sound/core/seq/seq_timer.c struct snd_timer_instance *ti;
ti 479 sound/core/seq/seq_timer.c ti = tmr->timeri;
ti 480 sound/core/seq/seq_timer.c if (!ti)
ti 482 sound/core/seq/seq_timer.c snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
ti 483 sound/core/seq/seq_timer.c resolution = snd_timer_resolution(ti) * tmr->ticks;
ti 239 sound/core/timer.c int snd_timer_open(struct snd_timer_instance **ti,
ti 345 sound/core/timer.c *ti = timeri;
ti 468 sound/core/timer.c static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
ti 470 sound/core/timer.c struct snd_timer *timer = ti->timer;
ti 486 sound/core/timer.c if (ti->ccallback)
ti 487 sound/core/timer.c ti->ccallback(ti, event, &tstamp, resolution);
ti 488 sound/core/timer.c if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
ti 494 sound/core/timer.c list_for_each_entry(ts, &ti->slave_active_head, active_list)
ti 721 sound/core/timer.c struct snd_timer_instance *ti;
ti 724 sound/core/timer.c list_for_each_entry(ti, &timer->active_list_head, active_list) {
ti 725 sound/core/timer.c if (ti->flags & SNDRV_TIMER_IFLG_START) {
ti 726 sound/core/timer.c ti->flags &= ~SNDRV_TIMER_IFLG_START;
ti 727 sound/core/timer.c ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
ti 730 sound/core/timer.c if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
ti 731 sound/core/timer.c if (ticks > ti->cticks)
ti 732 sound/core/timer.c ticks = ti->cticks;
ti 750 sound/core/timer.c struct snd_timer_instance *ti;
ti 754 sound/core/timer.c ti = list_first_entry(head, struct snd_timer_instance,
ti 758 sound/core/timer.c list_del_init(&ti->ack_list);
ti 760 sound/core/timer.c if (!(ti->flags & SNDRV_TIMER_IFLG_DEAD)) {
ti 761 sound/core/timer.c ticks = ti->pticks;
ti 762 sound/core/timer.c ti->pticks = 0;
ti 763 sound/core/timer.c resolution = ti->resolution;
ti 764 sound/core/timer.c ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
ti 766 sound/core/timer.c if (ti->callback)
ti 767 sound/core/timer.c ti->callback(ti, resolution, ticks);
ti 769 sound/core/timer.c ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
ti 813 sound/core/timer.c struct snd_timer_instance *ti, *ts, *tmp;
ti 837 sound/core/timer.c list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
ti 839 sound/core/timer.c if (ti->flags & SNDRV_TIMER_IFLG_DEAD)
ti 841 sound/core/timer.c if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
ti 843 sound/core/timer.c ti->pticks += ticks_left;
ti 844 sound/core/timer.c ti->resolution = resolution;
ti 845 sound/core/timer.c if (ti->cticks < ticks_left)
ti 846 sound/core/timer.c ti->cticks = 0;
ti 848 sound/core/timer.c ti->cticks -= ticks_left;
ti 849 sound/core/timer.c if (ti->cticks) /* not expired */
ti 851 sound/core/timer.c if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
ti 852 sound/core/timer.c ti->cticks = ti->ticks;
ti 854 sound/core/timer.c ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
ti 856 sound/core/timer.c list_del_init(&ti->active_list);
ti 859 sound/core/timer.c (ti->flags & SNDRV_TIMER_IFLG_FAST))
ti 863 sound/core/timer.c if (list_empty(&ti->ack_list))
ti 864 sound/core/timer.c list_add_tail(&ti->ack_list, ack_list_head);
ti 865 sound/core/timer.c list_for_each_entry(ts, &ti->slave_active_head, active_list) {
ti 866 sound/core/timer.c ts->pticks = ti->pticks;
ti 966 sound/core/timer.c struct snd_timer_instance *ti;
ti 970 sound/core/timer.c ti = list_entry(p, struct snd_timer_instance, open_list);
ti 971 sound/core/timer.c ti->timer = NULL;
ti 1032 sound/core/timer.c struct snd_timer_instance *ti;
ti 1037 sound/core/timer.c list_for_each_entry(ti, &timer->open_list_head, open_list) {
ti 1038 sound/core/timer.c if (ti->disconnect)
ti 1039 sound/core/timer.c ti->disconnect(ti);
ti 1049 sound/core/timer.c struct snd_timer_instance *ti, *ts;
ti 1063 sound/core/timer.c list_for_each_entry(ti, &timer->active_list_head, active_list) {
ti 1064 sound/core/timer.c if (ti->ccallback)
ti 1065 sound/core/timer.c ti->ccallback(ti, event, tstamp, resolution);
ti 1066 sound/core/timer.c list_for_each_entry(ts, &ti->slave_active_head, active_list)
ti 1220 sound/core/timer.c struct snd_timer_instance *ti;
ti 1252 sound/core/timer.c list_for_each_entry(ti, &timer->open_list_head, open_list)
ti 1254 sound/core/timer.c ti->owner ? ti->owner : "unknown",
ti 1255 sound/core/timer.c ti->flags & (SNDRV_TIMER_IFLG_START |
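
snd_timer_open(), whose prototype appears above, is how in-kernel clients such as seq_timer.c attach to a timer, and the instance's callback field matches the ti->callback(ti, resolution, ticks) invocation shown in timer.c. A sketch of attaching to the global system timer; the snd_timer_id field values follow the usual global-timer convention and error unwinding is trimmed, so treat this as illustrative rather than a verified client:

/* needs <sound/core.h> and <sound/timer.h> */
static struct snd_timer_instance *example_timeri;

static void example_tick(struct snd_timer_instance *ti,
			 unsigned long resolution, unsigned long ticks)
{
	/* runs from the timer expiry path; keep it short and atomic-safe */
}

static int example_timer_attach(void)
{
	struct snd_timer_id tid;
	int err;

	memset(&tid, 0, sizeof(tid));
	tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
	tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
	tid.card = -1;
	tid.device = SNDRV_TIMER_GLOBAL_SYSTEM;
	err = snd_timer_open(&example_timeri, "example", &tid, 0);
	if (err < 0)
		return err;
	example_timeri->callback = example_tick;
	return snd_timer_start(example_timeri, 100);	/* 100-tick period */
}
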
ti 65 sound/pci/ctxfi/cttimer.c struct ct_timer_instance *ti = from_timer(ti, t, timer);
ti 66 sound/pci/ctxfi/cttimer.c struct snd_pcm_substream *substream = ti->substream;
ti 68 sound/pci/ctxfi/cttimer.c struct ct_atc_pcm *apcm = ti->apcm;
ti 75 sound/pci/ctxfi/cttimer.c dist = (position + buffer_size - ti->position) % buffer_size;
ti 77 sound/pci/ctxfi/cttimer.c position / period_size != ti->position / period_size) {
ti 79 sound/pci/ctxfi/cttimer.c ti->position = position;
ti 85 sound/pci/ctxfi/cttimer.c spin_lock_irqsave(&ti->lock, flags);
ti 86 sound/pci/ctxfi/cttimer.c if (ti->running)
ti 87 sound/pci/ctxfi/cttimer.c mod_timer(&ti->timer, jiffies + interval);
ti 88 sound/pci/ctxfi/cttimer.c spin_unlock_irqrestore(&ti->lock, flags);
ti 91 sound/pci/ctxfi/cttimer.c static void ct_systimer_init(struct ct_timer_instance *ti)
ti 93 sound/pci/ctxfi/cttimer.c timer_setup(&ti->timer, ct_systimer_callback, 0);
ti 96 sound/pci/ctxfi/cttimer.c static void ct_systimer_start(struct ct_timer_instance *ti)
ti 98 sound/pci/ctxfi/cttimer.c struct snd_pcm_runtime *runtime = ti->substream->runtime;
ti 101 sound/pci/ctxfi/cttimer.c spin_lock_irqsave(&ti->lock, flags);
ti 102 sound/pci/ctxfi/cttimer.c ti->running = 1;
ti 103 sound/pci/ctxfi/cttimer.c mod_timer(&ti->timer,
ti 106 sound/pci/ctxfi/cttimer.c spin_unlock_irqrestore(&ti->lock, flags);
ti 109 sound/pci/ctxfi/cttimer.c static void ct_systimer_stop(struct ct_timer_instance *ti)
ti 113 sound/pci/ctxfi/cttimer.c spin_lock_irqsave(&ti->lock, flags);
ti 114 sound/pci/ctxfi/cttimer.c ti->running = 0;
ti 115 sound/pci/ctxfi/cttimer.c del_timer(&ti->timer);
ti 116 sound/pci/ctxfi/cttimer.c spin_unlock_irqrestore(&ti->lock, flags);
ti 119 sound/pci/ctxfi/cttimer.c static void ct_systimer_prepare(struct ct_timer_instance *ti)
ti 121 sound/pci/ctxfi/cttimer.c ct_systimer_stop(ti);
ti 122 sound/pci/ctxfi/cttimer.c try_to_del_timer_sync(&ti->timer);
ti 181 sound/pci/ctxfi/cttimer.c struct ct_timer_instance *ti;
ti 195 sound/pci/ctxfi/cttimer.c list_for_each_entry(ti, &atimer->running_head, running_list) {
ti 196 sound/pci/ctxfi/cttimer.c if (ti->frag_count > diff)
ti 197 sound/pci/ctxfi/cttimer.c ti->frag_count -= diff;
ti 202 sound/pci/ctxfi/cttimer.c period_size = ti->substream->runtime->period_size;
ti 203 sound/pci/ctxfi/cttimer.c rate = ti->substream->runtime->rate;
ti 204 sound/pci/ctxfi/cttimer.c pos = ti->substream->ops->pointer(ti->substream);
ti 205 sound/pci/ctxfi/cttimer.c if (pos / period_size != ti->position / period_size) {
ti 206 sound/pci/ctxfi/cttimer.c ti->need_update = 1;
ti 207 sound/pci/ctxfi/cttimer.c ti->position = pos;
ti 212 sound/pci/ctxfi/cttimer.c ti->frag_count = div_u64((u64)pos * CT_TIMER_FREQ +
ti 215 sound/pci/ctxfi/cttimer.c if (ti->need_update && !can_update)
ti 217 sound/pci/ctxfi/cttimer.c if (ti->frag_count < min_intr)
ti 218 sound/pci/ctxfi/cttimer.c min_intr = ti->frag_count;
ti 231 sound/pci/ctxfi/cttimer.c struct ct_timer_instance *ti;
ti 235 sound/pci/ctxfi/cttimer.c list_for_each_entry(ti, &atimer->instance_head, instance_list) {
ti 236 sound/pci/ctxfi/cttimer.c if (ti->running && ti->need_update) {
ti 237 sound/pci/ctxfi/cttimer.c ti->need_update = 0;
ti 238 sound/pci/ctxfi/cttimer.c ti->apcm->interrupt(ti->apcm);
ti 263 sound/pci/ctxfi/cttimer.c static void ct_xfitimer_prepare(struct ct_timer_instance *ti)
ti 265 sound/pci/ctxfi/cttimer.c ti->frag_count = ti->substream->runtime->period_size;
ti 266 sound/pci/ctxfi/cttimer.c ti->running = 0;
ti 267 sound/pci/ctxfi/cttimer.c ti->need_update = 0;
ti 289 sound/pci/ctxfi/cttimer.c static void ct_xfitimer_start(struct ct_timer_instance *ti)
ti 291 sound/pci/ctxfi/cttimer.c struct ct_timer *atimer = ti->timer_base;
ti 295 sound/pci/ctxfi/cttimer.c if (list_empty(&ti->running_list))
ti 297 sound/pci/ctxfi/cttimer.c ti->running = 1;
ti 298 sound/pci/ctxfi/cttimer.c ti->need_update = 0;
ti 299 sound/pci/ctxfi/cttimer.c list_add(&ti->running_list, &atimer->running_head);
ti 304 sound/pci/ctxfi/cttimer.c static void ct_xfitimer_stop(struct ct_timer_instance *ti)
ti 306 sound/pci/ctxfi/cttimer.c struct ct_timer *atimer = ti->timer_base;
ti 310 sound/pci/ctxfi/cttimer.c list_del_init(&ti->running_list);
ti 311 sound/pci/ctxfi/cttimer.c ti->running = 0;
ti 336 sound/pci/ctxfi/cttimer.c struct ct_timer_instance *ti;
ti 338 sound/pci/ctxfi/cttimer.c ti = kzalloc(sizeof(*ti), GFP_KERNEL);
ti 339 sound/pci/ctxfi/cttimer.c if (!ti)
ti 341 sound/pci/ctxfi/cttimer.c spin_lock_init(&ti->lock);
ti 342 sound/pci/ctxfi/cttimer.c INIT_LIST_HEAD(&ti->instance_list);
ti 343 sound/pci/ctxfi/cttimer.c INIT_LIST_HEAD(&ti->running_list);
ti 344 sound/pci/ctxfi/cttimer.c ti->timer_base = atimer;
ti 345 sound/pci/ctxfi/cttimer.c ti->apcm = apcm;
ti 346 sound/pci/ctxfi/cttimer.c ti->substream = apcm->substream;
ti 348 sound/pci/ctxfi/cttimer.c atimer->ops->init(ti);
ti 351 sound/pci/ctxfi/cttimer.c list_add(&ti->instance_list, &atimer->instance_head);
ti 354 sound/pci/ctxfi/cttimer.c return ti;
ti 357 sound/pci/ctxfi/cttimer.c void ct_timer_prepare(struct ct_timer_instance *ti)
ti 359 sound/pci/ctxfi/cttimer.c if (ti->timer_base->ops->prepare)
ti 360 sound/pci/ctxfi/cttimer.c ti->timer_base->ops->prepare(ti);
ti 361 sound/pci/ctxfi/cttimer.c ti->position = 0;
ti 362 sound/pci/ctxfi/cttimer.c ti->running = 0;
ti 365 sound/pci/ctxfi/cttimer.c void ct_timer_start(struct ct_timer_instance *ti)
ti 367 sound/pci/ctxfi/cttimer.c struct ct_timer *atimer = ti->timer_base;
ti 368 sound/pci/ctxfi/cttimer.c atimer->ops->start(ti);
ti 371 sound/pci/ctxfi/cttimer.c void ct_timer_stop(struct ct_timer_instance *ti)
ti 373 sound/pci/ctxfi/cttimer.c struct ct_timer *atimer = ti->timer_base;
ti 374 sound/pci/ctxfi/cttimer.c atimer->ops->stop(ti);
ti 377 sound/pci/ctxfi/cttimer.c void ct_timer_instance_free(struct ct_timer_instance *ti)
ti 379 sound/pci/ctxfi/cttimer.c struct ct_timer *atimer = ti->timer_base;
ti 381 sound/pci/ctxfi/cttimer.c atimer->ops->stop(ti); /* to be sure */
ti 383 sound/pci/ctxfi/cttimer.c atimer->ops->free_instance(ti);
ti 386 sound/pci/ctxfi/cttimer.c list_del(&ti->instance_list);
ti 389 sound/pci/ctxfi/cttimer.c kfree(ti);
ti 25 sound/pci/ctxfi/cttimer.h void ct_timer_instance_free(struct ct_timer_instance *ti);
ti 26 sound/pci/ctxfi/cttimer.h void ct_timer_start(struct ct_timer_instance *ti);
ti 27 sound/pci/ctxfi/cttimer.h void ct_timer_stop(struct ct_timer_instance *ti);
ti 28 sound/pci/ctxfi/cttimer.h void ct_timer_prepare(struct ct_timer_instance *ti);
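
The ct_systimer_init/start/stop entries above are the stock timer_setup()/mod_timer()/del_timer() idiom: the callback re-arms itself, and a running flag guarded by the same spinlock tells it when to stop. A generic reduction of that idiom, with struct foo_poll as a hypothetical container; call timer_setup() and spin_lock_init() once before first use:

/* needs <linux/timer.h>, <linux/spinlock.h>, <linux/jiffies.h> */
struct foo_poll {
	struct timer_list timer;
	spinlock_t lock;
	int running;
};

static void foo_poll_fn(struct timer_list *t)
{
	struct foo_poll *fp = from_timer(fp, t, timer);
	unsigned long flags;

	/* ... do the periodic work here ... */

	spin_lock_irqsave(&fp->lock, flags);
	if (fp->running)	/* re-arm only while still active */
		mod_timer(&fp->timer, jiffies + msecs_to_jiffies(10));
	spin_unlock_irqrestore(&fp->lock, flags);
}

static void foo_poll_start(struct foo_poll *fp)
{
	unsigned long flags;

	spin_lock_irqsave(&fp->lock, flags);
	fp->running = 1;
	mod_timer(&fp->timer, jiffies + msecs_to_jiffies(10));
	spin_unlock_irqrestore(&fp->lock, flags);
}

static void foo_poll_stop(struct foo_poll *fp)
{
	unsigned long flags;

	spin_lock_irqsave(&fp->lock, flags);
	fp->running = 0;
	/* an in-flight callback may still run once, but it sees running == 0 */
	del_timer(&fp->timer);
	spin_unlock_irqrestore(&fp->lock, flags);
}

This is why ct_systimer_prepare() follows its stop with try_to_del_timer_sync(): del_timer() alone does not wait for a concurrently executing callback, so teardown paths add the synchronous variant.
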