it               1042 arch/alpha/kernel/osf_sys.c SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it)
it               1051 arch/alpha/kernel/osf_sys.c 	if (!error && put_it32(it, &kit))
it                144 arch/arc/include/asm/entry-arcv2.h 	;  - U mode: retrieve it from AUX_USER_SP
it                179 arch/arc/include/asm/entry-arcv2.h 	;  - for K mode, it will be implicitly restored as stack is unwound
it                138 arch/arc/include/asm/entry.h 	; Retrieve orig r25 and save it with rest of callee_regs
it                 46 arch/arc/include/asm/tlb-mmu1.h ;  Slower in thrash case (where it matters) because more code is executed
it                218 arch/arm/include/asm/assembler.h 	add	\tmp, \tmp, #1			@ increment it
it                224 arch/arm/include/asm/assembler.h 	sub	\tmp, \tmp, #1			@ decrement it
it                 17 arch/arm/include/asm/tls.h 	str	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
it                 29 arch/arm/include/asm/tls.h 	strne	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
it                113 arch/arm/mach-ux500/pm.c 	u32 it, im;
it                117 arch/arm/mach-ux500/pm.c 		it = readl(PRCM_ARMITVAL31TO0 + i * 4);
it                119 arch/arm/mach-ux500/pm.c 		if (it & im)
it                 53 arch/arm/probes/decode.h 		unsigned long it = cpsr & mask;
it                 54 arch/arm/probes/decode.h 		it <<= 1;
it                 55 arch/arm/probes/decode.h 		it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */
it                 56 arch/arm/probes/decode.h 		it &= mask;
it                 58 arch/arm/probes/decode.h 		cpsr |= it;
it                573 arch/arm64/kernel/traps.c 	u32 it, pstate = regs->pstate;
it                575 arch/arm64/kernel/traps.c 	it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
it                576 arch/arm64/kernel/traps.c 	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
it                578 arch/arm64/kernel/traps.c 	return it;
it                581 arch/arm64/kernel/traps.c static void compat_set_it_state(struct pt_regs *regs, u32 it)
it                585 arch/arm64/kernel/traps.c 	pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
it                586 arch/arm64/kernel/traps.c 	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
it                598 arch/arm64/kernel/traps.c 		u32 it;
it                600 arch/arm64/kernel/traps.c 		it = compat_get_it_state(regs);
it                601 arch/arm64/kernel/traps.c 		if (!it)
it                604 arch/arm64/kernel/traps.c 		cond = it >> 4;
it                614 arch/arm64/kernel/traps.c 	u32 it;
it                621 arch/arm64/kernel/traps.c 	it  = compat_get_it_state(regs);
it                627 arch/arm64/kernel/traps.c 	if (!(it & 7))
it                628 arch/arm64/kernel/traps.c 		it = 0;
it                630 arch/arm64/kernel/traps.c 		it = (it & 0xe0) | ((it << 1) & 0x1f);
it                632 arch/arm64/kernel/traps.c 	compat_set_it_state(regs, it);
it                112 arch/ia64/include/asm/processor.h 	__u64 it : 1;
it                189 arch/ia64/kernel/brl_emu.c 	if (ia64_psr(regs)->it == 0)
it                360 arch/ia64/kernel/mca.c #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
it                361 arch/ia64/kernel/mca.c #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
it                362 arch/ia64/kernel/mca.c #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
it                363 arch/ia64/kernel/mca.c #define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
it                364 arch/ia64/kernel/mca.c #define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
it                365 arch/ia64/kernel/mca.c #define IA64_LOG_INDEX_INC(it) \
it                366 arch/ia64/kernel/mca.c     {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
it                367 arch/ia64/kernel/mca.c     ia64_state_log[it].isl_count++;}
it                368 arch/ia64/kernel/mca.c #define IA64_LOG_INDEX_DEC(it) \
it                369 arch/ia64/kernel/mca.c     ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
it                370 arch/ia64/kernel/mca.c #define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
it                371 arch/ia64/kernel/mca.c #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
it                372 arch/ia64/kernel/mca.c #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
it                374 arch/ia64/kernel/mca.c static inline void ia64_log_allocate(int it, u64 size)
it                376 arch/ia64/kernel/mca.c 	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
it                378 arch/ia64/kernel/mca.c 	if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
it                381 arch/ia64/kernel/mca.c 	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
it                383 arch/ia64/kernel/mca.c 	if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
it               1374 arch/ia64/kernel/unaligned.c 	       regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
it                 63 arch/m68k/fpsp040/fpsp.h |	because it needs to report an exception back to the user.  This
it                 72 arch/m68k/fpsp040/fpsp.h |	of these registers, it should modify the saved copy and let
it               20598 arch/m68k/ifpsp060/src/fpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
it               20809 arch/m68k/ifpsp060/src/fpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
it               23193 arch/m68k/ifpsp060/src/fpsp.S #  if the exp was positive, and added if it was negative.  The purpose
it               23499 arch/m68k/ifpsp060/src/fpsp.S #		if it is a positive number, or the number of digits	#
it               24765 arch/m68k/ifpsp060/src/fpsp.S # if it's a fmove out instruction, we don't have to fix a7
it                578 arch/m68k/ifpsp060/src/isp.S # if exception occurred in user mode, then we have to restore a7 in case it
it               7499 arch/m68k/ifpsp060/src/pfpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
it               7710 arch/m68k/ifpsp060/src/pfpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
it               13153 arch/m68k/ifpsp060/src/pfpsp.S #  if the exp was positive, and added if it was negative.  The purpose
it               13459 arch/m68k/ifpsp060/src/pfpsp.S #		if it is a positive number, or the number of digits	#
it               14725 arch/m68k/ifpsp060/src/pfpsp.S # if it's a fmove out instruction, we don't have to fix a7
it                261 arch/m68k/include/asm/math-emu.h 	.if	\bit+1
it                263 arch/m68k/include/asm/math-emu.h 	moveq	#\bit,%d0
it                265 arch/m68k/include/asm/math-emu.h 	btst	%d0,fp_debugprint+((31-\bit)/8)
it                267 arch/m68k/include/asm/math-emu.h 	btst	#\bit,fp_debugprint+((31-\bit)/8)
it                289 arch/m68k/include/asm/math-emu.h .Lx1\@:	printf	\bit," %c",1,%d0
it                293 arch/m68k/include/asm/math-emu.h 	printf	\bit,"0."
it                295 arch/m68k/include/asm/math-emu.h .Lx2\@:	printf	\bit,"1."
it                296 arch/m68k/include/asm/math-emu.h .Lx3\@:	printf	\bit,"%08x%08x",2,%d0,%a0@(8)
it                299 arch/m68k/include/asm/math-emu.h 	printf	\bit,"E%04x",1,%d0
it                301 arch/m68k/include/asm/math-emu.h 	printf	\bit," %08x%08x%08x",3,%a0@,%a0@(4),%a0@(8)
it                113 arch/m68k/math-emu/fp_decode.h | it depends on the instr which of the modes is valid
it                133 arch/m68k/math-emu/fp_emu.h 	bset	#(\bit&7),(FPD_FPSR+3-(\bit/8),FPDATA)
it                137 arch/m68k/math-emu/fp_emu.h 	bclr	#(\bit&7),(FPD_FPSR+3-(\bit/8),FPDATA)
it                141 arch/m68k/math-emu/fp_emu.h 	btst	#(\bit&7),(FPD_FPSR+3-(\bit/8),FPDATA)
it                 27 arch/mips/fw/arc/promlib.c 	CHAR it = c;
it                 30 arch/mips/fw/arc/promlib.c 	ArcWrite(1, &it, 1, &cnt);
it                118 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# Keep looping if it isn't me
it                 79 arch/nios2/include/asm/asm-macros.h .if \bit > 31
it                 82 arch/nios2/include/asm/asm-macros.h 	.if \bit < 16
it                 83 arch/nios2/include/asm/asm-macros.h 		andi	\reg1, \reg2, (1 << \bit)
it                 85 arch/nios2/include/asm/asm-macros.h 		andhi	\reg1, \reg2, (1 << (\bit - 16))
it                 98 arch/nios2/include/asm/asm-macros.h 	BT	\reg1, \reg2, \bit
it                110 arch/nios2/include/asm/asm-macros.h 	BT	\reg1, \reg2, \bit
it                122 arch/nios2/include/asm/asm-macros.h .if \bit > 31
it                125 arch/nios2/include/asm/asm-macros.h 	.if \bit < 16
it                126 arch/nios2/include/asm/asm-macros.h 		andi	\reg1, \reg2, (1 << \bit)
it                127 arch/nios2/include/asm/asm-macros.h 		xori	\reg2, \reg2, (1 << \bit)
it                129 arch/nios2/include/asm/asm-macros.h 		andhi	\reg1, \reg2, (1 << (\bit - 16))
it                130 arch/nios2/include/asm/asm-macros.h 		xorhi	\reg2, \reg2, (1 << (\bit - 16))
it                143 arch/nios2/include/asm/asm-macros.h .if \bit > 31
it                146 arch/nios2/include/asm/asm-macros.h 	.if \bit < 16
it                147 arch/nios2/include/asm/asm-macros.h 		andi	\reg1, \reg2, (1 << \bit)
it                148 arch/nios2/include/asm/asm-macros.h 		ori	\reg2, \reg2, (1 << \bit)
it                150 arch/nios2/include/asm/asm-macros.h 		andhi	\reg1, \reg2, (1 << (\bit - 16))
it                151 arch/nios2/include/asm/asm-macros.h 		orhi	\reg2, \reg2, (1 << (\bit - 16))
it                164 arch/nios2/include/asm/asm-macros.h .if \bit > 31
it                167 arch/nios2/include/asm/asm-macros.h 	.if \bit < 16
it                168 arch/nios2/include/asm/asm-macros.h 		andi	\reg1, \reg2, (1 << \bit)
it                169 arch/nios2/include/asm/asm-macros.h 		andi	\reg2, \reg2, %lo(~(1 << \bit))
it                171 arch/nios2/include/asm/asm-macros.h 		andhi	\reg1, \reg2, (1 << (\bit - 16))
it                172 arch/nios2/include/asm/asm-macros.h 		andhi	\reg2, \reg2, %lo(~(1 << (\bit - 16)))
it                186 arch/nios2/include/asm/asm-macros.h 	BTC	\reg1, \reg2, \bit
it                199 arch/nios2/include/asm/asm-macros.h 	BTC	\reg1, \reg2, \bit
it                212 arch/nios2/include/asm/asm-macros.h 	BTS	\reg1, \reg2, \bit
it                225 arch/nios2/include/asm/asm-macros.h 	BTS	\reg1, \reg2, \bit
it                238 arch/nios2/include/asm/asm-macros.h 	BTR	\reg1, \reg2, \bit
it                251 arch/nios2/include/asm/asm-macros.h 	BTR	\reg1, \reg2, \bit
it                431 arch/powerpc/kvm/book3s_64_mmu_radix.c 		unsigned long it;
it                433 arch/powerpc/kvm/book3s_64_mmu_radix.c 		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
it                567 arch/powerpc/perf/hv-24x7.c 		struct event_uniq *it;
it                570 arch/powerpc/perf/hv-24x7.c 		it = rb_entry(*new, struct event_uniq, node);
it                571 arch/powerpc/perf/hv-24x7.c 		result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
it                572 arch/powerpc/perf/hv-24x7.c 					it->domain);
it                580 arch/powerpc/perf/hv-24x7.c 			it->ct++;
it                582 arch/powerpc/perf/hv-24x7.c 						name, it->ct);
it                583 arch/powerpc/perf/hv-24x7.c 			return it->ct;
it                218 arch/x86/kernel/devicetree.c 	struct of_ioapic_type *it;
it                229 arch/x86/kernel/devicetree.c 	it = &of_ioapic_type[type_index];
it                230 arch/x86/kernel/devicetree.c 	ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
it               3302 arch/x86/kvm/mmu.c static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
it               3306 arch/x86/kvm/mmu.c 	u64 spte = *it.sptep;
it               3308 arch/x86/kvm/mmu.c 	if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
it               3329 arch/x86/kvm/mmu.c 	struct kvm_shadow_walk_iterator it;
it               3339 arch/x86/kvm/mmu.c 	for_each_shadow_entry(vcpu, gpa, it) {
it               3344 arch/x86/kvm/mmu.c 		disallowed_hugepage_adjust(it, gfn, &pfn, &level);
it               3346 arch/x86/kvm/mmu.c 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
it               3347 arch/x86/kvm/mmu.c 		if (it.level == level)
it               3350 arch/x86/kvm/mmu.c 		drop_large_spte(vcpu, it.sptep);
it               3351 arch/x86/kvm/mmu.c 		if (!is_shadow_present_pte(*it.sptep)) {
it               3352 arch/x86/kvm/mmu.c 			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
it               3353 arch/x86/kvm/mmu.c 					      it.level - 1, true, ACC_ALL);
it               3355 arch/x86/kvm/mmu.c 			link_shadow_page(vcpu, it.sptep, sp);
it               3361 arch/x86/kvm/mmu.c 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
it               3364 arch/x86/kvm/mmu.c 	direct_pte_prefetch(vcpu, it.sptep);
it                621 arch/x86/kvm/paging_tmpl.h 	struct kvm_shadow_walk_iterator it;
it                643 arch/x86/kvm/paging_tmpl.h 	for (shadow_walk_init(&it, vcpu, addr);
it                644 arch/x86/kvm/paging_tmpl.h 	     shadow_walk_okay(&it) && it.level > gw->level;
it                645 arch/x86/kvm/paging_tmpl.h 	     shadow_walk_next(&it)) {
it                648 arch/x86/kvm/paging_tmpl.h 		clear_sp_write_flooding_count(it.sptep);
it                649 arch/x86/kvm/paging_tmpl.h 		drop_large_spte(vcpu, it.sptep);
it                652 arch/x86/kvm/paging_tmpl.h 		if (!is_shadow_present_pte(*it.sptep)) {
it                653 arch/x86/kvm/paging_tmpl.h 			table_gfn = gw->table_gfn[it.level - 2];
it                654 arch/x86/kvm/paging_tmpl.h 			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
it                662 arch/x86/kvm/paging_tmpl.h 		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
it                666 arch/x86/kvm/paging_tmpl.h 			link_shadow_page(vcpu, it.sptep, sp);
it                678 arch/x86/kvm/paging_tmpl.h 	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
it                679 arch/x86/kvm/paging_tmpl.h 		clear_sp_write_flooding_count(it.sptep);
it                685 arch/x86/kvm/paging_tmpl.h 		disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
it                687 arch/x86/kvm/paging_tmpl.h 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
it                688 arch/x86/kvm/paging_tmpl.h 		if (it.level == hlevel)
it                691 arch/x86/kvm/paging_tmpl.h 		validate_direct_spte(vcpu, it.sptep, direct_access);
it                693 arch/x86/kvm/paging_tmpl.h 		drop_large_spte(vcpu, it.sptep);
it                695 arch/x86/kvm/paging_tmpl.h 		if (!is_shadow_present_pte(*it.sptep)) {
it                697 arch/x86/kvm/paging_tmpl.h 					      it.level - 1, true, direct_access);
it                698 arch/x86/kvm/paging_tmpl.h 			link_shadow_page(vcpu, it.sptep, sp);
it                704 arch/x86/kvm/paging_tmpl.h 	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
it                705 arch/x86/kvm/paging_tmpl.h 			   it.level, base_gfn, pfn, prefault, map_writable);
it                706 arch/x86/kvm/paging_tmpl.h 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
it               1197 block/genhd.c  	__ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
it                193 block/partition-generic.c 	__ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
it               2612 drivers/base/power/domain.c 	struct of_phandle_iterator it;
it               2621 drivers/base/power/domain.c 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
it               2622 drivers/base/power/domain.c 		np = it.node;
it               1365 drivers/block/rbd.c 	struct ceph_bio_iter it = *bio_pos;
it               1367 drivers/block/rbd.c 	ceph_bio_iter_advance(&it, off);
it               1368 drivers/block/rbd.c 	ceph_bio_iter_advance_step(&it, bytes, ({
it               1375 drivers/block/rbd.c 	struct ceph_bvec_iter it = *bvec_pos;
it               1377 drivers/block/rbd.c 	ceph_bvec_iter_advance(&it, off);
it               1378 drivers/block/rbd.c 	ceph_bvec_iter_advance_step(&it, bytes, ({
it               2755 drivers/block/rbd.c 	struct ceph_bio_iter *it = arg;
it               2758 drivers/block/rbd.c 	obj_req->bio_pos = *it;
it               2759 drivers/block/rbd.c 	ceph_bio_iter_advance(it, bytes);
it               2766 drivers/block/rbd.c 	struct ceph_bio_iter *it = arg;
it               2769 drivers/block/rbd.c 	ceph_bio_iter_advance_step(it, bytes, ({
it               2779 drivers/block/rbd.c 	struct ceph_bio_iter *it = arg;
it               2782 drivers/block/rbd.c 	ceph_bio_iter_advance_step(it, bytes, ({
it               2809 drivers/block/rbd.c 	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
it               2811 drivers/block/rbd.c 	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
it               2818 drivers/block/rbd.c 	struct ceph_bvec_iter *it = arg;
it               2820 drivers/block/rbd.c 	obj_req->bvec_pos = *it;
it               2822 drivers/block/rbd.c 	ceph_bvec_iter_advance(it, bytes);
it               2829 drivers/block/rbd.c 	struct ceph_bvec_iter *it = arg;
it               2831 drivers/block/rbd.c 	ceph_bvec_iter_advance_step(it, bytes, ({
it               2840 drivers/block/rbd.c 	struct ceph_bvec_iter *it = arg;
it               2842 drivers/block/rbd.c 	ceph_bvec_iter_advance_step(it, bytes, ({
it               2870 drivers/block/rbd.c 	struct ceph_bvec_iter it = {
it               2877 drivers/block/rbd.c 					 &it);
it               3133 drivers/block/rbd.c 	struct ceph_bvec_iter it = {
it               3138 drivers/block/rbd.c 	ceph_bvec_iter_advance_step(&it, bytes, ({
it                 20 drivers/crypto/caam/error.c 	struct scatterlist *it;
it                 25 drivers/crypto/caam/error.c 	for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
it                 30 drivers/crypto/caam/error.c 		it_page = kmap_atomic(sg_page(it));
it                 36 drivers/crypto/caam/error.c 		buf = it_page + it->offset;
it                 37 drivers/crypto/caam/error.c 		len = min_t(size_t, tlen, it->length);
it                 62 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	struct interval_tree_node	it;
it                 84 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 					     &amn->objects.rb_root, it.rb) {
it                206 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	struct interval_tree_node *it;
it                217 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	it = interval_tree_iter_first(&amn->objects, start, end);
it                218 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	while (it) {
it                226 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		node = container_of(it, struct amdgpu_mn_node, it);
it                227 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		it = interval_tree_iter_next(it, start, end);
it                255 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	struct interval_tree_node *it;
it                263 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	it = interval_tree_iter_first(&amn->objects, start, end);
it                264 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	while (it) {
it                273 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		node = container_of(it, struct amdgpu_mn_node, it);
it                274 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		it = interval_tree_iter_next(it, start, end);
it                384 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	struct interval_tree_node *it;
it                398 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
it                400 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		node = container_of(it, struct amdgpu_mn_node, it);
it                401 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		interval_tree_remove(&node->it, &amn->objects);
it                402 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		addr = min(it->start, addr);
it                403 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		end = max(it->last, end);
it                414 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	node->it.start = addr;
it                415 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	node->it.last = end;
it                420 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	interval_tree_insert(&node->it, &amn->objects);
it                460 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 		interval_tree_remove(&node->it, &amn->objects);
it                461 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_engines_iter it;
it                464 drivers/gpu/drm/i915/gem/i915_gem_context.c 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
it                875 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_engines_iter it;
it                893 drivers/gpu/drm/i915/gem/i915_gem_context.c 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
it               2368 drivers/gpu/drm/i915/gem/i915_gem_context.c i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
it               2370 drivers/gpu/drm/i915/gem/i915_gem_context.c 	const struct i915_gem_engines *e = it->engines;
it               2374 drivers/gpu/drm/i915/gem/i915_gem_context.c 		if (it->idx >= e->num_engines)
it               2377 drivers/gpu/drm/i915/gem/i915_gem_context.c 		ctx = e->engines[it->idx++];
it                213 drivers/gpu/drm/i915/gem/i915_gem_context.h i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
it                217 drivers/gpu/drm/i915/gem/i915_gem_context.h 	it->engines = engines;
it                218 drivers/gpu/drm/i915/gem/i915_gem_context.h 	it->idx = 0;
it                222 drivers/gpu/drm/i915/gem/i915_gem_context.h i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
it                224 drivers/gpu/drm/i915/gem/i915_gem_context.h #define for_each_gem_engine(ce, engines, it) \
it                225 drivers/gpu/drm/i915/gem/i915_gem_context.h 	for (i915_gem_engines_iter_init(&(it), (engines)); \
it                226 drivers/gpu/drm/i915/gem/i915_gem_context.h 	     ((ce) = i915_gem_engines_iter_next(&(it)));)
it                 43 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct interval_tree_node it;
it                 48 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
it                 49 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	interval_tree_insert(&mo->it, &mo->mn->objects);
it                 54 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	if (RB_EMPTY_NODE(&mo->it.rb))
it                 57 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	interval_tree_remove(&mo->it, &mo->mn->objects);
it                 58 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	RB_CLEAR_NODE(&mo->it.rb);
it                 94 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	struct interval_tree_node *it;
it                106 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	it = interval_tree_iter_first(&mn->objects, range->start, end);
it                107 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	while (it) {
it                125 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		obj = container_of(it, struct i915_mmu_object, it)->obj;
it                127 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			it = interval_tree_iter_next(it, range->start, end);
it                167 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		it = interval_tree_iter_first(&mn->objects, range->start, end);
it                277 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mo->it.start = obj->userptr.ptr;
it                278 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
it                279 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	RB_CLEAR_NODE(&mo->it.rb);
it                305 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			struct i915_request *it =
it                306 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 				list_entry(pos, typeof(*it), signal_link);
it                308 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
it                 21 drivers/gpu/drm/i915/gt/intel_engine_user.c 		struct intel_engine_cs *it =
it                 22 drivers/gpu/drm/i915/gt/intel_engine_user.c 			rb_entry(p, typeof(*it), uabi_node);
it                 24 drivers/gpu/drm/i915/gt/intel_engine_user.c 		if (class < it->uabi_class)
it                 26 drivers/gpu/drm/i915/gt/intel_engine_user.c 		else if (class > it->uabi_class ||
it                 27 drivers/gpu/drm/i915/gt/intel_engine_user.c 			 instance > it->uabi_instance)
it                 29 drivers/gpu/drm/i915/gt/intel_engine_user.c 		else if (instance < it->uabi_instance)
it                 32 drivers/gpu/drm/i915/gt/intel_engine_user.c 			return it;
it                197 drivers/gpu/drm/i915/gt/intel_engine_user.c 	struct list_head *it, *next;
it                205 drivers/gpu/drm/i915/gt/intel_engine_user.c 	list_for_each_safe(it, next, &engines) {
it                207 drivers/gpu/drm/i915/gt/intel_engine_user.c 			container_of((struct rb_node *)it, typeof(*engine),
it                 25 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_gem_engines_iter it;
it                 45 drivers/gpu/drm/i915/gt/selftest_lrc.c 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
it               1088 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_gem_engines_iter it;
it               1094 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
it               1156 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_gem_engines_iter it;
it               1177 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
it                126 drivers/gpu/drm/i915/i915_active.c 	struct active_node *it, *n;
it                146 drivers/gpu/drm/i915/i915_active.c 	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
it                147 drivers/gpu/drm/i915/i915_active.c 		GEM_BUG_ON(i915_active_request_isset(&it->base));
it                148 drivers/gpu/drm/i915/i915_active.c 		kmem_cache_free(global.slab_cache, it);
it                408 drivers/gpu/drm/i915/i915_active.c 	struct active_node *it, *n;
it                426 drivers/gpu/drm/i915/i915_active.c 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
it                427 drivers/gpu/drm/i915/i915_active.c 		if (is_barrier(&it->base)) { /* unconnected idle-barrier */
it                432 drivers/gpu/drm/i915/i915_active.c 		err = i915_active_request_retire(&it->base, BKL(ref),
it                462 drivers/gpu/drm/i915/i915_active.c 	struct active_node *it, *n;
it                474 drivers/gpu/drm/i915/i915_active.c 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
it                475 drivers/gpu/drm/i915/i915_active.c 		err = i915_request_await_active_request(rq, &it->base);
it                678 drivers/gpu/drm/i915/i915_active.c 			struct active_node *it;
it                682 drivers/gpu/drm/i915/i915_active.c 			it = rb_entry(parent, struct active_node, node);
it                683 drivers/gpu/drm/i915/i915_active.c 			if (it->timeline < node->timeline)
it                315 drivers/gpu/drm/i915/i915_debugfs.c 		struct i915_gem_engines_iter it;
it                319 drivers/gpu/drm/i915/i915_debugfs.c 				    i915_gem_context_lock_engines(ctx), it) {
it               1577 drivers/gpu/drm/i915/i915_debugfs.c 		struct i915_gem_engines_iter it;
it               1603 drivers/gpu/drm/i915/i915_debugfs.c 				    i915_gem_context_lock_engines(ctx), it) {
it                 54 drivers/gpu/drm/i915/i915_gpu_error.c 			 void *addr, unsigned int len, loff_t it)
it                 59 drivers/gpu/drm/i915/i915_gpu_error.c 	sg->dma_address = it;
it               1206 drivers/gpu/drm/i915/i915_perf.c 	struct i915_gem_engines_iter it;
it               1216 drivers/gpu/drm/i915/i915_perf.c 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
it               1806 drivers/gpu/drm/i915/i915_perf.c 	struct i915_gem_engines_iter it;
it               1810 drivers/gpu/drm/i915/i915_perf.c 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
it                 16 drivers/gpu/drm/i915/i915_scheduler.h #define priolist_for_each_request(it, plist, idx) \
it                 18 drivers/gpu/drm/i915/i915_scheduler.h 		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
it                 20 drivers/gpu/drm/i915/i915_scheduler.h #define priolist_for_each_request_consume(it, n, plist, idx) \
it                 24 drivers/gpu/drm/i915/i915_scheduler.h 		list_for_each_entry_safe(it, n, \
it                 98 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
it                101 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	for (lvl = it->max; lvl >= 0; lvl--) {
it                102 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (lvl >= it->lvl)
it                103 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			buf += sprintf(buf,  "%05x:", it->pte[lvl]);
it                120 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
it                122 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it->flush = min(it->flush, it->max - it->lvl);
it                126 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_flush(struct nvkm_vmm_iter *it)
it                128 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (it->flush != NVKM_VMM_LEVELS_MAX) {
it                129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (it->vmm->func->flush) {
it                130 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			TRA(it, "flush: %d", it->flush);
it                131 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			it->vmm->func->flush(it->vmm, it->flush);
it                133 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		it->flush = NVKM_VMM_LEVELS_MAX;
it                138 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
it                140 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *desc = it->desc;
it                141 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const int type = desc[it->lvl].type == SPT;
it                142 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
it                143 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
it                145 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm *vmm = it->vmm;
it                146 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	u32 pdei = it->pte[it->lvl + 1];
it                149 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it->lvl++;
it                151 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
it                153 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
it                181 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_flush_mark(it);
it                182 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_flush(it);
it                185 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_unref_pdes(it);
it                189 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
it                193 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it->lvl--;
it                197 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
it                200 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
it                203 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm *vmm = it->vmm;
it                243 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
it                251 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
it                258 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
it                260 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *desc = it->desc;
it                262 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pgt = it->pt[0];
it                267 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
it                270 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_flush_mark(it);
it                271 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_flush(it);
it                272 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
it                281 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
it                285 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		it->lvl++;
it                286 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
it                287 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		it->lvl--;
it                288 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_unref_pdes(it);
it                296 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
it                299 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
it                302 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm *vmm = it->vmm;
it                347 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
it                350 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
it                357 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
it                364 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
it                366 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *desc = it->desc;
it                368 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pgt = it->pt[0];
it                375 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
it                394 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
it                396 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pt = it->pt[0];
it                397 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (it->desc->type == PGD)
it                400 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (it->desc->type == LPT)
it                402 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
it                406 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
it                408 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
it                409 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
it                413 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
it                415 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
it                419 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm *vmm = it->vmm;
it                430 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		it->lvl--;
it                431 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_unref_pdes(it);
it                477 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
it                478 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
it                479 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_flush_mark(it);
it                484 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
it                486 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
it                489 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
it                492 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_unref_pdes(it);
it                508 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_iter it;
it                511 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.page = page;
it                512 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.desc = desc;
it                513 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.vmm = vmm;
it                514 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.cnt = size >> page->shift;
it                515 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.flush = NVKM_VMM_LEVELS_MAX;
it                518 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
it                519 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
it                520 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		bits >>= desc[it.lvl].bits;
it                522 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.max = --it.lvl;
it                523 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.pt[it.max] = vmm->pd;
it                525 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.lvl = 0;
it                526 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
it                527 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	         addr, size, page->shift, it.cnt);
it                528 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it.lvl = it.max;
it                531 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	while (it.cnt) {
it                532 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
it                535 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		const u32 ptei = it.pte[0];
it                536 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		const u32 ptes = min_t(u64, it.cnt, pten - ptei);
it                539 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		for (; it.lvl; it.lvl--) {
it                540 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			const u32 pdei = it.pte[it.lvl];
it                545 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
it                548 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
it                556 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
it                557 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
it                563 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
it                570 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				nvkm_vmm_flush_mark(&it);
it                575 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		it.pte[it.lvl] += ptes;
it                576 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		it.cnt -= ptes;
it                577 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (it.cnt) {
it                578 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
it                579 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				it.pte[it.lvl++] = 0;
it                580 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				it.pte[it.lvl]++;
it                585 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_flush(&it);
it                592 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	addr = it.pte[it.max--];
it                594 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		addr  = addr << desc[it.max].bits;
it                595 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		addr |= it.pte[it.max];
it                596 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	} while (it.max--);
it               1806 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
it               1808 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_desc *desc = it->desc;
it               1810 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
it                478 drivers/gpu/drm/radeon/radeon.h 	struct interval_tree_node	it;
it                586 drivers/gpu/drm/radeon/radeon_gem.c 	if (bo_va->it.start)
it                683 drivers/gpu/drm/radeon/radeon_gem.c 		if (bo_va->it.start) {
it                685 drivers/gpu/drm/radeon/radeon_gem.c 			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
it                 48 drivers/gpu/drm/radeon/radeon_mn.c 	struct interval_tree_node	it;
it                 68 drivers/gpu/drm/radeon/radeon_mn.c 	struct interval_tree_node *it;
it                 83 drivers/gpu/drm/radeon/radeon_mn.c 	it = interval_tree_iter_first(&rmn->objects, range->start, end);
it                 84 drivers/gpu/drm/radeon/radeon_mn.c 	while (it) {
it                 94 drivers/gpu/drm/radeon/radeon_mn.c 		node = container_of(it, struct radeon_mn_node, it);
it                 95 drivers/gpu/drm/radeon/radeon_mn.c 		it = interval_tree_iter_next(it, range->start, end);
it                182 drivers/gpu/drm/radeon/radeon_mn.c 	struct interval_tree_node *it;
it                193 drivers/gpu/drm/radeon/radeon_mn.c 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
it                195 drivers/gpu/drm/radeon/radeon_mn.c 		node = container_of(it, struct radeon_mn_node, it);
it                196 drivers/gpu/drm/radeon/radeon_mn.c 		interval_tree_remove(&node->it, &rmn->objects);
it                197 drivers/gpu/drm/radeon/radeon_mn.c 		addr = min(it->start, addr);
it                198 drivers/gpu/drm/radeon/radeon_mn.c 		end = max(it->last, end);
it                212 drivers/gpu/drm/radeon/radeon_mn.c 	node->it.start = addr;
it                213 drivers/gpu/drm/radeon/radeon_mn.c 	node->it.last = end;
it                218 drivers/gpu/drm/radeon/radeon_mn.c 	interval_tree_insert(&node->it, &rmn->objects);
it                249 drivers/gpu/drm/radeon/radeon_mn.c 		interval_tree_remove(&node->it, &rmn->objects);
it                 75 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->soffset = bo_va->it.start;
it                 76 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->eoffset = bo_va->it.last + 1;
it                331 drivers/gpu/drm/radeon/radeon_vm.c 	bo_va->it.start = 0;
it                332 drivers/gpu/drm/radeon/radeon_vm.c 	bo_va->it.last = 0;
it                481 drivers/gpu/drm/radeon/radeon_vm.c 		struct interval_tree_node *it;
it                482 drivers/gpu/drm/radeon/radeon_vm.c 		it = interval_tree_iter_first(&vm->va, soffset, eoffset);
it                483 drivers/gpu/drm/radeon/radeon_vm.c 		if (it && it != &bo_va->it) {
it                485 drivers/gpu/drm/radeon/radeon_vm.c 			tmp = container_of(it, struct radeon_bo_va, it);
it                489 drivers/gpu/drm/radeon/radeon_vm.c 				soffset, tmp->bo, tmp->it.start, tmp->it.last);
it                496 drivers/gpu/drm/radeon/radeon_vm.c 	if (bo_va->it.start || bo_va->it.last) {
it                505 drivers/gpu/drm/radeon/radeon_vm.c 		tmp->it.start = bo_va->it.start;
it                506 drivers/gpu/drm/radeon/radeon_vm.c 		tmp->it.last = bo_va->it.last;
it                510 drivers/gpu/drm/radeon/radeon_vm.c 		interval_tree_remove(&bo_va->it, &vm->va);
it                512 drivers/gpu/drm/radeon/radeon_vm.c 		bo_va->it.start = 0;
it                513 drivers/gpu/drm/radeon/radeon_vm.c 		bo_va->it.last = 0;
it                521 drivers/gpu/drm/radeon/radeon_vm.c 		bo_va->it.start = soffset;
it                522 drivers/gpu/drm/radeon/radeon_vm.c 		bo_va->it.last = eoffset;
it                525 drivers/gpu/drm/radeon/radeon_vm.c 		interval_tree_insert(&bo_va->it, &vm->va);
it                923 drivers/gpu/drm/radeon/radeon_vm.c 	if (!bo_va->it.start) {
it                967 drivers/gpu/drm/radeon/radeon_vm.c 	nptes = bo_va->it.last - bo_va->it.start + 1;
it               1012 drivers/gpu/drm/radeon/radeon_vm.c 	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
it               1013 drivers/gpu/drm/radeon/radeon_vm.c 				  bo_va->it.last + 1, addr,
it               1029 drivers/gpu/drm/radeon/radeon_vm.c 	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
it               1127 drivers/gpu/drm/radeon/radeon_vm.c 	if (bo_va->it.start || bo_va->it.last)
it               1128 drivers/gpu/drm/radeon/radeon_vm.c 		interval_tree_remove(&bo_va->it, &vm->va);
it               1132 drivers/gpu/drm/radeon/radeon_vm.c 	if (bo_va->it.start || bo_va->it.last) {
it               1161 drivers/gpu/drm/radeon/radeon_vm.c 		    (bo_va->it.start || bo_va->it.last))
it               1240 drivers/gpu/drm/radeon/radeon_vm.c 					     &vm->va.rb_root, it.rb) {
it               1241 drivers/gpu/drm/radeon/radeon_vm.c 		interval_tree_remove(&bo_va->it, &vm->va);
it                 92 drivers/gpu/drm/tegra/dc.c 	struct of_phandle_iterator it;
it                 95 drivers/gpu/drm/tegra/dc.c 	of_for_each_phandle(&it, err, np, "nvidia,outputs", NULL, 0)
it                 96 drivers/gpu/drm/tegra/dc.c 		if (it.node == dev->of_node)
it                399 drivers/hwmon/ibmpowernv.c 		struct of_phandle_iterator it;
it                411 drivers/hwmon/ibmpowernv.c 		of_for_each_phandle(&it, rc, sgrp, "sensors", NULL, 0)
it                412 drivers/hwmon/ibmpowernv.c 			if (it.phandle == node->phandle) {
it                413 drivers/hwmon/ibmpowernv.c 				of_node_put(it.node);
it                570 drivers/i2c/busses/i2c-st.c 	int it;
it                576 drivers/i2c/busses/i2c-st.c 	it = __fls(sta & ien);
it                577 drivers/i2c/busses/i2c-st.c 	if (it < 0) {
it                583 drivers/i2c/busses/i2c-st.c 	switch (1 << it) {
it                606 drivers/i2c/busses/i2c-st.c 		it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN;
it                607 drivers/i2c/busses/i2c-st.c 		writel_relaxed(it, i2c_dev->base + SSC_IEN);
it                616 drivers/i2c/busses/i2c-st.c 		it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN;
it                617 drivers/i2c/busses/i2c-st.c 		writel_relaxed(it, i2c_dev->base + SSC_IEN);
it                625 drivers/i2c/busses/i2c-st.c 				"it %d unhandled (sta=0x%04x)\n", it, sta);
it                649 drivers/i2c/busses/i2c-st.c 	u32 ctl, i2c, it;
it                677 drivers/i2c/busses/i2c-st.c 	it = SSC_IEN_NACKEN | SSC_IEN_TEEN | SSC_IEN_ARBLEN;
it                678 drivers/i2c/busses/i2c-st.c 	writel_relaxed(it, i2c_dev->base + SSC_IEN);
it                 43 drivers/iio/light/cm3232.c 	u8 it;
it                141 drivers/iio/light/cm3232.c 		if (als_it == cm3232_als_it_scales[i].it) {
it                172 drivers/iio/light/cm3232.c 			als_it = cm3232_als_it_scales[i].it;
it                342 drivers/iio/light/ltr501.c static int ltr501_set_it_time(struct ltr501_data *data, int it)
it                347 drivers/iio/light/ltr501.c 		if (int_time_mapping[i] == it) {
it                154 drivers/iommu/arm-smmu.c 	struct of_phandle_iterator *it = *(void **)data;
it                155 drivers/iommu/arm-smmu.c 	struct device_node *np = it->node;
it                158 drivers/iommu/arm-smmu.c 	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
it                160 drivers/iommu/arm-smmu.c 		if (it->node == np) {
it                164 drivers/iommu/arm-smmu.c 	it->node = np;
it                176 drivers/iommu/arm-smmu.c 	struct of_phandle_iterator it;
it                177 drivers/iommu/arm-smmu.c 	void *data = &it;
it                188 drivers/iommu/arm-smmu.c 	it.node = np;
it                202 drivers/iommu/arm-smmu.c 		it.cur = &pci_sid;
it                203 drivers/iommu/arm-smmu.c 		it.cur_count = 1;
it                211 drivers/iommu/arm-smmu.c 	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
it                216 drivers/iommu/arm-smmu.c 	of_phandle_iterator_args(&it, sids, it.cur_count);
it                217 drivers/iommu/arm-smmu.c 	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
it                424 drivers/iommu/mtk_iommu_v1.c 	struct of_phandle_iterator it;
it                429 drivers/iommu/mtk_iommu_v1.c 	of_for_each_phandle(&it, err, dev->of_node, "iommus",
it                431 drivers/iommu/mtk_iommu_v1.c 		int count = of_phandle_iterator_args(&it, iommu_spec.args,
it                433 drivers/iommu/mtk_iommu_v1.c 		iommu_spec.np = of_node_get(it.node);
it                559 drivers/iommu/mtk_iommu_v1.c 	struct of_phandle_iterator	it;
it                590 drivers/iommu/mtk_iommu_v1.c 	of_for_each_phandle(&it, err, dev->of_node,
it                593 drivers/iommu/mtk_iommu_v1.c 		int count = of_phandle_iterator_args(&it, larb_spec.args,
it                599 drivers/iommu/mtk_iommu_v1.c 		larb_spec.np = of_node_get(it.node);
it                219 drivers/mtd/nftlmount.c The new DiskOnChip driver already scanned the bad block table.  Just query it.
it                249 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *it;
it                269 drivers/net/ethernet/mellanox/mlx4/alloc.c 	list_for_each_entry(it, &zone_alloc->prios, prio_list)
it                270 drivers/net/ethernet/mellanox/mlx4/alloc.c 		if (it->priority >= priority)
it                273 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
it                274 drivers/net/ethernet/mellanox/mlx4/alloc.c 		list_add_tail(&zone->prio_list, &it->prio_list);
it                275 drivers/net/ethernet/mellanox/mlx4/alloc.c 	list_add_tail(&zone->list, &it->list);
it                307 drivers/net/ethernet/mellanox/mlx4/alloc.c 		struct mlx4_zone_entry *it;
it                309 drivers/net/ethernet/mellanox/mlx4/alloc.c 		list_for_each_entry(it, &zone_alloc->prios, prio_list) {
it                310 drivers/net/ethernet/mellanox/mlx4/alloc.c 			u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);
it                359 drivers/net/ethernet/mellanox/mlx4/alloc.c 		struct mlx4_zone_entry *it = curr_node;
it                361 drivers/net/ethernet/mellanox/mlx4/alloc.c 		list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
it                362 drivers/net/ethernet/mellanox/mlx4/alloc.c 			res = mlx4_bitmap_alloc_range(it->bitmap, count,
it                365 drivers/net/ethernet/mellanox/mlx4/alloc.c 				res += it->offset;
it                366 drivers/net/ethernet/mellanox/mlx4/alloc.c 				uid = it->uid;
it                373 drivers/net/ethernet/mellanox/mlx4/alloc.c 		struct mlx4_zone_entry *it = curr_node;
it                375 drivers/net/ethernet/mellanox/mlx4/alloc.c 		list_for_each_entry_from(it, &zone_alloc->entries, list) {
it                376 drivers/net/ethernet/mellanox/mlx4/alloc.c 			if (unlikely(it == zone))
it                379 drivers/net/ethernet/mellanox/mlx4/alloc.c 			if (unlikely(it->priority != curr_node->priority))
it                382 drivers/net/ethernet/mellanox/mlx4/alloc.c 			res = mlx4_bitmap_alloc_range(it->bitmap, count,
it                385 drivers/net/ethernet/mellanox/mlx4/alloc.c 				res += it->offset;
it                386 drivers/net/ethernet/mellanox/mlx4/alloc.c 				uid = it->uid;
it                344 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct bitmap_iterator it;
it                346 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
it                350 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		return bitmap_iterator_count(&it) +
it                369 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct bitmap_iterator it;
it                371 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
it                377 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
it                378 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                381 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
it                382 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                385 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
it                386 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                391 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	     i++, bitmap_iterator_inc(&it))
it                392 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                396 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
it                397 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                401 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	     i++, bitmap_iterator_inc(&it))
it                402 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                406 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
it                407 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                410 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
it                411 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                414 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
it                415 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                418 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it))
it                419 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (bitmap_iterator_test(&it))
it                450 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct bitmap_iterator it;
it                452 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
it                466 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		     bitmap_iterator_inc(&it))
it                467 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			if (bitmap_iterator_test(&it))
it                472 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		     bitmap_iterator_inc(&it))
it                473 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			if (bitmap_iterator_test(&it))
it                478 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		     bitmap_iterator_inc(&it))
it                479 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			if (bitmap_iterator_test(&it))
it                484 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		     bitmap_iterator_inc(&it))
it                485 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			if (bitmap_iterator_test(&it))
it                490 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		     bitmap_iterator_inc(&it))
it                491 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			if (bitmap_iterator_test(&it))
it                496 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		     bitmap_iterator_inc(&it))
it                497 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			if (bitmap_iterator_test(&it))
it                502 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		     bitmap_iterator_inc(&it))
it                503 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			if (bitmap_iterator_test(&it))
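
The mlx4 en_ethtool.c hits above all follow one filtering pattern: a bitmap_iterator is initialised over priv->stats_bitmap, advanced once per candidate statistic, and only positions whose bit is set are emitted (into the strings table or the stats array). A minimal sketch of that pattern, using the driver-private bitmap_iterator helpers named in the listing; the data-copy line is illustrative rather than quoted:

	struct bitmap_iterator it;
	int i, idx = 0;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	/* one bit per statistic: advance for every candidate, copy only if set */
	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[idx++] = ((unsigned long *)&priv->port_stats)[i];

The same init/inc/test triple is then repeated for the PF, flow, packet, XDP and PHY statistic groups, which is why the iterator rather than a raw bitmap index shows up in every loop above.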
it               1464 drivers/net/tun.c 					    const struct iov_iter *it)
it               1471 drivers/net/tun.c 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
it               1480 drivers/net/tun.c 	linear = iov_iter_single_seg_count(it);
it               1489 drivers/net/tun.c 	for (i = 1; i < it->nr_segs; i++) {
it               1490 drivers/net/tun.c 		size_t fragsz = it->iov[i].iov_len;
it               2950 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	int it;
it               2952 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	for (it = 0; it < ARRAY_SIZE(ar9300_eep_templates); it++)
it               2953 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 		if (ar9300_eep_templates[it]->templateVersion == id)
it               2954 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			return ar9300_eep_templates[it];
it               3133 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	int it, checksum = 0;
it               3135 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	for (it = 0; it < dsize; it++) {
it               3136 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 		checksum += data[it];
it               3149 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	int it;
it               3157 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	for (it = 0; it < size; it += (length+2)) {
it               3158 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 		offset = block[it];
it               3161 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 		length = block[it+1];
it               3167 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 				it, spot, offset, length);
it               3168 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			memcpy(&mptr[spot], &block[it+2], length);
it               3173 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 				it, spot, offset, length);
it               3181 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 				    int it,
it               3201 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			it, length);
it               3216 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			it, reference, length);
it               3276 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	int it;
it               3341 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 	for (it = 0; it < MSTATE; it++) {
it               3367 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			ar9300_compress_decision(ah, it, code, reference, mptr,
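
In ar9003_eeprom.c above, "it" is a plain byte index used three ways: a template lookup by version, a byte-wise checksum, and a block-restore loop that walks (offset, length, payload) records. The checksum part is small enough to show whole; the function name is a hypothetical stand-in, only the loop body comes from the listing:

	static int eep_byte_checksum(const u8 *data, int dsize)
	{
		int it, checksum = 0;

		for (it = 0; it < dsize; it++)
			checksum += data[it];

		return checksum;
	}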
it               1221 drivers/net/wireless/ath/wil6210/debugfs.c 	struct wil_rx_buff *it;
it               1224 drivers/net/wireless/ath/wil6210/debugfs.c 	list_for_each_entry(it, lh, list) {
it               1227 drivers/net/wireless/ath/wil6210/debugfs.c 		seq_printf(s, "[%4d] ", it->id);
it                134 drivers/net/wireless/st/cw1200/scan.c 	struct ieee80211_channel **it;
it                199 drivers/net/wireless/st/cw1200/scan.c 		for (it = priv->scan.curr + 1, i = 1;
it                200 drivers/net/wireless/st/cw1200/scan.c 		     it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS;
it                201 drivers/net/wireless/st/cw1200/scan.c 		     ++it, ++i) {
it                202 drivers/net/wireless/st/cw1200/scan.c 			if ((*it)->band != first->band)
it                204 drivers/net/wireless/st/cw1200/scan.c 			if (((*it)->flags ^ first->flags) &
it                208 drivers/net/wireless/st/cw1200/scan.c 			    (*it)->max_power != first->max_power)
it                221 drivers/net/wireless/st/cw1200/scan.c 		scan.num_channels = it - priv->scan.curr;
it                232 drivers/net/wireless/st/cw1200/scan.c 		scan.ch = kcalloc(it - priv->scan.curr,
it                259 drivers/net/wireless/st/cw1200/scan.c 		priv->scan.curr = it;
it                220 drivers/net/wireless/st/cw1200/txrx.c 	struct tx_policy_cache_entry *it;
it                222 drivers/net/wireless/st/cw1200/txrx.c 	list_for_each_entry(it, &cache->used, link) {
it                223 drivers/net/wireless/st/cw1200/txrx.c 		if (tx_policy_is_equal(wanted, &it->policy))
it                224 drivers/net/wireless/st/cw1200/txrx.c 			return it - cache->cache;
it                227 drivers/net/wireless/st/cw1200/txrx.c 	list_for_each_entry(it, &cache->free, link) {
it                228 drivers/net/wireless/st/cw1200/txrx.c 		if (tx_policy_is_equal(wanted, &it->policy))
it                229 drivers/net/wireless/st/cw1200/txrx.c 			return it - cache->cache;
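
cw1200's tx policy cache (txrx.c above) keeps its entries in a flat array and threads them onto "used" and "free" lists, so a lookup walks both lists and converts a matching entry back into an array index by pointer arithmetic. A sketch of that lookup, with the structure details assumed from the lines above:

	static int tx_policy_find(struct tx_policy_cache *cache,
				  const struct tx_policy *wanted)
	{
		struct tx_policy_cache_entry *it;

		list_for_each_entry(it, &cache->used, link)
			if (tx_policy_is_equal(wanted, &it->policy))
				return it - cache->cache;	/* index into the backing array */

		list_for_each_entry(it, &cache->free, link)
			if (tx_policy_is_equal(wanted, &it->policy))
				return it - cache->cache;

		return -1;	/* no match on either list */
	}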
it               1278 drivers/of/base.c int of_phandle_iterator_init(struct of_phandle_iterator *it,
it               1287 drivers/of/base.c 	memset(it, 0, sizeof(*it));
it               1300 drivers/of/base.c 	it->cells_name = cells_name;
it               1301 drivers/of/base.c 	it->cell_count = cell_count;
it               1302 drivers/of/base.c 	it->parent = np;
it               1303 drivers/of/base.c 	it->list_end = list + size / sizeof(*list);
it               1304 drivers/of/base.c 	it->phandle_end = list;
it               1305 drivers/of/base.c 	it->cur = list;
it               1311 drivers/of/base.c int of_phandle_iterator_next(struct of_phandle_iterator *it)
it               1315 drivers/of/base.c 	if (it->node) {
it               1316 drivers/of/base.c 		of_node_put(it->node);
it               1317 drivers/of/base.c 		it->node = NULL;
it               1320 drivers/of/base.c 	if (!it->cur || it->phandle_end >= it->list_end)
it               1323 drivers/of/base.c 	it->cur = it->phandle_end;
it               1326 drivers/of/base.c 	it->phandle = be32_to_cpup(it->cur++);
it               1328 drivers/of/base.c 	if (it->phandle) {
it               1334 drivers/of/base.c 		it->node = of_find_node_by_phandle(it->phandle);
it               1336 drivers/of/base.c 		if (it->cells_name) {
it               1337 drivers/of/base.c 			if (!it->node) {
it               1339 drivers/of/base.c 				       it->parent);
it               1343 drivers/of/base.c 			if (of_property_read_u32(it->node, it->cells_name,
it               1350 drivers/of/base.c 				if (it->cell_count >= 0) {
it               1351 drivers/of/base.c 					count = it->cell_count;
it               1354 drivers/of/base.c 					       it->parent,
it               1355 drivers/of/base.c 					       it->cells_name,
it               1356 drivers/of/base.c 					       it->node);
it               1361 drivers/of/base.c 			count = it->cell_count;
it               1368 drivers/of/base.c 		if (it->cur + count > it->list_end) {
it               1370 drivers/of/base.c 			       it->parent, it->cells_name,
it               1371 drivers/of/base.c 			       count, it->cell_count);
it               1376 drivers/of/base.c 	it->phandle_end = it->cur + count;
it               1377 drivers/of/base.c 	it->cur_count = count;
it               1382 drivers/of/base.c 	if (it->node) {
it               1383 drivers/of/base.c 		of_node_put(it->node);
it               1384 drivers/of/base.c 		it->node = NULL;
it               1391 drivers/of/base.c int of_phandle_iterator_args(struct of_phandle_iterator *it,
it               1397 drivers/of/base.c 	count = it->cur_count;
it               1403 drivers/of/base.c 		args[i] = be32_to_cpup(it->cur++);
it               1414 drivers/of/base.c 	struct of_phandle_iterator it;
it               1418 drivers/of/base.c 	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
it               1427 drivers/of/base.c 			if (!it.phandle)
it               1433 drivers/of/base.c 				c = of_phandle_iterator_args(&it,
it               1436 drivers/of/base.c 				out_args->np = it.node;
it               1439 drivers/of/base.c 				of_node_put(it.node);
it               1456 drivers/of/base.c 	of_node_put(it.node);
it               1779 drivers/of/base.c 	struct of_phandle_iterator it;
it               1799 drivers/of/base.c 	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
it               1803 drivers/of/base.c 	while ((rc = of_phandle_iterator_next(&it)) == 0)
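
The drivers/of/base.c block above is the core phandle iterator: _init() points the cursor at a property's cell list, _next() consumes one phandle plus its argument cells (resolving the "#...-cells" count through cells_name when one is given), and _args() copies the current argument cells out. Callers normally go through the of_for_each_phandle() wrapper; a minimal sketch, with a "clocks" / "#clock-cells" style property used purely as an example:

	#include <linux/errno.h>
	#include <linux/of.h>

	static int count_phandle_args(struct device_node *np)
	{
		struct of_phandle_iterator it;
		uint32_t args[MAX_PHANDLE_ARGS];
		int err, total = 0;

		of_for_each_phandle(&it, err, np, "clocks", "#clock-cells", 0) {
			/* it.node holds a reference to the target node here */
			total += of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS);
		}

		/* -ENOENT from _next() is the normal end-of-list condition */
		return err == -ENOENT ? total : err;
	}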
it                629 drivers/of/fdt.c int __init of_scan_flat_dt(int (*it)(unsigned long node,
it                648 drivers/of/fdt.c 		rc = it(offset, pathp, depth, data);
it                661 drivers/of/fdt.c 				    int (*it)(unsigned long node,
it                676 drivers/of/fdt.c 		rc = it(node, pathp, data);
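
of_scan_flat_dt() in fdt.c above takes "it" as a callback and invokes it once per node of the flattened device tree until the callback returns non-zero. A sketch of a matching callback, with the "chosen" test standing in for whatever a real caller looks for:

	#include <linux/of_fdt.h>
	#include <linux/string.h>

	static int __init find_chosen(unsigned long node, const char *uname,
				      int depth, void *data)
	{
		if (depth == 1 && strcmp(uname, "chosen") == 0) {
			*(unsigned long *)data = node;
			return 1;	/* non-zero stops the scan */
		}
		return 0;
	}

	/* usage: unsigned long chosen = 0; of_scan_flat_dt(find_chosen, &chosen); */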
it                125 drivers/remoteproc/st_remoteproc.c 	struct of_phandle_iterator it;
it                128 drivers/remoteproc/st_remoteproc.c 	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
it                129 drivers/remoteproc/st_remoteproc.c 	while (of_phandle_iterator_next(&it) == 0) {
it                130 drivers/remoteproc/st_remoteproc.c 		rmem = of_reserved_mem_lookup(it.node);
it                137 drivers/remoteproc/st_remoteproc.c 		if (strcmp(it.node->name, "vdev0buffer")) {
it                144 drivers/remoteproc/st_remoteproc.c 						   it.node->name);
it                150 drivers/remoteproc/st_remoteproc.c 							   it.node->name);
it                203 drivers/remoteproc/stm32_rproc.c 	struct of_phandle_iterator it;
it                210 drivers/remoteproc/stm32_rproc.c 	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
it                211 drivers/remoteproc/stm32_rproc.c 	while (of_phandle_iterator_next(&it) == 0) {
it                212 drivers/remoteproc/stm32_rproc.c 		rmem = of_reserved_mem_lookup(it.node);
it                225 drivers/remoteproc/stm32_rproc.c 		if (strcmp(it.node->name, "vdev0buffer")) {
it                232 drivers/remoteproc/stm32_rproc.c 						   it.node->name);
it                242 drivers/remoteproc/stm32_rproc.c 							   it.node->name);
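
st_remoteproc.c and stm32_rproc.c above use the same phandle iterator to walk their "memory-region" property and resolve each entry to a reserved-memory block, special-casing the node named "vdev0buffer". A condensed sketch of that walk, with carveout registration and most error handling omitted:

	#include <linux/errno.h>
	#include <linux/of.h>
	#include <linux/of_reserved_mem.h>
	#include <linux/printk.h>

	static int walk_memory_regions(struct device_node *np)
	{
		struct of_phandle_iterator it;
		struct reserved_mem *rmem;

		of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
		while (of_phandle_iterator_next(&it) == 0) {
			rmem = of_reserved_mem_lookup(it.node);
			if (!rmem) {
				of_node_put(it.node);	/* drop the ref held by the iterator */
				return -EINVAL;
			}
			pr_debug("%s: base %pa size %pa\n",
				 it.node->name, &rmem->base, &rmem->size);
		}
		return 0;
	}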
it                298 drivers/s390/cio/blacklist.c cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
it                303 drivers/s390/cio/blacklist.c cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
it                311 drivers/s390/cio/blacklist.c 	iter = it;
it                323 drivers/s390/cio/blacklist.c cio_ignore_proc_seq_show(struct seq_file *s, void *it)
it                327 drivers/s390/cio/blacklist.c 	iter = it;
it               2445 drivers/scsi/sg.c 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
it               2447 drivers/scsi/sg.c 	s->private = it;
it               2448 drivers/scsi/sg.c 	if (! it)
it               2451 drivers/scsi/sg.c 	it->index = *pos;
it               2452 drivers/scsi/sg.c 	it->max = sg_last_dev();
it               2453 drivers/scsi/sg.c 	if (it->index >= it->max)
it               2455 drivers/scsi/sg.c 	return it;
it               2460 drivers/scsi/sg.c 	struct sg_proc_deviter * it = s->private;
it               2462 drivers/scsi/sg.c 	*pos = ++it->index;
it               2463 drivers/scsi/sg.c 	return (it->index < it->max) ? it : NULL;
it               2473 drivers/scsi/sg.c 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
it               2479 drivers/scsi/sg.c 	sdp = it ? sg_lookup_dev(it->index) : NULL;
it               2499 drivers/scsi/sg.c 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
it               2505 drivers/scsi/sg.c 	sdp = it ? sg_lookup_dev(it->index) : NULL;
it               2582 drivers/scsi/sg.c 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
it               2586 drivers/scsi/sg.c 	if (it && (0 == it->index))
it               2588 drivers/scsi/sg.c 			   (int)it->max, sg_big_buff);
it               2591 drivers/scsi/sg.c 	sdp = it ? sg_lookup_dev(it->index) : NULL;
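
The sg.c lines above implement the classic counter-style seq_file iterator: a small kmalloc'd cursor holding an index and an upper bound, advanced by next() and consumed by the show() callbacks via sg_lookup_dev(). A generic sketch of the same shape, with my_dev_count() as a hypothetical stand-in for sg_last_dev():

	#include <linux/seq_file.h>
	#include <linux/slab.h>

	struct dev_iter {
		loff_t	index;
		loff_t	max;
	};

	static void *dev_seq_start(struct seq_file *s, loff_t *pos)
	{
		struct dev_iter *it = kmalloc(sizeof(*it), GFP_KERNEL);

		s->private = it;
		if (!it)
			return NULL;
		it->index = *pos;
		it->max = my_dev_count();	/* hypothetical device count helper */
		return it->index < it->max ? it : NULL;
	}

	static void *dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
	{
		struct dev_iter *it = s->private;

		*pos = ++it->index;
		return it->index < it->max ? it : NULL;
	}

	static void dev_seq_stop(struct seq_file *s, void *v)
	{
		kfree(s->private);
		s->private = NULL;
	}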
it                815 drivers/staging/comedi/comedi_fops.c 	struct comedi_devconfig it;
it                833 drivers/staging/comedi/comedi_fops.c 	if (copy_from_user(&it, arg, sizeof(it)))
it                836 drivers/staging/comedi/comedi_fops.c 	it.board_name[COMEDI_NAMELEN - 1] = 0;
it                838 drivers/staging/comedi/comedi_fops.c 	if (it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) {
it                849 drivers/staging/comedi/comedi_fops.c 	return comedi_device_attach(dev, &it);
it               1053 drivers/staging/comedi/comedi_fops.c 	struct comedi_chaninfo it;
it               1056 drivers/staging/comedi/comedi_fops.c 	if (copy_from_user(&it, arg, sizeof(it)))
it               1059 drivers/staging/comedi/comedi_fops.c 	if (it.subdev >= dev->n_subdevices)
it               1061 drivers/staging/comedi/comedi_fops.c 	s = &dev->subdevices[it.subdev];
it               1063 drivers/staging/comedi/comedi_fops.c 	if (it.maxdata_list) {
it               1066 drivers/staging/comedi/comedi_fops.c 		if (copy_to_user(it.maxdata_list, s->maxdata_list,
it               1071 drivers/staging/comedi/comedi_fops.c 	if (it.flaglist)
it               1074 drivers/staging/comedi/comedi_fops.c 	if (it.rangelist) {
it               1082 drivers/staging/comedi/comedi_fops.c 			x = (dev->minor << 28) | (it.subdev << 24) | (i << 16) |
it               1084 drivers/staging/comedi/comedi_fops.c 			if (put_user(x, it.rangelist + i))
it                 55 drivers/staging/comedi/comedi_internal.h 			 struct comedi_devconfig *it);
it                443 drivers/staging/comedi/comedidev.h 	int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it);
it                950 drivers/staging/comedi/drivers.c int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                964 drivers/staging/comedi/drivers.c 			dev->board_ptr = comedi_recognize(driv, it->board_name);
it                967 drivers/staging/comedi/drivers.c 		} else if (strcmp(driv->driver_name, it->board_name) == 0) {
it                996 drivers/staging/comedi/drivers.c 	ret = driv->attach(dev, it);
it                 48 drivers/staging/comedi/drivers/8255.c 			   struct comedi_devconfig *it)
it                 56 drivers/staging/comedi/drivers/8255.c 		iobase = it->options[i];
it                 71 drivers/staging/comedi/drivers/8255.c 		iobase = it->options[i];
it               1435 drivers/staging/comedi/drivers/adl_pci9118.c 					struct comedi_devconfig *it)
it               1438 drivers/staging/comedi/drivers/adl_pci9118.c 	int bus = it->options[0];
it               1439 drivers/staging/comedi/drivers/adl_pci9118.c 	int slot = it->options[1];
it               1650 drivers/staging/comedi/drivers/adl_pci9118.c 			  struct comedi_devconfig *it)
it               1655 drivers/staging/comedi/drivers/adl_pci9118.c 	ext_mux = it->options[2];
it               1656 drivers/staging/comedi/drivers/adl_pci9118.c 	softsshdelay = it->options[4];
it               1658 drivers/staging/comedi/drivers/adl_pci9118.c 	pcidev = pci9118_find_pci(dev, it);
it                177 drivers/staging/comedi/drivers/adq12b.c static int adq12b_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                183 drivers/staging/comedi/drivers/adq12b.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                200 drivers/staging/comedi/drivers/adq12b.c 	if (it->options[2]) {
it                208 drivers/staging/comedi/drivers/adq12b.c 	s->range_table	= it->options[1] ? &range_adq12b_ai_unipolar
it                200 drivers/staging/comedi/drivers/aio_aio12_8.c 			      struct comedi_devconfig *it)
it                206 drivers/staging/comedi/drivers/aio_aio12_8.c 	ret = comedi_request_region(dev, it->options[0], 32);
it                166 drivers/staging/comedi/drivers/aio_iiro_16.c 			      struct comedi_devconfig *it)
it                171 drivers/staging/comedi/drivers/aio_iiro_16.c 	ret = comedi_request_region(dev, it->options[0], 0x8);
it                181 drivers/staging/comedi/drivers/aio_iiro_16.c 	if ((1 << it->options[1]) & 0xdcfc) {
it                182 drivers/staging/comedi/drivers/aio_iiro_16.c 		ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
it                185 drivers/staging/comedi/drivers/aio_iiro_16.c 			dev->irq = it->options[1];
it                241 drivers/staging/comedi/drivers/amplc_dio200.c static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                245 drivers/staging/comedi/drivers/amplc_dio200.c 	ret = comedi_request_region(dev, it->options[0], 0x20);
it                249 drivers/staging/comedi/drivers/amplc_dio200.c 	return amplc_dio200_common_attach(dev, it->options[1], 0);
it                 40 drivers/staging/comedi/drivers/amplc_pc236.c static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                 49 drivers/staging/comedi/drivers/amplc_pc236.c 	ret = comedi_request_region(dev, it->options[0], 0x4);
it                 53 drivers/staging/comedi/drivers/amplc_pc236.c 	return amplc_pc236_common_attach(dev, dev->iobase, it->options[1], 0);
it                 59 drivers/staging/comedi/drivers/amplc_pc263.c static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                 64 drivers/staging/comedi/drivers/amplc_pc263.c 	ret = comedi_request_region(dev, it->options[0], 0x2);
it                240 drivers/staging/comedi/drivers/c6xdigio.c 			   struct comedi_devconfig *it)
it                245 drivers/staging/comedi/drivers/c6xdigio.c 	ret = comedi_request_region(dev, it->options[0], 0x03);
it                169 drivers/staging/comedi/drivers/comedi_bond.c static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
it                181 drivers/staging/comedi/drivers/comedi_bond.c 	for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
it                183 drivers/staging/comedi/drivers/comedi_bond.c 		int minor = it->options[i];
it                274 drivers/staging/comedi/drivers/comedi_bond.c 			  struct comedi_devconfig *it)
it                287 drivers/staging/comedi/drivers/comedi_bond.c 	ret = do_dev_config(dev, it);
it                225 drivers/staging/comedi/drivers/comedi_parport.c 			  struct comedi_devconfig *it)
it                230 drivers/staging/comedi/drivers/comedi_parport.c 	ret = comedi_request_region(dev, it->options[0], 0x03);
it                234 drivers/staging/comedi/drivers/comedi_parport.c 	if (it->options[1]) {
it                235 drivers/staging/comedi/drivers/comedi_parport.c 		ret = request_irq(it->options[1], parport_interrupt, 0,
it                238 drivers/staging/comedi/drivers/comedi_parport.c 			dev->irq = it->options[1];
it                738 drivers/staging/comedi/drivers/comedi_test.c 			   struct comedi_devconfig *it)
it                740 drivers/staging/comedi/drivers/comedi_test.c 	int amplitude = it->options[0];
it                741 drivers/staging/comedi/drivers/comedi_test.c 	int period = it->options[1];
it                102 drivers/staging/comedi/drivers/dac02.c static int dac02_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                107 drivers/staging/comedi/drivers/dac02.c 	ret = comedi_request_region(dev, it->options[0], 0x08);
it                160 drivers/staging/comedi/drivers/das08_isa.c 			    struct comedi_devconfig *it)
it                170 drivers/staging/comedi/drivers/das08_isa.c 	ret = comedi_request_region(dev, it->options[0], board->iosize);
it                901 drivers/staging/comedi/drivers/das16.c static int das16_probe(struct comedi_device *dev, struct comedi_devconfig *it)
it                952 drivers/staging/comedi/drivers/das16.c 						  struct comedi_devconfig *it,
it                956 drivers/staging/comedi/drivers/das16.c 	unsigned int min = it->options[4];
it                957 drivers/staging/comedi/drivers/das16.c 	unsigned int max = it->options[5];
it                988 drivers/staging/comedi/drivers/das16.c 						  struct comedi_devconfig *it)
it                990 drivers/staging/comedi/drivers/das16.c 	unsigned int min = it->options[6];
it                991 drivers/staging/comedi/drivers/das16.c 	unsigned int max = it->options[7];
it               1017 drivers/staging/comedi/drivers/das16.c static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it               1027 drivers/staging/comedi/drivers/das16.c 	if (it->options[3]) {
it               1028 drivers/staging/comedi/drivers/das16.c 		if (it->options[3] != 1 && it->options[3] != 10) {
it               1041 drivers/staging/comedi/drivers/das16.c 		ret = comedi_request_region(dev, it->options[0], board->size);
it               1045 drivers/staging/comedi/drivers/das16.c 		ret = comedi_request_region(dev, it->options[0], 0x10);
it               1058 drivers/staging/comedi/drivers/das16.c 	if (das16_probe(dev, it))
it               1068 drivers/staging/comedi/drivers/das16.c 		if (it->options[3])
it               1069 drivers/staging/comedi/drivers/das16.c 			osc_base = I8254_OSC_BASE_1MHZ / it->options[3];
it               1077 drivers/staging/comedi/drivers/das16.c 	das16_alloc_dma(dev, it->options[2]);
it               1098 drivers/staging/comedi/drivers/das16.c 	s->range_table	= das16_ai_range(dev, s, it, board->ai_pg, status);
it               1116 drivers/staging/comedi/drivers/das16.c 		s->range_table	= das16_ao_range(dev, s, it);
it                505 drivers/staging/comedi/drivers/das16m1.c 			  struct comedi_devconfig *it)
it                515 drivers/staging/comedi/drivers/das16m1.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                526 drivers/staging/comedi/drivers/das16m1.c 	if ((1 << it->options[1]) & 0xdcfc) {
it                527 drivers/staging/comedi/drivers/das16m1.c 		ret = request_irq(it->options[1], das16m1_interrupt, 0,
it                530 drivers/staging/comedi/drivers/das16m1.c 			dev->irq = it->options[1];
it               1046 drivers/staging/comedi/drivers/das1800.c 			     struct comedi_devconfig *it)
it               1057 drivers/staging/comedi/drivers/das1800.c 	dma_chan = &it->options[2];
it               1163 drivers/staging/comedi/drivers/das1800.c 			  struct comedi_devconfig *it)
it               1168 drivers/staging/comedi/drivers/das1800.c 	unsigned int irq = it->options[1];
it               1177 drivers/staging/comedi/drivers/das1800.c 	ret = comedi_request_region(dev, it->options[0], DAS1800_SIZE);
it               1229 drivers/staging/comedi/drivers/das1800.c 	if (dev->irq & it->options[2])
it               1230 drivers/staging/comedi/drivers/das1800.c 		das1800_init_dma(dev, it);
it                554 drivers/staging/comedi/drivers/das6402.c 			  struct comedi_devconfig *it)
it                565 drivers/staging/comedi/drivers/das6402.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                572 drivers/staging/comedi/drivers/das6402.c 	if ((1 << it->options[1]) & 0x8cec) {
it                573 drivers/staging/comedi/drivers/das6402.c 		ret = request_irq(it->options[1], das6402_interrupt, 0,
it                576 drivers/staging/comedi/drivers/das6402.c 			dev->irq = it->options[1];
it                647 drivers/staging/comedi/drivers/das800.c static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                652 drivers/staging/comedi/drivers/das800.c 	unsigned int irq = it->options[1];
it                660 drivers/staging/comedi/drivers/das800.c 	ret = comedi_request_region(dev, it->options[0], 0x8);
it                545 drivers/staging/comedi/drivers/dmm32at.c 			  struct comedi_devconfig *it)
it                550 drivers/staging/comedi/drivers/dmm32at.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                560 drivers/staging/comedi/drivers/dmm32at.c 	if (it->options[1]) {
it                561 drivers/staging/comedi/drivers/dmm32at.c 		ret = request_irq(it->options[1], dmm32at_isr, 0,
it                564 drivers/staging/comedi/drivers/dmm32at.c 			dev->irq = it->options[1];
it                534 drivers/staging/comedi/drivers/dt2801.c static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                543 drivers/staging/comedi/drivers/dt2801.c 	ret = comedi_request_region(dev, it->options[0], 0x2);
it                586 drivers/staging/comedi/drivers/dt2801.c 	if (it->options[2])
it                592 drivers/staging/comedi/drivers/dt2801.c 	s->range_table = ai_range_lkup(board->adrangetype, it->options[3]);
it                602 drivers/staging/comedi/drivers/dt2801.c 	devpriv->dac_range_types[0] = dac_range_lkup(it->options[4]);
it                603 drivers/staging/comedi/drivers/dt2801.c 	devpriv->dac_range_types[1] = dac_range_lkup(it->options[5]);
it                549 drivers/staging/comedi/drivers/dt2811.c static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                560 drivers/staging/comedi/drivers/dt2811.c 	ret = comedi_request_region(dev, it->options[0], 0x8);
it                567 drivers/staging/comedi/drivers/dt2811.c 	if (it->options[1] <= 7  && (BIT(it->options[1]) & 0xac)) {
it                568 drivers/staging/comedi/drivers/dt2811.c 		ret = request_irq(it->options[1], dt2811_interrupt, 0,
it                571 drivers/staging/comedi/drivers/dt2811.c 			dev->irq = it->options[1];
it                582 drivers/staging/comedi/drivers/dt2811.c 			  ((it->options[2] == 1) ? SDF_DIFF :
it                583 drivers/staging/comedi/drivers/dt2811.c 			   (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
it                584 drivers/staging/comedi/drivers/dt2811.c 	s->n_chan	= (it->options[2] == 1) ? 8 : 16;
it                227 drivers/staging/comedi/drivers/dt2814.c static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                234 drivers/staging/comedi/drivers/dt2814.c 	ret = comedi_request_region(dev, it->options[0], 0x2);
it                247 drivers/staging/comedi/drivers/dt2814.c 	if (it->options[1]) {
it                248 drivers/staging/comedi/drivers/dt2814.c 		ret = request_irq(it->options[1], dt2814_interrupt, 0,
it                251 drivers/staging/comedi/drivers/dt2814.c 			dev->irq = it->options[1];
it                140 drivers/staging/comedi/drivers/dt2815.c static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                148 drivers/staging/comedi/drivers/dt2815.c 	ret = comedi_request_region(dev, it->options[0], 0x2);
it                170 drivers/staging/comedi/drivers/dt2815.c 	current_range_type = (it->options[3])
it                172 drivers/staging/comedi/drivers/dt2815.c 	voltage_range_type = (it->options[2])
it                175 drivers/staging/comedi/drivers/dt2815.c 		devpriv->range_type_list[i] = (it->options[5 + i])
it                190 drivers/staging/comedi/drivers/dt2815.c 			program = (it->options[4] & 0x3) << 3 | 0x7;
it                101 drivers/staging/comedi/drivers/dt2817.c static int dt2817_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                106 drivers/staging/comedi/drivers/dt2817.c 	ret = comedi_request_region(dev, it->options[0], 0x5);
it               1030 drivers/staging/comedi/drivers/dt282x.c 			     struct comedi_devconfig *it)
it               1033 drivers/staging/comedi/drivers/dt282x.c 	unsigned int irq_num = it->options[1];
it               1036 drivers/staging/comedi/drivers/dt282x.c 	if (it->options[2] < it->options[3]) {
it               1037 drivers/staging/comedi/drivers/dt282x.c 		dma_chan[0] = it->options[2];
it               1038 drivers/staging/comedi/drivers/dt282x.c 		dma_chan[1] = it->options[3];
it               1040 drivers/staging/comedi/drivers/dt282x.c 		dma_chan[0] = it->options[3];
it               1041 drivers/staging/comedi/drivers/dt282x.c 		dma_chan[1] = it->options[2];
it               1090 drivers/staging/comedi/drivers/dt282x.c static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it               1097 drivers/staging/comedi/drivers/dt282x.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it               1110 drivers/staging/comedi/drivers/dt282x.c 	dt282x_alloc_dma(dev, it);
it               1120 drivers/staging/comedi/drivers/dt282x.c 	if ((it->options[4] && board->adchan_di) || board->adchan_se == 0) {
it               1129 drivers/staging/comedi/drivers/dt282x.c 	s->range_table = opt_ai_range_lkup(board->ispgl, it->options[8]);
it               1130 drivers/staging/comedi/drivers/dt282x.c 	devpriv->ad_2scomp = it->options[5] ? 1 : 0;
it                 99 drivers/staging/comedi/drivers/fl512.c static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                104 drivers/staging/comedi/drivers/fl512.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                416 drivers/staging/comedi/drivers/ii_pci20kc.c 			struct comedi_devconfig *it)
it                424 drivers/staging/comedi/drivers/ii_pci20kc.c 	membase = it->options[0];
it                235 drivers/staging/comedi/drivers/mpc624.c static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                241 drivers/staging/comedi/drivers/mpc624.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                249 drivers/staging/comedi/drivers/mpc624.c 	switch (it->options[1]) {
it                294 drivers/staging/comedi/drivers/mpc624.c 	s->range_table	= (it->options[1] == 0) ? &range_mpc624_bipolar1
it                252 drivers/staging/comedi/drivers/multiq3.c 			  struct comedi_devconfig *it)
it                258 drivers/staging/comedi/drivers/multiq3.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                310 drivers/staging/comedi/drivers/multiq3.c 	s->n_chan	= it->options[2] * 2;
it                638 drivers/staging/comedi/drivers/ni_at_a2150.c 				    struct comedi_devconfig *it)
it                641 drivers/staging/comedi/drivers/ni_at_a2150.c 	unsigned int irq_num = it->options[1];
it                642 drivers/staging/comedi/drivers/ni_at_a2150.c 	unsigned int dma_chan = it->options[2];
it                686 drivers/staging/comedi/drivers/ni_at_a2150.c static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                699 drivers/staging/comedi/drivers/ni_at_a2150.c 	ret = comedi_request_region(dev, it->options[0], 0x1c);
it                710 drivers/staging/comedi/drivers/ni_at_a2150.c 	a2150_alloc_irq_and_dma(dev, it);
it                293 drivers/staging/comedi/drivers/ni_at_ao.c static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                300 drivers/staging/comedi/drivers/ni_at_ao.c 	ret = comedi_request_region(dev, it->options[0], 0x20);
it                323 drivers/staging/comedi/drivers/ni_at_ao.c 	s->range_table	= it->options[3] ? &range_unipolar10 : &range_bipolar10;
it                281 drivers/staging/comedi/drivers/ni_atmio.c 			   struct comedi_devconfig *it)
it                293 drivers/staging/comedi/drivers/ni_atmio.c 	iobase = it->options[0];
it                294 drivers/staging/comedi/drivers/ni_atmio.c 	irq = it->options[1];
it                581 drivers/staging/comedi/drivers/ni_atmio16d.c 			   struct comedi_devconfig *it)
it                588 drivers/staging/comedi/drivers/ni_atmio16d.c 	ret = comedi_request_region(dev, it->options[0], 0x20);
it                603 drivers/staging/comedi/drivers/ni_atmio16d.c 	if (it->options[1]) {
it                604 drivers/staging/comedi/drivers/ni_atmio16d.c 		ret = request_irq(it->options[1], atmio16d_interrupt, 0,
it                607 drivers/staging/comedi/drivers/ni_atmio16d.c 			dev->irq = it->options[1];
it                611 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->adc_mux = it->options[5];
it                612 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->adc_range = it->options[6];
it                614 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->dac0_range = it->options[7];
it                615 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->dac0_reference = it->options[8];
it                616 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->dac0_coding = it->options[9];
it                617 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->dac1_range = it->options[10];
it                618 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->dac1_reference = it->options[11];
it                619 drivers/staging/comedi/drivers/ni_atmio16d.c 	devpriv->dac1_coding = it->options[12];
it                 76 drivers/staging/comedi/drivers/ni_labpc.c static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                 78 drivers/staging/comedi/drivers/ni_labpc.c 	unsigned int irq = it->options[1];
it                 79 drivers/staging/comedi/drivers/ni_labpc.c 	unsigned int dma_chan = it->options[2];
it                 82 drivers/staging/comedi/drivers/ni_labpc.c 	ret = comedi_request_region(dev, it->options[0], 0x20);
it                417 drivers/staging/comedi/drivers/pcl711.c static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                423 drivers/staging/comedi/drivers/pcl711.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                427 drivers/staging/comedi/drivers/pcl711.c 	if (it->options[1] && it->options[1] <= board->maxirq) {
it                428 drivers/staging/comedi/drivers/pcl711.c 		ret = request_irq(it->options[1], pcl711_interrupt, 0,
it                431 drivers/staging/comedi/drivers/pcl711.c 			dev->irq = it->options[1];
it                 96 drivers/staging/comedi/drivers/pcl724.c 			 struct comedi_devconfig *it)
it                111 drivers/staging/comedi/drivers/pcl724.c 	    (it->options[2] == 1 || it->options[2] == 96)) {
it                116 drivers/staging/comedi/drivers/pcl724.c 	ret = comedi_request_region(dev, it->options[0], iorange);
it                309 drivers/staging/comedi/drivers/pcl726.c 			 struct comedi_devconfig *it)
it                318 drivers/staging/comedi/drivers/pcl726.c 	ret = comedi_request_region(dev, it->options[0], board->io_len);
it                330 drivers/staging/comedi/drivers/pcl726.c 	if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
it                331 drivers/staging/comedi/drivers/pcl726.c 		ret = request_irq(it->options[1], pcl726_interrupt, 0,
it                335 drivers/staging/comedi/drivers/pcl726.c 			dev->irq = it->options[1];
it                341 drivers/staging/comedi/drivers/pcl726.c 		unsigned int opt = it->options[2 + i];
it                265 drivers/staging/comedi/drivers/pcl730.c 			 struct comedi_devconfig *it)
it                272 drivers/staging/comedi/drivers/pcl730.c 	ret = comedi_request_region(dev, it->options[0], board->io_range);
it               1018 drivers/staging/comedi/drivers/pcl812.c 				      struct comedi_devconfig *it)
it               1025 drivers/staging/comedi/drivers/pcl812.c 		if (it->options[4] == 1)
it               1031 drivers/staging/comedi/drivers/pcl812.c 		switch (it->options[4]) {
it               1056 drivers/staging/comedi/drivers/pcl812.c 		if (it->options[1] == 1)
it               1062 drivers/staging/comedi/drivers/pcl812.c 		switch (it->options[1]) {
it               1083 drivers/staging/comedi/drivers/pcl812.c 		switch (it->options[1]) {
it               1130 drivers/staging/comedi/drivers/pcl812.c static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it               1143 drivers/staging/comedi/drivers/pcl812.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it               1154 drivers/staging/comedi/drivers/pcl812.c 		if ((1 << it->options[1]) & board->irq_bits) {
it               1155 drivers/staging/comedi/drivers/pcl812.c 			ret = request_irq(it->options[1], pcl812_interrupt, 0,
it               1158 drivers/staging/comedi/drivers/pcl812.c 				dev->irq = it->options[1];
it               1164 drivers/staging/comedi/drivers/pcl812.c 		pcl812_alloc_dma(dev, it->options[2]);
it               1169 drivers/staging/comedi/drivers/pcl812.c 		if (it->options[2] == 1)
it               1174 drivers/staging/comedi/drivers/pcl812.c 		if (it->options[4] == 1)
it               1206 drivers/staging/comedi/drivers/pcl812.c 	pcl812_set_ai_range_table(dev, s, it);
it               1233 drivers/staging/comedi/drivers/pcl812.c 			if (it->options[3] == 1)
it               1242 drivers/staging/comedi/drivers/pcl812.c 			switch (it->options[5]) {
it               1295 drivers/staging/comedi/drivers/pcl812.c 		if (it->options[3] > 0)
it                571 drivers/staging/comedi/drivers/pcl816.c 				     struct comedi_devconfig *it)
it                574 drivers/staging/comedi/drivers/pcl816.c 	unsigned int irq_num = it->options[1];
it                575 drivers/staging/comedi/drivers/pcl816.c 	unsigned int dma_chan = it->options[2];
it                602 drivers/staging/comedi/drivers/pcl816.c static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                613 drivers/staging/comedi/drivers/pcl816.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                618 drivers/staging/comedi/drivers/pcl816.c 	pcl816_alloc_irq_and_dma(dev, it);
it                911 drivers/staging/comedi/drivers/pcl818.c 				      struct comedi_devconfig *it)
it                920 drivers/staging/comedi/drivers/pcl818.c 		if (it->options[4] == 1 || it->options[4] == 10) {
it                925 drivers/staging/comedi/drivers/pcl818.c 		switch (it->options[4]) {
it                981 drivers/staging/comedi/drivers/pcl818.c static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                993 drivers/staging/comedi/drivers/pcl818.c 	ret = comedi_request_region(dev, it->options[0],
it                999 drivers/staging/comedi/drivers/pcl818.c 	if (it->options[1] >= 2 && it->options[1] <= 7) {
it               1000 drivers/staging/comedi/drivers/pcl818.c 		ret = request_irq(it->options[1], pcl818_interrupt, 0,
it               1003 drivers/staging/comedi/drivers/pcl818.c 			dev->irq = it->options[1];
it               1007 drivers/staging/comedi/drivers/pcl818.c 	if (dev->irq && board->has_fifo && it->options[2] == -1)
it               1012 drivers/staging/comedi/drivers/pcl818.c 		pcl818_alloc_dma(dev, it->options[2]);
it               1015 drivers/staging/comedi/drivers/pcl818.c 	if ((it->options[3] == 0) || (it->options[3] == 10))
it               1029 drivers/staging/comedi/drivers/pcl818.c 		if ((it->options[6] == 1) || (it->options[6] == 100))
it               1049 drivers/staging/comedi/drivers/pcl818.c 	pcl818_set_ai_range_table(dev, s, it);
it               1070 drivers/staging/comedi/drivers/pcl818.c 			if ((it->options[4] == 1) || (it->options[4] == 10))
it               1072 drivers/staging/comedi/drivers/pcl818.c 			if (it->options[4] == 2)
it               1075 drivers/staging/comedi/drivers/pcl818.c 			if ((it->options[5] == 1) || (it->options[5] == 10))
it               1077 drivers/staging/comedi/drivers/pcl818.c 			if (it->options[5] == 2)
it                189 drivers/staging/comedi/drivers/pcm3724.c 			  struct comedi_devconfig *it)
it                199 drivers/staging/comedi/drivers/pcm3724.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                103 drivers/staging/comedi/drivers/pcmad.c static int pcmad_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                109 drivers/staging/comedi/drivers/pcmad.c 	ret = comedi_request_region(dev, it->options[0], 0x04);
it                119 drivers/staging/comedi/drivers/pcmad.c 	if (it->options[1]) {
it                130 drivers/staging/comedi/drivers/pcmad.c 	s->range_table	= it->options[2] ? &range_bipolar10 : &range_unipolar5;
it                117 drivers/staging/comedi/drivers/pcmda12.c 			  struct comedi_devconfig *it)
it                123 drivers/staging/comedi/drivers/pcmda12.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                131 drivers/staging/comedi/drivers/pcmda12.c 	devpriv->simultaneous_xfer_mode = it->options[1];
it                665 drivers/staging/comedi/drivers/pcmmio.c static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                671 drivers/staging/comedi/drivers/pcmmio.c 	ret = comedi_request_region(dev, it->options[0], 32);
it                684 drivers/staging/comedi/drivers/pcmmio.c 	if (it->options[1]) {
it                685 drivers/staging/comedi/drivers/pcmmio.c 		ret = request_irq(it->options[1], interrupt_pcmmio, 0,
it                688 drivers/staging/comedi/drivers/pcmmio.c 			dev->irq = it->options[1];
it                520 drivers/staging/comedi/drivers/pcmuio.c static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                528 drivers/staging/comedi/drivers/pcmuio.c 	ret = comedi_request_region(dev, it->options[0],
it                546 drivers/staging/comedi/drivers/pcmuio.c 	if (it->options[1]) {
it                548 drivers/staging/comedi/drivers/pcmuio.c 		ret = request_irq(it->options[1], pcmuio_interrupt, 0,
it                551 drivers/staging/comedi/drivers/pcmuio.c 			dev->irq = it->options[1];
it                555 drivers/staging/comedi/drivers/pcmuio.c 		if (it->options[2] == dev->irq) {
it                557 drivers/staging/comedi/drivers/pcmuio.c 			devpriv->irq2 = it->options[2];
it                558 drivers/staging/comedi/drivers/pcmuio.c 		} else if (it->options[2]) {
it                560 drivers/staging/comedi/drivers/pcmuio.c 			ret = request_irq(it->options[2], pcmuio_interrupt, 0,
it                563 drivers/staging/comedi/drivers/pcmuio.c 				devpriv->irq2 = it->options[2];
it                253 drivers/staging/comedi/drivers/rti800.c static int rti800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                260 drivers/staging/comedi/drivers/rti800.c 	ret = comedi_request_region(dev, it->options[0], 0x10);
it                272 drivers/staging/comedi/drivers/rti800.c 	devpriv->adc_2comp = (it->options[4] == 0);
it                273 drivers/staging/comedi/drivers/rti800.c 	devpriv->dac_2comp[0] = (it->options[6] == 0);
it                274 drivers/staging/comedi/drivers/rti800.c 	devpriv->dac_2comp[1] = (it->options[8] == 0);
it                286 drivers/staging/comedi/drivers/rti800.c 	s->n_chan	= (it->options[2] ? 16 : 8);
it                289 drivers/staging/comedi/drivers/rti800.c 	s->range_table	= (it->options[3] < ARRAY_SIZE(rti800_ai_ranges))
it                290 drivers/staging/comedi/drivers/rti800.c 				? rti800_ai_ranges[it->options[3]]
it                302 drivers/staging/comedi/drivers/rti800.c 			(it->options[5] < ARRAY_SIZE(rti800_ao_ranges))
it                303 drivers/staging/comedi/drivers/rti800.c 				? rti800_ao_ranges[it->options[5]]
it                306 drivers/staging/comedi/drivers/rti800.c 			(it->options[7] < ARRAY_SIZE(rti800_ao_ranges))
it                307 drivers/staging/comedi/drivers/rti800.c 				? rti800_ao_ranges[it->options[7]]
it                 68 drivers/staging/comedi/drivers/rti802.c static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                 75 drivers/staging/comedi/drivers/rti802.c 	ret = comedi_request_region(dev, it->options[0], 0x04);
it                101 drivers/staging/comedi/drivers/rti802.c 		devpriv->dac_coding[i] = (it->options[3 + 2 * i])
it                103 drivers/staging/comedi/drivers/rti802.c 		devpriv->range_type_list[i] = (it->options[2 + 2 * i])
it                550 drivers/staging/comedi/drivers/s526.c static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
it                556 drivers/staging/comedi/drivers/s526.c 	ret = comedi_request_region(dev, it->options[0], 0x40);
it                124 drivers/staging/comedi/drivers/ssv_dnp.c static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
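
Almost every comedi hit from comedi_fops.c down to ssv_dnp.c above is the same legacy attach() pattern: the COMEDI_DEVCONFIG ioctl copies a struct comedi_devconfig in from user space, and the board driver reads its options[] array, where by the convention visible above options[0] is the base I/O address and options[1] the IRQ. A stripped-down sketch of such an attach routine (mydrv_* names are hypothetical, subdevice setup omitted):

	#include <linux/interrupt.h>
	#include "../comedidev.h"

	static irqreturn_t mydrv_interrupt(int irq, void *d)
	{
		return IRQ_HANDLED;	/* real handlers drain FIFOs, push samples, etc. */
	}

	static int mydrv_attach(struct comedi_device *dev,
				struct comedi_devconfig *it)
	{
		int ret;

		ret = comedi_request_region(dev, it->options[0], 0x10);
		if (ret)
			return ret;

		if (it->options[1]) {
			ret = request_irq(it->options[1], mydrv_interrupt, 0,
					  dev->board_name, dev);
			if (ret == 0)
				dev->irq = it->options[1];
		}

		return 0;
	}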
it                 51 drivers/staging/comedi/range.c 	struct comedi_rangeinfo it;
it                 56 drivers/staging/comedi/range.c 	if (copy_from_user(&it, arg, sizeof(struct comedi_rangeinfo)))
it                 58 drivers/staging/comedi/range.c 	subd = (it.range_type >> 24) & 0xf;
it                 59 drivers/staging/comedi/range.c 	chan = (it.range_type >> 16) & 0xff;
it                 76 drivers/staging/comedi/range.c 	if (RANGE_LENGTH(it.range_type) != lr->length) {
it                 79 drivers/staging/comedi/range.c 			RANGE_LENGTH(it.range_type),
it                 80 drivers/staging/comedi/range.c 			lr->length, it.range_type);
it                 84 drivers/staging/comedi/range.c 	if (copy_to_user(it.range_ptr, lr->range,
it                878 drivers/vfio/vfio.c 	struct vfio_device *it, *device = NULL;
it                881 drivers/vfio/vfio.c 	list_for_each_entry(it, &group->device_list, group_next) {
it                882 drivers/vfio/vfio.c 		if (!strcmp(dev_name(it->dev), buf)) {
it                883 drivers/vfio/vfio.c 			device = it;
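
The vfio.c lines above are a straightforward find-by-name walk over a group's device list. A sketch, with the reference-taking step reduced to a comment since only the lookup itself appears in the listing:

	#include <linux/device.h>
	#include <linux/list.h>
	#include <linux/string.h>

	static struct vfio_device *find_device_by_name(struct vfio_group *group,
						       const char *buf)
	{
		struct vfio_device *it, *device = NULL;

		list_for_each_entry(it, &group->device_list, group_next) {
			if (!strcmp(dev_name(it->dev), buf)) {
				device = it;	/* the real code takes a reference here */
				break;
			}
		}
		return device;
	}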
it                498 drivers/video/fbdev/aty/mach64_ct.c 	FIXME: is it relevant for us?
it                130 fs/cifs/cifssmb.c 	struct dfs_cache_tgt_iterator *it = NULL;
it                160 fs/cifs/cifssmb.c 	for (it = dfs_cache_get_tgt_iterator(&tl); it;
it                161 fs/cifs/cifssmb.c 	     it = dfs_cache_get_next_tgt(&tl, it)) {
it                162 fs/cifs/cifssmb.c 		const char *tgt = dfs_cache_get_tgt_name(it);
it                185 fs/cifs/cifssmb.c 		if (it)
it                187 fs/cifs/cifssmb.c 							    it);
it                473 fs/cifs/connect.c 					   struct dfs_cache_tgt_iterator **it)
it                766 fs/cifs/dfs_cache.c 	struct dfs_cache_tgt_iterator *it, *nit;
it                772 fs/cifs/dfs_cache.c 		it = kzalloc(sizeof(*it), GFP_KERNEL);
it                773 fs/cifs/dfs_cache.c 		if (!it) {
it                778 fs/cifs/dfs_cache.c 		it->it_name = kstrndup(t->t_name, strlen(t->t_name),
it                780 fs/cifs/dfs_cache.c 		if (!it->it_name) {
it                781 fs/cifs/dfs_cache.c 			kfree(it);
it                787 fs/cifs/dfs_cache.c 			list_add(&it->it_list, head);
it                789 fs/cifs/dfs_cache.c 			list_add_tail(&it->it_list, head);
it                796 fs/cifs/dfs_cache.c 	list_for_each_entry_safe(it, nit, head, it_list) {
it                797 fs/cifs/dfs_cache.c 		kfree(it->it_name);
it                798 fs/cifs/dfs_cache.c 		kfree(it);
it                928 fs/cifs/dfs_cache.c 			     const struct dfs_cache_tgt_iterator *it)
it                955 fs/cifs/dfs_cache.c 	if (likely(!strcasecmp(it->it_name, t->t_name)))
it                959 fs/cifs/dfs_cache.c 		if (!strcasecmp(t->t_name, it->it_name)) {
it                962 fs/cifs/dfs_cache.c 				 it->it_name);
it                988 fs/cifs/dfs_cache.c 				   const struct dfs_cache_tgt_iterator *it)
it                995 fs/cifs/dfs_cache.c 	if (unlikely(!is_path_valid(path)) || !it)
it               1016 fs/cifs/dfs_cache.c 	if (unlikely(!strcasecmp(it->it_name, t->t_name)))
it               1020 fs/cifs/dfs_cache.c 		if (!strcasecmp(t->t_name, it->it_name)) {
it               1023 fs/cifs/dfs_cache.c 				 it->it_name);
it               1045 fs/cifs/dfs_cache.c 			       const struct dfs_cache_tgt_iterator *it,
it               1053 fs/cifs/dfs_cache.c 	if (!it || !ref)
it               1072 fs/cifs/dfs_cache.c 	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
it               1074 fs/cifs/dfs_cache.c 	rc = setup_ref(path, ce, ref, it->it_name);
it                 39 fs/cifs/dfs_cache.h 				    const struct dfs_cache_tgt_iterator *it);
it                 42 fs/cifs/dfs_cache.h 			       const struct dfs_cache_tgt_iterator *it);
it                 44 fs/cifs/dfs_cache.h 				      const struct dfs_cache_tgt_iterator *it,
it                 54 fs/cifs/dfs_cache.h 		       struct dfs_cache_tgt_iterator *it)
it                 56 fs/cifs/dfs_cache.h 	if (!tl || list_empty(&tl->tl_list) || !it ||
it                 57 fs/cifs/dfs_cache.h 	    list_is_last(&it->it_list, &tl->tl_list))
it                 59 fs/cifs/dfs_cache.h 	return list_next_entry(it, it_list);
it                 74 fs/cifs/dfs_cache.h 	struct dfs_cache_tgt_iterator *it, *nit;
it                 78 fs/cifs/dfs_cache.h 	list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {
it                 79 fs/cifs/dfs_cache.h 		list_del(&it->it_list);
it                 80 fs/cifs/dfs_cache.h 		kfree(it->it_name);
it                 81 fs/cifs/dfs_cache.h 		kfree(it);
it                 87 fs/cifs/dfs_cache.h dfs_cache_get_tgt_name(const struct dfs_cache_tgt_iterator *it)
it                 89 fs/cifs/dfs_cache.h 	return it ? it->it_name : NULL;
it                164 fs/cifs/smb2pdu.c 	struct dfs_cache_tgt_iterator *it = NULL;
it                194 fs/cifs/smb2pdu.c 	for (it = dfs_cache_get_tgt_iterator(&tl); it;
it                195 fs/cifs/smb2pdu.c 	     it = dfs_cache_get_next_tgt(&tl, it)) {
it                196 fs/cifs/smb2pdu.c 		const char *tgt = dfs_cache_get_tgt_name(it);
it                219 fs/cifs/smb2pdu.c 		if (it)
it                221 fs/cifs/smb2pdu.c 							    it);
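
The cifs hits above (cifssmb.c, smb2pdu.c, dfs_cache.[ch]) all revolve around the DFS target-list iterator: dfs_cache_get_tgt_iterator() yields the first target, dfs_cache_get_next_tgt() walks the list, dfs_cache_get_tgt_name() exposes the target's name, and dfs_cache_free_tgts() releases the list. A sketch of the reconnect-style walk, where try_target() is a hypothetical stand-in for the per-target tree-connect attempt and the list is assumed to have been filled by an earlier dfs_cache lookup:

	static int walk_dfs_targets(struct dfs_cache_tgt_list *tl)
	{
		struct dfs_cache_tgt_iterator *it;
		int rc = -ENOENT;

		for (it = dfs_cache_get_tgt_iterator(tl); it;
		     it = dfs_cache_get_next_tgt(tl, it)) {
			const char *tgt = dfs_cache_get_tgt_name(it);

			rc = try_target(tgt);	/* hypothetical */
			if (!rc)
				break;	/* 'it' is the working target; the hint update uses it */
		}

		dfs_cache_free_tgts(tl);
		return rc;
	}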
it                 19 fs/erofs/xattr.c static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
it                 23 fs/erofs/xattr.c 		kunmap(it->page);
it                 25 fs/erofs/xattr.c 		kunmap_atomic(it->kaddr);
it                 27 fs/erofs/xattr.c 	unlock_page(it->page);
it                 28 fs/erofs/xattr.c 	put_page(it->page);
it                 31 fs/erofs/xattr.c static inline void xattr_iter_end_final(struct xattr_iter *it)
it                 33 fs/erofs/xattr.c 	if (!it->page)
it                 36 fs/erofs/xattr.c 	xattr_iter_end(it, true);
it                 42 fs/erofs/xattr.c 	struct xattr_iter it;
it                 89 fs/erofs/xattr.c 	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
it                 90 fs/erofs/xattr.c 	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
it                 92 fs/erofs/xattr.c 	it.page = erofs_get_meta_page(sb, it.blkaddr);
it                 93 fs/erofs/xattr.c 	if (IS_ERR(it.page)) {
it                 94 fs/erofs/xattr.c 		ret = PTR_ERR(it.page);
it                 99 fs/erofs/xattr.c 	it.kaddr = kmap(it.page);
it                102 fs/erofs/xattr.c 	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
it                108 fs/erofs/xattr.c 		xattr_iter_end(&it, atomic_map);
it                114 fs/erofs/xattr.c 	it.ofs += sizeof(struct erofs_xattr_ibody_header);
it                117 fs/erofs/xattr.c 		if (it.ofs >= EROFS_BLKSIZ) {
it                119 fs/erofs/xattr.c 			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
it                120 fs/erofs/xattr.c 			xattr_iter_end(&it, atomic_map);
it                122 fs/erofs/xattr.c 			it.page = erofs_get_meta_page(sb, ++it.blkaddr);
it                123 fs/erofs/xattr.c 			if (IS_ERR(it.page)) {
it                126 fs/erofs/xattr.c 				ret = PTR_ERR(it.page);
it                130 fs/erofs/xattr.c 			it.kaddr = kmap_atomic(it.page);
it                132 fs/erofs/xattr.c 			it.ofs = 0;
it                135 fs/erofs/xattr.c 			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
it                136 fs/erofs/xattr.c 		it.ofs += sizeof(__le32);
it                138 fs/erofs/xattr.c 	xattr_iter_end(&it, atomic_map);
it                163 fs/erofs/xattr.c static inline int xattr_iter_fixup(struct xattr_iter *it)
it                165 fs/erofs/xattr.c 	if (it->ofs < EROFS_BLKSIZ)
it                168 fs/erofs/xattr.c 	xattr_iter_end(it, true);
it                170 fs/erofs/xattr.c 	it->blkaddr += erofs_blknr(it->ofs);
it                172 fs/erofs/xattr.c 	it->page = erofs_get_meta_page(it->sb, it->blkaddr);
it                173 fs/erofs/xattr.c 	if (IS_ERR(it->page)) {
it                174 fs/erofs/xattr.c 		int err = PTR_ERR(it->page);
it                176 fs/erofs/xattr.c 		it->page = NULL;
it                180 fs/erofs/xattr.c 	it->kaddr = kmap_atomic(it->page);
it                181 fs/erofs/xattr.c 	it->ofs = erofs_blkoff(it->ofs);
it                185 fs/erofs/xattr.c static int inline_xattr_iter_begin(struct xattr_iter *it,
it                200 fs/erofs/xattr.c 	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
it                201 fs/erofs/xattr.c 	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
it                203 fs/erofs/xattr.c 	it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
it                204 fs/erofs/xattr.c 	if (IS_ERR(it->page))
it                205 fs/erofs/xattr.c 		return PTR_ERR(it->page);
it                207 fs/erofs/xattr.c 	it->kaddr = kmap_atomic(it->page);
it                215 fs/erofs/xattr.c static int xattr_foreach(struct xattr_iter *it,
it                224 fs/erofs/xattr.c 	err = xattr_iter_fixup(it);
it                233 fs/erofs/xattr.c 	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
it                245 fs/erofs/xattr.c 	it->ofs += sizeof(struct erofs_xattr_entry);
it                249 fs/erofs/xattr.c 	err = op->entry(it, &entry);
it                251 fs/erofs/xattr.c 		it->ofs += entry.e_name_len + value_sz;
it                259 fs/erofs/xattr.c 		if (it->ofs >= EROFS_BLKSIZ) {
it                260 fs/erofs/xattr.c 			DBG_BUGON(it->ofs > EROFS_BLKSIZ);
it                262 fs/erofs/xattr.c 			err = xattr_iter_fixup(it);
it                265 fs/erofs/xattr.c 			it->ofs = 0;
it                268 fs/erofs/xattr.c 		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
it                272 fs/erofs/xattr.c 		err = op->name(it, processed, it->kaddr + it->ofs, slice);
it                274 fs/erofs/xattr.c 			it->ofs += entry.e_name_len - processed + value_sz;
it                278 fs/erofs/xattr.c 		it->ofs += slice;
it                286 fs/erofs/xattr.c 		err = op->alloc_buffer(it, value_sz);
it                288 fs/erofs/xattr.c 			it->ofs += value_sz;
it                294 fs/erofs/xattr.c 		if (it->ofs >= EROFS_BLKSIZ) {
it                295 fs/erofs/xattr.c 			DBG_BUGON(it->ofs > EROFS_BLKSIZ);
it                297 fs/erofs/xattr.c 			err = xattr_iter_fixup(it);
it                300 fs/erofs/xattr.c 			it->ofs = 0;
it                303 fs/erofs/xattr.c 		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
it                305 fs/erofs/xattr.c 		op->value(it, processed, it->kaddr + it->ofs, slice);
it                306 fs/erofs/xattr.c 		it->ofs += slice;
it                312 fs/erofs/xattr.c 	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
it                317 fs/erofs/xattr.c 	struct xattr_iter it;
it                327 fs/erofs/xattr.c 	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
it                329 fs/erofs/xattr.c 	return (it->index != entry->e_name_index ||
it                330 fs/erofs/xattr.c 		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
it                336 fs/erofs/xattr.c 	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
it                338 fs/erofs/xattr.c 	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
it                344 fs/erofs/xattr.c 	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
it                345 fs/erofs/xattr.c 	int err = it->buffer_size < value_sz ? -ERANGE : 0;
it                347 fs/erofs/xattr.c 	it->buffer_size = value_sz;
it                348 fs/erofs/xattr.c 	return !it->buffer ? 1 : err;
it                355 fs/erofs/xattr.c 	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
it                357 fs/erofs/xattr.c 	memcpy(it->buffer + processed, buf, len);
it                367 fs/erofs/xattr.c static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
it                372 fs/erofs/xattr.c 	ret = inline_xattr_iter_begin(&it->it, inode);
it                378 fs/erofs/xattr.c 		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
it                382 fs/erofs/xattr.c 	xattr_iter_end_final(&it->it);
it                384 fs/erofs/xattr.c 	return ret ? ret : it->buffer_size;
it                387 fs/erofs/xattr.c static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
it                399 fs/erofs/xattr.c 		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
it                401 fs/erofs/xattr.c 		if (!i || blkaddr != it->it.blkaddr) {
it                403 fs/erofs/xattr.c 				xattr_iter_end(&it->it, true);
it                405 fs/erofs/xattr.c 			it->it.page = erofs_get_meta_page(sb, blkaddr);
it                406 fs/erofs/xattr.c 			if (IS_ERR(it->it.page))
it                407 fs/erofs/xattr.c 				return PTR_ERR(it->it.page);
it                409 fs/erofs/xattr.c 			it->it.kaddr = kmap_atomic(it->it.page);
it                410 fs/erofs/xattr.c 			it->it.blkaddr = blkaddr;
it                413 fs/erofs/xattr.c 		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
it                418 fs/erofs/xattr.c 		xattr_iter_end_final(&it->it);
it                420 fs/erofs/xattr.c 	return ret ? ret : it->buffer_size;
it                438 fs/erofs/xattr.c 	struct getxattr_iter it;
it                447 fs/erofs/xattr.c 	it.index = index;
it                449 fs/erofs/xattr.c 	it.name.len = strlen(name);
it                450 fs/erofs/xattr.c 	if (it.name.len > EROFS_NAME_LEN)
it                452 fs/erofs/xattr.c 	it.name.name = name;
it                454 fs/erofs/xattr.c 	it.buffer = buffer;
it                455 fs/erofs/xattr.c 	it.buffer_size = buffer_size;
it                457 fs/erofs/xattr.c 	it.it.sb = inode->i_sb;
it                458 fs/erofs/xattr.c 	ret = inline_getxattr(inode, &it);
it                460 fs/erofs/xattr.c 		ret = shared_getxattr(inode, &it);
it                524 fs/erofs/xattr.c 	struct xattr_iter it;
it                534 fs/erofs/xattr.c 	struct listxattr_iter *it =
it                535 fs/erofs/xattr.c 		container_of(_it, struct listxattr_iter, it);
it                542 fs/erofs/xattr.c 	if (!h || (h->list && !h->list(it->dentry)))
it                548 fs/erofs/xattr.c 	if (!it->buffer) {
it                549 fs/erofs/xattr.c 		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
it                553 fs/erofs/xattr.c 	if (it->buffer_ofs + prefix_len
it                554 fs/erofs/xattr.c 		+ entry->e_name_len + 1 > it->buffer_size)
it                557 fs/erofs/xattr.c 	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
it                558 fs/erofs/xattr.c 	it->buffer_ofs += prefix_len;
it                565 fs/erofs/xattr.c 	struct listxattr_iter *it =
it                566 fs/erofs/xattr.c 		container_of(_it, struct listxattr_iter, it);
it                568 fs/erofs/xattr.c 	memcpy(it->buffer + it->buffer_ofs, buf, len);
it                569 fs/erofs/xattr.c 	it->buffer_ofs += len;
it                576 fs/erofs/xattr.c 	struct listxattr_iter *it =
it                577 fs/erofs/xattr.c 		container_of(_it, struct listxattr_iter, it);
it                579 fs/erofs/xattr.c 	it->buffer[it->buffer_ofs++] = '\0';
it                590 fs/erofs/xattr.c static int inline_listxattr(struct listxattr_iter *it)
it                595 fs/erofs/xattr.c 	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
it                601 fs/erofs/xattr.c 		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
it                605 fs/erofs/xattr.c 	xattr_iter_end_final(&it->it);
it                606 fs/erofs/xattr.c 	return ret ? ret : it->buffer_ofs;
it                609 fs/erofs/xattr.c static int shared_listxattr(struct listxattr_iter *it)
it                611 fs/erofs/xattr.c 	struct inode *const inode = d_inode(it->dentry);
it                622 fs/erofs/xattr.c 		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
it                623 fs/erofs/xattr.c 		if (!i || blkaddr != it->it.blkaddr) {
it                625 fs/erofs/xattr.c 				xattr_iter_end(&it->it, true);
it                627 fs/erofs/xattr.c 			it->it.page = erofs_get_meta_page(sb, blkaddr);
it                628 fs/erofs/xattr.c 			if (IS_ERR(it->it.page))
it                629 fs/erofs/xattr.c 				return PTR_ERR(it->it.page);
it                631 fs/erofs/xattr.c 			it->it.kaddr = kmap_atomic(it->it.page);
it                632 fs/erofs/xattr.c 			it->it.blkaddr = blkaddr;
it                635 fs/erofs/xattr.c 		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
it                640 fs/erofs/xattr.c 		xattr_iter_end_final(&it->it);
it                642 fs/erofs/xattr.c 	return ret ? ret : it->buffer_ofs;
it                649 fs/erofs/xattr.c 	struct listxattr_iter it;
it                657 fs/erofs/xattr.c 	it.dentry = dentry;
it                658 fs/erofs/xattr.c 	it.buffer = buffer;
it                659 fs/erofs/xattr.c 	it.buffer_size = buffer_size;
it                660 fs/erofs/xattr.c 	it.buffer_ofs = 0;
it                662 fs/erofs/xattr.c 	it.it.sb = dentry->d_sb;
it                664 fs/erofs/xattr.c 	ret = inline_listxattr(&it);
it                667 fs/erofs/xattr.c 	return shared_listxattr(&it);
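
The erofs xattr lines above drive one generic walker, xattr_foreach(), through per-caller callbacks (op->name, op->alloc_buffer, op->value) that recover their specialized iterator from the embedded struct xattr_iter with container_of. Below is a minimal, self-contained userspace sketch of that embedded-iterator pattern; it is illustration only, and every name in it (base_iter, getfoo_iter, match_name) is invented rather than taken from erofs.

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Generic cursor, embedded at a known offset in a specialized iterator. */
	struct base_iter {
		unsigned int ofs;
	};

	struct getfoo_iter {
		struct base_iter it;	/* recovered by callbacks via container_of */
		const char *name;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* A callback sees only the base iterator, like op->name() above. */
	static int match_name(struct base_iter *_it, const char *entry)
	{
		struct getfoo_iter *it = container_of(_it, struct getfoo_iter, it);

		return strcmp(entry, it->name) ? -1 : 0;
	}

	int main(void)
	{
		struct getfoo_iter it = { .it = { .ofs = 0 }, .name = "user.foo" };

		printf("%d\n", match_name(&it.it, "user.foo"));	/* prints 0 */
		return 0;
	}
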
it                139 fs/xfs/xfs_quota.h #define xfs_qm_vop_rename_dqattach(it)					(0)
it                 92 include/linux/ceph/messenger.h #define __ceph_bio_iter_advance_step(it, n, STEP) do {			      \
it                 96 include/linux/ceph/messenger.h 		BUG_ON(!(it)->iter.bi_size);				      \
it                 97 include/linux/ceph/messenger.h 		__cur_n = min((it)->iter.bi_size, __n);			      \
it                 99 include/linux/ceph/messenger.h 		bio_advance_iter((it)->bio, &(it)->iter, __cur_n);	      \
it                100 include/linux/ceph/messenger.h 		if (!(it)->iter.bi_size && (it)->bio->bi_next) {	      \
it                102 include/linux/ceph/messenger.h 			(it)->bio = (it)->bio->bi_next;			      \
it                103 include/linux/ceph/messenger.h 			(it)->iter = (it)->bio->bi_iter;		      \
it                112 include/linux/ceph/messenger.h #define ceph_bio_iter_advance(it, n)					      \
it                113 include/linux/ceph/messenger.h 	__ceph_bio_iter_advance_step(it, n, 0)
it                118 include/linux/ceph/messenger.h #define ceph_bio_iter_advance_step(it, n, BVEC_STEP)			      \
it                119 include/linux/ceph/messenger.h 	__ceph_bio_iter_advance_step(it, n, ({				      \
it                123 include/linux/ceph/messenger.h 		__cur_iter = (it)->iter;				      \
it                125 include/linux/ceph/messenger.h 		__bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
it                136 include/linux/ceph/messenger.h #define __ceph_bvec_iter_advance_step(it, n, STEP) do {			      \
it                137 include/linux/ceph/messenger.h 	BUG_ON((n) > (it)->iter.bi_size);				      \
it                139 include/linux/ceph/messenger.h 	bvec_iter_advance((it)->bvecs, &(it)->iter, (n));		      \
it                145 include/linux/ceph/messenger.h #define ceph_bvec_iter_advance(it, n)					      \
it                146 include/linux/ceph/messenger.h 	__ceph_bvec_iter_advance_step(it, n, 0)
it                151 include/linux/ceph/messenger.h #define ceph_bvec_iter_advance_step(it, n, BVEC_STEP)			      \
it                152 include/linux/ceph/messenger.h 	__ceph_bvec_iter_advance_step(it, n, ({				      \
it                156 include/linux/ceph/messenger.h 		__cur_iter = (it)->iter;				      \
it                158 include/linux/ceph/messenger.h 		for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter)	      \
it                162 include/linux/ceph/messenger.h #define ceph_bvec_iter_shorten(it, n) do {				      \
it                163 include/linux/ceph/messenger.h 	BUG_ON((n) > (it)->iter.bi_size);				      \
it                164 include/linux/ceph/messenger.h 	(it)->iter.bi_size = (n);					      \
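
The ceph macros above step a cursor through a chain of bios: advance within the current bio's iterator, and when it runs dry hop to bio->bi_next and reload the iterator. The standalone C sketch below mirrors only that advance-and-hop shape over a made-up buffer chain; struct buf, struct chain_iter and chain_iter_advance() are invented for illustration and are not ceph API.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct buf {
		struct buf *next;
		size_t size;
	};

	struct chain_iter {
		struct buf *cur;
		size_t off;		/* consumed bytes within cur */
	};

	/* Advance by n bytes, hopping to the next buffer when one is exhausted. */
	static void chain_iter_advance(struct chain_iter *it, size_t n)
	{
		while (n) {
			size_t cur_n = it->cur->size - it->off;

			assert(cur_n);		/* mirrors the BUG_ON(!bi_size) above */
			if (cur_n > n)
				cur_n = n;
			it->off += cur_n;
			n -= cur_n;
			if (it->off == it->cur->size && it->cur->next) {
				it->cur = it->cur->next;
				it->off = 0;
			}
		}
	}

	int main(void)
	{
		struct buf b2 = { .next = NULL, .size = 8 };
		struct buf b1 = { .next = &b2, .size = 4 };
		struct chain_iter it = { .cur = &b1, .off = 0 };

		chain_iter_advance(&it, 6);	/* crosses into the second buffer */
		printf("%zu\n", it.off);	/* prints 2 */
		return 0;
	}
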
it                156 include/linux/cgroup.h 			 struct css_task_iter *it);
it                157 include/linux/cgroup.h struct task_struct *css_task_iter_next(struct css_task_iter *it);
it                158 include/linux/cgroup.h void css_task_iter_end(struct css_task_iter *it);
it                 39 include/linux/cma.h extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
it                669 include/linux/compat.h 				     struct compat_itimerval __user *it);
it                403 include/linux/mroute_base.h 		     struct mr_mfc_iter *it, loff_t pos);
it                410 include/linux/mroute_base.h 	struct mr_mfc_iter *it = seq->private;
it                412 include/linux/mroute_base.h 	it->mrt = mrt;
it                413 include/linux/mroute_base.h 	it->cache = NULL;
it                414 include/linux/mroute_base.h 	it->lock = lock;
it                423 include/linux/mroute_base.h 	struct mr_mfc_iter *it = seq->private;
it                424 include/linux/mroute_base.h 	struct mr_table *mrt = it->mrt;
it                426 include/linux/mroute_base.h 	if (it->cache == &mrt->mfc_unres_queue)
it                427 include/linux/mroute_base.h 		spin_unlock_bh(it->lock);
it                428 include/linux/mroute_base.h 	else if (it->cache == &mrt->mfc_cache_list)
it                450 include/linux/mroute_base.h 				   struct mr_mfc_iter *it, loff_t pos)
it                380 include/linux/of.h extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
it                386 include/linux/of.h extern int of_phandle_iterator_next(struct of_phandle_iterator *it);
it                387 include/linux/of.h extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
it                876 include/linux/of.h static inline int of_phandle_iterator_init(struct of_phandle_iterator *it,
it                885 include/linux/of.h static inline int of_phandle_iterator_next(struct of_phandle_iterator *it)
it                890 include/linux/of.h static inline int of_phandle_iterator_args(struct of_phandle_iterator *it,
it               1210 include/linux/of.h #define of_for_each_phandle(it, err, np, ln, cn, cc)			\
it               1211 include/linux/of.h 	for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)),	\
it               1212 include/linux/of.h 	     err = of_phandle_iterator_next(it);			\
it               1214 include/linux/of.h 	     err = of_phandle_iterator_next(it))
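
of_for_each_phandle() above just packages of_phandle_iterator_init() plus repeated of_phandle_iterator_next() into a for loop; the net/dsa/dsa2.c lines further down show the usual consumer shape. A hedged kernel-style fragment of that shape follows; it assumes a struct device_node *np from the surrounding driver, and the property name "link" and helper handle_node() are illustrative only.

	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, np, "link", NULL, 0) {
		/* it.node holds a reference to the current phandle target */
		if (handle_node(it.node) < 0) {
			of_node_put(it.node);	/* drop the reference on early exit */
			break;
		}
	}
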
it                 45 include/linux/of_fdt.h extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
it                 49 include/linux/of_fdt.h 				    int (*it)(unsigned long node,
it                220 include/linux/posix-timers.h 	} it;
it                138 include/linux/sched/signal.h 	struct cpu_itimer it[2];
it                 16 include/linux/time.h int get_itimerspec64(struct itimerspec64 *it,
it                 18 include/linux/time.h int put_itimerspec64(const struct itimerspec64 *it,
it                 14 include/sound/wavefront.h      it is necessary to pack the "wavefront_alias" structure to a size
it               1288 ipc/msg.c      static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
it               1292 ipc/msg.c      	struct kern_ipc_perm *ipcp = it;
it                174 ipc/sem.c      static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
it               2412 ipc/sem.c      static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
it               2415 ipc/sem.c      	struct kern_ipc_perm *ipcp = it;
it                 96 ipc/shm.c      static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
it               1739 ipc/shm.c      static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
it               1743 ipc/shm.c      	struct kern_ipc_perm *ipcp = it;
it                784 ipc/util.c     static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
it                788 ipc/util.c     	struct kern_ipc_perm *ipc = it;
it                827 ipc/util.c     static void sysvipc_proc_stop(struct seq_file *s, void *it)
it                829 ipc/util.c     	struct kern_ipc_perm *ipc = it;
it                843 ipc/util.c     static int sysvipc_proc_show(struct seq_file *s, void *it)
it                848 ipc/util.c     	if (it == SEQ_START_TOKEN) {
it                853 ipc/util.c     	return iface->show(s, it);
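
The sysvipc seq_file callbacks above all treat their void *it argument as an opaque cursor and cast it back to struct kern_ipc_perm *, with SEQ_START_TOKEN marking the header row. A hedged kernel-style fragment of that show-callback pattern (not the real ipc code; the output format is illustrative):

	static int foo_proc_show(struct seq_file *s, void *it)
	{
		struct kern_ipc_perm *ipcp = it;

		if (it == SEQ_START_TOKEN) {
			seq_puts(s, "       key        id\n");
			return 0;
		}
		seq_printf(s, "%10d %10d\n", ipcp->key, ipcp->id);
		return 0;
	}
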
it                719 kernel/bpf/core.c 	unsigned int it = 0;
it                727 kernel/bpf/core.c 		if (it++ != symnum)
it                102 kernel/cgroup/cgroup-v1.c 	struct css_task_iter it;
it                132 kernel/cgroup/cgroup-v1.c 		css_task_iter_start(&from->self, 0, &it);
it                135 kernel/cgroup/cgroup-v1.c 			task = css_task_iter_next(&it);
it                140 kernel/cgroup/cgroup-v1.c 		css_task_iter_end(&it);
it                336 kernel/cgroup/cgroup-v1.c 	struct css_task_iter it;
it                353 kernel/cgroup/cgroup-v1.c 	css_task_iter_start(&cgrp->self, 0, &it);
it                354 kernel/cgroup/cgroup-v1.c 	while ((tsk = css_task_iter_next(&it))) {
it                365 kernel/cgroup/cgroup-v1.c 	css_task_iter_end(&it);
it                694 kernel/cgroup/cgroup-v1.c 	struct css_task_iter it;
it                718 kernel/cgroup/cgroup-v1.c 	css_task_iter_start(&cgrp->self, 0, &it);
it                719 kernel/cgroup/cgroup-v1.c 	while ((tsk = css_task_iter_next(&it))) {
it                739 kernel/cgroup/cgroup-v1.c 	css_task_iter_end(&it);
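
The css_task_iter_start()/css_task_iter_next()/css_task_iter_end() triple declared in include/linux/cgroup.h above is the standard way to walk every task attached to a css; cgroup-v1.c here, and cpuset, the freezers, sched/core.c, memcontrol and netclassid below, all follow the same loop. A hedged fragment of that loop, assuming a struct cgroup_subsys_state *css from the caller:

	struct css_task_iter it;
	struct task_struct *task;
	unsigned int count = 0;

	css_task_iter_start(css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		count++;		/* task is pinned by the iterator here */
	css_task_iter_end(&it);
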
it                218 kernel/cgroup/cgroup.c static void css_task_iter_skip(struct css_task_iter *it,
it                857 kernel/cgroup/cgroup.c 	struct css_task_iter *it, *pos;
it                859 kernel/cgroup/cgroup.c 	list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
it                860 kernel/cgroup/cgroup.c 		css_task_iter_skip(it, task);
it               4391 kernel/cgroup/cgroup.c static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
it               4400 kernel/cgroup/cgroup.c 	if (it->tcset_pos) {
it               4401 kernel/cgroup/cgroup.c 		l = it->tcset_pos->next;
it               4403 kernel/cgroup/cgroup.c 		if (l != it->tcset_head) {
it               4404 kernel/cgroup/cgroup.c 			it->tcset_pos = l;
it               4409 kernel/cgroup/cgroup.c 		it->tcset_pos = NULL;
it               4413 kernel/cgroup/cgroup.c 	l = it->cset_pos;
it               4415 kernel/cgroup/cgroup.c 	if (l == it->cset_head) {
it               4416 kernel/cgroup/cgroup.c 		it->cset_pos = NULL;
it               4420 kernel/cgroup/cgroup.c 	if (it->ss) {
it               4421 kernel/cgroup/cgroup.c 		cset = container_of(l, struct css_set, e_cset_node[it->ss->id]);
it               4427 kernel/cgroup/cgroup.c 	it->cset_pos = l;
it               4430 kernel/cgroup/cgroup.c 	if (it->flags & CSS_TASK_ITER_THREADED) {
it               4431 kernel/cgroup/cgroup.c 		if (it->cur_dcset)
it               4432 kernel/cgroup/cgroup.c 			put_css_set_locked(it->cur_dcset);
it               4433 kernel/cgroup/cgroup.c 		it->cur_dcset = cset;
it               4436 kernel/cgroup/cgroup.c 		it->tcset_head = &cset->threaded_csets;
it               4437 kernel/cgroup/cgroup.c 		it->tcset_pos = &cset->threaded_csets;
it               4449 kernel/cgroup/cgroup.c static void css_task_iter_advance_css_set(struct css_task_iter *it)
it               4457 kernel/cgroup/cgroup.c 		cset = css_task_iter_next_css_set(it);
it               4459 kernel/cgroup/cgroup.c 			it->task_pos = NULL;
it               4465 kernel/cgroup/cgroup.c 		it->task_pos = cset->tasks.next;
it               4466 kernel/cgroup/cgroup.c 		it->cur_tasks_head = &cset->tasks;
it               4468 kernel/cgroup/cgroup.c 		it->task_pos = cset->mg_tasks.next;
it               4469 kernel/cgroup/cgroup.c 		it->cur_tasks_head = &cset->mg_tasks;
it               4471 kernel/cgroup/cgroup.c 		it->task_pos = cset->dying_tasks.next;
it               4472 kernel/cgroup/cgroup.c 		it->cur_tasks_head = &cset->dying_tasks;
it               4475 kernel/cgroup/cgroup.c 	it->tasks_head = &cset->tasks;
it               4476 kernel/cgroup/cgroup.c 	it->mg_tasks_head = &cset->mg_tasks;
it               4477 kernel/cgroup/cgroup.c 	it->dying_tasks_head = &cset->dying_tasks;
it               4494 kernel/cgroup/cgroup.c 	if (it->cur_cset) {
it               4495 kernel/cgroup/cgroup.c 		list_del(&it->iters_node);
it               4496 kernel/cgroup/cgroup.c 		put_css_set_locked(it->cur_cset);
it               4499 kernel/cgroup/cgroup.c 	it->cur_cset = cset;
it               4500 kernel/cgroup/cgroup.c 	list_add(&it->iters_node, &cset->task_iters);
it               4503 kernel/cgroup/cgroup.c static void css_task_iter_skip(struct css_task_iter *it,
it               4508 kernel/cgroup/cgroup.c 	if (it->task_pos == &task->cg_list) {
it               4509 kernel/cgroup/cgroup.c 		it->task_pos = it->task_pos->next;
it               4510 kernel/cgroup/cgroup.c 		it->flags |= CSS_TASK_ITER_SKIPPED;
it               4514 kernel/cgroup/cgroup.c static void css_task_iter_advance(struct css_task_iter *it)
it               4520 kernel/cgroup/cgroup.c 	if (it->task_pos) {
it               4526 kernel/cgroup/cgroup.c 		if (it->flags & CSS_TASK_ITER_SKIPPED)
it               4527 kernel/cgroup/cgroup.c 			it->flags &= ~CSS_TASK_ITER_SKIPPED;
it               4529 kernel/cgroup/cgroup.c 			it->task_pos = it->task_pos->next;
it               4531 kernel/cgroup/cgroup.c 		if (it->task_pos == it->tasks_head) {
it               4532 kernel/cgroup/cgroup.c 			it->task_pos = it->mg_tasks_head->next;
it               4533 kernel/cgroup/cgroup.c 			it->cur_tasks_head = it->mg_tasks_head;
it               4535 kernel/cgroup/cgroup.c 		if (it->task_pos == it->mg_tasks_head) {
it               4536 kernel/cgroup/cgroup.c 			it->task_pos = it->dying_tasks_head->next;
it               4537 kernel/cgroup/cgroup.c 			it->cur_tasks_head = it->dying_tasks_head;
it               4539 kernel/cgroup/cgroup.c 		if (it->task_pos == it->dying_tasks_head)
it               4540 kernel/cgroup/cgroup.c 			css_task_iter_advance_css_set(it);
it               4543 kernel/cgroup/cgroup.c 		css_task_iter_advance_css_set(it);
it               4546 kernel/cgroup/cgroup.c 	if (!it->task_pos)
it               4549 kernel/cgroup/cgroup.c 	task = list_entry(it->task_pos, struct task_struct, cg_list);
it               4551 kernel/cgroup/cgroup.c 	if (it->flags & CSS_TASK_ITER_PROCS) {
it               4557 kernel/cgroup/cgroup.c 		if (it->cur_tasks_head == it->dying_tasks_head &&
it               4562 kernel/cgroup/cgroup.c 		if (it->cur_tasks_head == it->dying_tasks_head)
it               4579 kernel/cgroup/cgroup.c 			 struct css_task_iter *it)
it               4584 kernel/cgroup/cgroup.c 	memset(it, 0, sizeof(*it));
it               4588 kernel/cgroup/cgroup.c 	it->ss = css->ss;
it               4589 kernel/cgroup/cgroup.c 	it->flags = flags;
it               4591 kernel/cgroup/cgroup.c 	if (it->ss)
it               4592 kernel/cgroup/cgroup.c 		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
it               4594 kernel/cgroup/cgroup.c 		it->cset_pos = &css->cgroup->cset_links;
it               4596 kernel/cgroup/cgroup.c 	it->cset_head = it->cset_pos;
it               4598 kernel/cgroup/cgroup.c 	css_task_iter_advance(it);
it               4611 kernel/cgroup/cgroup.c struct task_struct *css_task_iter_next(struct css_task_iter *it)
it               4613 kernel/cgroup/cgroup.c 	if (it->cur_task) {
it               4614 kernel/cgroup/cgroup.c 		put_task_struct(it->cur_task);
it               4615 kernel/cgroup/cgroup.c 		it->cur_task = NULL;
it               4621 kernel/cgroup/cgroup.c 	if (it->flags & CSS_TASK_ITER_SKIPPED)
it               4622 kernel/cgroup/cgroup.c 		css_task_iter_advance(it);
it               4624 kernel/cgroup/cgroup.c 	if (it->task_pos) {
it               4625 kernel/cgroup/cgroup.c 		it->cur_task = list_entry(it->task_pos, struct task_struct,
it               4627 kernel/cgroup/cgroup.c 		get_task_struct(it->cur_task);
it               4628 kernel/cgroup/cgroup.c 		css_task_iter_advance(it);
it               4633 kernel/cgroup/cgroup.c 	return it->cur_task;
it               4642 kernel/cgroup/cgroup.c void css_task_iter_end(struct css_task_iter *it)
it               4644 kernel/cgroup/cgroup.c 	if (it->cur_cset) {
it               4646 kernel/cgroup/cgroup.c 		list_del(&it->iters_node);
it               4647 kernel/cgroup/cgroup.c 		put_css_set_locked(it->cur_cset);
it               4651 kernel/cgroup/cgroup.c 	if (it->cur_dcset)
it               4652 kernel/cgroup/cgroup.c 		put_css_set(it->cur_dcset);
it               4654 kernel/cgroup/cgroup.c 	if (it->cur_task)
it               4655 kernel/cgroup/cgroup.c 		put_task_struct(it->cur_task);
it               4669 kernel/cgroup/cgroup.c 	struct css_task_iter *it = of->priv;
it               4674 kernel/cgroup/cgroup.c 	return css_task_iter_next(it);
it               4682 kernel/cgroup/cgroup.c 	struct css_task_iter *it = of->priv;
it               4688 kernel/cgroup/cgroup.c 	if (!it) {
it               4692 kernel/cgroup/cgroup.c 		it = kzalloc(sizeof(*it), GFP_KERNEL);
it               4693 kernel/cgroup/cgroup.c 		if (!it)
it               4695 kernel/cgroup/cgroup.c 		of->priv = it;
it               4696 kernel/cgroup/cgroup.c 		css_task_iter_start(&cgrp->self, iter_flags, it);
it               4698 kernel/cgroup/cgroup.c 		css_task_iter_end(it);
it               4699 kernel/cgroup/cgroup.c 		css_task_iter_start(&cgrp->self, iter_flags, it);
it               4701 kernel/cgroup/cgroup.c 		return it->cur_task;
it                912 kernel/cgroup/cpuset.c 	struct css_task_iter it;
it                915 kernel/cgroup/cpuset.c 	css_task_iter_start(&cs->css, 0, &it);
it                917 kernel/cgroup/cpuset.c 	while ((task = css_task_iter_next(&it)))
it                920 kernel/cgroup/cpuset.c 	css_task_iter_end(&it);
it               1035 kernel/cgroup/cpuset.c 	struct css_task_iter it;
it               1038 kernel/cgroup/cpuset.c 	css_task_iter_start(&cs->css, 0, &it);
it               1039 kernel/cgroup/cpuset.c 	while ((task = css_task_iter_next(&it)))
it               1041 kernel/cgroup/cpuset.c 	css_task_iter_end(&it);
it               1648 kernel/cgroup/cpuset.c 	struct css_task_iter it;
it               1665 kernel/cgroup/cpuset.c 	css_task_iter_start(&cs->css, 0, &it);
it               1666 kernel/cgroup/cpuset.c 	while ((task = css_task_iter_next(&it))) {
it               1684 kernel/cgroup/cpuset.c 	css_task_iter_end(&it);
it               1854 kernel/cgroup/cpuset.c 	struct css_task_iter it;
it               1857 kernel/cgroup/cpuset.c 	css_task_iter_start(&cs->css, 0, &it);
it               1858 kernel/cgroup/cpuset.c 	while ((task = css_task_iter_next(&it)))
it               1860 kernel/cgroup/cpuset.c 	css_task_iter_end(&it);
it                179 kernel/cgroup/freezer.c 	struct css_task_iter it;
it                196 kernel/cgroup/freezer.c 	css_task_iter_start(&cgrp->self, 0, &it);
it                197 kernel/cgroup/freezer.c 	while ((task = css_task_iter_next(&it))) {
it                206 kernel/cgroup/freezer.c 	css_task_iter_end(&it);
it                248 kernel/cgroup/legacy_freezer.c 	struct css_task_iter it;
it                271 kernel/cgroup/legacy_freezer.c 	css_task_iter_start(css, 0, &it);
it                273 kernel/cgroup/legacy_freezer.c 	while ((task = css_task_iter_next(&it))) {
it                288 kernel/cgroup/legacy_freezer.c 	css_task_iter_end(&it);
it                320 kernel/cgroup/legacy_freezer.c 	struct css_task_iter it;
it                323 kernel/cgroup/legacy_freezer.c 	css_task_iter_start(&freezer->css, 0, &it);
it                324 kernel/cgroup/legacy_freezer.c 	while ((task = css_task_iter_next(&it)))
it                326 kernel/cgroup/legacy_freezer.c 	css_task_iter_end(&it);
it                331 kernel/cgroup/legacy_freezer.c 	struct css_task_iter it;
it                334 kernel/cgroup/legacy_freezer.c 	css_task_iter_start(&freezer->css, 0, &it);
it                335 kernel/cgroup/legacy_freezer.c 	while ((task = css_task_iter_next(&it)))
it                337 kernel/cgroup/legacy_freezer.c 	css_task_iter_end(&it);
it               1082 kernel/sched/core.c 	struct css_task_iter it;
it               1085 kernel/sched/core.c 	css_task_iter_start(css, 0, &it);
it               1086 kernel/sched/core.c 	while ((p = css_task_iter_next(&it))) {
it               1092 kernel/sched/core.c 	css_task_iter_end(&it);
it                306 kernel/sched/idle.c 	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
it                308 kernel/sched/idle.c 	WRITE_ONCE(it->done, 1);
it                316 kernel/sched/idle.c 	struct idle_timer it;
it                333 kernel/sched/idle.c 	it.done = 0;
it                334 kernel/sched/idle.c 	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
it                335 kernel/sched/idle.c 	it.timer.function = idle_inject_timer_fn;
it                336 kernel/sched/idle.c 	hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
it                339 kernel/sched/idle.c 	while (!READ_ONCE(it.done))
it                546 kernel/time/alarmtimer.c 					    it.alarm.alarmtimer);
it                578 kernel/time/alarmtimer.c 	struct alarm *alarm = &timr->it.alarm.alarmtimer;
it                591 kernel/time/alarmtimer.c 	struct alarm *alarm = &timr->it.alarm.alarmtimer;
it                603 kernel/time/alarmtimer.c 	struct alarm *alarm = &timr->it.alarm.alarmtimer;
it                614 kernel/time/alarmtimer.c 	return alarm_try_to_cancel(&timr->it.alarm.alarmtimer);
it                627 kernel/time/alarmtimer.c 	hrtimer_cancel_wait_running(&timr->it.alarm.alarmtimer.timer);
it                640 kernel/time/alarmtimer.c 	struct alarm *alarm = &timr->it.alarm.alarmtimer;
it                648 kernel/time/alarmtimer.c 		alarm_start(&timr->it.alarm.alarmtimer, expires);
it                703 kernel/time/alarmtimer.c 	alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
it                 51 kernel/time/itimer.c 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
it                 55 kernel/time/itimer.c 	val = it->expires;
it                 56 kernel/time/itimer.c 	interval = it->incr;
it                116 kernel/time/itimer.c 		       struct compat_itimerval __user *, it)
it                121 kernel/time/itimer.c 	if (!error && put_compat_itimerval(it, &kit))
it                148 kernel/time/itimer.c 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
it                159 kernel/time/itimer.c 	oval = it->expires;
it                160 kernel/time/itimer.c 	ointerval = it->incr;
it                166 kernel/time/itimer.c 	it->expires = nval;
it                167 kernel/time/itimer.c 	it->incr = ninterval;
it                127 kernel/time/posix-cpu-timers.c 	u64 delta, incr, expires = timer->it.cpu.node.expires;
it                147 kernel/time/posix-cpu-timers.c 		timer->it.cpu.node.expires += incr;
it                151 kernel/time/posix-cpu-timers.c 	return timer->it.cpu.node.expires;
it                395 kernel/time/posix-cpu-timers.c 	timerqueue_init(&new_timer->it.cpu.node);
it                396 kernel/time/posix-cpu-timers.c 	new_timer->it.cpu.task = p;
it                408 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
it                429 kernel/time/posix-cpu-timers.c 		if (timer->it.cpu.firing)
it                490 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
it                523 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
it                566 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
it                599 kernel/time/posix-cpu-timers.c 	if (unlikely(timer->it.cpu.firing)) {
it                600 kernel/time/posix-cpu-timers.c 		timer->it.cpu.firing = -1;
it                705 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
it                876 kernel/time/posix-cpu-timers.c static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
it                879 kernel/time/posix-cpu-timers.c 	if (!it->expires)
it                882 kernel/time/posix-cpu-timers.c 	if (cur_time >= it->expires) {
it                883 kernel/time/posix-cpu-timers.c 		if (it->incr)
it                884 kernel/time/posix-cpu-timers.c 			it->expires += it->incr;
it                886 kernel/time/posix-cpu-timers.c 			it->expires = 0;
it                894 kernel/time/posix-cpu-timers.c 	if (it->expires && it->expires < *expires)
it                895 kernel/time/posix-cpu-timers.c 		*expires = it->expires;
it                935 kernel/time/posix-cpu-timers.c 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
it                938 kernel/time/posix-cpu-timers.c 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
it                979 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
it               1156 kernel/time/posix-cpu-timers.c 	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
it               1160 kernel/time/posix-cpu-timers.c 		list_del_init(&timer->it.cpu.elist);
it               1161 kernel/time/posix-cpu-timers.c 		cpu_firing = timer->it.cpu.firing;
it               1162 kernel/time/posix-cpu-timers.c 		timer->it.cpu.firing = 0;
it               1222 kernel/time/posix-cpu-timers.c 	struct itimerspec64 it;
it               1241 kernel/time/posix-cpu-timers.c 		memset(&it, 0, sizeof(it));
it               1242 kernel/time/posix-cpu-timers.c 		it.it_value = *rqtp;
it               1245 kernel/time/posix-cpu-timers.c 		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
it               1252 kernel/time/posix-cpu-timers.c 			if (!cpu_timer_getexpires(&timer.it.cpu)) {
it               1274 kernel/time/posix-cpu-timers.c 		expires = cpu_timer_getexpires(&timer.it.cpu);
it               1275 kernel/time/posix-cpu-timers.c 		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
it               1295 kernel/time/posix-cpu-timers.c 		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
it               1309 kernel/time/posix-cpu-timers.c 			error = nanosleep_copyout(restart, &it.it_value);
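
check_cpu_itimer() above applies the classic interval-timer rearm rule: once the accounted time passes it->expires, a periodic timer (nonzero it->incr) is pushed forward by one increment, while a one-shot timer is cleared. The standalone sketch below re-expresses only that arithmetic, with invented names:

	#include <stdint.h>
	#include <stdio.h>

	struct itimer {
		uint64_t expires;	/* 0 means disarmed */
		uint64_t incr;		/* 0 means one-shot */
	};

	/* Returns 1 if the timer fired (and was rearmed or disarmed). */
	static int itimer_check(struct itimer *t, uint64_t now)
	{
		if (!t->expires || now < t->expires)
			return 0;
		if (t->incr)
			t->expires += t->incr;	/* periodic: rearm */
		else
			t->expires = 0;		/* one-shot: disarm */
		return 1;			/* a signal would be queued here */
	}

	int main(void)
	{
		struct itimer t = { .expires = 100, .incr = 50 };

		printf("%d %llu\n", itimer_check(&t, 120),
		       (unsigned long long)t.expires);	/* prints "1 150" */
		return 0;
	}
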
it                269 kernel/time/posix-timers.c 	struct hrtimer *timer = &timr->it.real.timer;
it                347 kernel/time/posix-timers.c 	timr = container_of(timer, struct k_itimer, it.real.timer);
it                467 kernel/time/posix-timers.c 	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
it                616 kernel/time/posix-timers.c 	struct hrtimer *timer = &timr->it.real.timer;
it                623 kernel/time/posix-timers.c 	struct hrtimer *timer = &timr->it.real.timer;
it                776 kernel/time/posix-timers.c 	struct hrtimer *timer = &timr->it.real.timer;
it                792 kernel/time/posix-timers.c 	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
it                793 kernel/time/posix-timers.c 	timr->it.real.timer.function = posix_timer_fn;
it                805 kernel/time/posix-timers.c 	return hrtimer_try_to_cancel(&timr->it.real.timer);
it                810 kernel/time/posix-timers.c 	hrtimer_cancel_wait_running(&timer->it.real.timer);
it                950 kernel/time/time.c int get_itimerspec64(struct itimerspec64 *it,
it                955 kernel/time/time.c 	ret = get_timespec64(&it->it_interval, &uit->it_interval);
it                959 kernel/time/time.c 	ret = get_timespec64(&it->it_value, &uit->it_value);
it                965 kernel/time/time.c int put_itimerspec64(const struct itimerspec64 *it,
it                970 kernel/time/time.c 	ret = put_timespec64(&it->it_interval, &uit->it_interval);
it                974 kernel/time/time.c 	ret = put_timespec64(&it->it_value, &uit->it_value);
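
get_itimerspec64() and put_itimerspec64() above are thin wrappers: two get_timespec64()/put_timespec64() calls covering it_interval and it_value. A hedged fragment of how a timer syscall might use the pair; the __user pointers uits/uold and the user-side struct type are assumptions here, not taken from the listing:

	struct itimerspec64 new_spec, old_spec;
	int ret;

	ret = get_itimerspec64(&new_spec, uits);	/* copies both members from user space */
	if (ret)
		return ret;
	/* ... arm the timer from new_spec, saving the previous state in old_spec ... */
	return put_itimerspec64(&old_spec, uold);
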
it                537 mm/cma.c       int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
it                542 mm/cma.c       		int ret = it(&cma_areas[i], data);
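
cma_for_each_area() above walks every registered CMA area, handing each one plus a caller cookie to the it callback and stopping at the first nonzero return. A hedged kernel-style fragment of a caller; count_cma() and cma_area_count_sketch() are invented names:

	static int count_cma(struct cma *cma, void *data)
	{
		unsigned int *n = data;

		(*n)++;
		return 0;	/* a nonzero return would stop the walk early */
	}

	static unsigned int cma_area_count_sketch(void)
	{
		unsigned int n = 0;

		cma_for_each_area(count_cma, &n);
		return n;
	}
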
it               1223 mm/memcontrol.c 		struct css_task_iter it;
it               1226 mm/memcontrol.c 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
it               1227 mm/memcontrol.c 		while (!ret && (task = css_task_iter_next(&it)))
it               1229 mm/memcontrol.c 		css_task_iter_end(&it);
it                818 net/ceph/messenger.c 	struct ceph_bio_iter *it = &cursor->bio_iter;
it                821 net/ceph/messenger.c 	*it = data->bio_pos;
it                822 net/ceph/messenger.c 	if (cursor->resid < it->iter.bi_size)
it                823 net/ceph/messenger.c 		it->iter.bi_size = cursor->resid;
it                825 net/ceph/messenger.c 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
it                826 net/ceph/messenger.c 	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
it                844 net/ceph/messenger.c 	struct ceph_bio_iter *it = &cursor->bio_iter;
it                845 net/ceph/messenger.c 	struct page *page = bio_iter_page(it->bio, it->iter);
it                848 net/ceph/messenger.c 	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
it                850 net/ceph/messenger.c 	bio_advance_iter(it->bio, &it->iter, bytes);
it                857 net/ceph/messenger.c 	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
it                858 net/ceph/messenger.c 		       page == bio_iter_page(it->bio, it->iter)))
it                861 net/ceph/messenger.c 	if (!it->iter.bi_size) {
it                862 net/ceph/messenger.c 		it->bio = it->bio->bi_next;
it                863 net/ceph/messenger.c 		it->iter = it->bio->bi_iter;
it                864 net/ceph/messenger.c 		if (cursor->resid < it->iter.bi_size)
it                865 net/ceph/messenger.c 			it->iter.bi_size = cursor->resid;
it                869 net/ceph/messenger.c 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
it                870 net/ceph/messenger.c 	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
it                246 net/ceph/osd_client.c 	struct ceph_bvec_iter it = {
it                252 net/ceph/osd_client.c 	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
it                310 net/ceph/osd_client.c 	struct ceph_bvec_iter it = {
it                316 net/ceph/osd_client.c 	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
it                122 net/core/netclassid_cgroup.c 	struct css_task_iter it;
it                129 net/core/netclassid_cgroup.c 	css_task_iter_start(css, 0, &it);
it                130 net/core/netclassid_cgroup.c 	while ((p = css_task_iter_next(&it)))
it                132 net/core/netclassid_cgroup.c 	css_task_iter_end(&it);
it                139 net/dsa/dsa2.c 	struct of_phandle_iterator it;
it                143 net/dsa/dsa2.c 	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
it                144 net/dsa/dsa2.c 		link_dp = dsa_tree_find_port_by_node(dst, it.node);
it                146 net/dsa/dsa2.c 			of_node_put(it.node);
it               2990 net/ipv4/ipmr.c 		const struct mr_mfc_iter *it = seq->private;
it               2991 net/ipv4/ipmr.c 		const struct mr_table *mrt = it->mrt;
it               2998 net/ipv4/ipmr.c 		if (it->cache != &mrt->mfc_unres_queue) {
it                148 net/ipv4/ipmr_base.c 		     struct mr_mfc_iter *it, loff_t pos)
it                150 net/ipv4/ipmr_base.c 	struct mr_table *mrt = it->mrt;
it                154 net/ipv4/ipmr_base.c 	it->cache = &mrt->mfc_cache_list;
it                160 net/ipv4/ipmr_base.c 	spin_lock_bh(it->lock);
it                161 net/ipv4/ipmr_base.c 	it->cache = &mrt->mfc_unres_queue;
it                162 net/ipv4/ipmr_base.c 	list_for_each_entry(mfc, it->cache, list)
it                165 net/ipv4/ipmr_base.c 	spin_unlock_bh(it->lock);
it                167 net/ipv4/ipmr_base.c 	it->cache = NULL;
it                175 net/ipv4/ipmr_base.c 	struct mr_mfc_iter *it = seq->private;
it                177 net/ipv4/ipmr_base.c 	struct mr_table *mrt = it->mrt;
it                185 net/ipv4/ipmr_base.c 	if (c->list.next != it->cache)
it                188 net/ipv4/ipmr_base.c 	if (it->cache == &mrt->mfc_unres_queue)
it                193 net/ipv4/ipmr_base.c 	it->cache = &mrt->mfc_unres_queue;
it                195 net/ipv4/ipmr_base.c 	spin_lock_bh(it->lock);
it                196 net/ipv4/ipmr_base.c 	if (!list_empty(it->cache))
it                197 net/ipv4/ipmr_base.c 		return list_first_entry(it->cache, struct mr_mfc, list);
it                200 net/ipv4/ipmr_base.c 	spin_unlock_bh(it->lock);
it                201 net/ipv4/ipmr_base.c 	it->cache = NULL;
it                476 net/ipv6/ip6mr.c 		const struct mr_mfc_iter *it = seq->private;
it                477 net/ipv6/ip6mr.c 		struct mr_table *mrt = it->mrt;
it                483 net/ipv6/ip6mr.c 		if (it->cache != &mrt->mfc_unres_queue) {
it                 63 scripts/kconfig/qconf.cc 		QStringList::Iterator it;
it                 65 scripts/kconfig/qconf.cc 		for (it = entryList.begin(); it != entryList.end(); ++it)
it                 66 scripts/kconfig/qconf.cc 			result.push_back((*it).toInt());
it                 82 scripts/kconfig/qconf.cc 	QList<int>::ConstIterator it;
it                 84 scripts/kconfig/qconf.cc 	for (it = value.begin(); it != value.end(); ++it)
it                 85 scripts/kconfig/qconf.cc 		stringList.push_back(QString::number(*it));
it                425 scripts/kconfig/qconf.cc 		QTreeWidgetItemIterator it(this);
it                428 scripts/kconfig/qconf.cc 		while (*it) {
it                429 scripts/kconfig/qconf.cc 			item = (ConfigItem*)(*it);
it                434 scripts/kconfig/qconf.cc 			++it;
it                557 scripts/kconfig/qconf.cc 	QTreeWidgetItemIterator it(this);
it                558 scripts/kconfig/qconf.cc 	while (*it) {
it                559 scripts/kconfig/qconf.cc 		item = (ConfigItem *)(*it);
it                566 scripts/kconfig/qconf.cc 		++it;
it                983 scripts/kconfig/qconf.cc 	QTreeWidgetItemIterator it(this);
it                985 scripts/kconfig/qconf.cc 	while (*it) {
it                986 scripts/kconfig/qconf.cc 		(*it)->setExpanded(open);
it                988 scripts/kconfig/qconf.cc 		++it;
it               1161 security/apparmor/apparmorfs.c 	struct label_it it;
it               1167 security/apparmor/apparmorfs.c 		label_for_each(it, label, profile)
it               1058 security/apparmor/domain.c 	struct label_it it;
it               1073 security/apparmor/domain.c 		label_for_each_in_ns(it, labels_ns(label), label, profile) {
it               1109 security/apparmor/domain.c 	label_for_each_in_ns(it, labels_ns(label), label, profile) {
it               1120 security/apparmor/domain.c 	label_for_each_in_ns(it, labels_ns(label), label, profile) {
it                774 sound/core/oss/pcm_oss.c 	const struct snd_interval *it;
it                782 sound/core/oss/pcm_oss.c 	it = hw_param_interval_c(save, SNDRV_PCM_HW_PARAM_RATE);
it                787 sound/core/oss/pcm_oss.c 		if (it->max < rate || (it->max == rate && it->openmax))
it                789 sound/core/oss/pcm_oss.c 		if (it->min < rate || (it->min == rate && !it->openmin)) {
it                407 sound/drivers/serial-u16550.c 					 * it is, it is ESSENTIAL for enabling interrupts
it                 30 sound/firewire/dice/dice-presonus.c 	struct fw_csr_iterator it;
it                 35 sound/firewire/dice/dice-presonus.c 	fw_csr_iterator_init(&it, dice->unit->directory);
it                 36 sound/firewire/dice/dice-presonus.c 	while (fw_csr_iterator_next(&it, &key, &val)) {
it                 72 sound/firewire/dice/dice-tcelectronic.c 	struct fw_csr_iterator it;
it                 77 sound/firewire/dice/dice-tcelectronic.c 	fw_csr_iterator_init(&it, dice->unit->directory);
it                 78 sound/firewire/dice/dice-tcelectronic.c 	while (fw_csr_iterator_next(&it, &key, &val)) {
it                 33 sound/firewire/dice/dice.c 	struct fw_csr_iterator it;
it                 43 sound/firewire/dice/dice.c 	fw_csr_iterator_init(&it, unit->directory);
it                 44 sound/firewire/dice/dice.c 	while (fw_csr_iterator_next(&it, &key, &val)) {
it                 31 sound/firewire/motu/motu.c 	struct fw_csr_iterator it;
it                 35 sound/firewire/motu/motu.c 	fw_csr_iterator_init(&it, motu->unit->directory);
it                 36 sound/firewire/motu/motu.c 	while (fw_csr_iterator_next(&it, &key, &val)) {
it                127 sound/firewire/oxfw/oxfw.c 	struct fw_csr_iterator it;
it                164 sound/firewire/oxfw/oxfw.c 	fw_csr_iterator_init(&it, fw_dev->config_rom + 5);
it                165 sound/firewire/oxfw/oxfw.c 	while (fw_csr_iterator_next(&it, &key, &val)) {
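
The firewire drivers above probe a config-ROM directory the same way: fw_csr_iterator_init() on the directory, then fw_csr_iterator_next() yielding key/value pairs until it returns 0. A hedged fragment of that loop; it assumes the unit->directory pointer from the surrounding driver and the CSR_MODEL key constant from linux/firewire.h:

	struct fw_csr_iterator it;
	int key, val, model = 0;

	fw_csr_iterator_init(&it, unit->directory);
	while (fw_csr_iterator_next(&it, &key, &val)) {
		if (key == CSR_MODEL)
			model = val;
	}
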
it                418 sound/soc/generic/audio-graph-card.c 	struct of_phandle_iterator it;
it                431 sound/soc/generic/audio-graph-card.c 	of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
it                432 sound/soc/generic/audio-graph-card.c 		cpu_port = it.node;
it               1132 sound/soc/soc-dapm.c 	struct list_head *it;
it               1136 sound/soc/soc-dapm.c 	list_for_each(it, widgets)
it                970 sound/usb/pcm.c 	struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
it                992 sound/usb/pcm.c 	if (fp->rate_min > it->max || (fp->rate_min == it->max && it->openmax)) {
it                993 sound/usb/pcm.c 		hwc_debug("   > check: rate_min %d > max %d\n", fp->rate_min, it->max);
it                996 sound/usb/pcm.c 	if (fp->rate_max < it->min || (fp->rate_max == it->min && it->openmin)) {
it                997 sound/usb/pcm.c 		hwc_debug("   > check: rate_max %d < min %d\n", fp->rate_max, it->min);
it               1016 sound/usb/pcm.c 	struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
it               1020 sound/usb/pcm.c 	hwc_debug("hw_rule_rate: (%d,%d)\n", it->min, it->max);
it               1039 sound/usb/pcm.c 		it->empty = 1;
it               1044 sound/usb/pcm.c 	if (it->min < rmin) {
it               1045 sound/usb/pcm.c 		it->min = rmin;
it               1046 sound/usb/pcm.c 		it->openmin = 0;
it               1049 sound/usb/pcm.c 	if (it->max > rmax) {
it               1050 sound/usb/pcm.c 		it->max = rmax;
it               1051 sound/usb/pcm.c 		it->openmax = 0;
it               1054 sound/usb/pcm.c 	if (snd_interval_checkempty(it)) {
it               1055 sound/usb/pcm.c 		it->empty = 1;
it               1058 sound/usb/pcm.c 	hwc_debug("  --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
it               1068 sound/usb/pcm.c 	struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
it               1072 sound/usb/pcm.c 	hwc_debug("hw_rule_channels: (%d,%d)\n", it->min, it->max);
it               1091 sound/usb/pcm.c 		it->empty = 1;
it               1096 sound/usb/pcm.c 	if (it->min < rmin) {
it               1097 sound/usb/pcm.c 		it->min = rmin;
it               1098 sound/usb/pcm.c 		it->openmin = 0;
it               1101 sound/usb/pcm.c 	if (it->max > rmax) {
it               1102 sound/usb/pcm.c 		it->max = rmax;
it               1103 sound/usb/pcm.c 		it->openmax = 0;
it               1106 sound/usb/pcm.c 	if (snd_interval_checkempty(it)) {
it               1107 sound/usb/pcm.c 		it->empty = 1;
it               1110 sound/usb/pcm.c 	hwc_debug("  --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
it               1150 sound/usb/pcm.c 	struct snd_interval *it;
it               1155 sound/usb/pcm.c 	it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME);
it               1156 sound/usb/pcm.c 	hwc_debug("hw_rule_period_time: (%u,%u)\n", it->min, it->max);
it               1165 sound/usb/pcm.c 		it->empty = 1;
it               1170 sound/usb/pcm.c 	if (it->min < pmin) {
it               1171 sound/usb/pcm.c 		it->min = pmin;
it               1172 sound/usb/pcm.c 		it->openmin = 0;
it               1175 sound/usb/pcm.c 	if (snd_interval_checkempty(it)) {
it               1176 sound/usb/pcm.c 		it->empty = 1;
it               1179 sound/usb/pcm.c 	hwc_debug("  --> (%u,%u) (changed = %d)\n", it->min, it->max, changed);
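
hw_rule_rate(), hw_rule_channels() and hw_rule_period_time() above all narrow a snd_interval the same way: raise min, lower max, clear the matching open flag, and mark the interval empty when nothing is left. The standalone sketch below re-expresses that clamp over a simplified stand-in struct (not the real snd_interval):

	#include <stdio.h>

	struct interval {
		unsigned int min, max;
		unsigned int openmin:1, openmax:1, empty:1;
	};

	/* Narrow *it to [rmin, rmax]; returns 1 if the interval changed. */
	static int interval_clamp(struct interval *it,
				  unsigned int rmin, unsigned int rmax)
	{
		int changed = 0;

		if (it->min < rmin) {
			it->min = rmin;
			it->openmin = 0;
			changed = 1;
		}
		if (it->max > rmax) {
			it->max = rmax;
			it->openmax = 0;
			changed = 1;
		}
		if (it->min > it->max ||
		    (it->min == it->max && (it->openmin || it->openmax)))
			it->empty = 1;
		return changed;
	}

	int main(void)
	{
		struct interval it = { .min = 8000, .max = 192000 };

		printf("%d %u %u\n", interval_clamp(&it, 44100, 48000),
		       it.min, it.max);	/* prints "1 44100 48000" */
		return 0;
	}
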
it                108 tools/testing/selftests/sync/sync_stress_consumer.c 	int fence, merged, tmp, valid, it, i;
it                114 tools/testing/selftests/sync/sync_stress_consumer.c 	for (it = 1; it <= iterations; it++) {
it                115 tools/testing/selftests/sync/sync_stress_consumer.c 		fence = sw_sync_fence_create(producer_timelines[0], "name", it);
it                118 tools/testing/selftests/sync/sync_stress_consumer.c 						   "name", it);
it                140 tools/testing/selftests/sync/sync_stress_consumer.c 		ASSERT(test_data_mpsc.counter == n * it,
it                 66 virt/kvm/arm/hyp/aarch32.c 		unsigned long it;
it                 68 virt/kvm/arm/hyp/aarch32.c 		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
it                 71 virt/kvm/arm/hyp/aarch32.c 		if (it == 0)
it                 75 virt/kvm/arm/hyp/aarch32.c 		cond = (it >> 4);
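
The aarch32 hyp lines above rebuild the 8-bit Thumb ITSTATE from its two CPSR fields (bits 26:25 carry ITSTATE[1:0], bits 15:10 carry ITSTATE[7:2]) and take its upper four bits as the condition to evaluate. A standalone sketch of just that bit surgery:

	#include <stdint.h>
	#include <stdio.h>

	/* Reassemble Thumb ITSTATE from the two CPSR fields and return
	 * the condition code held in its upper four bits. */
	static unsigned int it_cond(uint32_t cpsr)
	{
		unsigned int it;

		it  = (cpsr >> 8) & 0xFC;	/* CPSR[15:10] -> ITSTATE[7:2] */
		it |= (cpsr >> 25) & 0x3;	/* CPSR[26:25] -> ITSTATE[1:0] */
		return it >> 4;			/* ITSTATE[7:4] is the condition */
	}

	int main(void)
	{
		/* Example ITSTATE value 0xE8 spread across the two CPSR fields. */
		uint32_t cpsr = ((0xE8u & 0xFC) << 8) | ((0xE8u & 0x3) << 25);

		printf("cond=%u\n", it_cond(cpsr));	/* prints cond=14 */
		return 0;
	}
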