tb                 41 arch/ia64/include/asm/kprobes.h 	unsigned long long tb : 1;
tb                106 arch/ia64/include/asm/processor.h 	__u64 tb : 1;
tb                201 arch/ia64/kernel/brl_emu.c 	} else if (ia64_psr(regs)->tb) {
tb                156 arch/ia64/kernel/kprobes.c 		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
tb               1132 arch/ia64/kernel/ptrace.c 	child_psr->tb = 1;
tb               1143 arch/ia64/kernel/ptrace.c 	child_psr->tb = 0;
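
The six ia64 hits above are all one bit: `tb` is the Processor Status Register's taken-branch trap enable, declared as a one-bit field and flipped by ptrace.c to step the child across branches (brl_emu.c and kprobes.c only test it). A minimal standalone sketch of that flip, with an illustrative struct standing in for the real ia64_psr layout:

    #include <stdio.h>

    /* Illustrative stand-in for the ia64 PSR: only the taken-branch
     * trap enable ("tb") bit is modeled here. */
    struct psr {
            unsigned long long tb : 1;
    };

    int main(void)
    {
            struct psr child_psr = { .tb = 0 };

            child_psr.tb = 1;       /* trap on every taken branch */
            printf("tb=%llu\n", (unsigned long long)child_psr.tb);
            child_psr.tb = 0;       /* done branch-stepping */
            return 0;
    }
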
tb                148 arch/m68k/coldfire/intc-2.c 	u16 pa, tb;
tb                152 arch/m68k/coldfire/intc-2.c 		tb = 0x1;
tb                155 arch/m68k/coldfire/intc-2.c 		tb = 0x2;
tb                158 arch/m68k/coldfire/intc-2.c 		tb = 0x3;
tb                162 arch/m68k/coldfire/intc-2.c 		tb = 0;
tb                166 arch/m68k/coldfire/intc-2.c 	if (tb)
tb                171 arch/m68k/coldfire/intc-2.c 	pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2));
tb                132 arch/m68k/coldfire/intc-simr.c 	u16 pa, tb;
tb                136 arch/m68k/coldfire/intc-simr.c 		tb = 0x1;
tb                139 arch/m68k/coldfire/intc-simr.c 		tb = 0x2;
tb                142 arch/m68k/coldfire/intc-simr.c 		tb = 0x3;
tb                146 arch/m68k/coldfire/intc-simr.c 		tb = 0;
tb                150 arch/m68k/coldfire/intc-simr.c 	if (tb)
tb                155 arch/m68k/coldfire/intc-simr.c 	pa = (pa & ~(0x3 << ebit)) | (tb << ebit);
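
Both ColdFire interrupt controllers above follow the same read-modify-write pattern: the requested IRQ trigger type is mapped to a 2-bit code in `tb` (0x1..0x3, or 0 for "leave unchanged"), then spliced into a packed register image `pa` holding two bits per IRQ. A standalone sketch of the splice, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 2-bit trigger code for one IRQ into a register image that
     * carries 2 bits per IRQ, as intc-2.c does at line 171 above. */
    static uint16_t splice_trigger(uint16_t pa, unsigned int irq, uint16_t tb)
    {
            unsigned int shift = irq * 2;

            return (pa & ~(0x3u << shift)) | ((tb & 0x3u) << shift);
    }

    int main(void)
    {
            uint16_t pa = 0xffff;

            pa = splice_trigger(pa, 3, 0x2);
            printf("pa=0x%04x\n", pa);      /* bits 7:6 now 0b10 */
            return 0;
    }
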
tb                335 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
tb                359 arch/powerpc/boot/4xx.c 		tb = sys_clk;
tb                362 arch/powerpc/boot/4xx.c 		tb = cpu;
tb                381 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
tb                422 arch/powerpc/boot/4xx.c 	u32 ccr1, tb = tmr_clk;
tb                465 arch/powerpc/boot/4xx.c 	if (tb == 0) {
tb                470 arch/powerpc/boot/4xx.c 		tb = cpu;
tb                472 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
tb                554 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
tb                610 arch/powerpc/boot/4xx.c 	tb = cpu;
tb                612 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
tb                628 arch/powerpc/boot/4xx.c 	u32 pllmr0_ccdv, tb, m;
tb                651 arch/powerpc/boot/4xx.c 	tb = cpu;
tb                655 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
tb                749 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, vco, tb, uart0, uart1;
tb                790 arch/powerpc/boot/4xx.c 	tb = cpu;
tb                793 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
tb                 58 arch/powerpc/boot/devtree.c void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
tb                 63 arch/powerpc/boot/devtree.c 	printf("CPU timebase-frequency <- 0x%x (%dMHz)\n\r", tb, MHZ(tb));
tb                 69 arch/powerpc/boot/devtree.c 		setprop_val(devp, "timebase-frequency", tb);
tb                 74 arch/powerpc/boot/devtree.c 	timebase_period_ns = 1000000000 / tb;
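
In the 4xx boot wrapper above, `tb` is the timebase frequency in Hz (usually just the CPU clock), which dt_fixup_cpu_clocks() writes into the device tree's "timebase-frequency" property and also inverts into a cached tick period. A standalone sketch of that arithmetic; the frequency value is only an example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t tb = 400000000;        /* e.g. a 400 MHz timebase */
            uint32_t timebase_period_ns = 1000000000u / tb;

            printf("CPU timebase-frequency <- 0x%x (%uMHz)\n",
                   tb, tb / 1000000);
            printf("one tick every %uns (integer-truncated)\n",
                   timebase_period_ns);
            return 0;
    }
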
tb                421 arch/powerpc/include/asm/kvm_host.h 	bool tb		: 1;	/* 1TB segment */
tb                 98 arch/powerpc/include/asm/kvm_ppc.h extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
tb                236 arch/powerpc/include/uapi/asm/kvm.h 			__u64 tb;
tb                 25 arch/powerpc/kernel/smp-tbsync.c 	volatile u64		tb;
tb                 48 arch/powerpc/kernel/smp-tbsync.c 	u64 tb;
tb                 63 arch/powerpc/kernel/smp-tbsync.c 		tb = tbsync->tb;
tb                 72 arch/powerpc/kernel/smp-tbsync.c 			set_tb(tb >> 32, tb & 0xfffffffful);
tb                 81 arch/powerpc/kernel/smp-tbsync.c 	u64 tb;
tb                 88 arch/powerpc/kernel/smp-tbsync.c 		tb = get_tb() + 400;
tb                 89 arch/powerpc/kernel/smp-tbsync.c 		tbsync->tb = tb + offset;
tb                 90 arch/powerpc/kernel/smp-tbsync.c 		tbsync->mark = mark = tb + 400;
tb                 98 arch/powerpc/kernel/smp-tbsync.c 		while (get_tb() <= tb)
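
smp-tbsync.c above synchronizes the timebase across CPUs: the master samples get_tb(), adds a safety margin, and publishes a target through the shared volatile structure; the slave applies it with set_tb(), which takes the 64-bit value as two 32-bit halves. A standalone sketch of that split (fake_set_tb stands in for the real register writes):

    #include <stdint.h>
    #include <stdio.h>

    static void fake_set_tb(uint32_t upper, uint32_t lower)
    {
            printf("TBU=0x%08x TBL=0x%08x\n", upper, lower);
    }

    int main(void)
    {
            uint64_t tb = 0x0000123456789abcull;    /* master's value */

            fake_set_tb(tb >> 32, tb & 0xfffffffful);
            return 0;
    }
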
tb                175 arch/powerpc/kernel/time.c static inline unsigned long read_spurr(unsigned long tb)
tb                181 arch/powerpc/kernel/time.c 	return tb;
tb                115 arch/powerpc/kernel/watchdog.c 	u64 tb = get_tb();
tb                119 arch/powerpc/kernel/watchdog.c 		 cpu, tb, per_cpu(wd_timer_tb, cpu),
tb                120 arch/powerpc/kernel/watchdog.c 		 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
tb                131 arch/powerpc/kernel/watchdog.c static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
tb                136 arch/powerpc/kernel/watchdog.c 		wd_smp_last_reset_tb = tb;
tb                142 arch/powerpc/kernel/watchdog.c static void set_cpu_stuck(int cpu, u64 tb)
tb                144 arch/powerpc/kernel/watchdog.c 	set_cpumask_stuck(cpumask_of(cpu), tb);
tb                147 arch/powerpc/kernel/watchdog.c static void watchdog_smp_panic(int cpu, u64 tb)
tb                154 arch/powerpc/kernel/watchdog.c 	if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
tb                164 arch/powerpc/kernel/watchdog.c 		 cpu, tb, wd_smp_last_reset_tb,
tb                165 arch/powerpc/kernel/watchdog.c 		 tb_to_ns(tb - wd_smp_last_reset_tb) / 1000000);
tb                180 arch/powerpc/kernel/watchdog.c 	set_cpumask_stuck(&wd_smp_cpus_pending, tb);
tb                201 arch/powerpc/kernel/watchdog.c static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
tb                211 arch/powerpc/kernel/watchdog.c 				 cpu, tb);
tb                229 arch/powerpc/kernel/watchdog.c 			wd_smp_last_reset_tb = tb;
tb                240 arch/powerpc/kernel/watchdog.c 	u64 tb = get_tb();
tb                242 arch/powerpc/kernel/watchdog.c 	per_cpu(wd_timer_tb, cpu) = tb;
tb                244 arch/powerpc/kernel/watchdog.c 	wd_smp_clear_cpu_pending(cpu, tb);
tb                246 arch/powerpc/kernel/watchdog.c 	if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
tb                247 arch/powerpc/kernel/watchdog.c 		watchdog_smp_panic(cpu, tb);
tb                254 arch/powerpc/kernel/watchdog.c 	u64 tb;
tb                263 arch/powerpc/kernel/watchdog.c 	tb = get_tb();
tb                264 arch/powerpc/kernel/watchdog.c 	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
tb                270 arch/powerpc/kernel/watchdog.c 		set_cpu_stuck(cpu, tb);
tb                275 arch/powerpc/kernel/watchdog.c 			 cpu, tb, per_cpu(wd_timer_tb, cpu),
tb                276 arch/powerpc/kernel/watchdog.c 			 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
tb                317 arch/powerpc/kernel/watchdog.c 	u64 tb = get_tb();
tb                319 arch/powerpc/kernel/watchdog.c 	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
tb                320 arch/powerpc/kernel/watchdog.c 		per_cpu(wd_timer_tb, cpu) = tb;
tb                321 arch/powerpc/kernel/watchdog.c 		wd_smp_clear_cpu_pending(cpu, tb);
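
The watchdog code above leans on two timebase idioms: deltas are converted for printing with tb_to_ns(delta) / 1000000 (milliseconds), and timeout tests cast the unsigned delta to s64 so a wrapped timebase still compares sanely. A standalone sketch of the wrap-safe test:

    #include <stdint.h>
    #include <stdio.h>

    /* True once "timeout" ticks have elapsed since "last", even if the
     * counter wrapped in between (mirrors the (s64) casts above). */
    static int timed_out(uint64_t tb, uint64_t last, uint64_t timeout)
    {
            return (int64_t)(tb - last) >= (int64_t)timeout;
    }

    int main(void)
    {
            printf("%d\n", timed_out(1000, 100, 500));  /* 1: 900 elapsed */
            printf("%d\n", timed_out(50, UINT64_MAX - 49, 500));
                                                        /* 0: only 100 */
            return 0;
    }
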
tb                 55 arch/powerpc/kvm/book3s_64_mmu.c 		if (vcpu->arch.slb[i].tb)
tb                 69 arch/powerpc/kvm/book3s_64_mmu.c 			vcpu->arch.slb[i].tb    ? 't' : ' ',
tb                 79 arch/powerpc/kvm/book3s_64_mmu.c 	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
tb                143 arch/powerpc/kvm/book3s_64_mmu.c 	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
tb                247 arch/powerpc/kvm/book3s_64_mmu.c 	if (slbe->tb)
tb                396 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
tb                397 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->esid  = slbe->tb ? esid_1t : esid;
tb                602 arch/powerpc/kvm/book3s_64_mmu.c 			if (slb->tb) {
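
In book3s_64_mmu.c above (and the kvm_host.h field at line 421), `tb` caches the SLB entry's B field: set for 1TB segments, clear for 256MB ones, so the ESID shift and MMU segment size follow directly from it. A standalone sketch:

    #include <stdio.h>

    #define SID_SHIFT       28      /* log2(256MB) */
    #define SID_SHIFT_1T    40      /* log2(1TB)   */

    struct slbe_sketch {
            unsigned int tb : 1;    /* 1TB segment? */
    };

    static int sid_shift(const struct slbe_sketch *slbe)
    {
            return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
    }

    int main(void)
    {
            struct slbe_sketch s = { .tb = 1 };

            printf("shift=%d\n", sid_shift(&s));
            return 0;
    }
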
tb               2163 arch/powerpc/kvm/book3s_hv.c 	struct kvmhv_tb_accumulator tb;
tb               2183 arch/powerpc/kvm/book3s_hv.c 					tb = *acc;
tb               2199 arch/powerpc/kvm/book3s_hv.c 					tb_to_ns(tb.tb_total),
tb               2200 arch/powerpc/kvm/book3s_hv.c 					tb_to_ns(tb.tb_min),
tb               2201 arch/powerpc/kvm/book3s_hv.c 					tb_to_ns(tb.tb_max));
tb               3394 arch/powerpc/kvm/book3s_hv.c 	u64 tb, purr, spurr;
tb               3411 arch/powerpc/kvm/book3s_hv.c 		tb = mftb();
tb               3412 arch/powerpc/kvm/book3s_hv.c 		if ((tb & 0xffffff) < (new_tb & 0xffffff))
tb               3503 arch/powerpc/kvm/book3s_hv.c 		tb = mftb();
tb               3504 arch/powerpc/kvm/book3s_hv.c 		if ((tb & 0xffffff) < (new_tb & 0xffffff))
tb               3528 arch/powerpc/kvm/book3s_hv.c 	u64 tb;
tb               3532 arch/powerpc/kvm/book3s_hv.c 	tb = mftb();
tb               3535 arch/powerpc/kvm/book3s_hv.c 	local_paca->kvm_hstate.dec_expires = dec + tb;
tb               3637 arch/powerpc/kvm/book3s_hv.c 	tb = mftb();
tb               3638 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.dec_expires = dec + tb;
tb                552 arch/powerpc/kvm/booke.c 	u64 tb, wdt_tb, wdt_ticks = 0;
tb                557 arch/powerpc/kvm/booke.c 	tb = get_tb();
tb                562 arch/powerpc/kvm/booke.c 	if (tb & wdt_tb)
tb                565 arch/powerpc/kvm/booke.c 	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
tb               1494 arch/powerpc/kvm/booke.c 	u64 tb = get_tb();
tb               1505 arch/powerpc/kvm/booke.c 	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
tb               1506 arch/powerpc/kvm/booke.c 	sregs->u.e.tb = tb;
tb                 63 arch/powerpc/kvm/emulate.c u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
tb                 65 arch/powerpc/kvm/emulate.c 	u64 jd = tb - vcpu->arch.dec_jiffies;
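
Among the KVM hits above, `tb` is the raw timebase: book3s_hv.c samples mftb() around guest entry/exit (and, at lines 2163-2201, snapshots a kvmhv_tb_accumulator for debugfs), while booke.c and emulate.c track the guest decrementer against it; the expiry is saved as dec + tb, and kvmppc_get_dec() later recovers the remaining count from how far the timebase has advanced since. A standalone sketch of that bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t tb = 1000000;          /* timebase at guest exit */
            uint32_t dec = 5000;            /* hardware DEC read then */
            uint64_t dec_expires = dec + tb;

            tb = 1002000;                   /* timebase at re-entry */
            printf("remaining dec = %u\n", (uint32_t)(dec_expires - tb));
            return 0;
    }
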
tb                 20 arch/sparc/include/asm/tlbflush_64.h void flush_tsb_user(struct tlb_batch *tb);
tb               1002 arch/sparc/kernel/irq_64.c 	struct trap_per_cpu *tb = &trap_block[this_cpu];
tb               1004 arch/sparc/kernel/irq_64.c 	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
tb               1005 arch/sparc/kernel/irq_64.c 			   tb->cpu_mondo_qmask);
tb               1006 arch/sparc/kernel/irq_64.c 	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
tb               1007 arch/sparc/kernel/irq_64.c 			   tb->dev_mondo_qmask);
tb               1008 arch/sparc/kernel/irq_64.c 	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
tb               1009 arch/sparc/kernel/irq_64.c 			   tb->resum_qmask);
tb               1010 arch/sparc/kernel/irq_64.c 	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
tb               1011 arch/sparc/kernel/irq_64.c 			   tb->nonresum_qmask);
tb               1033 arch/sparc/kernel/irq_64.c static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
tb               1048 arch/sparc/kernel/irq_64.c 	tb->cpu_mondo_block_pa = __pa(mondo);
tb               1056 arch/sparc/kernel/irq_64.c 	tb->cpu_list_pa = __pa(page);
tb               1066 arch/sparc/kernel/irq_64.c 		struct trap_per_cpu *tb = &trap_block[cpu];
tb               1068 arch/sparc/kernel/irq_64.c 		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
tb               1069 arch/sparc/kernel/irq_64.c 		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
tb               1070 arch/sparc/kernel/irq_64.c 		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
tb               1071 arch/sparc/kernel/irq_64.c 		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
tb               1072 arch/sparc/kernel/irq_64.c 		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
tb               1073 arch/sparc/kernel/irq_64.c 		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
tb               1074 arch/sparc/kernel/irq_64.c 				tb->nonresum_qmask);
tb               1083 arch/sparc/kernel/irq_64.c 		struct trap_per_cpu *tb = &trap_block[cpu];
tb               1085 arch/sparc/kernel/irq_64.c 		init_cpu_send_mondo_info(tb);
tb               1080 arch/sparc/kernel/mdesc.c 			   struct trap_per_cpu *tb)
tb               1086 arch/sparc/kernel/mdesc.c 	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));
tb               1089 arch/sparc/kernel/mdesc.c 	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);
tb               1092 arch/sparc/kernel/mdesc.c 	get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);
tb               1095 arch/sparc/kernel/mdesc.c 	get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);
tb               1099 arch/sparc/kernel/mdesc.c 			tb->cpu_mondo_qmask + 1,
tb               1100 arch/sparc/kernel/mdesc.c 			tb->dev_mondo_qmask + 1,
tb               1101 arch/sparc/kernel/mdesc.c 			tb->resum_qmask + 1,
tb               1102 arch/sparc/kernel/mdesc.c 			tb->nonresum_qmask + 1);
tb               1183 arch/sparc/kernel/mdesc.c 	struct trap_per_cpu *tb;
tb               1200 arch/sparc/kernel/mdesc.c 	tb = &trap_block[cpuid];
tb               1201 arch/sparc/kernel/mdesc.c 	get_mondo_data(hp, mp, tb);
tb                298 arch/sparc/kernel/smp_64.c 	struct trap_per_cpu *tb;
tb                317 arch/sparc/kernel/smp_64.c 	tb = &trap_block[cpu];
tb                319 arch/sparc/kernel/smp_64.c 	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
tb                320 arch/sparc/kernel/smp_64.c 	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
tb                465 arch/sparc/kernel/smp_64.c static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
tb                473 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
tb                474 arch/sparc/kernel/smp_64.c 	mondo = __va(tb->cpu_mondo_block_pa);
tb                486 arch/sparc/kernel/smp_64.c static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
tb                492 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
tb                493 arch/sparc/kernel/smp_64.c 	mondo = __va(tb->cpu_mondo_block_pa);
tb                648 arch/sparc/kernel/smp_64.c static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
tb                661 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
tb                673 arch/sparc/kernel/smp_64.c 					      tb->cpu_list_pa,
tb                674 arch/sparc/kernel/smp_64.c 					      tb->cpu_mondo_block_pa);
tb                777 arch/sparc/kernel/smp_64.c 	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
tb                791 arch/sparc/kernel/smp_64.c 	struct trap_per_cpu *tb;
tb                810 arch/sparc/kernel/smp_64.c 	tb = &trap_block[this_cpu];
tb                812 arch/sparc/kernel/smp_64.c 	mondo = __va(tb->cpu_mondo_block_pa);
tb                818 arch/sparc/kernel/smp_64.c 	cpu_list = __va(tb->cpu_list_pa);
tb                829 arch/sparc/kernel/smp_64.c 		xcall_deliver_impl(tb, cnt);
tb               1347 arch/sparc/kernel/smp_64.c 		struct trap_per_cpu *tb = &trap_block[cpu];
tb               1350 arch/sparc/kernel/smp_64.c 				tb->cpu_mondo_pa, 0);
tb               1352 arch/sparc/kernel/smp_64.c 				tb->dev_mondo_pa, 0);
tb               1354 arch/sparc/kernel/smp_64.c 				tb->resum_mondo_pa, 0);
tb               1356 arch/sparc/kernel/smp_64.c 				tb->nonresum_mondo_pa, 0);
tb               2084 arch/sparc/kernel/traps_64.c 	struct trap_per_cpu *tb;
tb               2090 arch/sparc/kernel/traps_64.c 	tb = &trap_block[cpu];
tb               2091 arch/sparc/kernel/traps_64.c 	paddr = tb->resum_kernel_buf_pa + offset;
tb               2202 arch/sparc/kernel/traps_64.c 	struct trap_per_cpu *tb;
tb               2208 arch/sparc/kernel/traps_64.c 	tb = &trap_block[cpu];
tb               2209 arch/sparc/kernel/traps_64.c 	paddr = tb->nonresum_kernel_buf_pa + offset;
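
Across irq_64.c, mdesc.c, smp_64.c and traps_64.c above, `tb` is a per-cpu `struct trap_per_cpu`: it carries the physical addresses of the hypervisor mondo and error queues, their size masks (entries minus one, as the qmask+1 prints in mdesc.c show), and the cpu-list/mondo-block pages used for cross-calls. A standalone sketch of the qmask convention; the field set and queue-type code are trimmed and illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct trap_per_cpu_sketch {
            uint64_t cpu_mondo_pa;          /* queue physical address */
            uint64_t cpu_mondo_qmask;       /* queue entries - 1 */
    };

    static void register_one_mondo(uint64_t pa, unsigned int type,
                                   uint64_t qmask)
    {
            printf("queue type %u: pa=0x%llx, %llu entries\n", type,
                   (unsigned long long)pa, (unsigned long long)qmask + 1);
    }

    int main(void)
    {
            struct trap_per_cpu_sketch tb = {
                    .cpu_mondo_pa = 0x10000000,
                    .cpu_mondo_qmask = 127,
            };

            register_one_mondo(tb.cpu_mondo_pa, 0x3c, tb.cpu_mondo_qmask);
            return 0;
    }
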
tb                 26 arch/sparc/mm/tlb.c 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
tb                 27 arch/sparc/mm/tlb.c 	struct mm_struct *mm = tb->mm;
tb                 29 arch/sparc/mm/tlb.c 	if (!tb->tlb_nr)
tb                 32 arch/sparc/mm/tlb.c 	flush_tsb_user(tb);
tb                 35 arch/sparc/mm/tlb.c 		if (tb->tlb_nr == 1) {
tb                 36 arch/sparc/mm/tlb.c 			global_flush_tlb_page(mm, tb->vaddrs[0]);
tb                 39 arch/sparc/mm/tlb.c 			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
tb                 40 arch/sparc/mm/tlb.c 					      &tb->vaddrs[0]);
tb                 42 arch/sparc/mm/tlb.c 			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
tb                 43 arch/sparc/mm/tlb.c 					    tb->tlb_nr, &tb->vaddrs[0]);
tb                 48 arch/sparc/mm/tlb.c 	tb->tlb_nr = 0;
tb                 56 arch/sparc/mm/tlb.c 	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
tb                 58 arch/sparc/mm/tlb.c 	tb->active = 1;
tb                 63 arch/sparc/mm/tlb.c 	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
tb                 65 arch/sparc/mm/tlb.c 	if (tb->tlb_nr)
tb                 67 arch/sparc/mm/tlb.c 	tb->active = 0;
tb                 73 arch/sparc/mm/tlb.c 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
tb                 80 arch/sparc/mm/tlb.c 	nr = tb->tlb_nr;
tb                 82 arch/sparc/mm/tlb.c 	if (unlikely(nr != 0 && mm != tb->mm)) {
tb                 87 arch/sparc/mm/tlb.c 	if (!tb->active) {
tb                 94 arch/sparc/mm/tlb.c 		tb->mm = mm;
tb                 95 arch/sparc/mm/tlb.c 		tb->hugepage_shift = hugepage_shift;
tb                 98 arch/sparc/mm/tlb.c 	if (tb->hugepage_shift != hugepage_shift) {
tb                100 arch/sparc/mm/tlb.c 		tb->hugepage_shift = hugepage_shift;
tb                104 arch/sparc/mm/tlb.c 	tb->vaddrs[nr] = vaddr;
tb                105 arch/sparc/mm/tlb.c 	tb->tlb_nr = ++nr;
tb                 83 arch/sparc/mm/tsb.c static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
tb                 88 arch/sparc/mm/tsb.c 	for (i = 0; i < tb->tlb_nr; i++)
tb                 89 arch/sparc/mm/tsb.c 		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
tb                107 arch/sparc/mm/tsb.c static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
tb                113 arch/sparc/mm/tsb.c 	for (i = 0; i < tb->tlb_nr; i++)
tb                114 arch/sparc/mm/tsb.c 		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
tb                119 arch/sparc/mm/tsb.c void flush_tsb_user(struct tlb_batch *tb)
tb                121 arch/sparc/mm/tsb.c 	struct mm_struct *mm = tb->mm;
tb                126 arch/sparc/mm/tsb.c 	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
tb                131 arch/sparc/mm/tsb.c 		if (tb->hugepage_shift == PAGE_SHIFT)
tb                132 arch/sparc/mm/tsb.c 			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
tb                135 arch/sparc/mm/tsb.c 			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
tb                136 arch/sparc/mm/tsb.c 					     tb->hugepage_shift);
tb                145 arch/sparc/mm/tsb.c 		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
tb                146 arch/sparc/mm/tsb.c 				     tb->hugepage_shift);
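
The sparc mm hits above use `tb` for the per-cpu `struct tlb_batch`: tlb.c queues user vaddrs for one mm (flushing early if the mm or hugepage shift changes, or if batching is inactive), and flush_tsb_user() in tsb.c walks tb->vaddrs to knock the matching TSB entries out. A simplified standalone sketch of the batching discipline, with no per-cpu state or hugepage handling:

    #include <stdio.h>

    #define TLB_BATCH_NR 8

    struct tlb_batch_sketch {
            void *mm;                       /* address space batched */
            unsigned long tlb_nr;
            unsigned long vaddrs[TLB_BATCH_NR];
    };

    static void flush_tlb_pending(struct tlb_batch_sketch *tb)
    {
            unsigned long i;

            for (i = 0; i < tb->tlb_nr; i++)
                    printf("flush %#lx\n", tb->vaddrs[i]);
            tb->tlb_nr = 0;
    }

    static void tlb_batch_add(struct tlb_batch_sketch *tb, void *mm,
                              unsigned long vaddr)
    {
            if (tb->tlb_nr && tb->mm != mm)
                    flush_tlb_pending(tb);  /* switched mm mid-batch */
            tb->mm = mm;
            tb->vaddrs[tb->tlb_nr++] = vaddr;
            if (tb->tlb_nr == TLB_BATCH_NR)
                    flush_tlb_pending(tb);
    }

    int main(void)
    {
            struct tlb_batch_sketch tb = { 0 };
            unsigned long va;
            int mm;

            for (va = 0; va < 10; va++)
                    tlb_batch_add(&tb, &mm, va << 12);
            flush_tlb_pending(&tb);
            return 0;
    }
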
tb               1202 arch/x86/kernel/cpu/mce/amd.c static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
tb               1248 arch/x86/kernel/cpu/mce/amd.c 	if (tb->blocks)
tb               1249 arch/x86/kernel/cpu/mce/amd.c 		list_add(&b->miscj, &tb->blocks->miscj);
tb               1251 arch/x86/kernel/cpu/mce/amd.c 		tb->blocks = b;
tb               1253 arch/x86/kernel/cpu/mce/amd.c 	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
tb               1261 arch/x86/kernel/cpu/mce/amd.c 	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
tb                357 block/partitions/ldm.c 	struct tocblock *tb[4];
tb                366 block/partitions/ldm.c 	tb[0] = &ldb->toc;
tb                367 block/partitions/ldm.c 	tb[1] = kmalloc_array(3, sizeof(*tb[1]), GFP_KERNEL);
tb                368 block/partitions/ldm.c 	if (!tb[1]) {
tb                372 block/partitions/ldm.c 	tb[2] = (struct tocblock*)((u8*)tb[1] + sizeof(*tb[1]));
tb                373 block/partitions/ldm.c 	tb[3] = (struct tocblock*)((u8*)tb[2] + sizeof(*tb[2]));
tb                386 block/partitions/ldm.c 		if (ldm_parse_tocblock(data, tb[nr_tbs]))
tb                395 block/partitions/ldm.c 	if (((tb[0]->bitmap1_start + tb[0]->bitmap1_size) > ph->config_size) ||
tb                396 block/partitions/ldm.c 			((tb[0]->bitmap2_start + tb[0]->bitmap2_size) >
tb                403 block/partitions/ldm.c 		if (!ldm_compare_tocblocks(tb[0], tb[i])) {
tb                411 block/partitions/ldm.c 	kfree(tb[1]);
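
ldm_validate_tocblocks() above compares up to four copies of the TOC: tb[0] aliases the copy already inside the ldmdb, while tb[1] receives one kmalloc_array(3, ...) allocation out of which tb[2] and tb[3] are carved by pointer arithmetic, so the single kfree(tb[1]) at line 411 releases all three. A standalone sketch of the carving, with plain malloc standing in:

    #include <stdio.h>
    #include <stdlib.h>

    struct tocblock_sketch {
            unsigned long bitmap1_start, bitmap1_size;
    };

    int main(void)
    {
            struct tocblock_sketch *tb[4], resident = { 0, 0 };

            tb[0] = &resident;                  /* the in-database copy */
            tb[1] = malloc(3 * sizeof(*tb[1]));
            if (!tb[1])
                    return 1;
            tb[2] = tb[1] + 1;                  /* same allocation */
            tb[3] = tb[2] + 1;

            printf("%td structs apart\n", tb[3] - tb[1]);
            free(tb[1]);                        /* frees tb[2]/tb[3] too */
            return 0;
    }
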
tb                500 crypto/adiantum.c static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                514 crypto/adiantum.c 	algt = crypto_get_attr_type(tb);
tb                521 crypto/adiantum.c 	streamcipher_name = crypto_attr_alg_name(tb[1]);
tb                525 crypto/adiantum.c 	blockcipher_name = crypto_attr_alg_name(tb[2]);
tb                529 crypto/adiantum.c 	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
tb                232 crypto/aead.c  				       struct rtattr **tb, u32 type, u32 mask)
tb                243 crypto/aead.c  	algt = crypto_get_attr_type(tb);
tb                250 crypto/aead.c  	name = crypto_attr_alg_name(tb[1]);
tb                771 crypto/algapi.c struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
tb                773 crypto/algapi.c 	struct rtattr *rta = tb[0];
tb                789 crypto/algapi.c int crypto_check_attr_type(struct rtattr **tb, u32 type)
tb                793 crypto/algapi.c 	algt = crypto_get_attr_type(tb);
tb                 24 crypto/algboss.c 	struct rtattr *tb[CRYPTO_MAX_ATTRS + 2];
tb                 70 crypto/algboss.c 			err = tmpl->create(tmpl, param->tb);
tb                 74 crypto/algboss.c 		inst = tmpl->alloc(param->tb);
tb                157 crypto/algboss.c 		param->tb[i + 1] = &param->attrs[i].attr;
tb                173 crypto/algboss.c 	param->tb[i + 1] = NULL;
tb                179 crypto/algboss.c 	param->tb[0] = &param->type.attr;
tb                 34 crypto/asymmetric_keys/asym_tpm.c static int tpm_loadkey2(struct tpm_buf *tb,
tb                 50 crypto/asymmetric_keys/asym_tpm.c 	ret = oiap(tb, &authhandle, enonce);
tb                 71 crypto/asymmetric_keys/asym_tpm.c 	INIT_BUF(tb);
tb                 72 crypto/asymmetric_keys/asym_tpm.c 	store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
tb                 73 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_LOADKEY2_SIZE + keybloblen);
tb                 74 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_ORD_LOADKEY2);
tb                 75 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, keyhandle);
tb                 76 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, keyblob, keybloblen);
tb                 77 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, authhandle);
tb                 78 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
tb                 79 crypto/asymmetric_keys/asym_tpm.c 	store8(tb, cont);
tb                 80 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, authdata, SHA1_DIGEST_SIZE);
tb                 82 crypto/asymmetric_keys/asym_tpm.c 	ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                 88 crypto/asymmetric_keys/asym_tpm.c 	ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, keyauth,
tb                 95 crypto/asymmetric_keys/asym_tpm.c 	*newhandle = LOAD32(tb->data, TPM_DATA_OFFSET);
tb                102 crypto/asymmetric_keys/asym_tpm.c static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle)
tb                104 crypto/asymmetric_keys/asym_tpm.c 	INIT_BUF(tb);
tb                105 crypto/asymmetric_keys/asym_tpm.c 	store16(tb, TPM_TAG_RQU_COMMAND);
tb                106 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_FLUSHSPECIFIC_SIZE);
tb                107 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_ORD_FLUSHSPECIFIC);
tb                108 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, handle);
tb                109 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_RT_KEY);
tb                111 crypto/asymmetric_keys/asym_tpm.c 	return trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                118 crypto/asymmetric_keys/asym_tpm.c static int tpm_unbind(struct tpm_buf *tb,
tb                136 crypto/asymmetric_keys/asym_tpm.c 	ret = oiap(tb, &authhandle, enonce);
tb                158 crypto/asymmetric_keys/asym_tpm.c 	INIT_BUF(tb);
tb                159 crypto/asymmetric_keys/asym_tpm.c 	store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
tb                160 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_UNBIND_SIZE + bloblen);
tb                161 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_ORD_UNBIND);
tb                162 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, keyhandle);
tb                163 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, bloblen);
tb                164 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, blob, bloblen);
tb                165 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, authhandle);
tb                166 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
tb                167 crypto/asymmetric_keys/asym_tpm.c 	store8(tb, cont);
tb                168 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, authdata, SHA1_DIGEST_SIZE);
tb                170 crypto/asymmetric_keys/asym_tpm.c 	ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                176 crypto/asymmetric_keys/asym_tpm.c 	datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
tb                178 crypto/asymmetric_keys/asym_tpm.c 	ret = TSS_checkhmac1(tb->data, ordinal, nonceodd,
tb                188 crypto/asymmetric_keys/asym_tpm.c 	memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t),
tb                204 crypto/asymmetric_keys/asym_tpm.c static int tpm_sign(struct tpm_buf *tb,
tb                222 crypto/asymmetric_keys/asym_tpm.c 	ret = oiap(tb, &authhandle, enonce);
tb                244 crypto/asymmetric_keys/asym_tpm.c 	INIT_BUF(tb);
tb                245 crypto/asymmetric_keys/asym_tpm.c 	store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
tb                246 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_SIGN_SIZE + bloblen);
tb                247 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, TPM_ORD_SIGN);
tb                248 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, keyhandle);
tb                249 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, bloblen);
tb                250 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, blob, bloblen);
tb                251 crypto/asymmetric_keys/asym_tpm.c 	store32(tb, authhandle);
tb                252 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
tb                253 crypto/asymmetric_keys/asym_tpm.c 	store8(tb, cont);
tb                254 crypto/asymmetric_keys/asym_tpm.c 	storebytes(tb, authdata, SHA1_DIGEST_SIZE);
tb                256 crypto/asymmetric_keys/asym_tpm.c 	ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                262 crypto/asymmetric_keys/asym_tpm.c 	datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
tb                264 crypto/asymmetric_keys/asym_tpm.c 	ret = TSS_checkhmac1(tb->data, ordinal, nonceodd,
tb                274 crypto/asymmetric_keys/asym_tpm.c 	memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t),
tb                523 crypto/asymmetric_keys/asym_tpm.c 	struct tpm_buf *tb;
tb                537 crypto/asymmetric_keys/asym_tpm.c 	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
tb                538 crypto/asymmetric_keys/asym_tpm.c 	if (!tb)
tb                544 crypto/asymmetric_keys/asym_tpm.c 	r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
tb                554 crypto/asymmetric_keys/asym_tpm.c 	r = tpm_unbind(tb, keyhandle, keyauth,
tb                559 crypto/asymmetric_keys/asym_tpm.c 	if (tpm_flushspecific(tb, keyhandle) < 0)
tb                563 crypto/asymmetric_keys/asym_tpm.c 	kzfree(tb);
tb                647 crypto/asymmetric_keys/asym_tpm.c 	struct tpm_buf *tb;
tb                686 crypto/asymmetric_keys/asym_tpm.c 	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
tb                687 crypto/asymmetric_keys/asym_tpm.c 	if (!tb)
tb                693 crypto/asymmetric_keys/asym_tpm.c 	r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
tb                703 crypto/asymmetric_keys/asym_tpm.c 	r = tpm_sign(tb, keyhandle, keyauth, in, in_len, out, params->out_len);
tb                707 crypto/asymmetric_keys/asym_tpm.c 	if (tpm_flushspecific(tb, keyhandle) < 0)
tb                711 crypto/asymmetric_keys/asym_tpm.c 	kzfree(tb);
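
In asym_tpm.c above, `tb` is a struct tpm_buf command buffer: each operation is marshalled with the store8/store16/store32/storebytes helpers (big-endian tag, total length, ordinal, then payload and, for AUTH1 commands, the handle/nonce/HMAC tail) and shipped via trusted_tpm_send(). A standalone sketch of what the store helpers append; the constants are my reading of TPM 1.2 FlushSpecific, so treat them as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Append big-endian fields at a running offset, like store16/store32. */
    static size_t store16(uint8_t *b, size_t off, uint16_t v)
    {
            b[off++] = v >> 8;
            b[off++] = v;
            return off;
    }

    static size_t store32(uint8_t *b, size_t off, uint32_t v)
    {
            b[off++] = v >> 24;
            b[off++] = v >> 16;
            b[off++] = v >> 8;
            b[off++] = v;
            return off;
    }

    int main(void)
    {
            uint8_t buf[64];
            size_t off = 0;

            off = store16(buf, off, 0x00c1);    /* TPM_TAG_RQU_COMMAND? */
            off = store32(buf, off, 18);        /* total request length */
            off = store32(buf, off, 0x00ba);    /* FlushSpecific ordinal? */
            off = store32(buf, off, 0x01020304);/* key handle */
            off = store32(buf, off, 1);         /* TPM_RT_KEY? */
            printf("%zu bytes marshalled\n", off);
            return 0;
    }
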
tb                383 crypto/authenc.c 				 struct rtattr **tb)
tb                394 crypto/authenc.c 	algt = crypto_get_attr_type(tb);
tb                401 crypto/authenc.c 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
tb                409 crypto/authenc.c 	enc_name = crypto_attr_alg_name(tb[2]);
tb                401 crypto/authencesn.c 				     struct rtattr **tb)
tb                412 crypto/authencesn.c 	algt = crypto_get_attr_type(tb);
tb                419 crypto/authencesn.c 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
tb                427 crypto/authencesn.c 	enc_name = crypto_attr_alg_name(tb[2]);
tb                 51 crypto/cbc.c   static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                 57 crypto/cbc.c   	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
tb                455 crypto/ccm.c   				    struct rtattr **tb,
tb                467 crypto/ccm.c   	algt = crypto_get_attr_type(tb);
tb                564 crypto/ccm.c   static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                570 crypto/ccm.c   	cipher_name = crypto_attr_alg_name(tb[1]);
tb                582 crypto/ccm.c   	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
tb                586 crypto/ccm.c   				  struct rtattr **tb)
tb                591 crypto/ccm.c   	ctr_name = crypto_attr_alg_name(tb[1]);
tb                595 crypto/ccm.c   	mac_name = crypto_attr_alg_name(tb[2]);
tb                599 crypto/ccm.c   	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
tb                745 crypto/ccm.c   				 struct rtattr **tb)
tb                754 crypto/ccm.c   	algt = crypto_get_attr_type(tb);
tb                761 crypto/ccm.c   	ccm_name = crypto_attr_alg_name(tb[1]);
tb                917 crypto/ccm.c   static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                923 crypto/ccm.c   	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
tb                927 crypto/ccm.c   	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
tb                200 crypto/cfb.c   static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                206 crypto/cfb.c   	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
tb                562 crypto/chacha20poly1305.c static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
tb                577 crypto/chacha20poly1305.c 	algt = crypto_get_attr_type(tb);
tb                584 crypto/chacha20poly1305.c 	chacha_name = crypto_attr_alg_name(tb[1]);
tb                587 crypto/chacha20poly1305.c 	poly_name = crypto_attr_alg_name(tb[2]);
tb                681 crypto/chacha20poly1305.c static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                683 crypto/chacha20poly1305.c 	return chachapoly_create(tmpl, tb, "rfc7539", 12);
tb                686 crypto/chacha20poly1305.c static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                688 crypto/chacha20poly1305.c 	return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
tb                222 crypto/cmac.c  static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                229 crypto/cmac.c  	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
tb                233 crypto/cmac.c  	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
tb                194 crypto/cryptd.c static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
tb                199 crypto/cryptd.c 	algt = crypto_get_attr_type(tb);
tb                397 crypto/cryptd.c 				  struct rtattr **tb,
tb                411 crypto/cryptd.c 	cryptd_check_internal(tb, &type, &mask);
tb                413 crypto/cryptd.c 	name = crypto_attr_alg_name(tb[1]);
tb                669 crypto/cryptd.c static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
tb                680 crypto/cryptd.c 	cryptd_check_internal(tb, &type, &mask);
tb                682 crypto/cryptd.c 	salg = shash_attr_alg(tb[1], type, mask);
tb                853 crypto/cryptd.c 		              struct rtattr **tb,
tb                864 crypto/cryptd.c 	cryptd_check_internal(tb, &type, &mask);
tb                866 crypto/cryptd.c 	name = crypto_attr_alg_name(tb[1]);
tb                913 crypto/cryptd.c static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                917 crypto/cryptd.c 	algt = crypto_get_attr_type(tb);
tb                923 crypto/cryptd.c 		return cryptd_create_skcipher(tmpl, tb, &queue);
tb                925 crypto/cryptd.c 		return cryptd_create_hash(tmpl, tb, &queue);
tb                927 crypto/cryptd.c 		return cryptd_create_aead(tmpl, tb, &queue);
tb                126 crypto/ctr.c   static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                132 crypto/ctr.c   	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
tb                263 crypto/ctr.c   				 struct rtattr **tb)
tb                274 crypto/ctr.c   	algt = crypto_get_attr_type(tb);
tb                281 crypto/ctr.c   	cipher_name = crypto_attr_alg_name(tb[1]);
tb                328 crypto/cts.c   static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                337 crypto/cts.c   	algt = crypto_get_attr_type(tb);
tb                344 crypto/cts.c   	cipher_name = crypto_attr_alg_name(tb[1]);
tb                 61 crypto/ecb.c   static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                 67 crypto/ecb.c   	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
tb                113 crypto/echainiv.c 				struct rtattr **tb)
tb                118 crypto/echainiv.c 	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
tb                456 crypto/essiv.c static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                474 crypto/essiv.c 	algt = crypto_get_attr_type(tb);
tb                478 crypto/essiv.c 	inner_cipher_name = crypto_attr_alg_name(tb[1]);
tb                482 crypto/essiv.c 	shash_name = crypto_attr_alg_name(tb[2]);
tb                583 crypto/gcm.c   				    struct rtattr **tb,
tb                595 crypto/gcm.c   	algt = crypto_get_attr_type(tb);
tb                692 crypto/gcm.c   static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                697 crypto/gcm.c   	cipher_name = crypto_attr_alg_name(tb[1]);
tb                705 crypto/gcm.c   	return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
tb                709 crypto/gcm.c   				  struct rtattr **tb)
tb                714 crypto/gcm.c   	ctr_name = crypto_attr_alg_name(tb[1]);
tb                718 crypto/gcm.c   	ghash_name = crypto_attr_alg_name(tb[2]);
tb                722 crypto/gcm.c   	return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
tb                867 crypto/gcm.c   				 struct rtattr **tb)
tb                876 crypto/gcm.c   	algt = crypto_get_attr_type(tb);
tb                883 crypto/gcm.c   	ccm_name = crypto_attr_alg_name(tb[1]);
tb               1103 crypto/gcm.c   				struct rtattr **tb)
tb               1113 crypto/gcm.c   	algt = crypto_get_attr_type(tb);
tb               1120 crypto/gcm.c   	ccm_name = crypto_attr_alg_name(tb[1]);
tb                170 crypto/hmac.c  static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                179 crypto/hmac.c  	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
tb                183 crypto/hmac.c  	salg = shash_attr_alg(tb[1], 0, 0);
tb                263 crypto/keywrap.c static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                269 crypto/keywrap.c 	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
tb                298 crypto/lrw.c   static int create(struct crypto_template *tmpl, struct rtattr **tb)
tb                308 crypto/lrw.c   	algt = crypto_get_attr_type(tb);
tb                315 crypto/lrw.c   	cipher_name = crypto_attr_alg_name(tb[1]);
tb                 52 crypto/ofb.c   static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                 58 crypto/ofb.c   	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
tb                153 crypto/pcbc.c  static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                159 crypto/pcbc.c  	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
tb                229 crypto/pcrypt.c static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
tb                239 crypto/pcrypt.c 	algt = crypto_get_attr_type(tb);
tb                243 crypto/pcrypt.c 	name = crypto_attr_alg_name(tb[1]);
tb                308 crypto/pcrypt.c static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                312 crypto/pcrypt.c 	algt = crypto_get_attr_type(tb);
tb                318 crypto/pcrypt.c 		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
tb                597 crypto/rsa-pkcs1pad.c static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                609 crypto/rsa-pkcs1pad.c 	algt = crypto_get_attr_type(tb);
tb                616 crypto/rsa-pkcs1pad.c 	rsa_alg_name = crypto_attr_alg_name(tb[1]);
tb                620 crypto/rsa-pkcs1pad.c 	hash_name = crypto_attr_alg_name(tb[2]);
tb                138 crypto/seqiv.c static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                143 crypto/seqiv.c 	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
tb                173 crypto/seqiv.c static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                177 crypto/seqiv.c 	algt = crypto_get_attr_type(tb);
tb                184 crypto/seqiv.c 	return seqiv_aead_create(tmpl, tb);
tb               1170 crypto/skcipher.c skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
tb               1180 crypto/skcipher.c 	algt = crypto_get_attr_type(tb);
tb               1191 crypto/skcipher.c 	cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
tb                620 crypto/vmac.c  static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                626 crypto/vmac.c  	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
tb                630 crypto/vmac.c  	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
tb                188 crypto/xcbc.c  static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
tb                195 crypto/xcbc.c  	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
tb                199 crypto/xcbc.c  	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
tb                337 crypto/xts.c   static int create(struct crypto_template *tmpl, struct rtattr **tb)
tb                347 crypto/xts.c   	algt = crypto_get_attr_type(tb);
tb                354 crypto/xts.c   	cipher_name = crypto_attr_alg_name(tb[1]);
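
Everything from adiantum.c through xts.c above is one convention: a crypto template's ->create() (or legacy ->alloc()) receives `struct rtattr **tb`, where tb[0] is the type/mask attribute decoded by crypto_get_attr_type() and tb[1], tb[2], ... name the component algorithms, fetched with crypto_attr_alg_name() or looked up via crypto_get_attr_alg()/shash_attr_alg(); algboss.c is the producer that packs the array. A trimmed skeleton of a typical create(), kernel context assumed:

    /* Skeleton only: error unwinding and instance setup are elided. */
    static int example_create(struct crypto_template *tmpl,
                              struct rtattr **tb)
    {
            struct crypto_attr_type *algt;
            const char *cipher_name;

            algt = crypto_get_attr_type(tb);        /* decodes tb[0] */
            if (IS_ERR(algt))
                    return PTR_ERR(algt);

            cipher_name = crypto_attr_alg_name(tb[1]);
            if (IS_ERR(cipher_name))
                    return PTR_ERR(cipher_name);

            /* ... allocate an instance wrapping cipher_name ... */
            return 0;
    }
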
tb                 50 drivers/acpi/spcr.c static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
tb                 54 drivers/acpi/spcr.c 	if (tb->interface_type != ACPI_DBG2_16550_COMPATIBLE)
tb                 57 drivers/acpi/spcr.c 	if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE) &&
tb                 58 drivers/acpi/spcr.c 	    memcmp(tb->header.oem_id, "HPE   ", ACPI_OEM_ID_SIZE))
tb                 61 drivers/acpi/spcr.c 	if (!memcmp(tb->header.oem_table_id, "XGENESPC",
tb                 62 drivers/acpi/spcr.c 	    ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 0)
tb                 65 drivers/acpi/spcr.c 	if (!memcmp(tb->header.oem_table_id, "ProLiant",
tb                 66 drivers/acpi/spcr.c 	    ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 1)
tb               1802 drivers/atm/ambassador.c   transfer_block * tb = &lb->payload.transfer;
tb               1806 drivers/atm/ambassador.c   tb->address = rec->addr;
tb               1807 drivers/atm/ambassador.c   tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
tb               1808 drivers/atm/ambassador.c   memcpy(tb->data, rec->data, be16_to_cpu(rec->len));
tb               1817 drivers/atm/ambassador.c   transfer_block * tb = &lb->payload.transfer;
tb               1822 drivers/atm/ambassador.c   tb->address = rec->addr;
tb               1823 drivers/atm/ambassador.c   tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
tb               1825 drivers/atm/ambassador.c   if (!res && memcmp(tb->data, rec->data, be16_to_cpu(rec->len)))
tb                 31 drivers/block/drbd/drbd_nla.c int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
tb                 38 drivers/block/drbd/drbd_nla.c 		err = nla_parse_nested_deprecated(tb, maxtype, nla, policy,
tb                  5 drivers/block/drbd/drbd_nla.h extern int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
tb                846 drivers/block/drbd/drbd_receiver.c 	char tb[4];
tb                851 drivers/block/drbd/drbd_receiver.c 	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
tb                750 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 	uint32_t lr, tb, req;
tb                770 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 		tb  = (pe_top[i] >= 0) ?
tb                774 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 		tb |= (pe_bottom[i] >= 0) ?
tb                785 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
tb                796 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
tb                797 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
tb                798 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
tb                799 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
tb               2206 drivers/hid/hid-wiimote-modules.c 	__u8 sx, sy, tb, wb, bd, bm, bp, bo, br, bb, bg, by, bu;
tb               2241 drivers/hid/hid-wiimote-modules.c 	tb = ext[2] & 0x1f;
tb               2261 drivers/hid/hid-wiimote-modules.c 	input_report_abs(wdata->extension.input, ABS_HAT0X, tb);
tb                184 drivers/i2c/busses/i2c-cpm.c 	u_char *tb;
tb                193 drivers/i2c/busses/i2c-cpm.c 	tb = cpm->txbuf[tx];
tb                199 drivers/i2c/busses/i2c-cpm.c 	tb[0] = addr;		/* Device address byte w/rw flag */
tb                230 drivers/i2c/busses/i2c-cpm.c 		memcpy(tb+1, pmsg->buf, pmsg->len);
tb                242 drivers/i2c/busses/i2c-cpm.c 	u_char *tb;
tb                249 drivers/i2c/busses/i2c-cpm.c 	tb = cpm->txbuf[tx];
tb                 84 drivers/infiniband/core/addr.c 	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
tb                 90 drivers/infiniband/core/addr.c 	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
tb                800 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb                806 drivers/infiniband/core/nldev.c 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb                808 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
tb                811 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb                846 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb                851 drivers/infiniband/core/nldev.c 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb                853 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
tb                856 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb                861 drivers/infiniband/core/nldev.c 	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
tb                864 drivers/infiniband/core/nldev.c 		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
tb                874 drivers/infiniband/core/nldev.c 	if (tb[RDMA_NLDEV_NET_NS_FD]) {
tb                877 drivers/infiniband/core/nldev.c 		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
tb                882 drivers/infiniband/core/nldev.c 	if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
tb                885 drivers/infiniband/core/nldev.c 		use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
tb                936 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb                943 drivers/infiniband/core/nldev.c 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb                946 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
tb                947 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
tb                950 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb                955 drivers/infiniband/core/nldev.c 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb                990 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb                999 drivers/infiniband/core/nldev.c 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1001 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
tb               1004 drivers/infiniband/core/nldev.c 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1048 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1054 drivers/infiniband/core/nldev.c 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1056 drivers/infiniband/core/nldev.c 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
tb               1059 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1190 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1198 drivers/infiniband/core/nldev.c 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1200 drivers/infiniband/core/nldev.c 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
tb               1203 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1208 drivers/infiniband/core/nldev.c 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
tb               1209 drivers/infiniband/core/nldev.c 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb               1222 drivers/infiniband/core/nldev.c 	id = nla_get_u32(tb[fe->id]);
tb               1273 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1287 drivers/infiniband/core/nldev.c 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1297 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
tb               1300 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1308 drivers/infiniband/core/nldev.c 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
tb               1309 drivers/infiniband/core/nldev.c 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb               1459 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1467 drivers/infiniband/core/nldev.c 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1469 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
tb               1470 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
tb               1473 drivers/infiniband/core/nldev.c 	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
tb               1478 drivers/infiniband/core/nldev.c 	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
tb               1479 drivers/infiniband/core/nldev.c 	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
tb               1506 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1511 drivers/infiniband/core/nldev.c 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1513 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
tb               1516 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1533 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1541 drivers/infiniband/core/nldev.c 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
tb               1543 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
tb               1546 drivers/infiniband/core/nldev.c 	nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
tb               1549 drivers/infiniband/core/nldev.c 	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
tb               1550 drivers/infiniband/core/nldev.c 		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1555 drivers/infiniband/core/nldev.c 		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
tb               1556 drivers/infiniband/core/nldev.c 			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb               1564 drivers/infiniband/core/nldev.c 	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
tb               1617 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1621 drivers/infiniband/core/nldev.c 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1648 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1652 drivers/infiniband/core/nldev.c 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1654 drivers/infiniband/core/nldev.c 	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
tb               1657 drivers/infiniband/core/nldev.c 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
tb               1670 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1675 drivers/infiniband/core/nldev.c 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1678 drivers/infiniband/core/nldev.c 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
tb               1679 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
tb               1680 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
tb               1683 drivers/infiniband/core/nldev.c 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
tb               1686 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1691 drivers/infiniband/core/nldev.c 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb               1707 drivers/infiniband/core/nldev.c 	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
tb               1709 drivers/infiniband/core/nldev.c 		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
tb               1711 drivers/infiniband/core/nldev.c 				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
tb               1718 drivers/infiniband/core/nldev.c 		if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
tb               1720 drivers/infiniband/core/nldev.c 		qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
tb               1721 drivers/infiniband/core/nldev.c 		if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
tb               1722 drivers/infiniband/core/nldev.c 			cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
tb               1756 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1762 drivers/infiniband/core/nldev.c 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1764 drivers/infiniband/core/nldev.c 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
tb               1765 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
tb               1766 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
tb               1767 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
tb               1770 drivers/infiniband/core/nldev.c 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
tb               1773 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1778 drivers/infiniband/core/nldev.c 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb               1794 drivers/infiniband/core/nldev.c 	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
tb               1795 drivers/infiniband/core/nldev.c 	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
tb               1822 drivers/infiniband/core/nldev.c 					 struct nlattr *tb[])
tb               1832 drivers/infiniband/core/nldev.c 	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
tb               1835 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1845 drivers/infiniband/core/nldev.c 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb               1913 drivers/infiniband/core/nldev.c 			    struct netlink_ext_ack *extack, struct nlattr *tb[])
tb               1923 drivers/infiniband/core/nldev.c 	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
tb               1926 drivers/infiniband/core/nldev.c 	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
tb               1927 drivers/infiniband/core/nldev.c 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
tb               1930 drivers/infiniband/core/nldev.c 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
tb               1935 drivers/infiniband/core/nldev.c 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
tb               1983 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               1986 drivers/infiniband/core/nldev.c 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               1991 drivers/infiniband/core/nldev.c 	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
tb               1992 drivers/infiniband/core/nldev.c 		return stat_get_doit_default_counter(skb, nlh, extack, tb);
tb               1994 drivers/infiniband/core/nldev.c 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
tb               1996 drivers/infiniband/core/nldev.c 		ret = stat_get_doit_qp(skb, nlh, extack, tb);
tb               2010 drivers/infiniband/core/nldev.c 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
tb               2013 drivers/infiniband/core/nldev.c 	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
tb               2015 drivers/infiniband/core/nldev.c 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
tb               2018 drivers/infiniband/core/nldev.c 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
tb               1023 drivers/infiniband/core/sa_query.c 	struct nlattr *tb[LS_NLA_TYPE_MAX];
tb               1030 drivers/infiniband/core/sa_query.c 	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
tb               1032 drivers/infiniband/core/sa_query.c 	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
tb               1076 drivers/infiniband/core/sa_query.c 	struct nlattr *tb[LS_NLA_TYPE_MAX];
tb               1082 drivers/infiniband/core/sa_query.c 	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
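
Throughout addr.c, nldev.c and sa_query.c above, `tb` is the classic netlink attribute table: nlmsg_parse()/nla_parse_deprecated() fills tb[] against a policy, mandatory slots are checked for NULL, and values come out through nla_get_u32()/nla_get_u8()/nla_strlcpy(). A trimmed skeleton of the doit pattern, kernel context assumed (nldev_policy is the policy symbol in nldev.c):

    /* Skeleton only: device lookup and reply construction are elided. */
    static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct netlink_ext_ack *extack)
    {
            struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
            u32 index;
            int err;

            err = nlmsg_parse_deprecated(nlh, 0, tb,
                                         RDMA_NLDEV_ATTR_MAX - 1,
                                         nldev_policy, extack);
            if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                    return -EINVAL;

            index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
            /* ... look up the ib_device by index and reply ... */
            return 0;
    }
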
tb               2925 drivers/infiniband/hw/qib/qib_iba6120.c 		u64 ta, tb, tc, td, te;
tb               2928 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
tb               2931 drivers/infiniband/hw/qib/qib_iba6120.c 		cs->rword = tb - cs->rword;
tb                 67 drivers/infiniband/ulp/ipoib/ipoib_netlink.c static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
tb                 97 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 				struct nlattr *tb[], struct nlattr *data[],
tb                105 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 	if (!tb[IFLA_LINK])
tb                108 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 	pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
tb                137 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 		err = ipoib_changelink(dev, tb, data, extack);
tb                787 drivers/media/pci/ngene/ngene-core.c 		     struct SRingBufferDescriptor *tb)
tb                790 drivers/media/pci/ngene/ngene-core.c 	struct SBufferHeader *Cur = tb->Head;
tb                795 drivers/media/pci/ngene/ngene-core.c 	for (j = 0; j < tb->NumBuffers; j++, Cur = Cur->Next) {
tb                118 drivers/net/bonding/bond_netlink.c static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
tb                121 drivers/net/bonding/bond_netlink.c 	if (tb[IFLA_ADDRESS]) {
tb                122 drivers/net/bonding/bond_netlink.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb                124 drivers/net/bonding/bond_netlink.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb                132 drivers/net/bonding/bond_netlink.c 				 struct nlattr *tb[], struct nlattr *data[],
tb                158 drivers/net/bonding/bond_netlink.c static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
tb                449 drivers/net/bonding/bond_netlink.c 			struct nlattr *tb[], struct nlattr *data[],
tb                454 drivers/net/bonding/bond_netlink.c 	err = bond_changelink(bond_dev, tb, data, extack);
tb               1347 drivers/net/caif/caif_hsi.c static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1396 drivers/net/caif/caif_hsi.c 			    struct nlattr *tb[], struct nlattr *data[],
tb                890 drivers/net/can/dev.c static int can_validate(struct nlattr *tb[], struct nlattr *data[],
tb                923 drivers/net/can/dev.c static int can_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1209 drivers/net/can/dev.c 		       struct nlattr *tb[], struct nlattr *data[],
tb                157 drivers/net/can/vxcan.c 			 struct nlattr *tb[], struct nlattr *data[],
tb                164 drivers/net/can/vxcan.c 	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
tb                225 drivers/net/can/vxcan.c 	if (tb[IFLA_IFNAME])
tb                226 drivers/net/can/vxcan.c 		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
tb                166 drivers/net/dummy.c static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
tb                169 drivers/net/dummy.c 	if (tb[IFLA_ADDRESS]) {
tb                170 drivers/net/dummy.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb                172 drivers/net/dummy.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
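
The bond_netlink.c entries above and the dummy.c entries just listed repeat one rtnl ->validate idiom, which recurs below in ifb.c, team.c, veth.c and vrf.c: if tb[IFLA_ADDRESS] is present, reject it unless it is exactly ETH_ALEN bytes of a valid unicast MAC. A minimal user-space sketch of that check; struct mock_nla and valid_ether_addr() are stand-ins for the kernel's nla_len()/nla_data()/is_valid_ether_addr(), everything else mirrors the quoted lines:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Mock of a parsed netlink attribute: payload length plus payload,
     * standing in for what nla_len()/nla_data() report in the kernel. */
    struct mock_nla {
        int len;
        unsigned char data[16];
    };

    /* Same semantics as the kernel's is_valid_ether_addr():
     * not multicast (bit 0 of octet 0) and not all zeros. */
    static bool valid_ether_addr(const unsigned char *a)
    {
        static const unsigned char zero[ETH_ALEN];
        return !(a[0] & 1) && memcmp(a, zero, ETH_ALEN) != 0;
    }

    /* The recurring ->validate idiom: the attribute may be absent, but
     * if present it must be ETH_ALEN bytes of a valid unicast MAC. */
    static int validate_address(const struct mock_nla *addr)
    {
        if (!addr)
            return 0;      /* not supplied: nothing to check */
        if (addr->len != ETH_ALEN)
            return -1;     /* kernel: -EINVAL */
        if (!valid_ether_addr(addr->data))
            return -1;     /* kernel: -EADDRNOTAVAIL */
        return 0;
    }

    int main(void)
    {
        struct mock_nla good  = { ETH_ALEN, { 0x02, 0, 0, 0, 0, 1 } };
        struct mock_nla mcast = { ETH_ALEN, { 0x01, 0, 0, 0, 0, 1 } };

        printf("good=%d mcast=%d absent=%d\n",
               validate_address(&good), validate_address(&mcast),
               validate_address(NULL));
        return 0;
    }
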
tb                 54 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
tb                 56 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	pos -= tb->skip_first;
tb                 57 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
tb                 62 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *tb = seq->private;
tb                 64 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (tb->skip_first && *pos == 0)
tb                 67 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	return seq_tab_get_idx(tb, *pos);
tb                 83 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const struct seq_tab *tb = seq->private;
tb                 85 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
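
The cxgb4_debugfs.c seq_tab lines above implement a flat-table iterator: rows live in one contiguous buffer of fixed width, an optional header line is modelled by skip_first, and out-of-range positions map to NULL. A self-contained sketch of the same index arithmetic, with SEQ_START_TOKEN standing in for the seq_file sentinel:

    #include <stdio.h>

    #define SEQ_START_TOKEN ((void *)1)   /* seq_file's header sentinel */

    struct seq_tab {
        long rows;          /* number of data rows */
        long width;         /* bytes per row */
        int skip_first;     /* nonzero if position 0 is a header line */
        char data[64];      /* flat row storage */
    };

    /* Mirrors seq_tab_get_idx(): logical position -> row pointer, or
     * NULL past the end.  Position 0 with a header goes via _start(). */
    static void *seq_tab_get_idx(struct seq_tab *tb, long pos)
    {
        pos -= tb->skip_first;
        return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
    }

    /* Mirrors the _start() path: header sentinel first, then the rows. */
    static void *seq_tab_start(struct seq_tab *tb, long pos)
    {
        if (tb->skip_first && pos == 0)
            return SEQ_START_TOKEN;
        return seq_tab_get_idx(tb, pos);
    }

    int main(void)
    {
        struct seq_tab tb = { .rows = 3, .width = 8, .skip_first = 1 };

        printf("pos0=%p pos1=%p pos4=%p\n",
               seq_tab_start(&tb, 0),     /* header token */
               seq_tab_start(&tb, 1),     /* first data row */
               seq_tab_start(&tb, 4));    /* 3 rows + header -> NULL */
        return 0;
    }
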
tb               1365 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
tb               12284 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb               3360 drivers/net/ethernet/intel/ice/ice_main.c ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
tb               3398 drivers/net/ethernet/intel/ice/ice_main.c ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
tb               2489 drivers/net/ethernet/intel/igb/igb_main.c static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb               2504 drivers/net/ethernet/intel/igb/igb_main.c 	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
tb               9922 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb               9937 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
tb                768 drivers/net/ethernet/mscc/ocelot.c static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb                794 drivers/net/ethernet/mscc/ocelot.c static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
tb                372 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
tb                380 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid);
tb                397 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb                406 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags);
tb                108 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c 			 struct nlattr *tb[], struct nlattr *data[],
tb                119 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c 	if (!tb[IFLA_LINK]) {
tb                124 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c 	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
tb                261 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
tb                276 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
tb                 15 drivers/net/ethernet/rocker/rocker_tlv.c void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
tb                 22 drivers/net/ethernet/rocker/rocker_tlv.c 	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
tb                 28 drivers/net/ethernet/rocker/rocker_tlv.c 			tb[type] = tlv;
tb                109 drivers/net/ethernet/rocker/rocker_tlv.h void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
tb                112 drivers/net/ethernet/rocker/rocker_tlv.h static inline void rocker_tlv_parse_nested(const struct rocker_tlv **tb,
tb                116 drivers/net/ethernet/rocker/rocker_tlv.h 	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
tb                121 drivers/net/ethernet/rocker/rocker_tlv.h rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
tb                124 drivers/net/ethernet/rocker/rocker_tlv.h 	rocker_tlv_parse(tb, maxtype, desc_info->data,
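
rocker_tlv_parse() above walks a buffer of type/length/value records and stores a pointer to each in a table indexed by type, so callers can later ask "where is attribute N" in O(1). A minimal sketch of that walk; the 2-byte-type / 2-byte-total-length header is an assumption, since rocker's real record layout is not visible in these lines:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assumed wire header: type, then total length (header + payload). */
    struct tlv {
        uint16_t type;
        uint16_t len;
    };

    #define MAXTYPE 8

    static void tlv_parse(const struct tlv **tb, int maxtype,
                          const char *buf, size_t buflen)
    {
        /* Clear the table first, as rocker_tlv_parse() does with memset(). */
        memset(tb, 0, sizeof(struct tlv *) * (maxtype + 1));

        while (buflen >= sizeof(struct tlv)) {
            struct tlv h;

            memcpy(&h, buf, sizeof(h));   /* header may be unaligned */
            if (h.len < sizeof(struct tlv) || h.len > buflen)
                break;                    /* malformed record: stop */
            if (h.type <= maxtype)
                tb[h.type] = (const struct tlv *)buf;  /* last one wins */
            buf += h.len;
            buflen -= h.len;
        }
    }

    int main(void)
    {
        const struct tlv *tb[MAXTYPE + 1];
        char buf[16];
        struct tlv a = { 3, 6 }, b = { 5, 4 };

        memcpy(buf, &a, 4);
        memcpy(buf + 4, "hi", 2);         /* 2-byte payload of record 3 */
        memcpy(buf + 6, &b, 4);

        tlv_parse(tb, MAXTYPE, buf, 10);
        printf("type3=%p type5=%p type1=%p\n",
               (void *)tb[3], (void *)tb[5], (void *)tb[1]);
        return 0;
    }
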
tb               3542 drivers/net/ethernet/sun/niu.c 	struct tx_buff_info *tb = &rp->tx_buffs[idx];
tb               3543 drivers/net/ethernet/sun/niu.c 	struct sk_buff *skb = tb->skb;
tb               3556 drivers/net/ethernet/sun/niu.c 	np->ops->unmap_single(np->device, tb->mapping,
tb               3562 drivers/net/ethernet/sun/niu.c 	tb->skb = NULL;
tb               3569 drivers/net/ethernet/sun/niu.c 		tb = &rp->tx_buffs[idx];
tb               3570 drivers/net/ethernet/sun/niu.c 		BUG_ON(tb->skb != NULL);
tb               3571 drivers/net/ethernet/sun/niu.c 		np->ops->unmap_page(np->device, tb->mapping,
tb               1061 drivers/net/fddi/skfp/fplustm.c 	struct s_fpmc	*tb ;
tb               1076 drivers/net/fddi/skfp/fplustm.c 	for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
tb               1077 drivers/net/fddi/skfp/fplustm.c 		if (!tb->n) {		/* not used */
tb               1079 drivers/net/fddi/skfp/fplustm.c 				slot = tb ;
tb               1082 drivers/net/fddi/skfp/fplustm.c 		if (!ether_addr_equal((char *)&tb->a, (char *)own))
tb               1084 drivers/net/fddi/skfp/fplustm.c 		return tb;
tb               1102 drivers/net/fddi/skfp/fplustm.c 	struct s_fpmc	*tb ;
tb               1107 drivers/net/fddi/skfp/fplustm.c 	for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
tb               1108 drivers/net/fddi/skfp/fplustm.c 		if (!tb->perm) {
tb               1109 drivers/net/fddi/skfp/fplustm.c 			tb->n = 0 ;
tb               1145 drivers/net/fddi/skfp/fplustm.c 	struct s_fpmc	*tb ;
tb               1164 drivers/net/fddi/skfp/fplustm.c 	if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80)))
tb               1166 drivers/net/fddi/skfp/fplustm.c 	tb->n++ ;
tb               1167 drivers/net/fddi/skfp/fplustm.c 	tb->a = own ;
tb               1168 drivers/net/fddi/skfp/fplustm.c 	tb->perm = (can & 0x80) ? 1 : 0 ;
tb               1198 drivers/net/fddi/skfp/fplustm.c 	struct s_fpmc	*tb ;
tb               1230 drivers/net/fddi/skfp/fplustm.c 	for (i = 0, tb = smc->hw.fp.mc.table; i < FPMAX_MULTICAST; i++, tb++) {
tb               1231 drivers/net/fddi/skfp/fplustm.c 		if (tb->n) {
tb               1238 drivers/net/fddi/skfp/fplustm.c 				(u_short)((tb->a.a[0]<<8)+tb->a.a[1])) ;
tb               1240 drivers/net/fddi/skfp/fplustm.c 				(u_short)((tb->a.a[2]<<8)+tb->a.a[3])) ;
tb               1242 drivers/net/fddi/skfp/fplustm.c 				(u_short)((tb->a.a[4]<<8)+tb->a.a[5])) ;
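
The fplustm.c lines above manage a fixed FPMAX_MULTICAST-entry multicast table: one linear scan remembers the first free slot (n == 0) while looking for an existing entry, and the add path bumps the reference count and takes the permanent flag from bit 7 of the caller's flag. A compact user-space rendering of that scan; the canonical bit-swap of the address (the `own` variable in the original) and the FDDI address type are simplified to a plain 6-byte array:

    #include <stdio.h>
    #include <string.h>

    #define FPMAX_MULTICAST 8

    struct fpmc {
        unsigned char a[6];   /* the multicast address */
        int n;                /* reference count; 0 marks the slot unused */
        int perm;             /* permanent-entry flag */
    };

    static struct fpmc table[FPMAX_MULTICAST];

    /* One pass: return a matching entry, else (if can != 0) the first
     * free slot, else NULL.  Mirrors mac_get_mc_table()'s scan. */
    static struct fpmc *get_mc_slot(const unsigned char *addr, int can)
    {
        struct fpmc *tb, *slot = NULL;
        int i;

        for (i = 0, tb = table; i < FPMAX_MULTICAST; i++, tb++) {
            if (!tb->n) {                 /* unused slot */
                if (!slot && can)
                    slot = tb;
                continue;
            }
            if (memcmp(tb->a, addr, 6) == 0)
                return tb;                /* address already present */
        }
        return slot;
    }

    /* Mirrors the add path: bump the refcount, (re)write the address,
     * take the permanent flag from bit 7 of can. */
    static int add_mc(const unsigned char *addr, int can)
    {
        struct fpmc *tb = get_mc_slot(addr, can & ~0x80);

        if (!tb)
            return -1;                    /* table full */
        tb->n++;
        memcpy(tb->a, addr, 6);
        tb->perm = (can & 0x80) ? 1 : 0;
        return 0;
    }

    int main(void)
    {
        unsigned char mc[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x10 };

        add_mc(mc, 0x80 | 1);             /* first add, permanent */
        add_mc(mc, 1);                    /* same address: refcount -> 2 */
        printf("n=%d perm=%d\n", table[0].n, table[0].perm);
        return 0;
    }
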
tb               1173 drivers/net/geneve.c static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1176 drivers/net/geneve.c 	if (tb[IFLA_ADDRESS]) {
tb               1177 drivers/net/geneve.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
tb               1178 drivers/net/geneve.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
tb               1183 drivers/net/geneve.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
tb               1184 drivers/net/geneve.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
tb               1327 drivers/net/geneve.c static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
tb               1495 drivers/net/geneve.c 			       struct ip_tunnel_info *info, struct nlattr *tb[])
tb               1500 drivers/net/geneve.c 	if (tb[IFLA_MTU]) {
tb               1501 drivers/net/geneve.c 		geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
tb               1541 drivers/net/geneve.c 			  struct nlattr *tb[], struct nlattr *data[],
tb               1552 drivers/net/geneve.c 	err = geneve_nl2info(tb, data, extack, &info, &metadata,
tb               1562 drivers/net/geneve.c 	geneve_link_config(dev, &info, tb);
tb               1612 drivers/net/geneve.c static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1636 drivers/net/geneve.c 	err = geneve_nl2info(tb, data, extack, &info, &metadata,
tb               1643 drivers/net/geneve.c 		geneve_link_config(dev, &info, tb);
tb               1760 drivers/net/geneve.c 	struct nlattr *tb[IFLA_MAX + 1];
tb               1766 drivers/net/geneve.c 	memset(tb, 0, sizeof(tb));
tb               1768 drivers/net/geneve.c 			       &geneve_link_ops, tb, NULL);
tb                654 drivers/net/gtp.c 		       struct nlattr *tb[], struct nlattr *data[],
tb                725 drivers/net/gtp.c static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
tb                276 drivers/net/ifb.c static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
tb                279 drivers/net/ifb.c 	if (tb[IFLA_ADDRESS]) {
tb                280 drivers/net/ifb.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb                282 drivers/net/ifb.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb                169 drivers/net/ipvlan/ipvlan.h 		    struct nlattr *tb[], struct nlattr *data[],
tb                418 drivers/net/ipvlan/ipvlan_main.c 				struct nlattr *tb[], struct nlattr *data[],
tb                461 drivers/net/ipvlan/ipvlan_main.c static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
tb                511 drivers/net/ipvlan/ipvlan_main.c 		    struct nlattr *tb[], struct nlattr *data[],
tb                520 drivers/net/ipvlan/ipvlan_main.c 	if (!tb[IFLA_LINK])
tb                523 drivers/net/ipvlan/ipvlan_main.c 	phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
tb                551 drivers/net/ipvlan/ipvlan_main.c 	if (!tb[IFLA_MTU])
tb                 78 drivers/net/ipvlan/ipvtap.c 			  struct nlattr *tb[], struct nlattr *data[],
tb                101 drivers/net/ipvlan/ipvtap.c 	err =  ipvlan_link_new(src_net, dev, tb, data, extack);
tb               3084 drivers/net/macsec.c static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
tb               3226 drivers/net/macsec.c 			  struct nlattr *tb[], struct nlattr *data[],
tb               3236 drivers/net/macsec.c 	if (!tb[IFLA_LINK])
tb               3238 drivers/net/macsec.c 	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
tb               3313 drivers/net/macsec.c static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
tb                973 drivers/net/macvlan.c static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb                999 drivers/net/macvlan.c static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
tb               1255 drivers/net/macvlan.c static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1258 drivers/net/macvlan.c 	if (tb[IFLA_ADDRESS]) {
tb               1259 drivers/net/macvlan.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb               1261 drivers/net/macvlan.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb               1378 drivers/net/macvlan.c 			   struct nlattr *tb[], struct nlattr *data[],
tb               1388 drivers/net/macvlan.c 	if (!tb[IFLA_LINK])
tb               1391 drivers/net/macvlan.c 	lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
tb               1401 drivers/net/macvlan.c 	if (!tb[IFLA_MTU])
tb               1410 drivers/net/macvlan.c 	if (!tb[IFLA_ADDRESS])
tb               1492 drivers/net/macvlan.c 			   struct nlattr *tb[], struct nlattr *data[],
tb               1495 drivers/net/macvlan.c 	return macvlan_common_newlink(src_net, dev, tb, data, extack);
tb               1511 drivers/net/macvlan.c 			      struct nlattr *tb[], struct nlattr *data[],
tb                 82 drivers/net/macvtap.c 			   struct nlattr *tb[], struct nlattr *data[],
tb                109 drivers/net/macvtap.c 	err = macvlan_common_newlink(src_net, dev, tb, data, extack);
tb                336 drivers/net/netdevsim/netdev.c static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
tb                125 drivers/net/nlmon.c static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[],
tb                128 drivers/net/nlmon.c 	if (tb[IFLA_ADDRESS])
tb               1062 drivers/net/ppp/ppp_generic.c static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1077 drivers/net/ppp/ppp_generic.c 			  struct nlattr *tb[], struct nlattr *data[],
tb               1113 drivers/net/ppp/ppp_generic.c 	if (!tb[IFLA_IFNAME])
tb               2182 drivers/net/team/team.c 			struct nlattr *tb[], struct nlattr *data[],
tb               2185 drivers/net/team/team.c 	if (tb[IFLA_ADDRESS] == NULL)
tb               2191 drivers/net/team/team.c static int team_validate(struct nlattr *tb[], struct nlattr *data[],
tb               2194 drivers/net/team/team.c 	if (tb[IFLA_ADDRESS]) {
tb               2195 drivers/net/team/team.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb               2197 drivers/net/team/team.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb                857 drivers/net/thunderbolt.c 	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
tb                868 drivers/net/thunderbolt.c 	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
tb               2331 drivers/net/tun.c static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1218 drivers/net/veth.c static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1221 drivers/net/veth.c 	if (tb[IFLA_ADDRESS]) {
tb               1222 drivers/net/veth.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb               1224 drivers/net/veth.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb               1227 drivers/net/veth.c 	if (tb[IFLA_MTU]) {
tb               1228 drivers/net/veth.c 		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
tb               1237 drivers/net/veth.c 			struct nlattr *tb[], struct nlattr *data[],
tb               1271 drivers/net/veth.c 		tbp = tb;
tb               1321 drivers/net/veth.c 	if (tb[IFLA_ADDRESS] == NULL)
tb               1324 drivers/net/veth.c 	if (tb[IFLA_IFNAME])
tb               1325 drivers/net/veth.c 		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
tb               1297 drivers/net/vrf.c static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1300 drivers/net/vrf.c 	if (tb[IFLA_ADDRESS]) {
tb               1301 drivers/net/vrf.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
tb               1305 drivers/net/vrf.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
tb               1325 drivers/net/vrf.c 		       struct nlattr *tb[], struct nlattr *data[],
tb               1051 drivers/net/vxlan.c static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
tb               1058 drivers/net/vxlan.c 	if (tb[NDA_DST]) {
tb               1059 drivers/net/vxlan.c 		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
tb               1075 drivers/net/vxlan.c 	if (tb[NDA_PORT]) {
tb               1076 drivers/net/vxlan.c 		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
tb               1078 drivers/net/vxlan.c 		*port = nla_get_be16(tb[NDA_PORT]);
tb               1083 drivers/net/vxlan.c 	if (tb[NDA_VNI]) {
tb               1084 drivers/net/vxlan.c 		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
tb               1086 drivers/net/vxlan.c 		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
tb               1091 drivers/net/vxlan.c 	if (tb[NDA_SRC_VNI]) {
tb               1092 drivers/net/vxlan.c 		if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
tb               1094 drivers/net/vxlan.c 		*src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
tb               1099 drivers/net/vxlan.c 	if (tb[NDA_IFINDEX]) {
tb               1102 drivers/net/vxlan.c 		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
tb               1104 drivers/net/vxlan.c 		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
tb               1116 drivers/net/vxlan.c static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb               1136 drivers/net/vxlan.c 	if (tb[NDA_DST] == NULL)
tb               1139 drivers/net/vxlan.c 	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
tb               1191 drivers/net/vxlan.c static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
tb               1203 drivers/net/vxlan.c 	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
tb               1252 drivers/net/vxlan.c 			 struct nlattr *tb[],
tb               1263 drivers/net/vxlan.c 	if (tb[NDA_VNI])
tb               1264 drivers/net/vxlan.c 		vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
tb               3114 drivers/net/vxlan.c static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
tb               3117 drivers/net/vxlan.c 	if (tb[IFLA_ADDRESS]) {
tb               3118 drivers/net/vxlan.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
tb               3119 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
tb               3124 drivers/net/vxlan.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
tb               3125 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
tb               3131 drivers/net/vxlan.c 	if (tb[IFLA_MTU]) {
tb               3132 drivers/net/vxlan.c 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
tb               3135 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
tb               3661 drivers/net/vxlan.c static int vxlan_nl2flag(struct vxlan_config *conf, struct nlattr *tb[],
tb               3668 drivers/net/vxlan.c 	if (!tb[attrtype])
tb               3678 drivers/net/vxlan.c 	else if (nla_get_u8(tb[attrtype]))
tb               3688 drivers/net/vxlan.c static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
tb               3705 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], "Cannot change VNI");
tb               3713 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP], "New group address family does not match old group");
tb               3721 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "IPv6 support not enabled in the kernel");
tb               3726 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "New group address family does not match old group");
tb               3736 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL], "New local address family does not match old");
tb               3744 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "IPv6 support not enabled in the kernel");
tb               3749 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "New local address family does not match old");
tb               3828 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LIMIT],
tb               3850 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
tb               3858 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT],
tb               3867 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_UDP_CSUM],
tb               3930 drivers/net/vxlan.c 	if (tb[IFLA_MTU]) {
tb               3932 drivers/net/vxlan.c 			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
tb               3936 drivers/net/vxlan.c 		conf->mtu = nla_get_u32(tb[IFLA_MTU]);
tb               3946 drivers/net/vxlan.c 			 struct nlattr *tb[], struct nlattr *data[],
tb               3952 drivers/net/vxlan.c 	err = vxlan_nl2conf(tb, data, dev, &conf, false, extack);
tb               3959 drivers/net/vxlan.c static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
tb               3970 drivers/net/vxlan.c 	err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
tb               4196 drivers/net/vxlan.c 	struct nlattr *tb[IFLA_MAX + 1];
tb               4200 drivers/net/vxlan.c 	memset(&tb, 0, sizeof(tb));
tb               4203 drivers/net/vxlan.c 			       &vxlan_link_ops, tb, NULL);
tb                100 drivers/net/wireless/ath/ath10k/testmode.c static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
tb                226 drivers/net/wireless/ath/ath10k/testmode.c static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
tb                347 drivers/net/wireless/ath/ath10k/testmode.c static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
tb                371 drivers/net/wireless/ath/ath10k/testmode.c static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
tb                385 drivers/net/wireless/ath/ath10k/testmode.c 	if (!tb[ATH10K_TM_ATTR_DATA]) {
tb                390 drivers/net/wireless/ath/ath10k/testmode.c 	if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
tb                395 drivers/net/wireless/ath/ath10k/testmode.c 	buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
tb                396 drivers/net/wireless/ath/ath10k/testmode.c 	buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
tb                397 drivers/net/wireless/ath/ath10k/testmode.c 	cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
tb                431 drivers/net/wireless/ath/ath10k/testmode.c 	struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
tb                434 drivers/net/wireless/ath/ath10k/testmode.c 	ret = nla_parse_deprecated(tb, ATH10K_TM_ATTR_MAX, data, len,
tb                439 drivers/net/wireless/ath/ath10k/testmode.c 	if (!tb[ATH10K_TM_ATTR_CMD])
tb                442 drivers/net/wireless/ath/ath10k/testmode.c 	switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
tb                444 drivers/net/wireless/ath/ath10k/testmode.c 		return ath10k_tm_cmd_get_version(ar, tb);
tb                446 drivers/net/wireless/ath/ath10k/testmode.c 		return ath10k_tm_cmd_utf_start(ar, tb);
tb                448 drivers/net/wireless/ath/ath10k/testmode.c 		return ath10k_tm_cmd_utf_stop(ar, tb);
tb                450 drivers/net/wireless/ath/ath10k/testmode.c 		return ath10k_tm_cmd_wmi(ar, tb);
tb                125 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb = data;
tb                128 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		tb[tag] = ptr;
tb                133 drivers/net/wireless/ath/ath10k/wmi-tlv.c static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
tb                137 drivers/net/wireless/ath/ath10k/wmi-tlv.c 				   (void *)tb);
tb                144 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                147 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
tb                148 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (!tb)
tb                151 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
tb                153 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                157 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	return tb;
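
ath10k_wmi_tlv_parse_alloc() above allocates one const void * slot per TLV tag (kcalloc of WMI_TLV_TAG_MAX entries), lets the iterator callback store each value pointer at tb[tag], and returns the table or an ERR_PTR; every event handler below then follows the same lifetime: tb = parse_alloc(); ev = tb[TAG]; ...; kfree(tb) on all exits. A user-space skeleton of that pattern, with the tag space, the "parse", and the ERR_PTR encoding all stand-ins:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum { TAG_FOO = 1, TAG_BAR = 2, TAG_MAX = 4 };  /* stand-in tags */

    /* User-space stand-ins for the kernel's ERR_PTR()/IS_ERR(). */
    static void *err_ptr(long err) { return (void *)err; }
    static int is_err(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-4095;
    }

    /* Allocate one slot per tag and "parse" a single hard-coded record,
     * the way the iterator callback does tb[tag] = ptr. */
    static const void **tlv_parse_alloc(const char *payload)
    {
        const void **tb = calloc(TAG_MAX, sizeof(*tb));   /* ~kcalloc */

        if (!tb)
            return err_ptr(-ENOMEM);
        tb[TAG_FOO] = payload;
        return tb;
    }

    int main(void)
    {
        const void **tb = tlv_parse_alloc("event body");
        const char *ev;

        if (is_err(tb)) {
            fprintf(stderr, "failed to parse tlv\n");
            return 1;
        }

        ev = tb[TAG_FOO];         /* index by tag ... */
        if (!ev) {                /* ... and bail if the event is absent */
            free(tb);
            return 1;
        }
        printf("ev=%s bar=%p\n", ev, (void *)tb[TAG_BAR]);
        free(tb);                 /* every exit path releases the table */
        return 0;
    }
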
tb                171 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                177 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                178 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                179 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                184 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
tb                186 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                211 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                225 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                231 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                232 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                233 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                238 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
tb                239 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
tb                241 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                281 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                288 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                292 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                293 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                294 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                299 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
tb                301 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                309 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                316 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                321 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                322 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                323 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                328 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
tb                329 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
tb                332 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                343 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                350 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                355 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                356 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                357 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                362 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
tb                364 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                408 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                429 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                432 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                433 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                437 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
tb                439 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                453 drivers/net/wireless/ath/ath10k/wmi-tlv.c 			kfree(tb);
tb                465 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                660 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                664 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                665 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                666 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                671 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
tb                673 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                684 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                692 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                696 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                697 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                698 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                703 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
tb                713 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                794 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                800 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                801 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                802 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                807 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
tb                808 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
tb                811 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                825 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                835 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                843 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                847 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                848 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                849 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                854 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
tb                856 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                870 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                878 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                882 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                883 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                884 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                889 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
tb                891 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                901 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                909 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb                913 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb                914 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb                915 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb                920 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
tb                922 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb                928 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb               1056 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb               1061 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb               1062 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb               1063 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb               1068 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
tb               1069 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
tb               1072 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb               1082 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb               1230 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb               1234 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb               1235 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb               1236 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb               1241 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
tb               1243 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb               1252 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb               1329 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb               1343 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb               1344 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb               1345 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb               1350 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
tb               1351 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
tb               1354 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb               1378 drivers/net/wireless/ath/ath10k/wmi-tlv.c 			kfree(tb);
tb               1401 drivers/net/wireless/ath/ath10k/wmi-tlv.c 			kfree(tb);
tb               1422 drivers/net/wireless/ath/ath10k/wmi-tlv.c 			kfree(tb);
tb               1460 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb               1468 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb               1472 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb               1473 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb               1474 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb               1479 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
tb               1481 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb               1489 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb               1497 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb               1501 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb               1502 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb               1503 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb               1508 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
tb               1510 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb               1519 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb               1527 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	const void **tb;
tb               1531 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
tb               1532 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	if (IS_ERR(tb)) {
tb               1533 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		ret = PTR_ERR(tb);
tb               1538 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
tb               1540 drivers/net/wireless/ath/ath10k/wmi-tlv.c 		kfree(tb);
tb               1546 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	kfree(tb);
tb                 73 drivers/net/wireless/ath/ath6kl/testmode.c 	struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
tb                 77 drivers/net/wireless/ath/ath6kl/testmode.c 	err = nla_parse_deprecated(tb, ATH6KL_TM_ATTR_MAX, data, len,
tb                 82 drivers/net/wireless/ath/ath6kl/testmode.c 	if (!tb[ATH6KL_TM_ATTR_CMD])
tb                 85 drivers/net/wireless/ath/ath6kl/testmode.c 	switch (nla_get_u32(tb[ATH6KL_TM_ATTR_CMD])) {
tb                 87 drivers/net/wireless/ath/ath6kl/testmode.c 		if (!tb[ATH6KL_TM_ATTR_DATA])
tb                 90 drivers/net/wireless/ath/ath6kl/testmode.c 		buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]);
tb                 91 drivers/net/wireless/ath/ath6kl/testmode.c 		buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]);
tb                 44 drivers/net/wireless/ath/wcn36xx/testmode.c 			      struct nlattr *tb[])
tb                 51 drivers/net/wireless/ath/wcn36xx/testmode.c 	if (!tb[WCN36XX_TM_ATTR_DATA])
tb                 54 drivers/net/wireless/ath/wcn36xx/testmode.c 	buf = nla_data(tb[WCN36XX_TM_ATTR_DATA]);
tb                 55 drivers/net/wireless/ath/wcn36xx/testmode.c 	buf_len = nla_len(tb[WCN36XX_TM_ATTR_DATA]);
tb                130 drivers/net/wireless/ath/wcn36xx/testmode.c 	struct nlattr *tb[WCN36XX_TM_ATTR_MAX + 1];
tb                135 drivers/net/wireless/ath/wcn36xx/testmode.c 	ret = nla_parse_deprecated(tb, WCN36XX_TM_ATTR_MAX, data, len,
tb                140 drivers/net/wireless/ath/wcn36xx/testmode.c 	if (!tb[WCN36XX_TM_ATTR_CMD])
tb                143 drivers/net/wireless/ath/wcn36xx/testmode.c 	attr = nla_get_u16(tb[WCN36XX_TM_ATTR_CMD]);
tb                148 drivers/net/wireless/ath/wcn36xx/testmode.c 	return wcn36xx_tm_cmd_ptt(wcn, vif, tb);
tb               2828 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
tb               2847 drivers/net/wireless/ath/wil6210/cfg80211.c 	rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
tb               2854 drivers/net/wireless/ath/wil6210/cfg80211.c 	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
tb               2855 drivers/net/wireless/ath/wil6210/cfg80211.c 	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
tb               2856 drivers/net/wireless/ath/wil6210/cfg80211.c 	    !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
tb               2862 drivers/net/wireless/ath/wil6210/cfg80211.c 		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
tb               2868 drivers/net/wireless/ath/wil6210/cfg80211.c 	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
tb               2875 drivers/net/wireless/ath/wil6210/cfg80211.c 		tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
tb               2949 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
tb               2967 drivers/net/wireless/ath/wil6210/cfg80211.c 	rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
tb               2974 drivers/net/wireless/ath/wil6210/cfg80211.c 	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
tb               2975 drivers/net/wireless/ath/wil6210/cfg80211.c 	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
tb               2976 drivers/net/wireless/ath/wil6210/cfg80211.c 	    !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
tb               2982 drivers/net/wireless/ath/wil6210/cfg80211.c 		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
tb               2988 drivers/net/wireless/ath/wil6210/cfg80211.c 	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
tb               2998 drivers/net/wireless/ath/wil6210/cfg80211.c 	nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
tb               3061 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
tb               3076 drivers/net/wireless/ath/wil6210/cfg80211.c 	rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
tb               3083 drivers/net/wireless/ath/wil6210/cfg80211.c 	if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
tb               3087 drivers/net/wireless/ath/wil6210/cfg80211.c 	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
tb               3093 drivers/net/wireless/ath/wil6210/cfg80211.c 	if (tb[QCA_ATTR_MAC_ADDR]) {
tb               3094 drivers/net/wireless/ath/wil6210/cfg80211.c 		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
tb               3176 drivers/net/wireless/ath/wil6210/cfg80211.c 	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
tb               3184 drivers/net/wireless/ath/wil6210/cfg80211.c 	rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
tb               3191 drivers/net/wireless/ath/wil6210/cfg80211.c 	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
tb               3192 drivers/net/wireless/ath/wil6210/cfg80211.c 	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
tb               3198 drivers/net/wireless/ath/wil6210/cfg80211.c 		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
tb               3205 drivers/net/wireless/ath/wil6210/cfg80211.c 	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
tb               3211 drivers/net/wireless/ath/wil6210/cfg80211.c 	if (tb[QCA_ATTR_MAC_ADDR]) {
tb               3212 drivers/net/wireless/ath/wil6210/cfg80211.c 		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
tb               4611 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	const struct phytbl_info *tb;
tb               4659 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 				tb = dot11lcnphytbl_rx_gain_info_extlna_2G_rev2;
tb               4661 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 				tb = dot11lcnphytbl_rx_gain_info_2G_rev2;
tb               4665 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 				tb = dot11lcnphytbl_rx_gain_info_extlna_5G_rev2;
tb               4667 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 				tb = dot11lcnphytbl_rx_gain_info_5G_rev2;
tb               4671 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 			wlc_lcnphy_write_table(pi, &tb[idx]);
tb               4677 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 				tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
tb               4679 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 				tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
tb               4681 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 			tb = &dot11lcn_sw_ctrl_tbl_info_4313_epa;
tb               4685 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 			tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
tb               4687 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 			tb = &dot11lcn_sw_ctrl_tbl_info_4313;
tb               4689 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	wlc_lcnphy_write_table(pi, tb);
tb               3866 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_tfd_tb *tb = &tfd->tbs[idx];
tb               3868 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	dma_addr_t addr = get_unaligned_le32(&tb->lo);
tb               3871 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
tb               3880 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_tfd_tb *tb = &tfd->tbs[idx];
tb               3882 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	return le16_to_cpu(tb->hi_n_len) >> 4;
tb               3888 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_tfd_tb *tb = &tfd->tbs[idx];
tb               3891 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	put_unaligned_le32(addr, &tb->lo);
tb               3895 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	tb->hi_n_len = cpu_to_le16(hi_n_len);
tb               4337 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
tb               4341 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len,
tb               4346 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	if (!tb[IWL_MVM_TM_ATTR_CMD])
tb               4349 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
tb               4353 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
tb               4356 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
tb               4368 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
tb               4371 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
tb                714 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
tb                716 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 		return le16_to_cpu(tb->tb_len);
tb                719 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
tb                721 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 		return le16_to_cpu(tb->hi_n_len) >> 4;
tb                222 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	struct iwl_tfh_tb *tb;
tb                226 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	tb = &tfd->tbs[idx];
tb                235 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	put_unaligned_le64(addr, &tb->addr);
tb                236 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	tb->tb_len = cpu_to_le16(len);
tb                351 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
tb                353 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		return (dma_addr_t)(le64_to_cpu(tb->addr));
tb                356 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
tb                357 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		dma_addr_t addr = get_unaligned_le32(&tb->lo);
tb                363 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
tb                378 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
tb                382 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	put_unaligned_le32(addr, &tb->lo);
tb                385 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	tb->hi_n_len = cpu_to_le16(hi_n_len);
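
The iwlegacy and iwlwifi pcie lines above pack a DMA scatter entry into 6 bytes: the low 32 address bits in tb->lo, the top 4 address bits in the low nibble of tb->hi_n_len, and a 12-bit length in its high 12 bits, exactly as the unpack expressions (hi_n_len & 0xF shifted up by 32, hi_n_len >> 4) show. A round-trip demo of that encoding; the cpu_to_le16()/put_unaligned_le32() wire helpers are dropped, so the fields here are plain host integers:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tfd_tb {
        uint32_t lo;        /* low 32 bits of the DMA address */
        uint16_t hi_n_len;  /* bits 0-3: addr[35:32], bits 4-15: length */
    };

    static void tb_set(struct tfd_tb *tb, uint64_t addr, uint16_t len)
    {
        tb->lo = (uint32_t)addr;
        tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
    }

    /* Mirrors: addr |= ((hi_n_len & 0xF) << 16) << 16; */
    static uint64_t tb_get_addr(const struct tfd_tb *tb)
    {
        return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
    }

    /* Mirrors: le16_to_cpu(tb->hi_n_len) >> 4; */
    static uint16_t tb_get_len(const struct tfd_tb *tb)
    {
        return tb->hi_n_len >> 4;
    }

    int main(void)
    {
        struct tfd_tb tb;
        uint64_t addr = 0xABCDE0123ULL;   /* a 36-bit address */

        tb_set(&tb, addr, 0x7FF);         /* length fits in 12 bits */
        assert(tb_get_addr(&tb) == addr);
        assert(tb_get_len(&tb) == 0x7FF);
        printf("addr=%#llx len=%#x\n",
               (unsigned long long)tb_get_addr(&tb), tb_get_len(&tb));
        return 0;
    }
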
tb                405 drivers/net/wireless/mac80211_hwsim.c 	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1];
tb                409 drivers/net/wireless/mac80211_hwsim.c 	err = nla_parse_deprecated(tb, QCA_WLAN_VENDOR_ATTR_MAX, data,
tb                413 drivers/net/wireless/mac80211_hwsim.c 	if (!tb[QCA_WLAN_VENDOR_ATTR_TEST])
tb                415 drivers/net/wireless/mac80211_hwsim.c 	val = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]);
tb               1930 drivers/net/wireless/mac80211_hwsim.c 	struct nlattr *tb[HWSIM_TM_ATTR_MAX + 1];
tb               1934 drivers/net/wireless/mac80211_hwsim.c 	err = nla_parse_deprecated(tb, HWSIM_TM_ATTR_MAX, data, len,
tb               1939 drivers/net/wireless/mac80211_hwsim.c 	if (!tb[HWSIM_TM_ATTR_CMD])
tb               1942 drivers/net/wireless/mac80211_hwsim.c 	switch (nla_get_u32(tb[HWSIM_TM_ATTR_CMD])) {
tb               1944 drivers/net/wireless/mac80211_hwsim.c 		if (!tb[HWSIM_TM_ATTR_PS])
tb               1946 drivers/net/wireless/mac80211_hwsim.c 		ps = nla_get_u32(tb[HWSIM_TM_ATTR_PS]);
tb               4052 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
tb               4059 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	err = nla_parse_deprecated(tb, MWIFIEX_TM_ATTR_MAX, data, len,
tb               4064 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	if (!tb[MWIFIEX_TM_ATTR_CMD])
tb               4067 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	switch (nla_get_u32(tb[MWIFIEX_TM_ATTR_CMD])) {
tb               4069 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		if (!tb[MWIFIEX_TM_ATTR_DATA])
tb               4076 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		hostcmd->len = nla_len(tb[MWIFIEX_TM_ATTR_DATA]);
tb               4077 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		memcpy(hostcmd->cmd, nla_data(tb[MWIFIEX_TM_ATTR_DATA]),
tb                117 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	unsigned char *tb;
tb                122 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	tb  = (char *)buffer;
tb                128 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 							requesttype, off, tb,
tb                131 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		tb  += bsize;
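
The rt2x00usb.c lines above split a large register transfer into chunks: a cursor tb walks the caller's buffer while the register offset advances, one vendor request per chunk. A sketch of that loop with the USB call mocked out; the chunk size, the request itself, and the status handling are assumptions:

    #include <stdio.h>
    #include <string.h>

    #define CSR_CACHE_SIZE 64   /* assumed per-request limit */

    /* Mock of the per-chunk vendor request; always succeeds here. */
    static int vendor_request(unsigned int off, char *buf, int len)
    {
        printf("request: off=%u len=%d\n", off, len);
        return 0;
    }

    static int vendor_request_buff(unsigned int off, char *buffer,
                                   int buffer_length)
    {
        char *tb = buffer;              /* cursor into the buffer */
        int status, len;

        while (buffer_length > 0) {
            len = buffer_length > CSR_CACHE_SIZE ? CSR_CACHE_SIZE
                                                 : buffer_length;
            status = vendor_request(off, tb, len);
            if (status)
                return status;          /* abort on the first failure */

            tb += len;                  /* advance cursor ... */
            off += len;                 /* ... register offset ... */
            buffer_length -= len;       /* ... and remaining count */
        }
        return 0;
    }

    int main(void)
    {
        char buf[150];

        memset(buf, 0, sizeof(buf));
        return vendor_request_buff(0x1000, buf, (int)sizeof(buf));
    }
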
tb                 58 drivers/net/wireless/ti/wlcore/testmode.c static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
tb                 67 drivers/net/wireless/ti/wlcore/testmode.c 	if (!tb[WL1271_TM_ATTR_DATA])
tb                 70 drivers/net/wireless/ti/wlcore/testmode.c 	buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
tb                 71 drivers/net/wireless/ti/wlcore/testmode.c 	buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
tb                 73 drivers/net/wireless/ti/wlcore/testmode.c 	if (tb[WL1271_TM_ATTR_ANSWER])
tb                 74 drivers/net/wireless/ti/wlcore/testmode.c 		answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);
tb                140 drivers/net/wireless/ti/wlcore/testmode.c static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
tb                149 drivers/net/wireless/ti/wlcore/testmode.c 	if (!tb[WL1271_TM_ATTR_IE_ID])
tb                152 drivers/net/wireless/ti/wlcore/testmode.c 	ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
tb                207 drivers/net/wireless/ti/wlcore/testmode.c static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
tb                215 drivers/net/wireless/ti/wlcore/testmode.c 	if (!tb[WL1271_TM_ATTR_DATA])
tb                217 drivers/net/wireless/ti/wlcore/testmode.c 	if (!tb[WL1271_TM_ATTR_IE_ID])
tb                220 drivers/net/wireless/ti/wlcore/testmode.c 	ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
tb                221 drivers/net/wireless/ti/wlcore/testmode.c 	buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
tb                222 drivers/net/wireless/ti/wlcore/testmode.c 	buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
tb                239 drivers/net/wireless/ti/wlcore/testmode.c static int wl1271_tm_detect_fem(struct wl1271 *wl, struct nlattr *tb[])
tb                276 drivers/net/wireless/ti/wlcore/testmode.c static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
tb                283 drivers/net/wireless/ti/wlcore/testmode.c 	if (!tb[WL1271_TM_ATTR_PLT_MODE])
tb                286 drivers/net/wireless/ti/wlcore/testmode.c 	val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);
tb                297 drivers/net/wireless/ti/wlcore/testmode.c 		ret = wl1271_tm_detect_fem(wl, tb);
tb                307 drivers/net/wireless/ti/wlcore/testmode.c static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
tb                357 drivers/net/wireless/ti/wlcore/testmode.c 	struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
tb                361 drivers/net/wireless/ti/wlcore/testmode.c 	err = nla_parse_deprecated(tb, WL1271_TM_ATTR_MAX, data, len,
tb                366 drivers/net/wireless/ti/wlcore/testmode.c 	if (!tb[WL1271_TM_ATTR_CMD_ID])
tb                369 drivers/net/wireless/ti/wlcore/testmode.c 	nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
tb                378 drivers/net/wireless/ti/wlcore/testmode.c 		return wl1271_tm_cmd_test(wl, tb);
tb                380 drivers/net/wireless/ti/wlcore/testmode.c 		return wl1271_tm_cmd_interrogate(wl, tb);
tb                382 drivers/net/wireless/ti/wlcore/testmode.c 		return wl1271_tm_cmd_configure(wl, tb);
tb                384 drivers/net/wireless/ti/wlcore/testmode.c 		return wl1271_tm_cmd_set_plt_mode(wl, tb);
tb                386 drivers/net/wireless/ti/wlcore/testmode.c 		return wl12xx_tm_cmd_get_mac(wl, tb);
tb                 33 drivers/net/wireless/ti/wlcore/vendor_cmd.c 	struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];
tb                 41 drivers/net/wireless/ti/wlcore/vendor_cmd.c 	ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
tb                 46 drivers/net/wireless/ti/wlcore/vendor_cmd.c 	if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID])
tb                 63 drivers/net/wireless/ti/wlcore/vendor_cmd.c 			nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
tb                114 drivers/net/wireless/ti/wlcore/vendor_cmd.c 	struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];
tb                122 drivers/net/wireless/ti/wlcore/vendor_cmd.c 	ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
tb                127 drivers/net/wireless/ti/wlcore/vendor_cmd.c 	if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID] ||
tb                128 drivers/net/wireless/ti/wlcore/vendor_cmd.c 	    !tb[WLCORE_VENDOR_ATTR_GROUP_KEY])
tb                145 drivers/net/wireless/ti/wlcore/vendor_cmd.c 			nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
tb                146 drivers/net/wireless/ti/wlcore/vendor_cmd.c 			nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),
tb                147 drivers/net/wireless/ti/wlcore/vendor_cmd.c 			nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]));
tb                488 drivers/net/wireless/virt_wifi.c 			     struct nlattr *tb[], struct nlattr *data[],
tb                494 drivers/net/wireless/virt_wifi.c 	if (!tb[IFLA_LINK])
tb                501 drivers/net/wireless/virt_wifi.c 					    nla_get_u32(tb[IFLA_LINK]));
tb                505 drivers/net/wireless/virt_wifi.c 	if (!tb[IFLA_MTU])
tb                509 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	const struct mtk_drive_desc *tb;
tb                512 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	tb = &mtk_drive[desc->drv_n];
tb                518 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) {
tb                519 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 		arg = (arg / tb->step - 1) * tb->scal;
tb                537 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	const struct mtk_drive_desc *tb;
tb                540 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	tb = &mtk_drive[desc->drv_n];
tb                553 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	*val = (((val2 << 1) + val1) / tb->scal + 1) * tb->step;
tb                562 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	const struct mtk_drive_desc *tb;
tb                565 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	tb = &mtk_drive[desc->drv_n];
tb                567 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) {
tb                568 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 		arg = (arg / tb->step - 1) * tb->scal;
tb                582 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	const struct mtk_drive_desc *tb;
tb                585 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	tb = &mtk_drive[desc->drv_n];
tb                591 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	*val = ((val1 & 0x7) / tb->scal + 1) * tb->step;
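
The pinctrl-mtk-common-v2.c lines above convert a drive strength in mA to a register code and back: arg must lie in [min, max] and be a multiple of step, the code is (arg / step - 1) * scal, and the read path applies (raw / scal + 1) * step. A round-trip check of that arithmetic; the table values are made up, and the two-field register split ((val2 << 1) + val1) is collapsed into one raw value:

    #include <assert.h>
    #include <stdio.h>

    struct mtk_drive_desc {
        unsigned char min;    /* smallest legal drive strength, mA */
        unsigned char max;    /* largest legal drive strength, mA */
        unsigned char step;   /* granularity, mA */
        unsigned char scal;   /* scale between mA steps and raw code */
    };

    /* Made-up but plausible entry: 4..16 mA in 4 mA steps. */
    static const struct mtk_drive_desc tb = { 4, 16, 4, 2 };

    /* mA -> raw, mirroring: arg = (arg / tb->step - 1) * tb->scal; */
    static int drive_encode(unsigned int arg, unsigned int *raw)
    {
        if (arg < tb.min || arg > tb.max || (arg % tb.step))
            return -1;                    /* kernel: -ENOTSUPP */
        *raw = (arg / tb.step - 1) * tb.scal;
        return 0;
    }

    /* raw -> mA, mirroring: *val = (raw / tb->scal + 1) * tb->step; */
    static unsigned int drive_decode(unsigned int raw)
    {
        return (raw / tb.scal + 1) * tb.step;
    }

    int main(void)
    {
        unsigned int ma, raw;

        for (ma = tb.min; ma <= tb.max; ma += tb.step) {
            assert(drive_encode(ma, &raw) == 0);
            assert(drive_decode(raw) == ma);
            printf("%2u mA -> raw %u\n", ma, raw);
        }
        assert(drive_encode(6, &raw) < 0);  /* 6 is not a step multiple */
        return 0;
    }
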
tb               7362 drivers/scsi/hpsa.c static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
tb               7371 drivers/scsi/hpsa.c 		temp_name[i] = readb(&(tb->Signature[i]));
tb               7374 drivers/scsi/hpsa.c 	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
tb               7376 drivers/scsi/hpsa.c 	       readl(&(tb->TransportSupport)));
tb               7378 drivers/scsi/hpsa.c 	       readl(&(tb->TransportActive)));
tb               7380 drivers/scsi/hpsa.c 	       readl(&(tb->HostWrite.TransportRequest)));
tb               7382 drivers/scsi/hpsa.c 	       readl(&(tb->HostWrite.CoalIntDelay)));
tb               7384 drivers/scsi/hpsa.c 	       readl(&(tb->HostWrite.CoalIntCount)));
tb               7386 drivers/scsi/hpsa.c 	       readl(&(tb->CmdsOutMax)));
tb               7387 drivers/scsi/hpsa.c 	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
tb               7389 drivers/scsi/hpsa.c 		temp_name[i] = readb(&(tb->ServerName[i]));
tb               7393 drivers/scsi/hpsa.c 		readl(&(tb->HeartBeat)));
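print_cfg_table() above never dereferences the table pointer directly: CfgTable is __iomem, so every field goes through readb()/readl(). A reduced sketch of the same access pattern on a hypothetical two-field table (demo_* names are invented):

#include <linux/device.h>
#include <linux/io.h>

/* Hypothetical MMIO-resident config table, for illustration. */
struct demo_cfg {
	u8  signature[4];
	u32 transport_support;
};

static void demo_print_cfg(struct device *dev, struct demo_cfg __iomem *tb)
{
	char sig[5];
	int i;

	/* Field-by-field MMIO reads; __iomem is never dereferenced. */
	for (i = 0; i < 4; i++)
		sig[i] = readb(&tb->signature[i]);
	sig[4] = '\0';

	dev_info(dev, "Signature = %s\n", sig);
	dev_info(dev, "Transport Support = 0x%x\n",
		 readl(&tb->transport_support));
}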
tb                 95 drivers/scsi/mesh.c 	u32	tb;
tb                202 drivers/scsi/mesh.c 	u32 tb;
tb                206 drivers/scsi/mesh.c 	asm ("mftb %0" : "=r" (tb) : );
tb                208 drivers/scsi/mesh.c 	tb = 0;
tb                210 drivers/scsi/mesh.c 	return tb;
tb                221 drivers/scsi/mesh.c 	tlp->tb = readtb();
tb                255 drivers/scsi/mesh.c 		printk("tb=%10u ", lp->tb);
tb                280 drivers/scsi/mesh.c 		printk("tb=%10u ", lp->tb);
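In mesh.c the tb field timestamps debug-log entries with the PowerPC timebase: readtb() executes mftb through inline asm on PPC builds and falls back to 0 elsewhere, and each log slot records tlp->tb = readtb(). A condensed sketch of that conditional helper; the real driver gates it on its own config, so treat the #ifdef as illustrative:

#include <linux/types.h>

static inline u32 readtb(void)
{
	u32 tb;

#ifdef CONFIG_PPC
	/* Read the (lower half of the) PowerPC timebase register. */
	asm volatile("mftb %0" : "=r" (tb));
#else
	tb = 0;			/* no timebase on other architectures */
#endif
	return tb;
}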
tb               3880 drivers/scsi/st.c 	struct st_buffer *tb;
tb               3882 drivers/scsi/st.c 	tb = kzalloc(sizeof(struct st_buffer), GFP_KERNEL);
tb               3883 drivers/scsi/st.c 	if (!tb) {
tb               3887 drivers/scsi/st.c 	tb->frp_segs = 0;
tb               3888 drivers/scsi/st.c 	tb->use_sg = max_sg;
tb               3889 drivers/scsi/st.c 	tb->dma = need_dma;
tb               3890 drivers/scsi/st.c 	tb->buffer_size = 0;
tb               3892 drivers/scsi/st.c 	tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *),
tb               3894 drivers/scsi/st.c 	if (!tb->reserved_pages) {
tb               3895 drivers/scsi/st.c 		kfree(tb);
tb               3899 drivers/scsi/st.c 	return tb;
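The st.c tape-buffer allocation above is the classic two-step: kzalloc() the control structure, kcalloc() the page-pointer array sized by max_sg, and if the second allocation fails, free the first before returning NULL. A minimal sketch with hypothetical demo_* names:

#include <linux/slab.h>

struct demo_buffer {
	int use_sg;
	struct page **reserved_pages;
};

static struct demo_buffer *demo_buffer_alloc(int max_sg)
{
	struct demo_buffer *tb;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->use_sg = max_sg;
	tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *),
				     GFP_KERNEL);
	if (!tb->reserved_pages) {
		kfree(tb);		/* undo the first allocation */
		return NULL;
	}
	return tb;
}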
tb                314 drivers/staging/fsl-dpaa2/ethsw/ethsw.c static int port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb                327 drivers/staging/fsl-dpaa2/ethsw/ethsw.c static int port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
tb                 43 drivers/target/target_core_configfs.c static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
tb                 45 drivers/target/target_core_configfs.c 	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
tb                 50 drivers/target/target_core_configfs.c 	cit->ct_owner = tb->ops->owner;					\
tb                 55 drivers/target/target_core_configfs.c static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
tb                 57 drivers/target/target_core_configfs.c 	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
tb                 61 drivers/target/target_core_configfs.c 	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
tb                 62 drivers/target/target_core_configfs.c 	cit->ct_owner = tb->ops->owner;					\
tb               3150 drivers/target/target_core_configfs.c 	struct target_backend *tb = hba->backend;
tb               3162 drivers/target/target_core_configfs.c 	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
tb               3165 drivers/target/target_core_configfs.c 			&tb->tb_dev_action_cit);
tb               3169 drivers/target/target_core_configfs.c 			&tb->tb_dev_attrib_cit);
tb               3173 drivers/target/target_core_configfs.c 			&tb->tb_dev_pr_cit);
tb               3177 drivers/target/target_core_configfs.c 			&tb->tb_dev_wwn_cit);
tb               3182 drivers/target/target_core_configfs.c 			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
tb               3187 drivers/target/target_core_configfs.c 			"statistics", &tb->tb_dev_stat_cit);
tb               3425 drivers/target/target_core_configfs.c void target_setup_backend_cits(struct target_backend *tb)
tb               3427 drivers/target/target_core_configfs.c 	target_core_setup_dev_cit(tb);
tb               3428 drivers/target/target_core_configfs.c 	target_core_setup_dev_action_cit(tb);
tb               3429 drivers/target/target_core_configfs.c 	target_core_setup_dev_attrib_cit(tb);
tb               3430 drivers/target/target_core_configfs.c 	target_core_setup_dev_pr_cit(tb);
tb               3431 drivers/target/target_core_configfs.c 	target_core_setup_dev_wwn_cit(tb);
tb               3432 drivers/target/target_core_configfs.c 	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
tb               3433 drivers/target/target_core_configfs.c 	target_core_setup_dev_stat_cit(tb);
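The target_core_configfs.c hits come from a token-pasting macro that stamps out one target_core_setup_<name>_cit() per config-item type, each wiring tb->tb_<name>_cit from the backend's ops; target_setup_backend_cits() then calls the whole generated family. A reduced sketch of that generator; the demo_* types are hypothetical stand-ins, not the target-core structures:

#include <linux/configfs.h>
#include <linux/module.h>

struct demo_backend_ops {
	struct module *owner;
};

struct demo_backend {
	const struct demo_backend_ops *ops;
	struct config_item_type demo_dev_cit;
	struct config_item_type demo_dev_attrib_cit;
};

/* Generator: one setup function per config-item-type name. */
#define DEMO_SETUP_CIT(_name)						\
static void demo_setup_##_name##_cit(struct demo_backend *tb)		\
{									\
	struct config_item_type *cit = &tb->demo_##_name##_cit;	\
									\
	cit->ct_owner = tb->ops->owner;					\
}

DEMO_SETUP_CIT(dev);		/* emits demo_setup_dev_cit() */
DEMO_SETUP_CIT(dev_attrib);	/* emits demo_setup_dev_attrib_cit() */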
tb                 40 drivers/target/target_core_hba.c 	struct target_backend *tb, *old;
tb                 42 drivers/target/target_core_hba.c 	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
tb                 43 drivers/target/target_core_hba.c 	if (!tb)
tb                 45 drivers/target/target_core_hba.c 	tb->ops = ops;
tb                 52 drivers/target/target_core_hba.c 			kfree(tb);
tb                 56 drivers/target/target_core_hba.c 	target_setup_backend_cits(tb);
tb                 57 drivers/target/target_core_hba.c 	list_add_tail(&tb->list, &backend_list);
tb                 68 drivers/target/target_core_hba.c 	struct target_backend *tb;
tb                 71 drivers/target/target_core_hba.c 	list_for_each_entry(tb, &backend_list, list) {
tb                 72 drivers/target/target_core_hba.c 		if (tb->ops == ops) {
tb                 73 drivers/target/target_core_hba.c 			list_del(&tb->list);
tb                 82 drivers/target/target_core_hba.c 			kfree(tb);
tb                 92 drivers/target/target_core_hba.c 	struct target_backend *tb;
tb                 95 drivers/target/target_core_hba.c 	list_for_each_entry(tb, &backend_list, list) {
tb                 96 drivers/target/target_core_hba.c 		if (!strcmp(tb->ops->name, name))
tb                102 drivers/target/target_core_hba.c 	if (tb->ops->owner && !try_module_get(tb->ops->owner))
tb                103 drivers/target/target_core_hba.c 		tb = NULL;
tb                105 drivers/target/target_core_hba.c 	return tb;
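target_core_hba.c keeps backends in a plain list: registration kzalloc()s a record, sets ops, and list_add_tail()s it; lookup walks the list, strcmp()s on the name, and pins the providing module with try_module_get() before returning it. A sketch of the lookup half with hypothetical demo_* types; the real code serializes the walk with a mutex, elided here:

#include <linux/list.h>
#include <linux/module.h>
#include <linux/string.h>

struct demo_backend {
	struct list_head list;
	const char *name;
	struct module *owner;
};

static LIST_HEAD(demo_backends);

static struct demo_backend *demo_backend_find(const char *name)
{
	struct demo_backend *tb;

	list_for_each_entry(tb, &demo_backends, list) {
		if (!strcmp(tb->name, name)) {
			/* Pin the providing module before handing out. */
			if (tb->owner && !try_module_get(tb->owner))
				tb = NULL;
			return tb;
		}
	}
	return NULL;
}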
tb                181 drivers/thunderbolt/dma_port.c 		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
tb                249 drivers/thunderbolt/dma_port.c 		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
tb                285 drivers/thunderbolt/dma_port.c 	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
tb                294 drivers/thunderbolt/dma_port.c 	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
tb                322 drivers/thunderbolt/dma_port.c 	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
tb                336 drivers/thunderbolt/dma_port.c 	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
tb                487 drivers/thunderbolt/dma_port.c 	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
tb                126 drivers/thunderbolt/domain.c 	struct tb *tb = container_of(dev, struct tb, dev);
tb                131 drivers/thunderbolt/domain.c 	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
tb                135 drivers/thunderbolt/domain.c 	pm_runtime_get_sync(&tb->dev);
tb                137 drivers/thunderbolt/domain.c 	if (mutex_lock_interruptible(&tb->lock)) {
tb                141 drivers/thunderbolt/domain.c 	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
tb                143 drivers/thunderbolt/domain.c 		mutex_unlock(&tb->lock);
tb                146 drivers/thunderbolt/domain.c 	mutex_unlock(&tb->lock);
tb                148 drivers/thunderbolt/domain.c 	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
tb                154 drivers/thunderbolt/domain.c 			       i < tb->nboot_acl - 1 ? "," : "\n");
tb                158 drivers/thunderbolt/domain.c 	pm_runtime_mark_last_busy(&tb->dev);
tb                159 drivers/thunderbolt/domain.c 	pm_runtime_put_autosuspend(&tb->dev);
tb                168 drivers/thunderbolt/domain.c 	struct tb *tb = container_of(dev, struct tb, dev);
tb                179 drivers/thunderbolt/domain.c 	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
tb                181 drivers/thunderbolt/domain.c 	if (count < tb->nboot_acl - 1)
tb                188 drivers/thunderbolt/domain.c 	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
tb                195 drivers/thunderbolt/domain.c 	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
tb                211 drivers/thunderbolt/domain.c 	if (s || i < tb->nboot_acl) {
tb                216 drivers/thunderbolt/domain.c 	pm_runtime_get_sync(&tb->dev);
tb                218 drivers/thunderbolt/domain.c 	if (mutex_lock_interruptible(&tb->lock)) {
tb                222 drivers/thunderbolt/domain.c 	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
tb                225 drivers/thunderbolt/domain.c 		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
tb                227 drivers/thunderbolt/domain.c 	mutex_unlock(&tb->lock);
tb                230 drivers/thunderbolt/domain.c 	pm_runtime_mark_last_busy(&tb->dev);
tb                231 drivers/thunderbolt/domain.c 	pm_runtime_put_autosuspend(&tb->dev);
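Both boot_acl paths above bracket the hardware access the same way: pm_runtime_get_sync() to wake the domain, mutex_lock_interruptible() so a pending signal aborts instead of blocking, the cm_ops call, then pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend() on every exit path. A condensed sketch of that bracket; demo_locked_op() is a hypothetical helper, not a Thunderbolt API:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>

static int demo_locked_op(struct device *dev, struct mutex *lock,
			  int (*op)(struct device *dev))
{
	int ret;

	pm_runtime_get_sync(dev);		/* resume if suspended */

	if (mutex_lock_interruptible(lock)) {
		ret = -ERESTARTSYS;		/* interrupted by a signal */
		goto out;
	}
	ret = op(dev);
	mutex_unlock(lock);

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* re-arm autosuspend */
	return ret;
}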
tb                258 drivers/thunderbolt/domain.c 	struct tb *tb = container_of(dev, struct tb, dev);
tb                261 drivers/thunderbolt/domain.c 	if (tb->security_level < ARRAY_SIZE(tb_security_names))
tb                262 drivers/thunderbolt/domain.c 		name = tb_security_names[tb->security_level];
tb                279 drivers/thunderbolt/domain.c 	struct tb *tb = container_of(dev, struct tb, dev);
tb                282 drivers/thunderbolt/domain.c 		if (tb->nboot_acl &&
tb                283 drivers/thunderbolt/domain.c 		    tb->cm_ops->get_boot_acl &&
tb                284 drivers/thunderbolt/domain.c 		    tb->cm_ops->set_boot_acl)
tb                312 drivers/thunderbolt/domain.c 	struct tb *tb = container_of(dev, struct tb, dev);
tb                314 drivers/thunderbolt/domain.c 	tb_ctl_free(tb->ctl);
tb                315 drivers/thunderbolt/domain.c 	destroy_workqueue(tb->wq);
tb                316 drivers/thunderbolt/domain.c 	ida_simple_remove(&tb_domain_ida, tb->index);
tb                317 drivers/thunderbolt/domain.c 	mutex_destroy(&tb->lock);
tb                318 drivers/thunderbolt/domain.c 	kfree(tb);
tb                340 drivers/thunderbolt/domain.c struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
tb                342 drivers/thunderbolt/domain.c 	struct tb *tb;
tb                352 drivers/thunderbolt/domain.c 	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
tb                353 drivers/thunderbolt/domain.c 	if (!tb)
tb                356 drivers/thunderbolt/domain.c 	tb->nhi = nhi;
tb                357 drivers/thunderbolt/domain.c 	mutex_init(&tb->lock);
tb                359 drivers/thunderbolt/domain.c 	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
tb                360 drivers/thunderbolt/domain.c 	if (tb->index < 0)
tb                363 drivers/thunderbolt/domain.c 	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
tb                364 drivers/thunderbolt/domain.c 	if (!tb->wq)
tb                367 drivers/thunderbolt/domain.c 	tb->dev.parent = &nhi->pdev->dev;
tb                368 drivers/thunderbolt/domain.c 	tb->dev.bus = &tb_bus_type;
tb                369 drivers/thunderbolt/domain.c 	tb->dev.type = &tb_domain_type;
tb                370 drivers/thunderbolt/domain.c 	tb->dev.groups = domain_attr_groups;
tb                371 drivers/thunderbolt/domain.c 	dev_set_name(&tb->dev, "domain%d", tb->index);
tb                372 drivers/thunderbolt/domain.c 	device_initialize(&tb->dev);
tb                374 drivers/thunderbolt/domain.c 	return tb;
tb                377 drivers/thunderbolt/domain.c 	ida_simple_remove(&tb_domain_ida, tb->index);
tb                379 drivers/thunderbolt/domain.c 	kfree(tb);
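tb_domain_alloc() above is a goto-ladder constructor: kzalloc(), ida_simple_get() for the domain index, alloc_ordered_workqueue(), device_initialize(); each failure label releases exactly what was acquired before it. A trimmed sketch of the same ladder with hypothetical demo_* names:

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static DEFINE_IDA(demo_ida);

struct demo_domain {
	int index;
	struct workqueue_struct *wq;
};

static struct demo_domain *demo_domain_alloc(void)
{
	struct demo_domain *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;

	d->index = ida_simple_get(&demo_ida, 0, 0, GFP_KERNEL);
	if (d->index < 0)
		goto err_free;

	d->wq = alloc_ordered_workqueue("demo%d", 0, d->index);
	if (!d->wq)
		goto err_ida;

	return d;

err_ida:
	ida_simple_remove(&demo_ida, d->index);	/* undo the id */
err_free:
	kfree(d);
	return NULL;
}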
tb                387 drivers/thunderbolt/domain.c 	struct tb *tb = data;
tb                389 drivers/thunderbolt/domain.c 	if (!tb->cm_ops->handle_event) {
tb                390 drivers/thunderbolt/domain.c 		tb_warn(tb, "domain does not have event handler\n");
tb                397 drivers/thunderbolt/domain.c 		return tb_xdomain_handle_request(tb, type, buf, size);
tb                400 drivers/thunderbolt/domain.c 		tb->cm_ops->handle_event(tb, type, buf, size);
tb                417 drivers/thunderbolt/domain.c int tb_domain_add(struct tb *tb)
tb                421 drivers/thunderbolt/domain.c 	if (WARN_ON(!tb->cm_ops))
tb                424 drivers/thunderbolt/domain.c 	mutex_lock(&tb->lock);
tb                426 drivers/thunderbolt/domain.c 	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
tb                427 drivers/thunderbolt/domain.c 	if (!tb->ctl) {
tb                436 drivers/thunderbolt/domain.c 	tb_ctl_start(tb->ctl);
tb                438 drivers/thunderbolt/domain.c 	if (tb->cm_ops->driver_ready) {
tb                439 drivers/thunderbolt/domain.c 		ret = tb->cm_ops->driver_ready(tb);
tb                444 drivers/thunderbolt/domain.c 	ret = device_add(&tb->dev);
tb                449 drivers/thunderbolt/domain.c 	if (tb->cm_ops->start) {
tb                450 drivers/thunderbolt/domain.c 		ret = tb->cm_ops->start(tb);
tb                456 drivers/thunderbolt/domain.c 	mutex_unlock(&tb->lock);
tb                458 drivers/thunderbolt/domain.c 	pm_runtime_no_callbacks(&tb->dev);
tb                459 drivers/thunderbolt/domain.c 	pm_runtime_set_active(&tb->dev);
tb                460 drivers/thunderbolt/domain.c 	pm_runtime_enable(&tb->dev);
tb                461 drivers/thunderbolt/domain.c 	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
tb                462 drivers/thunderbolt/domain.c 	pm_runtime_mark_last_busy(&tb->dev);
tb                463 drivers/thunderbolt/domain.c 	pm_runtime_use_autosuspend(&tb->dev);
tb                468 drivers/thunderbolt/domain.c 	device_del(&tb->dev);
tb                470 drivers/thunderbolt/domain.c 	tb_ctl_stop(tb->ctl);
tb                472 drivers/thunderbolt/domain.c 	mutex_unlock(&tb->lock);
tb                484 drivers/thunderbolt/domain.c void tb_domain_remove(struct tb *tb)
tb                486 drivers/thunderbolt/domain.c 	mutex_lock(&tb->lock);
tb                487 drivers/thunderbolt/domain.c 	if (tb->cm_ops->stop)
tb                488 drivers/thunderbolt/domain.c 		tb->cm_ops->stop(tb);
tb                490 drivers/thunderbolt/domain.c 	tb_ctl_stop(tb->ctl);
tb                491 drivers/thunderbolt/domain.c 	mutex_unlock(&tb->lock);
tb                493 drivers/thunderbolt/domain.c 	flush_workqueue(tb->wq);
tb                494 drivers/thunderbolt/domain.c 	device_unregister(&tb->dev);
tb                503 drivers/thunderbolt/domain.c int tb_domain_suspend_noirq(struct tb *tb)
tb                512 drivers/thunderbolt/domain.c 	mutex_lock(&tb->lock);
tb                513 drivers/thunderbolt/domain.c 	if (tb->cm_ops->suspend_noirq)
tb                514 drivers/thunderbolt/domain.c 		ret = tb->cm_ops->suspend_noirq(tb);
tb                516 drivers/thunderbolt/domain.c 		tb_ctl_stop(tb->ctl);
tb                517 drivers/thunderbolt/domain.c 	mutex_unlock(&tb->lock);
tb                529 drivers/thunderbolt/domain.c int tb_domain_resume_noirq(struct tb *tb)
tb                533 drivers/thunderbolt/domain.c 	mutex_lock(&tb->lock);
tb                534 drivers/thunderbolt/domain.c 	tb_ctl_start(tb->ctl);
tb                535 drivers/thunderbolt/domain.c 	if (tb->cm_ops->resume_noirq)
tb                536 drivers/thunderbolt/domain.c 		ret = tb->cm_ops->resume_noirq(tb);
tb                537 drivers/thunderbolt/domain.c 	mutex_unlock(&tb->lock);
tb                542 drivers/thunderbolt/domain.c int tb_domain_suspend(struct tb *tb)
tb                544 drivers/thunderbolt/domain.c 	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
tb                547 drivers/thunderbolt/domain.c void tb_domain_complete(struct tb *tb)
tb                549 drivers/thunderbolt/domain.c 	if (tb->cm_ops->complete)
tb                550 drivers/thunderbolt/domain.c 		tb->cm_ops->complete(tb);
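Every cm_ops hook in domain.c is optional: callers either test the pointer first (tb_domain_complete()) or fold the test into a ternary that defaults to success (tb_domain_suspend()), so a connection manager implements only what it needs. A tiny sketch of that dispatch convention with a hypothetical ops table:

struct demo_ops {
	int  (*suspend)(void *ctx);	/* optional */
	void (*complete)(void *ctx);	/* optional */
};

static int demo_suspend(const struct demo_ops *ops, void *ctx)
{
	/* A missing hook counts as success, so callers stay unconditional. */
	return ops->suspend ? ops->suspend(ctx) : 0;
}

static void demo_complete(const struct demo_ops *ops, void *ctx)
{
	if (ops->complete)
		ops->complete(ctx);
}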
tb                553 drivers/thunderbolt/domain.c int tb_domain_runtime_suspend(struct tb *tb)
tb                555 drivers/thunderbolt/domain.c 	if (tb->cm_ops->runtime_suspend) {
tb                556 drivers/thunderbolt/domain.c 		int ret = tb->cm_ops->runtime_suspend(tb);
tb                560 drivers/thunderbolt/domain.c 	tb_ctl_stop(tb->ctl);
tb                564 drivers/thunderbolt/domain.c int tb_domain_runtime_resume(struct tb *tb)
tb                566 drivers/thunderbolt/domain.c 	tb_ctl_start(tb->ctl);
tb                567 drivers/thunderbolt/domain.c 	if (tb->cm_ops->runtime_resume) {
tb                568 drivers/thunderbolt/domain.c 		int ret = tb->cm_ops->runtime_resume(tb);
tb                584 drivers/thunderbolt/domain.c int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
tb                588 drivers/thunderbolt/domain.c 	if (!tb->cm_ops->approve_switch)
tb                596 drivers/thunderbolt/domain.c 	return tb->cm_ops->approve_switch(tb, sw);
tb                610 drivers/thunderbolt/domain.c int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
tb                615 drivers/thunderbolt/domain.c 	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
tb                623 drivers/thunderbolt/domain.c 	ret = tb->cm_ops->add_switch_key(tb, sw);
tb                627 drivers/thunderbolt/domain.c 	return tb->cm_ops->approve_switch(tb, sw);
tb                642 drivers/thunderbolt/domain.c int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
tb                652 drivers/thunderbolt/domain.c 	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
tb                661 drivers/thunderbolt/domain.c 	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
tb                696 drivers/thunderbolt/domain.c 	return tb->cm_ops->approve_switch(tb, sw);
tb                715 drivers/thunderbolt/domain.c int tb_domain_disconnect_pcie_paths(struct tb *tb)
tb                717 drivers/thunderbolt/domain.c 	if (!tb->cm_ops->disconnect_pcie_paths)
tb                720 drivers/thunderbolt/domain.c 	return tb->cm_ops->disconnect_pcie_paths(tb);
tb                735 drivers/thunderbolt/domain.c int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb                737 drivers/thunderbolt/domain.c 	if (!tb->cm_ops->approve_xdomain_paths)
tb                740 drivers/thunderbolt/domain.c 	return tb->cm_ops->approve_xdomain_paths(tb, xd);
tb                755 drivers/thunderbolt/domain.c int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb                757 drivers/thunderbolt/domain.c 	if (!tb->cm_ops->disconnect_xdomain_paths)
tb                760 drivers/thunderbolt/domain.c 	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
tb                766 drivers/thunderbolt/domain.c 	struct tb *tb = data;
tb                770 drivers/thunderbolt/domain.c 	if (xd && xd->tb == tb)
tb                786 drivers/thunderbolt/domain.c int tb_domain_disconnect_all_paths(struct tb *tb)
tb                790 drivers/thunderbolt/domain.c 	ret = tb_domain_disconnect_pcie_paths(tb);
tb                794 drivers/thunderbolt/domain.c 	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
tb                414 drivers/thunderbolt/eeprom.c 	struct device *dev = &sw->tb->nhi->pdev->dev;
tb                 83 drivers/thunderbolt/icm.c 	bool (*is_supported)(struct tb *tb);
tb                 84 drivers/thunderbolt/icm.c 	int (*cio_reset)(struct tb *tb);
tb                 85 drivers/thunderbolt/icm.c 	int (*get_mode)(struct tb *tb);
tb                 86 drivers/thunderbolt/icm.c 	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
tb                 87 drivers/thunderbolt/icm.c 	void (*save_devices)(struct tb *tb);
tb                 88 drivers/thunderbolt/icm.c 	int (*driver_ready)(struct tb *tb,
tb                 91 drivers/thunderbolt/icm.c 	void (*set_uuid)(struct tb *tb);
tb                 92 drivers/thunderbolt/icm.c 	void (*device_connected)(struct tb *tb,
tb                 94 drivers/thunderbolt/icm.c 	void (*device_disconnected)(struct tb *tb,
tb                 96 drivers/thunderbolt/icm.c 	void (*xdomain_connected)(struct tb *tb,
tb                 98 drivers/thunderbolt/icm.c 	void (*xdomain_disconnected)(struct tb *tb,
tb                100 drivers/thunderbolt/icm.c 	void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
tb                106 drivers/thunderbolt/icm.c 	struct tb *tb;
tb                150 drivers/thunderbolt/icm.c static inline struct tb *icm_to_tb(struct icm *icm)
tb                152 drivers/thunderbolt/icm.c 	return ((void *)icm - sizeof(struct tb));
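icm_to_tb() above documents the private-data layout: tb_domain_alloc() (seen earlier) allocates sizeof(struct tb) + privsize as one block, tb_priv() hands out the bytes after the header, and icm_to_tb() steps back by sizeof(struct tb) to invert it. A standalone, runnable model of the layout and both conversions; the demo_* names are hypothetical stand-ins for struct tb and struct icm:

#include <assert.h>
#include <stdlib.h>

struct demo_domain { int index; };	/* stand-in for struct tb */
struct demo_priv   { int state; };	/* stand-in for struct icm */

/* One allocation: domain header followed by connection-manager data. */
static struct demo_domain *demo_alloc(size_t privsize)
{
	return calloc(1, sizeof(struct demo_domain) + privsize);
}

static void *demo_priv(struct demo_domain *d)
{
	return d + 1;			/* bytes right after the header */
}

static struct demo_domain *priv_to_domain(void *priv)
{
	/* Inverse of demo_priv(), as icm_to_tb() does with struct tb. */
	return (struct demo_domain *)((char *)priv -
				      sizeof(struct demo_domain));
}

int main(void)
{
	struct demo_domain *d = demo_alloc(sizeof(struct demo_priv));

	assert(d && priv_to_domain(demo_priv(d)) == d);
	free(d);
	return 0;
}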
tb                265 drivers/thunderbolt/icm.c static int icm_request(struct tb *tb, const void *request, size_t request_size,
tb                269 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb                291 drivers/thunderbolt/icm.c 		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
tb                310 drivers/thunderbolt/icm.c static void icm_postpone_rescan(struct tb *tb)
tb                312 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb                315 drivers/thunderbolt/icm.c 		mod_delayed_work(tb->wq, &icm->rescan_work,
tb                319 drivers/thunderbolt/icm.c static void icm_veto_begin(struct tb *tb)
tb                321 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb                326 drivers/thunderbolt/icm.c 		pm_runtime_get(&tb->dev);
tb                330 drivers/thunderbolt/icm.c static void icm_veto_end(struct tb *tb)
tb                332 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb                337 drivers/thunderbolt/icm.c 		pm_runtime_mark_last_busy(&tb->dev);
tb                338 drivers/thunderbolt/icm.c 		pm_runtime_put_autosuspend(&tb->dev);
tb                342 drivers/thunderbolt/icm.c static bool icm_fr_is_supported(struct tb *tb)
tb                358 drivers/thunderbolt/icm.c static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
tb                372 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), switches,
tb                409 drivers/thunderbolt/icm.c static void icm_fr_save_devices(struct tb *tb)
tb                411 drivers/thunderbolt/icm.c 	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
tb                415 drivers/thunderbolt/icm.c icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
tb                425 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb                436 drivers/thunderbolt/icm.c static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
tb                450 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb                456 drivers/thunderbolt/icm.c 		tb_warn(tb, "PCIe tunnel creation failed\n");
tb                463 drivers/thunderbolt/icm.c static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
tb                477 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb                483 drivers/thunderbolt/icm.c 		tb_warn(tb, "Adding key to switch failed\n");
tb                490 drivers/thunderbolt/icm.c static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
tb                505 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb                520 drivers/thunderbolt/icm.c static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb                537 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb                548 drivers/thunderbolt/icm.c static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb                559 drivers/thunderbolt/icm.c 	nhi_mailbox_cmd(tb->nhi, cmd, 1);
tb                561 drivers/thunderbolt/icm.c 	nhi_mailbox_cmd(tb->nhi, cmd, 2);
tb                578 drivers/thunderbolt/icm.c 	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
tb                661 drivers/thunderbolt/icm.c 	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
tb                694 drivers/thunderbolt/icm.c icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
tb                700 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb                708 drivers/thunderbolt/icm.c 	icm_postpone_rescan(tb);
tb                719 drivers/thunderbolt/icm.c 		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
tb                724 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
tb                747 drivers/thunderbolt/icm.c 				ret = icm->get_route(tb, link, depth, &route);
tb                749 drivers/thunderbolt/icm.c 					tb_err(tb, "failed to update route string for switch at %u.%u\n",
tb                779 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_link_depth(tb, link, depth);
tb                785 drivers/thunderbolt/icm.c 			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
tb                793 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
tb                799 drivers/thunderbolt/icm.c 	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
tb                801 drivers/thunderbolt/icm.c 		tb_err(tb, "failed to find parent switch for %u.%u\n",
tb                806 drivers/thunderbolt/icm.c 	ret = icm->get_route(tb, link, depth, &route);
tb                808 drivers/thunderbolt/icm.c 		tb_err(tb, "failed to find route string for switch at %u.%u\n",
tb                823 drivers/thunderbolt/icm.c icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
tb                835 drivers/thunderbolt/icm.c 		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
tb                839 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_link_depth(tb, link, depth);
tb                841 drivers/thunderbolt/icm.c 		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
tb                851 drivers/thunderbolt/icm.c icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
tb                865 drivers/thunderbolt/icm.c 		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
tb                871 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
tb                899 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
tb                905 drivers/thunderbolt/icm.c 			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
tb                918 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_route(tb, route);
tb                924 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_link_depth(tb, link, depth);
tb                926 drivers/thunderbolt/icm.c 		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
tb                937 drivers/thunderbolt/icm.c icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
tb                948 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
tb                955 drivers/thunderbolt/icm.c static int icm_tr_cio_reset(struct tb *tb)
tb                957 drivers/thunderbolt/icm.c 	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
tb                961 drivers/thunderbolt/icm.c icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
tb                971 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb                987 drivers/thunderbolt/icm.c static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
tb               1001 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1007 drivers/thunderbolt/icm.c 		tb_warn(tb, "PCIe tunnel creation failed\n");
tb               1014 drivers/thunderbolt/icm.c static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
tb               1029 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1035 drivers/thunderbolt/icm.c 		tb_warn(tb, "Adding key to switch failed\n");
tb               1042 drivers/thunderbolt/icm.c static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
tb               1058 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1073 drivers/thunderbolt/icm.c static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb               1090 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1101 drivers/thunderbolt/icm.c static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
tb               1116 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1127 drivers/thunderbolt/icm.c static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb               1131 drivers/thunderbolt/icm.c 	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
tb               1136 drivers/thunderbolt/icm.c 	return icm_tr_xdomain_tear_down(tb, xd, 2);
tb               1140 drivers/thunderbolt/icm.c __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
tb               1151 drivers/thunderbolt/icm.c 	icm_postpone_rescan(tb);
tb               1168 drivers/thunderbolt/icm.c 		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
tb               1173 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
tb               1189 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_route(tb, route);
tb               1196 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_route(tb, route);
tb               1202 drivers/thunderbolt/icm.c 	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
tb               1204 drivers/thunderbolt/icm.c 		tb_err(tb, "failed to find parent switch for %llx\n", route);
tb               1218 drivers/thunderbolt/icm.c icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
tb               1220 drivers/thunderbolt/icm.c 	__icm_tr_device_connected(tb, hdr, false);
tb               1224 drivers/thunderbolt/icm.c icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
tb               1233 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_route(tb, route);
tb               1235 drivers/thunderbolt/icm.c 		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
tb               1244 drivers/thunderbolt/icm.c icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
tb               1252 drivers/thunderbolt/icm.c 	if (!tb->root_switch)
tb               1257 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
tb               1270 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_route(tb, route);
tb               1281 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_route(tb, route);
tb               1287 drivers/thunderbolt/icm.c 	sw = tb_switch_find_by_route(tb, get_parent_route(route));
tb               1289 drivers/thunderbolt/icm.c 		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
tb               1298 drivers/thunderbolt/icm.c icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
tb               1307 drivers/thunderbolt/icm.c 	xd = tb_xdomain_find_by_route(tb, route);
tb               1344 drivers/thunderbolt/icm.c static bool icm_ar_is_supported(struct tb *tb)
tb               1347 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1360 drivers/thunderbolt/icm.c 	upstream_port = get_upstream_port(tb->nhi->pdev);
tb               1377 drivers/thunderbolt/icm.c static int icm_ar_cio_reset(struct tb *tb)
tb               1379 drivers/thunderbolt/icm.c 	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
tb               1382 drivers/thunderbolt/icm.c static int icm_ar_get_mode(struct tb *tb)
tb               1384 drivers/thunderbolt/icm.c 	struct tb_nhi *nhi = tb->nhi;
tb               1404 drivers/thunderbolt/icm.c icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
tb               1414 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1430 drivers/thunderbolt/icm.c static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
tb               1440 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1452 drivers/thunderbolt/icm.c static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
tb               1461 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1489 drivers/thunderbolt/icm.c static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
tb               1522 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1534 drivers/thunderbolt/icm.c icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
tb               1544 drivers/thunderbolt/icm.c 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
tb               1556 drivers/thunderbolt/icm.c static void icm_icl_set_uuid(struct tb *tb)
tb               1558 drivers/thunderbolt/icm.c 	struct tb_nhi *nhi = tb->nhi;
tb               1566 drivers/thunderbolt/icm.c 	tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
tb               1570 drivers/thunderbolt/icm.c icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
tb               1572 drivers/thunderbolt/icm.c 	__icm_tr_device_connected(tb, hdr, true);
tb               1575 drivers/thunderbolt/icm.c static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
tb               1580 drivers/thunderbolt/icm.c 	tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
tb               1583 drivers/thunderbolt/icm.c 		icm_veto_begin(tb);
tb               1585 drivers/thunderbolt/icm.c 		icm_veto_end(tb);
tb               1591 drivers/thunderbolt/icm.c 	struct tb *tb = n->tb;
tb               1592 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1594 drivers/thunderbolt/icm.c 	mutex_lock(&tb->lock);
tb               1601 drivers/thunderbolt/icm.c 	if (tb->root_switch) {
tb               1604 drivers/thunderbolt/icm.c 			icm->device_connected(tb, n->pkg);
tb               1607 drivers/thunderbolt/icm.c 			icm->device_disconnected(tb, n->pkg);
tb               1610 drivers/thunderbolt/icm.c 			icm->xdomain_connected(tb, n->pkg);
tb               1613 drivers/thunderbolt/icm.c 			icm->xdomain_disconnected(tb, n->pkg);
tb               1616 drivers/thunderbolt/icm.c 			icm->rtd3_veto(tb, n->pkg);
tb               1621 drivers/thunderbolt/icm.c 	mutex_unlock(&tb->lock);
tb               1627 drivers/thunderbolt/icm.c static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
tb               1638 drivers/thunderbolt/icm.c 	n->tb = tb;
tb               1640 drivers/thunderbolt/icm.c 	queue_work(tb->wq, &n->work);
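icm_handle_event() never processes a firmware notification in the receive path: it copies the packet into a heap node, sets n->tb, and queue_work()s it on the domain workqueue; the notification work above then dispatches through the icm hooks under tb->lock. A sketch of that defer step with hypothetical demo_* names and a fixed-size packet copy:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct demo_event {
	struct work_struct work;
	void *ctx;
	size_t size;
	u8 pkg[64];		/* bounded copy of the packet */
};

static void demo_event_fn(struct work_struct *work)
{
	struct demo_event *ev = container_of(work, struct demo_event, work);

	/* ... dispatch ev->pkg under the domain lock ... */
	kfree(ev);
}

/* Receive path: defer to the ordered workqueue, never handle inline. */
static void demo_queue_event(struct workqueue_struct *wq, void *ctx,
			     const void *pkg, size_t size)
{
	struct demo_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->ctx = ctx;
	ev->size = min(size, sizeof(ev->pkg));
	memcpy(ev->pkg, pkg, ev->size);
	INIT_WORK(&ev->work, demo_event_fn);
	queue_work(wq, &ev->work);
}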
tb               1644 drivers/thunderbolt/icm.c __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
tb               1647 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1651 drivers/thunderbolt/icm.c 	ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
tb               1653 drivers/thunderbolt/icm.c 		tb_err(tb, "failed to send driver ready to ICM\n");
tb               1665 drivers/thunderbolt/icm.c 		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
tb               1673 drivers/thunderbolt/icm.c 	tb_err(tb, "failed to read root switch config space, giving up\n");
tb               1677 drivers/thunderbolt/icm.c static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
tb               1679 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1697 drivers/thunderbolt/icm.c 	return icm->cio_reset(tb);
tb               1700 drivers/thunderbolt/icm.c static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
tb               1713 drivers/thunderbolt/icm.c 	ret = icm_firmware_reset(tb, nhi);
tb               1730 drivers/thunderbolt/icm.c static int icm_reset_phy_port(struct tb *tb, int phy_port)
tb               1732 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1798 drivers/thunderbolt/icm.c static int icm_firmware_init(struct tb *tb)
tb               1800 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1801 drivers/thunderbolt/icm.c 	struct tb_nhi *nhi = tb->nhi;
tb               1804 drivers/thunderbolt/icm.c 	ret = icm_firmware_start(tb, nhi);
tb               1811 drivers/thunderbolt/icm.c 		ret = icm->get_mode(tb);
tb               1827 drivers/thunderbolt/icm.c 			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
tb               1836 drivers/thunderbolt/icm.c 	ret = icm_reset_phy_port(tb, 0);
tb               1839 drivers/thunderbolt/icm.c 	ret = icm_reset_phy_port(tb, 1);
tb               1846 drivers/thunderbolt/icm.c static int icm_driver_ready(struct tb *tb)
tb               1848 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1851 drivers/thunderbolt/icm.c 	ret = icm_firmware_init(tb);
tb               1856 drivers/thunderbolt/icm.c 		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
tb               1857 drivers/thunderbolt/icm.c 		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
tb               1858 drivers/thunderbolt/icm.c 		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
tb               1862 drivers/thunderbolt/icm.c 	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
tb               1871 drivers/thunderbolt/icm.c 	if (tb->nboot_acl > icm->max_boot_acl)
tb               1872 drivers/thunderbolt/icm.c 		tb->nboot_acl = 0;
tb               1877 drivers/thunderbolt/icm.c static int icm_suspend(struct tb *tb)
tb               1879 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1882 drivers/thunderbolt/icm.c 		icm->save_devices(tb);
tb               1884 drivers/thunderbolt/icm.c 	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
tb               1961 drivers/thunderbolt/icm.c 	struct tb *tb = icm_to_tb(icm);
tb               1963 drivers/thunderbolt/icm.c 	mutex_lock(&tb->lock);
tb               1964 drivers/thunderbolt/icm.c 	if (tb->root_switch)
tb               1965 drivers/thunderbolt/icm.c 		icm_free_unplugged_children(tb->root_switch);
tb               1966 drivers/thunderbolt/icm.c 	mutex_unlock(&tb->lock);
tb               1969 drivers/thunderbolt/icm.c static void icm_complete(struct tb *tb)
tb               1971 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               1973 drivers/thunderbolt/icm.c 	if (tb->nhi->going_away)
tb               1982 drivers/thunderbolt/icm.c 	icm_veto_end(tb);
tb               1983 drivers/thunderbolt/icm.c 	icm_unplug_children(tb->root_switch);
tb               1989 drivers/thunderbolt/icm.c 	__icm_driver_ready(tb, NULL, NULL, NULL);
tb               1996 drivers/thunderbolt/icm.c 	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
tb               1999 drivers/thunderbolt/icm.c static int icm_runtime_suspend(struct tb *tb)
tb               2001 drivers/thunderbolt/icm.c 	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
tb               2023 drivers/thunderbolt/icm.c static int icm_runtime_resume(struct tb *tb)
tb               2029 drivers/thunderbolt/icm.c 	icm_complete(tb);
tb               2033 drivers/thunderbolt/icm.c static int icm_start(struct tb *tb)
tb               2035 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               2039 drivers/thunderbolt/icm.c 		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
tb               2041 drivers/thunderbolt/icm.c 		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
tb               2042 drivers/thunderbolt/icm.c 	if (IS_ERR(tb->root_switch))
tb               2043 drivers/thunderbolt/icm.c 		return PTR_ERR(tb->root_switch);
tb               2045 drivers/thunderbolt/icm.c 	tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
tb               2046 drivers/thunderbolt/icm.c 	tb->root_switch->rpm = icm->rpm;
tb               2049 drivers/thunderbolt/icm.c 		icm->set_uuid(tb);
tb               2051 drivers/thunderbolt/icm.c 	ret = tb_switch_add(tb->root_switch);
tb               2053 drivers/thunderbolt/icm.c 		tb_switch_put(tb->root_switch);
tb               2054 drivers/thunderbolt/icm.c 		tb->root_switch = NULL;
tb               2060 drivers/thunderbolt/icm.c static void icm_stop(struct tb *tb)
tb               2062 drivers/thunderbolt/icm.c 	struct icm *icm = tb_priv(tb);
tb               2065 drivers/thunderbolt/icm.c 	tb_switch_remove(tb->root_switch);
tb               2066 drivers/thunderbolt/icm.c 	tb->root_switch = NULL;
tb               2067 drivers/thunderbolt/icm.c 	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
tb               2070 drivers/thunderbolt/icm.c static int icm_disconnect_pcie_paths(struct tb *tb)
tb               2072 drivers/thunderbolt/icm.c 	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
tb               2148 drivers/thunderbolt/icm.c struct tb *icm_probe(struct tb_nhi *nhi)
tb               2151 drivers/thunderbolt/icm.c 	struct tb *tb;
tb               2153 drivers/thunderbolt/icm.c 	tb = tb_domain_alloc(nhi, sizeof(struct icm));
tb               2154 drivers/thunderbolt/icm.c 	if (!tb)
tb               2157 drivers/thunderbolt/icm.c 	icm = tb_priv(tb);
tb               2173 drivers/thunderbolt/icm.c 		tb->cm_ops = &icm_fr_ops;
tb               2199 drivers/thunderbolt/icm.c 		tb->cm_ops = &icm_ar_ops;
tb               2214 drivers/thunderbolt/icm.c 		tb->cm_ops = &icm_tr_ops;
tb               2227 drivers/thunderbolt/icm.c 		tb->cm_ops = &icm_icl_ops;
tb               2231 drivers/thunderbolt/icm.c 	if (!icm->is_supported || !icm->is_supported(tb)) {
tb               2233 drivers/thunderbolt/icm.c 		tb_domain_put(tb);
tb               2237 drivers/thunderbolt/icm.c 	return tb;
tb                866 drivers/thunderbolt/nhi.c 	struct tb *tb = pci_get_drvdata(pdev);
tb                867 drivers/thunderbolt/nhi.c 	struct tb_nhi *nhi = tb->nhi;
tb                870 drivers/thunderbolt/nhi.c 	ret = tb_domain_suspend_noirq(tb);
tb                875 drivers/thunderbolt/nhi.c 		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
tb                930 drivers/thunderbolt/nhi.c 	struct tb *tb = pci_get_drvdata(pdev);
tb                931 drivers/thunderbolt/nhi.c 	struct tb_nhi *nhi = tb->nhi;
tb                947 drivers/thunderbolt/nhi.c 		nhi_enable_int_throttling(tb->nhi);
tb                950 drivers/thunderbolt/nhi.c 	return tb_domain_resume_noirq(tb);
tb                956 drivers/thunderbolt/nhi.c 	struct tb *tb = pci_get_drvdata(pdev);
tb                958 drivers/thunderbolt/nhi.c 	return tb_domain_suspend(tb);
tb                964 drivers/thunderbolt/nhi.c 	struct tb *tb = pci_get_drvdata(pdev);
tb                974 drivers/thunderbolt/nhi.c 		tb_domain_complete(tb);
tb                980 drivers/thunderbolt/nhi.c 	struct tb *tb = pci_get_drvdata(pdev);
tb                981 drivers/thunderbolt/nhi.c 	struct tb_nhi *nhi = tb->nhi;
tb                984 drivers/thunderbolt/nhi.c 	ret = tb_domain_runtime_suspend(tb);
tb                989 drivers/thunderbolt/nhi.c 		ret = nhi->ops->runtime_suspend(tb->nhi);
tb                999 drivers/thunderbolt/nhi.c 	struct tb *tb = pci_get_drvdata(pdev);
tb               1000 drivers/thunderbolt/nhi.c 	struct tb_nhi *nhi = tb->nhi;
tb               1010 drivers/thunderbolt/nhi.c 	return tb_domain_runtime_resume(tb);
tb               1097 drivers/thunderbolt/nhi.c 	struct tb *tb;
tb               1161 drivers/thunderbolt/nhi.c 	tb = icm_probe(nhi);
tb               1162 drivers/thunderbolt/nhi.c 	if (!tb)
tb               1163 drivers/thunderbolt/nhi.c 		tb = tb_probe(nhi);
tb               1164 drivers/thunderbolt/nhi.c 	if (!tb) {
tb               1172 drivers/thunderbolt/nhi.c 	res = tb_domain_add(tb);
tb               1178 drivers/thunderbolt/nhi.c 		tb_domain_put(tb);
tb               1182 drivers/thunderbolt/nhi.c 	pci_set_drvdata(pdev, tb);
tb               1194 drivers/thunderbolt/nhi.c 	struct tb *tb = pci_get_drvdata(pdev);
tb               1195 drivers/thunderbolt/nhi.c 	struct tb_nhi *nhi = tb->nhi;
tb               1201 drivers/thunderbolt/nhi.c 	tb_domain_remove(tb);
tb                 27 drivers/thunderbolt/nhi_ops.c 	struct tb *tb = pci_get_drvdata(nhi->pdev);
tb                 30 drivers/thunderbolt/nhi_ops.c 	ret = device_for_each_child(&tb->root_switch->dev, NULL,
tb                156 drivers/thunderbolt/path.c 	path->tb = src->sw->tb;
tb                227 drivers/thunderbolt/path.c struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
tb                296 drivers/thunderbolt/path.c 	path->tb = tb;
tb                413 drivers/thunderbolt/path.c 		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
tb                416 drivers/thunderbolt/path.c 	tb_dbg(path->tb,
tb                440 drivers/thunderbolt/path.c 		tb_WARN(path->tb, "trying to activate already activated path\n");
tb                444 drivers/thunderbolt/path.c 	tb_dbg(path->tb,
tb                515 drivers/thunderbolt/path.c 	tb_dbg(path->tb, "path activation complete\n");
tb                518 drivers/thunderbolt/path.c 	tb_WARN(path->tb, "path activation failed\n");
tb                181 drivers/thunderbolt/switch.c 		ret = tb_domain_disconnect_all_paths(sw->tb);
tb                262 drivers/thunderbolt/switch.c 	if (!mutex_trylock(&sw->tb->lock)) {
tb                268 drivers/thunderbolt/switch.c 	mutex_unlock(&sw->tb->lock);
tb                289 drivers/thunderbolt/switch.c 	if (!mutex_trylock(&sw->tb->lock))
tb                310 drivers/thunderbolt/switch.c 	mutex_unlock(&sw->tb->lock);
tb                469 drivers/thunderbolt/switch.c static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
tb                471 drivers/thunderbolt/switch.c 	tb_dbg(tb,
tb                476 drivers/thunderbolt/switch.c 	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
tb                478 drivers/thunderbolt/switch.c 	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
tb                479 drivers/thunderbolt/switch.c 	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
tb                645 drivers/thunderbolt/switch.c 			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
tb                666 drivers/thunderbolt/switch.c 	tb_dump_port(port->sw->tb, &port->config);
tb                963 drivers/thunderbolt/switch.c static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
tb                965 drivers/thunderbolt/switch.c 	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
tb                968 drivers/thunderbolt/switch.c 	tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
tb                969 drivers/thunderbolt/switch.c 	tb_dbg(tb, "  Config:\n");
tb                970 drivers/thunderbolt/switch.c 	tb_dbg(tb,
tb                975 drivers/thunderbolt/switch.c 	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
tb                984 drivers/thunderbolt/switch.c int tb_switch_reset(struct tb *tb, u64 route)
tb                992 drivers/thunderbolt/switch.c 	tb_dbg(tb, "resetting switch at %llx\n", route);
tb                993 drivers/thunderbolt/switch.c 	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
tb                997 drivers/thunderbolt/switch.c 	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
tb               1057 drivers/thunderbolt/switch.c 	if (!mutex_trylock(&sw->tb->lock))
tb               1067 drivers/thunderbolt/switch.c 			ret = tb_domain_approve_switch_key(sw->tb, sw);
tb               1069 drivers/thunderbolt/switch.c 			ret = tb_domain_approve_switch(sw->tb, sw);
tb               1075 drivers/thunderbolt/switch.c 			ret = tb_domain_challenge_switch_key(sw->tb, sw);
tb               1089 drivers/thunderbolt/switch.c 	mutex_unlock(&sw->tb->lock);
tb               1149 drivers/thunderbolt/switch.c 	if (!mutex_trylock(&sw->tb->lock))
tb               1157 drivers/thunderbolt/switch.c 	mutex_unlock(&sw->tb->lock);
tb               1174 drivers/thunderbolt/switch.c 	if (!mutex_trylock(&sw->tb->lock))
tb               1190 drivers/thunderbolt/switch.c 	mutex_unlock(&sw->tb->lock);
tb               1205 drivers/thunderbolt/switch.c 	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
tb               1214 drivers/thunderbolt/switch.c 	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
tb               1238 drivers/thunderbolt/switch.c 	if (!mutex_trylock(&sw->tb->lock)) {
tb               1281 drivers/thunderbolt/switch.c 	mutex_unlock(&sw->tb->lock);
tb               1298 drivers/thunderbolt/switch.c 	if (!mutex_trylock(&sw->tb->lock))
tb               1308 drivers/thunderbolt/switch.c 	mutex_unlock(&sw->tb->lock);
tb               1375 drivers/thunderbolt/switch.c 		    sw->tb->security_level == TB_SECURITY_SECURE &&
tb               1436 drivers/thunderbolt/switch.c 	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
tb               1447 drivers/thunderbolt/switch.c 	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
tb               1520 drivers/thunderbolt/switch.c struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
tb               1532 drivers/thunderbolt/switch.c 	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
tb               1540 drivers/thunderbolt/switch.c 	sw->tb = tb;
tb               1541 drivers/thunderbolt/switch.c 	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
tb               1545 drivers/thunderbolt/switch.c 	tb_dbg(tb, "current switch config:\n");
tb               1546 drivers/thunderbolt/switch.c 	tb_dump_switch(tb, &sw->config);
tb               1591 drivers/thunderbolt/switch.c 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
tb               1617 drivers/thunderbolt/switch.c tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
tb               1625 drivers/thunderbolt/switch.c 	sw->tb = tb;
tb               1636 drivers/thunderbolt/switch.c 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
tb               1653 drivers/thunderbolt/switch.c 	struct tb *tb = sw->tb;
tb               1658 drivers/thunderbolt/switch.c 	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
tb               1917 drivers/thunderbolt/switch.c 	if (sw == sw->tb->root_switch) {
tb               1951 drivers/thunderbolt/switch.c 		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
tb               2024 drivers/thunderbolt/switch.c 	struct tb *tb;
tb               2038 drivers/thunderbolt/switch.c 	if (sw->tb != lookup->tb)
tb               2065 drivers/thunderbolt/switch.c struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
tb               2071 drivers/thunderbolt/switch.c 	lookup.tb = tb;
tb               2090 drivers/thunderbolt/switch.c struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
tb               2096 drivers/thunderbolt/switch.c 	lookup.tb = tb;
tb               2114 drivers/thunderbolt/switch.c struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
tb               2120 drivers/thunderbolt/switch.c 		return tb_switch_get(tb->root_switch);
tb               2123 drivers/thunderbolt/switch.c 	lookup.tb = tb;
tb                 33 drivers/thunderbolt/tb.c 	struct tb *tb;
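The tb_switch_find_by_link_depth/uuid/route helpers above all fill a small lookup struct (a tb pointer plus the key) and walk the bus, with the match callback rejecting switches from other domains (sw->tb != lookup->tb) before comparing the key; tb_domain_disconnect_all_paths() earlier shows the same bus_for_each_dev() idiom. A sketch of that shape with hypothetical demo_* types; the device-type check and reference counting of the real code are elided:

#include <linux/device.h>

struct demo_switch {
	struct device dev;
	void *domain;
	u64 route;
};
#define to_demo_switch(d) container_of(d, struct demo_switch, dev)

struct demo_lookup {
	void *domain;		/* only match switches of this domain */
	u64 route;
	struct demo_switch *found;
};

static int demo_match(struct device *dev, void *data)
{
	struct demo_lookup *lookup = data;
	struct demo_switch *sw = to_demo_switch(dev);

	if (sw->domain != lookup->domain)
		return 0;	/* other domain: keep walking */
	if (sw->route != lookup->route)
		return 0;
	lookup->found = sw;
	return 1;		/* nonzero stops the iteration */
}

static struct demo_switch *demo_switch_find(struct bus_type *bus,
					    void *domain, u64 route)
{
	struct demo_lookup lookup = { .domain = domain, .route = route };

	bus_for_each_dev(bus, NULL, &lookup, demo_match);
	return lookup.found;
}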
tb                 41 drivers/thunderbolt/tb.c static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
tb                 49 drivers/thunderbolt/tb.c 	ev->tb = tb;
tb                 54 drivers/thunderbolt/tb.c 	queue_work(tb->wq, &ev->work);
tb                 61 drivers/thunderbolt/tb.c 	struct tb *tb = sw->tb;
tb                 62 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                 72 drivers/thunderbolt/tb.c 			tunnel = tb_tunnel_discover_dp(tb, port);
tb                 76 drivers/thunderbolt/tb.c 			tunnel = tb_tunnel_discover_pci(tb, port);
tb                107 drivers/thunderbolt/tb.c 	struct tb *tb = sw->tb;
tb                112 drivers/thunderbolt/tb.c 	xd = tb_xdomain_find_by_route(tb, route);
tb                118 drivers/thunderbolt/tb.c 	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
tb                143 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(port->sw->tb);
tb                153 drivers/thunderbolt/tb.c 		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
tb                171 drivers/thunderbolt/tb.c 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb                223 drivers/thunderbolt/tb.c static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
tb                226 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                246 drivers/thunderbolt/tb.c static void tb_free_invalid_tunnels(struct tb *tb)
tb                248 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                361 drivers/thunderbolt/tb.c static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
tb                363 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                378 drivers/thunderbolt/tb.c 	tunnel = tb_tunnel_alloc_dp(tb, in, out);
tb                394 drivers/thunderbolt/tb.c static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
tb                396 drivers/thunderbolt/tb.c 	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
tb                399 drivers/thunderbolt/tb.c static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
tb                402 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                420 drivers/thunderbolt/tb.c 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
tb                435 drivers/thunderbolt/tb.c static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb                437 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                444 drivers/thunderbolt/tb.c 	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
tb                446 drivers/thunderbolt/tb.c 	mutex_lock(&tb->lock);
tb                447 drivers/thunderbolt/tb.c 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
tb                451 drivers/thunderbolt/tb.c 		mutex_unlock(&tb->lock);
tb                459 drivers/thunderbolt/tb.c 		mutex_unlock(&tb->lock);
tb                464 drivers/thunderbolt/tb.c 	mutex_unlock(&tb->lock);
tb                468 drivers/thunderbolt/tb.c static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb                481 drivers/thunderbolt/tb.c 	tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
tb                484 drivers/thunderbolt/tb.c static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
tb                487 drivers/thunderbolt/tb.c 		mutex_lock(&tb->lock);
tb                488 drivers/thunderbolt/tb.c 		__tb_disconnect_xdomain_paths(tb, xd);
tb                489 drivers/thunderbolt/tb.c 		mutex_unlock(&tb->lock);
tb                504 drivers/thunderbolt/tb.c 	struct tb *tb = ev->tb;
tb                505 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                508 drivers/thunderbolt/tb.c 	mutex_lock(&tb->lock);
tb                512 drivers/thunderbolt/tb.c 	sw = tb_switch_find_by_route(tb, ev->route);
tb                514 drivers/thunderbolt/tb.c 		tb_warn(tb,
tb                520 drivers/thunderbolt/tb.c 		tb_warn(tb,
tb                527 drivers/thunderbolt/tb.c 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
tb                535 drivers/thunderbolt/tb.c 			tb_free_invalid_tunnels(tb);
tb                554 drivers/thunderbolt/tb.c 			__tb_disconnect_xdomain_paths(tb, xd);
tb                557 drivers/thunderbolt/tb.c 			tb_teardown_dp(tb, port);
tb                571 drivers/thunderbolt/tb.c 			tb_tunnel_dp(tb, port);
tb                578 drivers/thunderbolt/tb.c 	mutex_unlock(&tb->lock);
tb                587 drivers/thunderbolt/tb.c static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
tb                594 drivers/thunderbolt/tb.c 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
tb                600 drivers/thunderbolt/tb.c 	if (tb_cfg_error(tb->ctl, route, pkg->port,
tb                602 drivers/thunderbolt/tb.c 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
tb                606 drivers/thunderbolt/tb.c 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
tb                609 drivers/thunderbolt/tb.c static void tb_stop(struct tb *tb)
tb                611 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                626 drivers/thunderbolt/tb.c 	tb_switch_remove(tb->root_switch);
tb                651 drivers/thunderbolt/tb.c static int tb_start(struct tb *tb)
tb                653 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                656 drivers/thunderbolt/tb.c 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
tb                657 drivers/thunderbolt/tb.c 	if (IS_ERR(tb->root_switch))
tb                658 drivers/thunderbolt/tb.c 		return PTR_ERR(tb->root_switch);
tb                665 drivers/thunderbolt/tb.c 	tb->root_switch->no_nvm_upgrade = true;
tb                667 drivers/thunderbolt/tb.c 	ret = tb_switch_configure(tb->root_switch);
tb                669 drivers/thunderbolt/tb.c 		tb_switch_put(tb->root_switch);
tb                674 drivers/thunderbolt/tb.c 	ret = tb_switch_add(tb->root_switch);
tb                676 drivers/thunderbolt/tb.c 		tb_switch_put(tb->root_switch);
tb                681 drivers/thunderbolt/tb.c 	tb_scan_switch(tb->root_switch);
tb                683 drivers/thunderbolt/tb.c 	tb_discover_tunnels(tb->root_switch);
tb                685 drivers/thunderbolt/tb.c 	device_for_each_child(&tb->root_switch->dev, NULL,
tb                693 drivers/thunderbolt/tb.c static int tb_suspend_noirq(struct tb *tb)
tb                695 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                697 drivers/thunderbolt/tb.c 	tb_dbg(tb, "suspending...\n");
tb                698 drivers/thunderbolt/tb.c 	tb_switch_suspend(tb->root_switch);
tb                700 drivers/thunderbolt/tb.c 	tb_dbg(tb, "suspend finished\n");
tb                705 drivers/thunderbolt/tb.c static int tb_resume_noirq(struct tb *tb)
tb                707 drivers/thunderbolt/tb.c 	struct tb_cm *tcm = tb_priv(tb);
tb                710 drivers/thunderbolt/tb.c 	tb_dbg(tb, "resuming...\n");
tb                713 drivers/thunderbolt/tb.c 	tb_switch_reset(tb, 0);
tb                715 drivers/thunderbolt/tb.c 	tb_switch_resume(tb->root_switch);
tb                716 drivers/thunderbolt/tb.c 	tb_free_invalid_tunnels(tb);
tb                717 drivers/thunderbolt/tb.c 	tb_free_unplugged_children(tb->root_switch);
tb                725 drivers/thunderbolt/tb.c 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
tb                730 drivers/thunderbolt/tb.c 	tb_dbg(tb, "resume finished\n");
tb                756 drivers/thunderbolt/tb.c static void tb_complete(struct tb *tb)
tb                763 drivers/thunderbolt/tb.c 	mutex_lock(&tb->lock);
tb                764 drivers/thunderbolt/tb.c 	if (tb_free_unplugged_xdomains(tb->root_switch))
tb                765 drivers/thunderbolt/tb.c 		tb_scan_switch(tb->root_switch);
tb                766 drivers/thunderbolt/tb.c 	mutex_unlock(&tb->lock);
tb                781 drivers/thunderbolt/tb.c struct tb *tb_probe(struct tb_nhi *nhi)
tb                784 drivers/thunderbolt/tb.c 	struct tb *tb;
tb                789 drivers/thunderbolt/tb.c 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
tb                790 drivers/thunderbolt/tb.c 	if (!tb)
tb                793 drivers/thunderbolt/tb.c 	tb->security_level = TB_SECURITY_USER;
tb                794 drivers/thunderbolt/tb.c 	tb->cm_ops = &tb_cm_ops;
tb                796 drivers/thunderbolt/tb.c 	tcm = tb_priv(tb);
tb                799 drivers/thunderbolt/tb.c 	return tb;
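
Note: the tb.c fragments above show the native connection manager's callbacks but not the ops table that ties them together. A hedged sketch of that initializer, using only the static functions and the tb_cm_ops fields visible in this listing (field membership is inferred from the matching signatures, not confirmed by the listing itself):

	static const struct tb_cm_ops tb_cm_ops = {
		.start			= tb_start,
		.stop			= tb_stop,
		.suspend_noirq		= tb_suspend_noirq,
		.resume_noirq		= tb_resume_noirq,
		.complete		= tb_complete,
		.handle_event		= tb_handle_event,
		.approve_xdomain_paths	= tb_approve_xdomain_paths,
		.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
	};

tb_probe() then points tb->cm_ops at this table before returning the newly allocated domain, as the fragment above shows.
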
tb                 93 drivers/thunderbolt/tb.h 	struct tb *tb;
tb                223 drivers/thunderbolt/tb.h 	struct tb *tb;
tb                269 drivers/thunderbolt/tb.h 	int (*driver_ready)(struct tb *tb);
tb                270 drivers/thunderbolt/tb.h 	int (*start)(struct tb *tb);
tb                271 drivers/thunderbolt/tb.h 	void (*stop)(struct tb *tb);
tb                272 drivers/thunderbolt/tb.h 	int (*suspend_noirq)(struct tb *tb);
tb                273 drivers/thunderbolt/tb.h 	int (*resume_noirq)(struct tb *tb);
tb                274 drivers/thunderbolt/tb.h 	int (*suspend)(struct tb *tb);
tb                275 drivers/thunderbolt/tb.h 	void (*complete)(struct tb *tb);
tb                276 drivers/thunderbolt/tb.h 	int (*runtime_suspend)(struct tb *tb);
tb                277 drivers/thunderbolt/tb.h 	int (*runtime_resume)(struct tb *tb);
tb                280 drivers/thunderbolt/tb.h 	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
tb                282 drivers/thunderbolt/tb.h 	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
tb                283 drivers/thunderbolt/tb.h 	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
tb                284 drivers/thunderbolt/tb.h 	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
tb                285 drivers/thunderbolt/tb.h 	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
tb                286 drivers/thunderbolt/tb.h 	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
tb                288 drivers/thunderbolt/tb.h 	int (*disconnect_pcie_paths)(struct tb *tb);
tb                289 drivers/thunderbolt/tb.h 	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
tb                290 drivers/thunderbolt/tb.h 	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
tb                293 drivers/thunderbolt/tb.h static inline void *tb_priv(struct tb *tb)
tb                295 drivers/thunderbolt/tb.h 	return (void *)tb->privdata;
tb                393 drivers/thunderbolt/tb.h 	return tb_cfg_read(sw->tb->ctl,
tb                407 drivers/thunderbolt/tb.h 	return tb_cfg_write(sw->tb->ctl,
tb                421 drivers/thunderbolt/tb.h 	return tb_cfg_read(port->sw->tb->ctl,
tb                435 drivers/thunderbolt/tb.h 	return tb_cfg_write(port->sw->tb->ctl,
tb                444 drivers/thunderbolt/tb.h #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
tb                445 drivers/thunderbolt/tb.h #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
tb                446 drivers/thunderbolt/tb.h #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
tb                447 drivers/thunderbolt/tb.h #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
tb                448 drivers/thunderbolt/tb.h #define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)
tb                453 drivers/thunderbolt/tb.h 		level(__sw->tb, "%llx: " fmt,           \
tb                464 drivers/thunderbolt/tb.h 		level(__port->sw->tb, "%llx:%x: " fmt,                  \
tb                476 drivers/thunderbolt/tb.h struct tb *icm_probe(struct tb_nhi *nhi);
tb                477 drivers/thunderbolt/tb.h struct tb *tb_probe(struct tb_nhi *nhi);
tb                488 drivers/thunderbolt/tb.h struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
tb                489 drivers/thunderbolt/tb.h int tb_domain_add(struct tb *tb);
tb                490 drivers/thunderbolt/tb.h void tb_domain_remove(struct tb *tb);
tb                491 drivers/thunderbolt/tb.h int tb_domain_suspend_noirq(struct tb *tb);
tb                492 drivers/thunderbolt/tb.h int tb_domain_resume_noirq(struct tb *tb);
tb                493 drivers/thunderbolt/tb.h int tb_domain_suspend(struct tb *tb);
tb                494 drivers/thunderbolt/tb.h void tb_domain_complete(struct tb *tb);
tb                495 drivers/thunderbolt/tb.h int tb_domain_runtime_suspend(struct tb *tb);
tb                496 drivers/thunderbolt/tb.h int tb_domain_runtime_resume(struct tb *tb);
tb                497 drivers/thunderbolt/tb.h int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
tb                498 drivers/thunderbolt/tb.h int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
tb                499 drivers/thunderbolt/tb.h int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
tb                500 drivers/thunderbolt/tb.h int tb_domain_disconnect_pcie_paths(struct tb *tb);
tb                501 drivers/thunderbolt/tb.h int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
tb                502 drivers/thunderbolt/tb.h int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
tb                503 drivers/thunderbolt/tb.h int tb_domain_disconnect_all_paths(struct tb *tb);
tb                505 drivers/thunderbolt/tb.h static inline struct tb *tb_domain_get(struct tb *tb)
tb                507 drivers/thunderbolt/tb.h 	if (tb)
tb                508 drivers/thunderbolt/tb.h 		get_device(&tb->dev);
tb                509 drivers/thunderbolt/tb.h 	return tb;
tb                512 drivers/thunderbolt/tb.h static inline void tb_domain_put(struct tb *tb)
tb                514 drivers/thunderbolt/tb.h 	put_device(&tb->dev);
tb                517 drivers/thunderbolt/tb.h struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
tb                519 drivers/thunderbolt/tb.h struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
tb                526 drivers/thunderbolt/tb.h int tb_switch_reset(struct tb *tb, u64 route);
tb                528 drivers/thunderbolt/tb.h struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
tb                530 drivers/thunderbolt/tb.h struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
tb                531 drivers/thunderbolt/tb.h struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
tb                622 drivers/thunderbolt/tb.h struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
tb                656 drivers/thunderbolt/tb.h bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
tb                658 drivers/thunderbolt/tb.h struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
tb                663 drivers/thunderbolt/tb.h struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
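
Note: tb_domain_get()/tb_domain_put() above are thin wrappers around get_device()/put_device() on the domain's embedded struct device. A minimal sketch of the borrowing pattern xdomain.c builds on them (the carrier struct and function names here are illustrative; compare the xw->tb = tb_domain_get(tb) and tb_domain_put(tb) lines later in this listing):

	struct my_async_ctx {			/* hypothetical carrier */
		struct tb *tb;
	};

	static void my_schedule(struct tb *tb, struct my_async_ctx *ctx)
	{
		ctx->tb = tb_domain_get(tb);	/* pin the domain for the worker */
		/* ... queue the deferred work ... */
	}

	static void my_finish(struct my_async_ctx *ctx)
	{
		tb_domain_put(ctx->tb);		/* drop the reference when done */
	}
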
tb                 38 drivers/thunderbolt/tunnel.c 		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
tb                 56 drivers/thunderbolt/tunnel.c static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tb                 72 drivers/thunderbolt/tunnel.c 	tunnel->tb = tb;
tb                116 drivers/thunderbolt/tunnel.c struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
tb                124 drivers/thunderbolt/tunnel.c 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
tb                193 drivers/thunderbolt/tunnel.c struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
tb                199 drivers/thunderbolt/tunnel.c 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
tb                207 drivers/thunderbolt/tunnel.c 	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
tb                216 drivers/thunderbolt/tunnel.c 	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
tb                349 drivers/thunderbolt/tunnel.c struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
tb                358 drivers/thunderbolt/tunnel.c 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
tb                428 drivers/thunderbolt/tunnel.c struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tb                438 drivers/thunderbolt/tunnel.c 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
tb                449 drivers/thunderbolt/tunnel.c 	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
tb                456 drivers/thunderbolt/tunnel.c 	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
tb                463 drivers/thunderbolt/tunnel.c 	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
tb                526 drivers/thunderbolt/tunnel.c struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tb                535 drivers/thunderbolt/tunnel.c 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
tb                545 drivers/thunderbolt/tunnel.c 	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
tb                554 drivers/thunderbolt/tunnel.c 	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
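
Note: assembling the tunnel.c fragments, a PCIe tunnel is a two-path object with one path per direction between the same pair of hop IDs. A commented sketch of that shape, with error handling elided; the path names and the paths[] bookkeeping are assumptions, everything else is taken from the calls shown above:

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	/* downstream adapter -> upstream adapter */
	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");		/* name assumed */
	/* ... store as tunnel->paths[0] ... */

	/* and the mirror path back */
	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");		/* name assumed */
	/* ... store as tunnel->paths[1] ... */
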
tb                 34 drivers/thunderbolt/tunnel.h 	struct tb *tb;
tb                 45 drivers/thunderbolt/tunnel.h struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
tb                 46 drivers/thunderbolt/tunnel.h struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
tb                 48 drivers/thunderbolt/tunnel.h struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
tb                 49 drivers/thunderbolt/tunnel.h struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tb                 51 drivers/thunderbolt/tunnel.h struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tb                 28 drivers/thunderbolt/xdomain.c 	struct tb *tb;
tb                127 drivers/thunderbolt/xdomain.c 	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
tb                181 drivers/thunderbolt/xdomain.c 	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
tb                376 drivers/thunderbolt/xdomain.c static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
tb                509 drivers/thunderbolt/xdomain.c 	struct tb *tb = xw->tb;
tb                510 drivers/thunderbolt/xdomain.c 	struct tb_ctl *ctl = tb->ctl;
tb                520 drivers/thunderbolt/xdomain.c 	mutex_lock(&tb->lock);
tb                521 drivers/thunderbolt/xdomain.c 	if (tb->root_switch)
tb                522 drivers/thunderbolt/xdomain.c 		uuid = tb->root_switch->uuid;
tb                525 drivers/thunderbolt/xdomain.c 	mutex_unlock(&tb->lock);
tb                534 drivers/thunderbolt/xdomain.c 		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
tb                550 drivers/thunderbolt/xdomain.c 		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
tb                552 drivers/thunderbolt/xdomain.c 			queue_delayed_work(tb->wq, &xd->get_properties_work,
tb                572 drivers/thunderbolt/xdomain.c 		tb_warn(tb, "failed to send XDomain response for %#x\n",
tb                580 drivers/thunderbolt/xdomain.c 	tb_domain_put(tb);
tb                584 drivers/thunderbolt/xdomain.c tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
tb                599 drivers/thunderbolt/xdomain.c 	xw->tb = tb_domain_get(tb);
tb                890 drivers/thunderbolt/xdomain.c 		tb_domain_approve_xdomain_paths(xd->tb, xd);
tb                898 drivers/thunderbolt/xdomain.c 	struct tb *tb = xd->tb;
tb                902 drivers/thunderbolt/xdomain.c 	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
tb                905 drivers/thunderbolt/xdomain.c 			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
tb                937 drivers/thunderbolt/xdomain.c 	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
tb                939 drivers/thunderbolt/xdomain.c 	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
tb                948 drivers/thunderbolt/xdomain.c 	struct tb *tb = xd->tb;
tb                954 drivers/thunderbolt/xdomain.c 	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
tb                959 drivers/thunderbolt/xdomain.c 			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
tb               1043 drivers/thunderbolt/xdomain.c 	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
tb               1047 drivers/thunderbolt/xdomain.c 			queue_delayed_work(xd->tb->wq,
tb               1154 drivers/thunderbolt/xdomain.c 		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
tb               1158 drivers/thunderbolt/xdomain.c 		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
tb               1160 drivers/thunderbolt/xdomain.c 		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
tb               1219 drivers/thunderbolt/xdomain.c struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
tb               1229 drivers/thunderbolt/xdomain.c 	xd->tb = tb;
tb               1256 drivers/thunderbolt/xdomain.c 	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
tb               1360 drivers/thunderbolt/xdomain.c 	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
tb               1389 drivers/thunderbolt/xdomain.c 		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
tb               1453 drivers/thunderbolt/xdomain.c struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
tb               1461 drivers/thunderbolt/xdomain.c 	xd = switch_find_xdomain(tb->root_switch, &lookup);
tb               1482 drivers/thunderbolt/xdomain.c struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
tb               1492 drivers/thunderbolt/xdomain.c 	xd = switch_find_xdomain(tb->root_switch, &lookup);
tb               1511 drivers/thunderbolt/xdomain.c struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
tb               1519 drivers/thunderbolt/xdomain.c 	xd = switch_find_xdomain(tb->root_switch, &lookup);
tb               1524 drivers/thunderbolt/xdomain.c bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
tb               1546 drivers/thunderbolt/xdomain.c 			return tb_xdp_schedule_request(tb, hdr, size);
tb               1602 drivers/thunderbolt/xdomain.c 		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
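
Note: the xdomain workers above share one retry idiom: a soft failure re-queues the same delayed work on the domain's workqueue while retries remain. A sketch of that idiom under stated assumptions (the 100 ms interval and the exact retry bookkeeping are assumptions; the request call, counter field, and queueing calls are taken from the lines above):

	static void my_get_uuid(struct work_struct *work)
	{
		struct tb_xdomain *xd = container_of(work, typeof(*xd),
						     get_uuid_work.work);
		uuid_t uuid;
		int ret;

		ret = tb_xdp_uuid_request(xd->tb->ctl, xd->route,
					  xd->uuid_retries, &uuid);
		if (ret < 0 && xd->uuid_retries-- > 0) {
			/* soft failure: try again shortly */
			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
					   msecs_to_jiffies(100));
			return;
		}
		/* ... either record the UUID or give up ... */
	}
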
tb                319 drivers/tty/tty_buffer.c 		struct tty_buffer *tb = port->buf.tail;
tb                322 drivers/tty/tty_buffer.c 		memcpy(char_buf_ptr(tb, tb->used), chars, space);
tb                323 drivers/tty/tty_buffer.c 		if (~tb->flags & TTYB_NORMAL)
tb                324 drivers/tty/tty_buffer.c 			memset(flag_buf_ptr(tb, tb->used), flag, space);
tb                325 drivers/tty/tty_buffer.c 		tb->used += space;
tb                354 drivers/tty/tty_buffer.c 		struct tty_buffer *tb = port->buf.tail;
tb                357 drivers/tty/tty_buffer.c 		memcpy(char_buf_ptr(tb, tb->used), chars, space);
tb                358 drivers/tty/tty_buffer.c 		memcpy(flag_buf_ptr(tb, tb->used), flags, space);
tb                359 drivers/tty/tty_buffer.c 		tb->used += space;
tb                381 drivers/tty/tty_buffer.c 	struct tty_buffer *tb;
tb                387 drivers/tty/tty_buffer.c 	tb = port->buf.tail;
tb                388 drivers/tty/tty_buffer.c 	if (~tb->flags & TTYB_NORMAL)
tb                389 drivers/tty/tty_buffer.c 		*flag_buf_ptr(tb, tb->used) = flag;
tb                390 drivers/tty/tty_buffer.c 	*char_buf_ptr(tb, tb->used++) = ch;
tb                435 drivers/tty/tty_buffer.c 		struct tty_buffer *tb = port->buf.tail;
tb                436 drivers/tty/tty_buffer.c 		*chars = char_buf_ptr(tb, tb->used);
tb                437 drivers/tty/tty_buffer.c 		if (~tb->flags & TTYB_NORMAL)
tb                438 drivers/tty/tty_buffer.c 			memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
tb                439 drivers/tty/tty_buffer.c 		tb->used += space;
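
Note: the tty_buffer.c fragments above implement the receive-side flip buffers; drivers normally feed them through the insert/push helpers rather than touching char_buf_ptr()/flag_buf_ptr() directly. A minimal sketch of that producer side (my_rx() and its caller-supplied buffer are illustrative; the tty APIs used are the standard ones):

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static void my_rx(struct tty_port *port, const unsigned char *buf,
			  size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)
			tty_insert_flip_char(port, buf[i], TTY_NORMAL);
		tty_flip_buffer_push(port);	/* hand data to the ldisc */
	}
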
tb                230 fs/nfsd/nfs4xdr.c 	struct svcxdr_tmpbuf *tb;
tb                232 fs/nfsd/nfs4xdr.c 	tb = kmalloc(sizeof(*tb) + len, GFP_KERNEL);
tb                233 fs/nfsd/nfs4xdr.c 	if (!tb)
tb                235 fs/nfsd/nfs4xdr.c 	tb->next = argp->to_free;
tb                236 fs/nfsd/nfs4xdr.c 	argp->to_free = tb;
tb                237 fs/nfsd/nfs4xdr.c 	return tb->buf;
tb               4544 fs/nfsd/nfs4xdr.c 		struct svcxdr_tmpbuf *tb = args->to_free;
tb               4545 fs/nfsd/nfs4xdr.c 		args->to_free = tb->next;
tb               4546 fs/nfsd/nfs4xdr.c 		kfree(tb);
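
Note: the nfs4xdr.c fragments above show a tidy idiom: one allocation carries both a list link and its payload, and teardown walks the chain freeing each node. A self-contained userspace model of the same pattern (names are illustrative, not nfsd's):

	#include <stdlib.h>

	struct tmpbuf {
		struct tmpbuf *next;
		char buf[];		/* payload follows the header */
	};

	static void *tmpalloc(struct tmpbuf **to_free, size_t len)
	{
		struct tmpbuf *tb = malloc(sizeof(*tb) + len);

		if (!tb)
			return NULL;
		tb->next = *to_free;	/* push onto the free-at-end chain */
		*to_free = tb;
		return tb->buf;
	}

	static void tmpfree_all(struct tmpbuf **to_free)
	{
		while (*to_free) {
			struct tmpbuf *tb = *to_free;

			*to_free = tb->next;
			free(tb);
		}
	}
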
tb               1286 fs/ntfs/mft.c  	u8 *b, tb;
tb               1333 fs/ntfs/mft.c  	tb = 1 << (lcn & 7ull);
tb               1335 fs/ntfs/mft.c  	if (*b != 0xff && !(*b & tb)) {
tb               1337 fs/ntfs/mft.c  		*b |= tb;
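
Note: the ntfs mft.c fragment above marks a cluster in a byte-wide bitmap: compute the bit for lcn within its byte, then set it only if the byte is not already full and the bit is clear. A userspace model of that test-and-set (the bitmap indexing and return convention are illustrative):

	#include <stdint.h>

	static int mark_cluster_used(uint8_t *bitmap, uint64_t lcn)
	{
		uint8_t *b = &bitmap[lcn >> 3];
		uint8_t tb = 1u << (lcn & 7);	/* bit within the byte */

		if (*b != 0xff && !(*b & tb)) {
			*b |= tb;		/* claim the cluster */
			return 0;
		}
		return -1;			/* already in use */
	}
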
tb                 19 fs/reiserfs/do_balan.c static inline void buffer_info_init_left(struct tree_balance *tb,
tb                 22 fs/reiserfs/do_balan.c 	bi->tb          = tb;
tb                 23 fs/reiserfs/do_balan.c 	bi->bi_bh       = tb->L[0];
tb                 24 fs/reiserfs/do_balan.c 	bi->bi_parent   = tb->FL[0];
tb                 25 fs/reiserfs/do_balan.c 	bi->bi_position = get_left_neighbor_position(tb, 0);
tb                 28 fs/reiserfs/do_balan.c static inline void buffer_info_init_right(struct tree_balance *tb,
tb                 31 fs/reiserfs/do_balan.c 	bi->tb          = tb;
tb                 32 fs/reiserfs/do_balan.c 	bi->bi_bh       = tb->R[0];
tb                 33 fs/reiserfs/do_balan.c 	bi->bi_parent   = tb->FR[0];
tb                 34 fs/reiserfs/do_balan.c 	bi->bi_position = get_right_neighbor_position(tb, 0);
tb                 37 fs/reiserfs/do_balan.c static inline void buffer_info_init_tbS0(struct tree_balance *tb,
tb                 40 fs/reiserfs/do_balan.c 	bi->tb          = tb;
tb                 41 fs/reiserfs/do_balan.c 	bi->bi_bh        = PATH_PLAST_BUFFER(tb->tb_path);
tb                 42 fs/reiserfs/do_balan.c 	bi->bi_parent   = PATH_H_PPARENT(tb->tb_path, 0);
tb                 43 fs/reiserfs/do_balan.c 	bi->bi_position = PATH_H_POSITION(tb->tb_path, 1);
tb                 46 fs/reiserfs/do_balan.c static inline void buffer_info_init_bh(struct tree_balance *tb,
tb                 50 fs/reiserfs/do_balan.c 	bi->tb          = tb;
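
Note: the buffer_info_init_*() helpers above all fill the same small context: the balance descriptor, the target buffer, its parent, and the position within the parent. Call sites then pair an init with an insert or paste, as in this sketch assembled from the fragments below (the position argument is a placeholder; real callers compute it from tb->item_pos, tb->lnum[0], and the shift result):

	struct buffer_info bi;

	buffer_info_init_left(tb, &bi);		/* target is tb->L[0] */
	leaf_insert_into_buf(&bi, pos, ih, body, tb->zeroes_num);
	tb->insert_size[0] = 0;
	tb->zeroes_num = 0;
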
tb                 56 fs/reiserfs/do_balan.c inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
tb                 59 fs/reiserfs/do_balan.c 	journal_mark_dirty(tb->transaction_handle, bh);
tb                 77 fs/reiserfs/do_balan.c static void balance_leaf_when_delete_del(struct tree_balance *tb)
tb                 79 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                 80 fs/reiserfs/do_balan.c 	int item_pos = PATH_LAST_POSITION(tb->tb_path);
tb                 86 fs/reiserfs/do_balan.c 	RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
tb                 88 fs/reiserfs/do_balan.c 	       -tb->insert_size[0], ih);
tb                 90 fs/reiserfs/do_balan.c 	buffer_info_init_tbS0(tb, &bi);
tb                 93 fs/reiserfs/do_balan.c 	if (!item_pos && tb->CFL[0]) {
tb                 95 fs/reiserfs/do_balan.c 			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
tb                 97 fs/reiserfs/do_balan.c 			if (!PATH_H_POSITION(tb->tb_path, 1))
tb                 98 fs/reiserfs/do_balan.c 				replace_key(tb, tb->CFL[0], tb->lkey[0],
tb                 99 fs/reiserfs/do_balan.c 					    PATH_H_PPARENT(tb->tb_path, 0), 0);
tb                103 fs/reiserfs/do_balan.c 	RFALSE(!item_pos && !tb->CFL[0],
tb                104 fs/reiserfs/do_balan.c 	       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
tb                105 fs/reiserfs/do_balan.c 	       tb->L[0]);
tb                109 fs/reiserfs/do_balan.c static void balance_leaf_when_delete_cut(struct tree_balance *tb)
tb                111 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                112 fs/reiserfs/do_balan.c 	int item_pos = PATH_LAST_POSITION(tb->tb_path);
tb                114 fs/reiserfs/do_balan.c 	int pos_in_item = tb->tb_path->pos_in_item;
tb                116 fs/reiserfs/do_balan.c 	buffer_info_init_tbS0(tb, &bi);
tb                126 fs/reiserfs/do_balan.c 		tb->insert_size[0] = -1;
tb                128 fs/reiserfs/do_balan.c 				     -tb->insert_size[0]);
tb                130 fs/reiserfs/do_balan.c 		RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
tb                132 fs/reiserfs/do_balan.c 		       tb->CFL[0]);
tb                134 fs/reiserfs/do_balan.c 		if (!item_pos && !pos_in_item && tb->CFL[0])
tb                135 fs/reiserfs/do_balan.c 			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
tb                138 fs/reiserfs/do_balan.c 				     -tb->insert_size[0]);
tb                146 fs/reiserfs/do_balan.c static int balance_leaf_when_delete_left(struct tree_balance *tb)
tb                148 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                152 fs/reiserfs/do_balan.c 	if (tb->lnum[0] == -1) {
tb                154 fs/reiserfs/do_balan.c 		if (tb->rnum[0] == -1) {
tb                155 fs/reiserfs/do_balan.c 			if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
tb                160 fs/reiserfs/do_balan.c 				if (PATH_H_POSITION(tb->tb_path, 1) == 0 &&
tb                161 fs/reiserfs/do_balan.c 				    1 < B_NR_ITEMS(tb->FR[0]))
tb                162 fs/reiserfs/do_balan.c 					replace_key(tb, tb->CFL[0],
tb                163 fs/reiserfs/do_balan.c 						    tb->lkey[0], tb->FR[0], 1);
tb                165 fs/reiserfs/do_balan.c 				leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1,
tb                167 fs/reiserfs/do_balan.c 				leaf_move_items(LEAF_FROM_R_TO_L, tb,
tb                168 fs/reiserfs/do_balan.c 						B_NR_ITEMS(tb->R[0]), -1,
tb                171 fs/reiserfs/do_balan.c 				reiserfs_invalidate_buffer(tb, tbS0);
tb                172 fs/reiserfs/do_balan.c 				reiserfs_invalidate_buffer(tb, tb->R[0]);
tb                178 fs/reiserfs/do_balan.c 			leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL);
tb                179 fs/reiserfs/do_balan.c 			leaf_move_items(LEAF_FROM_L_TO_R, tb,
tb                180 fs/reiserfs/do_balan.c 					B_NR_ITEMS(tb->L[0]), -1, NULL);
tb                183 fs/reiserfs/do_balan.c 			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
tb                185 fs/reiserfs/do_balan.c 			reiserfs_invalidate_buffer(tb, tbS0);
tb                186 fs/reiserfs/do_balan.c 			reiserfs_invalidate_buffer(tb, tb->L[0]);
tb                191 fs/reiserfs/do_balan.c 		RFALSE(tb->rnum[0] != 0,
tb                192 fs/reiserfs/do_balan.c 		       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
tb                194 fs/reiserfs/do_balan.c 		leaf_shift_left(tb, n, -1);
tb                196 fs/reiserfs/do_balan.c 		reiserfs_invalidate_buffer(tb, tbS0);
tb                206 fs/reiserfs/do_balan.c 	RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
tb                207 fs/reiserfs/do_balan.c 	       (tb->lnum[0] + tb->rnum[0] > n + 1),
tb                210 fs/reiserfs/do_balan.c 	       tb->rnum[0], tb->lnum[0], n);
tb                211 fs/reiserfs/do_balan.c 	RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
tb                212 fs/reiserfs/do_balan.c 	       (tb->lbytes != -1 || tb->rbytes != -1),
tb                215 fs/reiserfs/do_balan.c 	       tb->rbytes, tb->lbytes);
tb                216 fs/reiserfs/do_balan.c 	RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
tb                217 fs/reiserfs/do_balan.c 	       (tb->lbytes < 1 || tb->rbytes != -1),
tb                220 fs/reiserfs/do_balan.c 	       tb->rbytes, tb->lbytes);
tb                222 fs/reiserfs/do_balan.c 	leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
tb                223 fs/reiserfs/do_balan.c 	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
tb                225 fs/reiserfs/do_balan.c 	reiserfs_invalidate_buffer(tb, tbS0);
tb                239 fs/reiserfs/do_balan.c static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
tb                241 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                245 fs/reiserfs/do_balan.c 	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
tb                246 fs/reiserfs/do_balan.c 	       "vs- 12000: level: wrong FR %z", tb->FR[0]);
tb                247 fs/reiserfs/do_balan.c 	RFALSE(tb->blknum[0] > 1,
tb                248 fs/reiserfs/do_balan.c 	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
tb                249 fs/reiserfs/do_balan.c 	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
tb                252 fs/reiserfs/do_balan.c 	buffer_info_init_tbS0(tb, &bi);
tb                258 fs/reiserfs/do_balan.c 		balance_leaf_when_delete_del(tb);
tb                260 fs/reiserfs/do_balan.c 		balance_leaf_when_delete_cut(tb);
tb                271 fs/reiserfs/do_balan.c 	if (tb->lnum[0])
tb                272 fs/reiserfs/do_balan.c 		return balance_leaf_when_delete_left(tb);
tb                274 fs/reiserfs/do_balan.c 	if (tb->rnum[0] == -1) {
tb                276 fs/reiserfs/do_balan.c 		leaf_shift_right(tb, n, -1);
tb                277 fs/reiserfs/do_balan.c 		reiserfs_invalidate_buffer(tb, tbS0);
tb                281 fs/reiserfs/do_balan.c 	RFALSE(tb->rnum[0],
tb                282 fs/reiserfs/do_balan.c 	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
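
Note: balance_leaf_when_delete() above dispatches to the _del and _cut helpers before handling the neighbor shifts. The selector is not shown in this listing; assuming the usual reiserfs mode flags, the dispatch plausibly reads:

	if (flag == M_DELETE)			/* whole item goes away */
		balance_leaf_when_delete_del(tb);
	else if (flag == M_CUT)			/* item is truncated */
		balance_leaf_when_delete_cut(tb);
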
tb                286 fs/reiserfs/do_balan.c static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
tb                292 fs/reiserfs/do_balan.c 	int n = B_NR_ITEMS(tb->L[0]);
tb                295 fs/reiserfs/do_balan.c 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
tb                299 fs/reiserfs/do_balan.c 		ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
tb                302 fs/reiserfs/do_balan.c 		new_item_len = ih_item_len(ih) - tb->lbytes;
tb                312 fs/reiserfs/do_balan.c 		buffer_info_init_left(tb, &bi);
tb                313 fs/reiserfs/do_balan.c 		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
tb                314 fs/reiserfs/do_balan.c 			     min_t(int, tb->zeroes_num, ih_item_len(ih)));
tb                322 fs/reiserfs/do_balan.c 			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
tb                324 fs/reiserfs/do_balan.c 		add_le_ih_k_offset(ih, tb->lbytes << shift);
tb                327 fs/reiserfs/do_balan.c 		if (tb->lbytes > tb->zeroes_num) {
tb                328 fs/reiserfs/do_balan.c 			body_shift_bytes = tb->lbytes - tb->zeroes_num;
tb                329 fs/reiserfs/do_balan.c 			tb->zeroes_num = 0;
tb                331 fs/reiserfs/do_balan.c 			tb->zeroes_num -= tb->lbytes;
tb                339 fs/reiserfs/do_balan.c 		ret = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
tb                342 fs/reiserfs/do_balan.c 		buffer_info_init_left(tb, &bi);
tb                343 fs/reiserfs/do_balan.c 		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
tb                344 fs/reiserfs/do_balan.c 				     tb->zeroes_num);
tb                345 fs/reiserfs/do_balan.c 		tb->insert_size[0] = 0;
tb                346 fs/reiserfs/do_balan.c 		tb->zeroes_num = 0;
tb                351 fs/reiserfs/do_balan.c static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
tb                355 fs/reiserfs/do_balan.c 	int n = B_NR_ITEMS(tb->L[0]);
tb                358 fs/reiserfs/do_balan.c 	RFALSE(tb->zeroes_num,
tb                362 fs/reiserfs/do_balan.c 	if (tb->lbytes > tb->pos_in_item) {
tb                365 fs/reiserfs/do_balan.c 		int ret, l_pos_in_item = tb->pos_in_item;
tb                371 fs/reiserfs/do_balan.c 		ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1);
tb                372 fs/reiserfs/do_balan.c 		if (ret && !tb->item_pos) {
tb                373 fs/reiserfs/do_balan.c 			pasted = item_head(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
tb                375 fs/reiserfs/do_balan.c 					 (tb->lbytes - 1);
tb                379 fs/reiserfs/do_balan.c 		buffer_info_init_left(tb, &bi);
tb                380 fs/reiserfs/do_balan.c 		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
tb                381 fs/reiserfs/do_balan.c 				     l_pos_in_item, tb->insert_size[0],
tb                382 fs/reiserfs/do_balan.c 				     body, tb->zeroes_num);
tb                395 fs/reiserfs/do_balan.c 		leaf_paste_entries(&bi, n + tb->item_pos - ret,
tb                398 fs/reiserfs/do_balan.c 				   body + DEH_SIZE, tb->insert_size[0]);
tb                399 fs/reiserfs/do_balan.c 		tb->insert_size[0] = 0;
tb                406 fs/reiserfs/do_balan.c 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
tb                410 fs/reiserfs/do_balan.c 	tb->pos_in_item -= tb->lbytes;
tb                413 fs/reiserfs/do_balan.c static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
tb                417 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                418 fs/reiserfs/do_balan.c 	int n = B_NR_ITEMS(tb->L[0]);
tb                422 fs/reiserfs/do_balan.c 	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
tb                423 fs/reiserfs/do_balan.c 		balance_leaf_paste_left_shift_dirent(tb, ih, body);
tb                427 fs/reiserfs/do_balan.c 	RFALSE(tb->lbytes <= 0,
tb                429 fs/reiserfs/do_balan.c 	       "lbytes=%d", tb->lbytes);
tb                430 fs/reiserfs/do_balan.c 	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
tb                433 fs/reiserfs/do_balan.c 	       ih_item_len(item_head(tbS0, tb->item_pos)), tb->pos_in_item);
tb                436 fs/reiserfs/do_balan.c 	if (tb->lbytes >= tb->pos_in_item) {
tb                442 fs/reiserfs/do_balan.c 		tbS0_pos_ih = item_head(tbS0, tb->item_pos);
tb                449 fs/reiserfs/do_balan.c 		l_n = tb->lbytes - tb->pos_in_item;
tb                452 fs/reiserfs/do_balan.c 		tb->insert_size[0] -= l_n;
tb                454 fs/reiserfs/do_balan.c 		RFALSE(tb->insert_size[0] <= 0,
tb                456 fs/reiserfs/do_balan.c 		       "L[0]. insert_size=%d", tb->insert_size[0]);
tb                458 fs/reiserfs/do_balan.c 		ret = leaf_shift_left(tb, tb->lnum[0],
tb                461 fs/reiserfs/do_balan.c 		tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret);
tb                464 fs/reiserfs/do_balan.c 		buffer_info_init_left(tb, &bi);
tb                465 fs/reiserfs/do_balan.c 		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
tb                467 fs/reiserfs/do_balan.c 				     min_t(int, l_n, tb->zeroes_num));
tb                478 fs/reiserfs/do_balan.c 		       leaf_key(tb->L[0], n + tb->item_pos - ret)),
tb                482 fs/reiserfs/do_balan.c 			int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
tb                490 fs/reiserfs/do_balan.c 		left_delim_key = internal_key(tb->CFL[0], tb->lkey[0]);
tb                497 fs/reiserfs/do_balan.c 		if (l_n > tb->zeroes_num) {
tb                498 fs/reiserfs/do_balan.c 			body_shift_bytes = l_n - tb->zeroes_num;
tb                499 fs/reiserfs/do_balan.c 			tb->zeroes_num = 0;
tb                501 fs/reiserfs/do_balan.c 			tb->zeroes_num -= l_n;
tb                502 fs/reiserfs/do_balan.c 		tb->pos_in_item = 0;
tb                505 fs/reiserfs/do_balan.c 					  leaf_key(tb->L[0],
tb                506 fs/reiserfs/do_balan.c 						 B_NR_ITEMS(tb->L[0]) - 1)) ||
tb                515 fs/reiserfs/do_balan.c 		tb->pos_in_item -= tb->lbytes;
tb                517 fs/reiserfs/do_balan.c 		RFALSE(tb->pos_in_item <= 0,
tb                519 fs/reiserfs/do_balan.c 		       tb->pos_in_item);
tb                525 fs/reiserfs/do_balan.c 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
tb                532 fs/reiserfs/do_balan.c static void balance_leaf_paste_left_whole(struct tree_balance *tb,
tb                536 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                537 fs/reiserfs/do_balan.c 	int n = B_NR_ITEMS(tb->L[0]);
tb                543 fs/reiserfs/do_balan.c 	if (!tb->item_pos &&
tb                549 fs/reiserfs/do_balan.c 		pasted = item_head(tb->L[0], n - 1);
tb                551 fs/reiserfs/do_balan.c 			tb->pos_in_item += ih_entry_count(pasted);
tb                553 fs/reiserfs/do_balan.c 			tb->pos_in_item += ih_item_len(pasted);
tb                560 fs/reiserfs/do_balan.c 	ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
tb                563 fs/reiserfs/do_balan.c 	buffer_info_init_left(tb, &bi);
tb                564 fs/reiserfs/do_balan.c 	leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item,
tb                565 fs/reiserfs/do_balan.c 			     tb->insert_size[0], body, tb->zeroes_num);
tb                568 fs/reiserfs/do_balan.c 	pasted = item_head(tb->L[0], n + tb->item_pos - ret);
tb                570 fs/reiserfs/do_balan.c 		leaf_paste_entries(&bi, n + tb->item_pos - ret,
tb                571 fs/reiserfs/do_balan.c 				   tb->pos_in_item, 1,
tb                573 fs/reiserfs/do_balan.c 				   body + DEH_SIZE, tb->insert_size[0]);
tb                582 fs/reiserfs/do_balan.c 	tb->insert_size[0] = 0;
tb                583 fs/reiserfs/do_balan.c 	tb->zeroes_num = 0;
tb                586 fs/reiserfs/do_balan.c static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
tb                591 fs/reiserfs/do_balan.c 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
tb                592 fs/reiserfs/do_balan.c 		return balance_leaf_paste_left_shift(tb, ih, body);
tb                594 fs/reiserfs/do_balan.c 		balance_leaf_paste_left_whole(tb, ih, body);
tb                599 fs/reiserfs/do_balan.c static unsigned int balance_leaf_left(struct tree_balance *tb,
tb                603 fs/reiserfs/do_balan.c 	if (tb->lnum[0] <= 0)
tb                607 fs/reiserfs/do_balan.c 	if (tb->item_pos < tb->lnum[0]) {
tb                611 fs/reiserfs/do_balan.c 			return balance_leaf_insert_left(tb, ih, body);
tb                613 fs/reiserfs/do_balan.c 			return balance_leaf_paste_left(tb, ih, body);
tb                616 fs/reiserfs/do_balan.c 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
tb                621 fs/reiserfs/do_balan.c static void balance_leaf_insert_right(struct tree_balance *tb,
tb                626 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                631 fs/reiserfs/do_balan.c 	if (n - tb->rnum[0] >= tb->item_pos) {
tb                632 fs/reiserfs/do_balan.c 		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
tb                639 fs/reiserfs/do_balan.c 	if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
tb                645 fs/reiserfs/do_balan.c 		leaf_shift_right(tb, tb->rnum[0] - 1, -1);
tb                657 fs/reiserfs/do_balan.c 			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
tb                658 fs/reiserfs/do_balan.c 		offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << shift);
tb                660 fs/reiserfs/do_balan.c 		put_ih_item_len(ih, tb->rbytes);
tb                663 fs/reiserfs/do_balan.c 		buffer_info_init_right(tb, &bi);
tb                664 fs/reiserfs/do_balan.c 		if ((old_len - tb->rbytes) > tb->zeroes_num) {
tb                666 fs/reiserfs/do_balan.c 			r_body = body + (old_len - tb->rbytes) - tb->zeroes_num;
tb                669 fs/reiserfs/do_balan.c 			r_zeroes_number = tb->zeroes_num -
tb                670 fs/reiserfs/do_balan.c 					  (old_len - tb->rbytes);
tb                671 fs/reiserfs/do_balan.c 			tb->zeroes_num -= r_zeroes_number;
tb                677 fs/reiserfs/do_balan.c 		replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
tb                684 fs/reiserfs/do_balan.c 		put_ih_item_len(ih, old_len - tb->rbytes);
tb                686 fs/reiserfs/do_balan.c 		tb->insert_size[0] -= tb->rbytes;
tb                692 fs/reiserfs/do_balan.c 		leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
tb                695 fs/reiserfs/do_balan.c 		buffer_info_init_right(tb, &bi);
tb                696 fs/reiserfs/do_balan.c 		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1,
tb                697 fs/reiserfs/do_balan.c 				     ih, body, tb->zeroes_num);
tb                699 fs/reiserfs/do_balan.c 		if (tb->item_pos - n + tb->rnum[0] - 1 == 0)
tb                700 fs/reiserfs/do_balan.c 			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
tb                702 fs/reiserfs/do_balan.c 		tb->zeroes_num = tb->insert_size[0] = 0;
tb                707 fs/reiserfs/do_balan.c static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
tb                711 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                715 fs/reiserfs/do_balan.c 	RFALSE(tb->zeroes_num,
tb                717 fs/reiserfs/do_balan.c 	entry_count = ih_entry_count(item_head(tbS0, tb->item_pos));
tb                720 fs/reiserfs/do_balan.c 	if (entry_count - tb->rbytes < tb->pos_in_item) {
tb                723 fs/reiserfs/do_balan.c 		RFALSE(tb->rbytes - 1 >= entry_count || !tb->insert_size[0],
tb                725 fs/reiserfs/do_balan.c 		       "rbytes=%d, entry_count=%d", tb->rbytes, entry_count);
tb                732 fs/reiserfs/do_balan.c 		leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
tb                735 fs/reiserfs/do_balan.c 		paste_entry_position = tb->pos_in_item - entry_count +
tb                736 fs/reiserfs/do_balan.c 				       tb->rbytes - 1;
tb                737 fs/reiserfs/do_balan.c 		buffer_info_init_right(tb, &bi);
tb                739 fs/reiserfs/do_balan.c 				     tb->insert_size[0], body, tb->zeroes_num);
tb                744 fs/reiserfs/do_balan.c 				   body + DEH_SIZE, tb->insert_size[0]);
tb                748 fs/reiserfs/do_balan.c 			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
tb                750 fs/reiserfs/do_balan.c 		tb->insert_size[0] = 0;
tb                751 fs/reiserfs/do_balan.c 		tb->pos_in_item++;
tb                754 fs/reiserfs/do_balan.c 		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
tb                758 fs/reiserfs/do_balan.c static void balance_leaf_paste_right_shift(struct tree_balance *tb,
tb                762 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                769 fs/reiserfs/do_balan.c 	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
tb                770 fs/reiserfs/do_balan.c 		balance_leaf_paste_right_shift_dirent(tb, ih, body);
tb                780 fs/reiserfs/do_balan.c 	n_shift = tb->rbytes - tb->insert_size[0];
tb                784 fs/reiserfs/do_balan.c 	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
tb                786 fs/reiserfs/do_balan.c 	       "pos_in_item=%d", tb->pos_in_item,
tb                787 fs/reiserfs/do_balan.c 	       ih_item_len(item_head(tbS0, tb->item_pos)));
tb                789 fs/reiserfs/do_balan.c 	leaf_shift_right(tb, tb->rnum[0], n_shift);
tb                795 fs/reiserfs/do_balan.c 	n_rem = tb->insert_size[0] - tb->rbytes;
tb                801 fs/reiserfs/do_balan.c 	version = ih_version(item_head(tb->R[0], 0));
tb                803 fs/reiserfs/do_balan.c 	if (is_indirect_le_key(version, leaf_key(tb->R[0], 0))) {
tb                804 fs/reiserfs/do_balan.c 		int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
tb                808 fs/reiserfs/do_balan.c 	add_le_key_k_offset(version, leaf_key(tb->R[0], 0), temp_rem);
tb                809 fs/reiserfs/do_balan.c 	add_le_key_k_offset(version, internal_key(tb->CFR[0], tb->rkey[0]),
tb                812 fs/reiserfs/do_balan.c 	do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
tb                815 fs/reiserfs/do_balan.c 	buffer_info_init_right(tb, &bi);
tb                816 fs/reiserfs/do_balan.c 	if (n_rem > tb->zeroes_num) {
tb                818 fs/reiserfs/do_balan.c 		r_body = body + n_rem - tb->zeroes_num;
tb                821 fs/reiserfs/do_balan.c 		r_zeroes_number = tb->zeroes_num - n_rem;
tb                822 fs/reiserfs/do_balan.c 		tb->zeroes_num -= r_zeroes_number;
tb                825 fs/reiserfs/do_balan.c 	leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
tb                828 fs/reiserfs/do_balan.c 	if (is_indirect_le_ih(item_head(tb->R[0], 0)))
tb                829 fs/reiserfs/do_balan.c 		set_ih_free_space(item_head(tb->R[0], 0), 0);
tb                831 fs/reiserfs/do_balan.c 	tb->insert_size[0] = n_rem;
tb                833 fs/reiserfs/do_balan.c 		tb->pos_in_item++;
tb                836 fs/reiserfs/do_balan.c static void balance_leaf_paste_right_whole(struct tree_balance *tb,
tb                840 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                845 fs/reiserfs/do_balan.c 	buffer_info_init_right(tb, &bi);
tb                846 fs/reiserfs/do_balan.c 	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
tb                849 fs/reiserfs/do_balan.c 	if (tb->pos_in_item >= 0) {
tb                850 fs/reiserfs/do_balan.c 		buffer_info_init_right(tb, &bi);
tb                851 fs/reiserfs/do_balan.c 		leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0],
tb                852 fs/reiserfs/do_balan.c 				     tb->pos_in_item, tb->insert_size[0], body,
tb                853 fs/reiserfs/do_balan.c 				     tb->zeroes_num);
tb                857 fs/reiserfs/do_balan.c 	pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]);
tb                858 fs/reiserfs/do_balan.c 	if (is_direntry_le_ih(pasted) && tb->pos_in_item >= 0) {
tb                859 fs/reiserfs/do_balan.c 		leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0],
tb                860 fs/reiserfs/do_balan.c 				   tb->pos_in_item, 1,
tb                862 fs/reiserfs/do_balan.c 				   body + DEH_SIZE, tb->insert_size[0]);
tb                864 fs/reiserfs/do_balan.c 		if (!tb->pos_in_item) {
tb                866 fs/reiserfs/do_balan.c 			RFALSE(tb->item_pos - n + tb->rnum[0],
tb                871 fs/reiserfs/do_balan.c 			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
tb                877 fs/reiserfs/do_balan.c 	tb->zeroes_num = tb->insert_size[0] = 0;
tb                880 fs/reiserfs/do_balan.c static void balance_leaf_paste_right(struct tree_balance *tb,
tb                884 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                888 fs/reiserfs/do_balan.c 	if (n - tb->rnum[0] > tb->item_pos) {
tb                889 fs/reiserfs/do_balan.c 		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
tb                895 fs/reiserfs/do_balan.c 	if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1)
tb                897 fs/reiserfs/do_balan.c 		balance_leaf_paste_right_shift(tb, ih, body);
tb                900 fs/reiserfs/do_balan.c 		balance_leaf_paste_right_whole(tb, ih, body);
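
Note: balance_leaf_paste_right() above encodes a three-way decision, reproduced here with annotations (the branch conditions and calls are taken from the fragments above; the comments and the early return are added/assumed):

	/* affected item stays entirely in S[0]: a plain shift suffices */
	if (n - tb->rnum[0] > tb->item_pos) {
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
		return;
	}

	/* boundary item, partially shifted right */
	if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1)
		balance_leaf_paste_right_shift(tb, ih, body);
	else	/* item moves to R[0] whole */
		balance_leaf_paste_right_whole(tb, ih, body);
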
tb                904 fs/reiserfs/do_balan.c static void balance_leaf_right(struct tree_balance *tb,
tb                908 fs/reiserfs/do_balan.c 	if (tb->rnum[0] <= 0)
tb                914 fs/reiserfs/do_balan.c 		balance_leaf_insert_right(tb, ih, body);
tb                916 fs/reiserfs/do_balan.c 		balance_leaf_paste_right(tb, ih, body);
tb                919 fs/reiserfs/do_balan.c static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
tb                926 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                932 fs/reiserfs/do_balan.c 	if (n - tb->snum[i] >= tb->item_pos) {
tb                933 fs/reiserfs/do_balan.c 		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
tb                934 fs/reiserfs/do_balan.c 				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
tb                941 fs/reiserfs/do_balan.c 	if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
tb                946 fs/reiserfs/do_balan.c 		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
tb                947 fs/reiserfs/do_balan.c 				tb->S_new[i]);
tb                959 fs/reiserfs/do_balan.c 			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
tb                962 fs/reiserfs/do_balan.c 				   ((old_len - tb->sbytes[i]) << shift));
tb                964 fs/reiserfs/do_balan.c 		put_ih_item_len(ih, tb->sbytes[i]);
tb                967 fs/reiserfs/do_balan.c 		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
tb                969 fs/reiserfs/do_balan.c 		if ((old_len - tb->sbytes[i]) > tb->zeroes_num) {
tb                971 fs/reiserfs/do_balan.c 			r_body = body + (old_len - tb->sbytes[i]) -
tb                972 fs/reiserfs/do_balan.c 					 tb->zeroes_num;
tb                975 fs/reiserfs/do_balan.c 			r_zeroes_number = tb->zeroes_num - (old_len -
tb                976 fs/reiserfs/do_balan.c 					  tb->sbytes[i]);
tb                977 fs/reiserfs/do_balan.c 			tb->zeroes_num -= r_zeroes_number;
tb                987 fs/reiserfs/do_balan.c 		put_ih_item_len(ih, old_len - tb->sbytes[i]);
tb                988 fs/reiserfs/do_balan.c 		tb->insert_size[0] -= tb->sbytes[i];
tb                996 fs/reiserfs/do_balan.c 		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
tb                997 fs/reiserfs/do_balan.c 				tb->snum[i] - 1, tb->sbytes[i], tb->S_new[i]);
tb               1000 fs/reiserfs/do_balan.c 		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
tb               1001 fs/reiserfs/do_balan.c 		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1,
tb               1002 fs/reiserfs/do_balan.c 				     ih, body, tb->zeroes_num);
tb               1004 fs/reiserfs/do_balan.c 		tb->zeroes_num = tb->insert_size[0] = 0;
tb               1009 fs/reiserfs/do_balan.c static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
tb               1016 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1017 fs/reiserfs/do_balan.c 	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
tb               1021 fs/reiserfs/do_balan.c 	if (entry_count - tb->sbytes[i] < tb->pos_in_item &&
tb               1022 fs/reiserfs/do_balan.c 	    tb->pos_in_item <= entry_count) {
tb               1025 fs/reiserfs/do_balan.c 		RFALSE(!tb->insert_size[0],
tb               1027 fs/reiserfs/do_balan.c 		RFALSE(tb->sbytes[i] - 1 >= entry_count,
tb               1029 fs/reiserfs/do_balan.c 		       tb->sbytes[i] - 1, entry_count);
tb               1036 fs/reiserfs/do_balan.c 		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
tb               1037 fs/reiserfs/do_balan.c 				tb->sbytes[i] - 1, tb->S_new[i]);
tb               1043 fs/reiserfs/do_balan.c 		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
tb               1044 fs/reiserfs/do_balan.c 		leaf_paste_in_buffer(&bi, 0, tb->pos_in_item - entry_count +
tb               1045 fs/reiserfs/do_balan.c 				     tb->sbytes[i] - 1, tb->insert_size[0],
tb               1046 fs/reiserfs/do_balan.c 				     body, tb->zeroes_num);
tb               1049 fs/reiserfs/do_balan.c 		leaf_paste_entries(&bi, 0, tb->pos_in_item - entry_count +
tb               1050 fs/reiserfs/do_balan.c 				   tb->sbytes[i] - 1, 1,
tb               1052 fs/reiserfs/do_balan.c 				   body + DEH_SIZE, tb->insert_size[0]);
tb               1054 fs/reiserfs/do_balan.c 		tb->insert_size[0] = 0;
tb               1055 fs/reiserfs/do_balan.c 		tb->pos_in_item++;
tb               1058 fs/reiserfs/do_balan.c 		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
tb               1059 fs/reiserfs/do_balan.c 				tb->sbytes[i], tb->S_new[i]);
tb               1064 fs/reiserfs/do_balan.c static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
tb               1071 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1072 fs/reiserfs/do_balan.c 	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
tb               1081 fs/reiserfs/do_balan.c 		balance_leaf_new_nodes_paste_dirent(tb, ih, body, insert_key,
tb               1089 fs/reiserfs/do_balan.c 	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)) ||
tb               1090 fs/reiserfs/do_balan.c 	       tb->insert_size[0] <= 0,
tb               1096 fs/reiserfs/do_balan.c 	n_shift = tb->sbytes[i] - tb->insert_size[0];
tb               1099 fs/reiserfs/do_balan.c 	leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], n_shift,
tb               1100 fs/reiserfs/do_balan.c 			tb->S_new[i]);
tb               1106 fs/reiserfs/do_balan.c 	n_rem = tb->insert_size[0] - tb->sbytes[i];
tb               1111 fs/reiserfs/do_balan.c 	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
tb               1112 fs/reiserfs/do_balan.c 	if (n_rem > tb->zeroes_num) {
tb               1114 fs/reiserfs/do_balan.c 		r_body = body + n_rem - tb->zeroes_num;
tb               1117 fs/reiserfs/do_balan.c 		r_zeroes_number = tb->zeroes_num - n_rem;
tb               1118 fs/reiserfs/do_balan.c 		tb->zeroes_num -= r_zeroes_number;
tb               1121 fs/reiserfs/do_balan.c 	leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
tb               1124 fs/reiserfs/do_balan.c 	tmp = item_head(tb->S_new[i], 0);
tb               1128 fs/reiserfs/do_balan.c 		shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
tb               1132 fs/reiserfs/do_balan.c 	tb->insert_size[0] = n_rem;
tb               1134 fs/reiserfs/do_balan.c 		tb->pos_in_item++;
tb               1137 fs/reiserfs/do_balan.c static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
tb               1145 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1152 fs/reiserfs/do_balan.c 	struct item_head *ih_check = item_head(tbS0, tb->item_pos);
tb               1155 fs/reiserfs/do_balan.c 	    (tb->pos_in_item != ih_item_len(ih_check) ||
tb               1156 fs/reiserfs/do_balan.c 	    tb->insert_size[0] <= 0))
tb               1157 fs/reiserfs/do_balan.c 		reiserfs_panic(tb->tb_sb,
tb               1162 fs/reiserfs/do_balan.c 	leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
tb               1163 fs/reiserfs/do_balan.c 				  tb->sbytes[i], tb->S_new[i]);
tb               1170 fs/reiserfs/do_balan.c 	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
tb               1171 fs/reiserfs/do_balan.c 	leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i],
tb               1172 fs/reiserfs/do_balan.c 			     tb->pos_in_item, tb->insert_size[0],
tb               1173 fs/reiserfs/do_balan.c 			     body, tb->zeroes_num);
tb               1175 fs/reiserfs/do_balan.c 	pasted = item_head(tb->S_new[i], tb->item_pos - n +
tb               1176 fs/reiserfs/do_balan.c 			   tb->snum[i]);
tb               1178 fs/reiserfs/do_balan.c 		leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i],
tb               1179 fs/reiserfs/do_balan.c 				   tb->pos_in_item, 1,
tb               1181 fs/reiserfs/do_balan.c 				   body + DEH_SIZE, tb->insert_size[0]);
tb               1187 fs/reiserfs/do_balan.c 	tb->zeroes_num = tb->insert_size[0] = 0;
tb               1190 fs/reiserfs/do_balan.c static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
tb               1197 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1201 fs/reiserfs/do_balan.c 	if (n - tb->snum[i] > tb->item_pos) {
tb               1202 fs/reiserfs/do_balan.c 		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
tb               1203 fs/reiserfs/do_balan.c 				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
tb               1209 fs/reiserfs/do_balan.c 	if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1)
tb               1211 fs/reiserfs/do_balan.c 		balance_leaf_new_nodes_paste_shift(tb, ih, body, insert_key,
tb               1215 fs/reiserfs/do_balan.c 		balance_leaf_new_nodes_paste_whole(tb, ih, body, insert_key,
tb               1220 fs/reiserfs/do_balan.c static void balance_leaf_new_nodes(struct tree_balance *tb,
tb               1228 fs/reiserfs/do_balan.c 	for (i = tb->blknum[0] - 2; i >= 0; i--) {
tb               1231 fs/reiserfs/do_balan.c 		RFALSE(!tb->snum[i],
tb               1233 fs/reiserfs/do_balan.c 		       tb->snum[i]);
tb               1237 fs/reiserfs/do_balan.c 		tb->S_new[i] = get_FEB(tb);
tb               1240 fs/reiserfs/do_balan.c 		set_blkh_level(B_BLK_HEAD(tb->S_new[i]), DISK_LEAF_NODE_LEVEL);
tb               1243 fs/reiserfs/do_balan.c 			balance_leaf_new_nodes_insert(tb, ih, body, insert_key,
tb               1246 fs/reiserfs/do_balan.c 			balance_leaf_new_nodes_paste(tb, ih, body, insert_key,
tb               1249 fs/reiserfs/do_balan.c 		memcpy(insert_key + i, leaf_key(tb->S_new[i], 0), KEY_SIZE);
tb               1250 fs/reiserfs/do_balan.c 		insert_ptr[i] = tb->S_new[i];
tb               1252 fs/reiserfs/do_balan.c 		RFALSE(!buffer_journaled(tb->S_new[i])
tb               1253 fs/reiserfs/do_balan.c 		       || buffer_journal_dirty(tb->S_new[i])
tb               1254 fs/reiserfs/do_balan.c 		       || buffer_dirty(tb->S_new[i]),
tb               1256 fs/reiserfs/do_balan.c 		       i, tb->S_new[i]);
tb               1260 fs/reiserfs/do_balan.c static void balance_leaf_finish_node_insert(struct tree_balance *tb,
tb               1264 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1266 fs/reiserfs/do_balan.c 	buffer_info_init_tbS0(tb, &bi);
tb               1267 fs/reiserfs/do_balan.c 	leaf_insert_into_buf(&bi, tb->item_pos, ih, body, tb->zeroes_num);
tb               1270 fs/reiserfs/do_balan.c 	if (tb->item_pos == 0) {
tb               1271 fs/reiserfs/do_balan.c 		if (tb->CFL[0])	/* can be 0 in reiserfsck */
tb               1272 fs/reiserfs/do_balan.c 			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
tb               1277 fs/reiserfs/do_balan.c static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
tb               1281 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1282 fs/reiserfs/do_balan.c 	struct item_head *pasted = item_head(tbS0, tb->item_pos);
tb               1285 fs/reiserfs/do_balan.c 	if (tb->pos_in_item >= 0 && tb->pos_in_item <= ih_entry_count(pasted)) {
tb               1286 fs/reiserfs/do_balan.c 		RFALSE(!tb->insert_size[0],
tb               1290 fs/reiserfs/do_balan.c 		buffer_info_init_tbS0(tb, &bi);
tb               1291 fs/reiserfs/do_balan.c 		leaf_paste_in_buffer(&bi, tb->item_pos, tb->pos_in_item,
tb               1292 fs/reiserfs/do_balan.c 				     tb->insert_size[0], body, tb->zeroes_num);
tb               1295 fs/reiserfs/do_balan.c 		leaf_paste_entries(&bi, tb->item_pos, tb->pos_in_item, 1,
tb               1297 fs/reiserfs/do_balan.c 				   body + DEH_SIZE, tb->insert_size[0]);
tb               1299 fs/reiserfs/do_balan.c 		if (!tb->item_pos && !tb->pos_in_item) {
tb               1300 fs/reiserfs/do_balan.c 			RFALSE(!tb->CFL[0] || !tb->L[0],
tb               1302 fs/reiserfs/do_balan.c 			if (tb->CFL[0])
tb               1303 fs/reiserfs/do_balan.c 				replace_key(tb, tb->CFL[0], tb->lkey[0],
tb               1307 fs/reiserfs/do_balan.c 		tb->insert_size[0] = 0;
tb               1311 fs/reiserfs/do_balan.c static void balance_leaf_finish_node_paste(struct tree_balance *tb,
tb               1315 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1317 fs/reiserfs/do_balan.c 	struct item_head *pasted = item_head(tbS0, tb->item_pos);
tb               1321 fs/reiserfs/do_balan.c 		balance_leaf_finish_node_paste_dirent(tb, ih, body);
tb               1327 fs/reiserfs/do_balan.c 	if (tb->pos_in_item == ih_item_len(pasted)) {
tb               1328 fs/reiserfs/do_balan.c 		RFALSE(tb->insert_size[0] <= 0,
tb               1330 fs/reiserfs/do_balan.c 		       tb->insert_size[0]);
tb               1331 fs/reiserfs/do_balan.c 		buffer_info_init_tbS0(tb, &bi);
tb               1332 fs/reiserfs/do_balan.c 		leaf_paste_in_buffer(&bi, tb->item_pos,
tb               1333 fs/reiserfs/do_balan.c 				     tb->pos_in_item, tb->insert_size[0], body,
tb               1334 fs/reiserfs/do_balan.c 				     tb->zeroes_num);
tb               1339 fs/reiserfs/do_balan.c 		tb->insert_size[0] = 0;
tb               1342 fs/reiserfs/do_balan.c 	else if (tb->insert_size[0]) {
tb               1344 fs/reiserfs/do_balan.c 		reiserfs_panic(tb->tb_sb, "PAP-12285",
tb               1345 fs/reiserfs/do_balan.c 		    "insert_size must be 0 (%d)", tb->insert_size[0]);
tb               1355 fs/reiserfs/do_balan.c static void balance_leaf_finish_node(struct tree_balance *tb,
tb               1360 fs/reiserfs/do_balan.c 	if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
tb               1362 fs/reiserfs/do_balan.c 			balance_leaf_finish_node_insert(tb, ih, body);
tb               1364 fs/reiserfs/do_balan.c 			balance_leaf_finish_node_paste(tb, ih, body);
tb               1382 fs/reiserfs/do_balan.c static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
tb               1387 fs/reiserfs/do_balan.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               1389 fs/reiserfs/do_balan.c 	PROC_INFO_INC(tb->tb_sb, balance_at[0]);
tb               1392 fs/reiserfs/do_balan.c 	if (tb->insert_size[0] < 0)
tb               1393 fs/reiserfs/do_balan.c 		return balance_leaf_when_delete(tb, flag);
tb               1395 fs/reiserfs/do_balan.c 	tb->item_pos = PATH_LAST_POSITION(tb->tb_path),
tb               1396 fs/reiserfs/do_balan.c 	tb->pos_in_item = tb->tb_path->pos_in_item,
tb               1397 fs/reiserfs/do_balan.c 	tb->zeroes_num = 0;
tb               1399 fs/reiserfs/do_balan.c 		tb->zeroes_num = ih_item_len(ih);
tb               1406 fs/reiserfs/do_balan.c 	    && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
tb               1407 fs/reiserfs/do_balan.c 		tb->pos_in_item *= UNFM_P_SIZE;
tb               1409 fs/reiserfs/do_balan.c 	body += balance_leaf_left(tb, ih, body, flag);
tb               1413 fs/reiserfs/do_balan.c 	tb->item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0));
tb               1415 fs/reiserfs/do_balan.c 	balance_leaf_right(tb, ih, body, flag);
tb               1418 fs/reiserfs/do_balan.c 	RFALSE(tb->blknum[0] > 3,
tb               1419 fs/reiserfs/do_balan.c 	       "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
tb               1420 fs/reiserfs/do_balan.c 	RFALSE(tb->blknum[0] < 0,
tb               1421 fs/reiserfs/do_balan.c 	       "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
tb               1428 fs/reiserfs/do_balan.c 	if (tb->blknum[0] == 0) {	/* node S[0] is empty now */
tb               1430 fs/reiserfs/do_balan.c 		RFALSE(!tb->lnum[0] || !tb->rnum[0],
tb               1437 fs/reiserfs/do_balan.c 		if (tb->CFL[0]) {
tb               1438 fs/reiserfs/do_balan.c 			if (!tb->CFR[0])
tb               1439 fs/reiserfs/do_balan.c 				reiserfs_panic(tb->tb_sb, "vs-12195",
tb               1441 fs/reiserfs/do_balan.c 			copy_key(internal_key(tb->CFL[0], tb->lkey[0]),
tb               1442 fs/reiserfs/do_balan.c 				 internal_key(tb->CFR[0], tb->rkey[0]));
tb               1443 fs/reiserfs/do_balan.c 			do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
tb               1446 fs/reiserfs/do_balan.c 		reiserfs_invalidate_buffer(tb, tbS0);
tb               1450 fs/reiserfs/do_balan.c 	balance_leaf_new_nodes(tb, ih, body, insert_key, insert_ptr, flag);
tb               1452 fs/reiserfs/do_balan.c 	balance_leaf_finish_node(tb, ih, body, flag);
tb               1455 fs/reiserfs/do_balan.c 	if (flag == M_PASTE && tb->insert_size[0]) {
tb               1457 fs/reiserfs/do_balan.c 		reiserfs_panic(tb->tb_sb,
tb               1459 fs/reiserfs/do_balan.c 			       tb->insert_size[0]);
tb               1483 fs/reiserfs/do_balan.c struct buffer_head *get_FEB(struct tree_balance *tb)
tb               1489 fs/reiserfs/do_balan.c 		if (tb->FEB[i] != NULL)
tb               1493 fs/reiserfs/do_balan.c 		reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty");
tb               1495 fs/reiserfs/do_balan.c 	buffer_info_init_bh(tb, &bi, tb->FEB[i]);
tb               1497 fs/reiserfs/do_balan.c 	set_buffer_uptodate(tb->FEB[i]);
tb               1498 fs/reiserfs/do_balan.c 	tb->used[i] = tb->FEB[i];
tb               1499 fs/reiserfs/do_balan.c 	tb->FEB[i] = NULL;
tb               1501 fs/reiserfs/do_balan.c 	return tb->used[i];
tb               1505 fs/reiserfs/do_balan.c static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
tb               1510 fs/reiserfs/do_balan.c 		reiserfs_warning(tb->tb_sb, "reiserfs-12320",
tb               1512 fs/reiserfs/do_balan.c 	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
tb               1513 fs/reiserfs/do_balan.c 		if (!tb->thrown[i]) {
tb               1514 fs/reiserfs/do_balan.c 			tb->thrown[i] = bh;
tb               1518 fs/reiserfs/do_balan.c 	reiserfs_warning(tb->tb_sb, "reiserfs-12321",
tb               1522 fs/reiserfs/do_balan.c static void free_thrown(struct tree_balance *tb)
tb               1526 fs/reiserfs/do_balan.c 	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) {
tb               1527 fs/reiserfs/do_balan.c 		if (tb->thrown[i]) {
tb               1528 fs/reiserfs/do_balan.c 			blocknr = tb->thrown[i]->b_blocknr;
tb               1529 fs/reiserfs/do_balan.c 			if (buffer_dirty(tb->thrown[i]))
tb               1530 fs/reiserfs/do_balan.c 				reiserfs_warning(tb->tb_sb, "reiserfs-12322",
tb               1533 fs/reiserfs/do_balan.c 			brelse(tb->thrown[i]);	/* incremented in store_thrown */
tb               1534 fs/reiserfs/do_balan.c 			reiserfs_free_block(tb->transaction_handle, NULL,
tb               1540 fs/reiserfs/do_balan.c void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh)
tb               1548 fs/reiserfs/do_balan.c 	store_thrown(tb, bh);
tb               1552 fs/reiserfs/do_balan.c void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
tb               1576 fs/reiserfs/do_balan.c 	do_balance_mark_internal_dirty(tb, dest, 0);
tb               1579 fs/reiserfs/do_balan.c int get_left_neighbor_position(struct tree_balance *tb, int h)
tb               1581 fs/reiserfs/do_balan.c 	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb               1583 fs/reiserfs/do_balan.c 	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL,
tb               1585 fs/reiserfs/do_balan.c 	       h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));
tb               1588 fs/reiserfs/do_balan.c 		return B_NR_ITEMS(tb->FL[h]);
tb               1593 fs/reiserfs/do_balan.c int get_right_neighbor_position(struct tree_balance *tb, int h)
tb               1595 fs/reiserfs/do_balan.c 	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb               1597 fs/reiserfs/do_balan.c 	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL,
tb               1599 fs/reiserfs/do_balan.c 	       h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);
tb               1601 fs/reiserfs/do_balan.c 	if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
tb               1636 fs/reiserfs/do_balan.c static int locked_or_not_in_tree(struct tree_balance *tb,
tb               1641 fs/reiserfs/do_balan.c 		reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh);
tb               1647 fs/reiserfs/do_balan.c static int check_before_balancing(struct tree_balance *tb)
tb               1651 fs/reiserfs/do_balan.c 	if (REISERFS_SB(tb->tb_sb)->cur_tb) {
tb               1652 fs/reiserfs/do_balan.c 		reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
tb               1663 fs/reiserfs/do_balan.c 	if (tb->lnum[0]) {
tb               1664 fs/reiserfs/do_balan.c 		retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
tb               1665 fs/reiserfs/do_balan.c 		retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
tb               1666 fs/reiserfs/do_balan.c 		retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]");
tb               1667 fs/reiserfs/do_balan.c 		check_leaf(tb->L[0]);
tb               1669 fs/reiserfs/do_balan.c 	if (tb->rnum[0]) {
tb               1670 fs/reiserfs/do_balan.c 		retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]");
tb               1671 fs/reiserfs/do_balan.c 		retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]");
tb               1672 fs/reiserfs/do_balan.c 		retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]");
tb               1673 fs/reiserfs/do_balan.c 		check_leaf(tb->R[0]);
tb               1675 fs/reiserfs/do_balan.c 	retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path),
tb               1677 fs/reiserfs/do_balan.c 	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
tb               1682 fs/reiserfs/do_balan.c static void check_after_balance_leaf(struct tree_balance *tb)
tb               1684 fs/reiserfs/do_balan.c 	if (tb->lnum[0]) {
tb               1685 fs/reiserfs/do_balan.c 		if (B_FREE_SPACE(tb->L[0]) !=
tb               1686 fs/reiserfs/do_balan.c 		    MAX_CHILD_SIZE(tb->L[0]) -
tb               1688 fs/reiserfs/do_balan.c 			    (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
tb               1690 fs/reiserfs/do_balan.c 			reiserfs_panic(tb->tb_sb, "PAP-12355",
tb               1694 fs/reiserfs/do_balan.c 	if (tb->rnum[0]) {
tb               1695 fs/reiserfs/do_balan.c 		if (B_FREE_SPACE(tb->R[0]) !=
tb               1696 fs/reiserfs/do_balan.c 		    MAX_CHILD_SIZE(tb->R[0]) -
tb               1698 fs/reiserfs/do_balan.c 			    (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
tb               1700 fs/reiserfs/do_balan.c 			reiserfs_panic(tb->tb_sb, "PAP-12360",
tb               1704 fs/reiserfs/do_balan.c 	if (PATH_H_PBUFFER(tb->tb_path, 1) &&
tb               1705 fs/reiserfs/do_balan.c 	    (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
tb               1706 fs/reiserfs/do_balan.c 	     (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
tb               1707 fs/reiserfs/do_balan.c 	      dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
tb               1708 fs/reiserfs/do_balan.c 				PATH_H_POSITION(tb->tb_path, 1)))))) {
tb               1709 fs/reiserfs/do_balan.c 		int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
tb               1710 fs/reiserfs/do_balan.c 		int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
tb               1711 fs/reiserfs/do_balan.c 			     dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
tb               1712 fs/reiserfs/do_balan.c 					       PATH_H_POSITION(tb->tb_path,
tb               1715 fs/reiserfs/do_balan.c 		reiserfs_warning(tb->tb_sb, "reiserfs-12363",
tb               1719 fs/reiserfs/do_balan.c 				 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
tb               1720 fs/reiserfs/do_balan.c 				 PATH_H_PBUFFER(tb->tb_path, 1),
tb               1721 fs/reiserfs/do_balan.c 				 PATH_H_POSITION(tb->tb_path, 1),
tb               1723 fs/reiserfs/do_balan.c 					 (PATH_H_PBUFFER(tb->tb_path, 1),
tb               1724 fs/reiserfs/do_balan.c 					  PATH_H_POSITION(tb->tb_path, 1))),
tb               1726 fs/reiserfs/do_balan.c 		reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect");
tb               1730 fs/reiserfs/do_balan.c static void check_leaf_level(struct tree_balance *tb)
tb               1732 fs/reiserfs/do_balan.c 	check_leaf(tb->L[0]);
tb               1733 fs/reiserfs/do_balan.c 	check_leaf(tb->R[0]);
tb               1734 fs/reiserfs/do_balan.c 	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
tb               1737 fs/reiserfs/do_balan.c static void check_internal_levels(struct tree_balance *tb)
tb               1742 fs/reiserfs/do_balan.c 	for (h = 1; tb->insert_size[h]; h++) {
tb               1743 fs/reiserfs/do_balan.c 		check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
tb               1745 fs/reiserfs/do_balan.c 		if (tb->lnum[h])
tb               1746 fs/reiserfs/do_balan.c 			check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
tb               1747 fs/reiserfs/do_balan.c 		if (tb->rnum[h])
tb               1748 fs/reiserfs/do_balan.c 			check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
tb               1789 fs/reiserfs/do_balan.c static inline void do_balance_starts(struct tree_balance *tb)
tb               1800 fs/reiserfs/do_balan.c 	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
tb               1802 fs/reiserfs/do_balan.c 	REISERFS_SB(tb->tb_sb)->cur_tb = tb;
tb               1806 fs/reiserfs/do_balan.c static inline void do_balance_completed(struct tree_balance *tb)
tb               1810 fs/reiserfs/do_balan.c 	check_leaf_level(tb);
tb               1811 fs/reiserfs/do_balan.c 	check_internal_levels(tb);
tb               1812 fs/reiserfs/do_balan.c 	REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
tb               1821 fs/reiserfs/do_balan.c 	REISERFS_SB(tb->tb_sb)->s_do_balance++;
tb               1824 fs/reiserfs/do_balan.c 	unfix_nodes(tb);
tb               1826 fs/reiserfs/do_balan.c 	free_thrown(tb);
tb               1847 fs/reiserfs/do_balan.c void do_balance(struct tree_balance *tb, struct item_head *ih,
tb               1864 fs/reiserfs/do_balan.c 	tb->tb_mode = flag;
tb               1865 fs/reiserfs/do_balan.c 	tb->need_balance_dirty = 0;
tb               1867 fs/reiserfs/do_balan.c 	if (FILESYSTEM_CHANGED_TB(tb)) {
tb               1868 fs/reiserfs/do_balan.c 		reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has "
tb               1872 fs/reiserfs/do_balan.c 	if (!tb->insert_size[0]) {
tb               1873 fs/reiserfs/do_balan.c 		reiserfs_warning(tb->tb_sb, "PAP-12350",
tb               1875 fs/reiserfs/do_balan.c 		unfix_nodes(tb);
tb               1879 fs/reiserfs/do_balan.c 	atomic_inc(&fs_generation(tb->tb_sb));
tb               1880 fs/reiserfs/do_balan.c 	do_balance_starts(tb);
tb               1887 fs/reiserfs/do_balan.c 	child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
tb               1888 fs/reiserfs/do_balan.c 	    balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);
tb               1891 fs/reiserfs/do_balan.c 	check_after_balance_leaf(tb);
tb               1895 fs/reiserfs/do_balan.c 	for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
tb               1896 fs/reiserfs/do_balan.c 		child_pos = balance_internal(tb, h, child_pos, insert_key,
tb               1899 fs/reiserfs/do_balan.c 	do_balance_completed(tb);
tb                 51 fs/reiserfs/fix_node.c static void create_virtual_node(struct tree_balance *tb, int h)
tb                 54 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb                 58 fs/reiserfs/fix_node.c 	Sh = PATH_H_PBUFFER(tb->tb_path, h);
tb                 62 fs/reiserfs/fix_node.c 	    MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];
tb                 76 fs/reiserfs/fix_node.c 	vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);
tb                115 fs/reiserfs/fix_node.c 		    op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
tb                116 fs/reiserfs/fix_node.c 		if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
tb                117 fs/reiserfs/fix_node.c 			reiserfs_panic(tb->tb_sb, "vs-8030",
tb                125 fs/reiserfs/fix_node.c 			vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
tb                137 fs/reiserfs/fix_node.c 		vi->vi_item_len = tb->insert_size[0];
tb                143 fs/reiserfs/fix_node.c 			     tb->insert_size[0]);
tb                150 fs/reiserfs/fix_node.c 	if (tb->CFR[0]) {
tb                153 fs/reiserfs/fix_node.c 		key = internal_key(tb->CFR[0], tb->rkey[0]);
tb                178 fs/reiserfs/fix_node.c 				reiserfs_panic(tb->tb_sb, "vs-8045",
tb                194 fs/reiserfs/fix_node.c static void check_left(struct tree_balance *tb, int h, int cur_free)
tb                197 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb                205 fs/reiserfs/fix_node.c 		tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
tb                213 fs/reiserfs/fix_node.c 		tb->lnum[h] = 0;
tb                214 fs/reiserfs/fix_node.c 		tb->lbytes = -1;
tb                218 fs/reiserfs/fix_node.c 	RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
tb                230 fs/reiserfs/fix_node.c 		tb->lnum[0] = vn->vn_nr_item;
tb                231 fs/reiserfs/fix_node.c 		tb->lbytes = -1;
tb                241 fs/reiserfs/fix_node.c 	tb->lnum[0] = 0;
tb                248 fs/reiserfs/fix_node.c 			tb->lnum[0]++;
tb                260 fs/reiserfs/fix_node.c 			tb->lbytes = -1;
tb                265 fs/reiserfs/fix_node.c 		tb->lbytes = op_check_left(vi, cur_free, 0, 0);
tb                266 fs/reiserfs/fix_node.c 		if (tb->lbytes != -1)
tb                268 fs/reiserfs/fix_node.c 			tb->lnum[0]++;
tb                280 fs/reiserfs/fix_node.c static void check_right(struct tree_balance *tb, int h, int cur_free)
tb                283 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb                291 fs/reiserfs/fix_node.c 		tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
tb                299 fs/reiserfs/fix_node.c 		tb->rnum[h] = 0;
tb                300 fs/reiserfs/fix_node.c 		tb->rbytes = -1;
tb                304 fs/reiserfs/fix_node.c 	RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
tb                316 fs/reiserfs/fix_node.c 		tb->rnum[h] = vn->vn_nr_item;
tb                317 fs/reiserfs/fix_node.c 		tb->rbytes = -1;
tb                327 fs/reiserfs/fix_node.c 	tb->rnum[0] = 0;
tb                334 fs/reiserfs/fix_node.c 			tb->rnum[0]++;
tb                345 fs/reiserfs/fix_node.c 			tb->rbytes = -1;
tb                355 fs/reiserfs/fix_node.c 		tb->rbytes = op_check_right(vi, cur_free);
tb                356 fs/reiserfs/fix_node.c 		if (tb->rbytes != -1)
tb                358 fs/reiserfs/fix_node.c 			tb->rnum[0]++;
tb                374 fs/reiserfs/fix_node.c static int get_num_ver(int mode, struct tree_balance *tb, int h,
tb                380 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb                417 fs/reiserfs/fix_node.c 	RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
tb                420 fs/reiserfs/fix_node.c 	max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));
tb                532 fs/reiserfs/fix_node.c 			reiserfs_warning(tb->tb_sb, "vs-8111",
tb                571 fs/reiserfs/fix_node.c 			reiserfs_warning(tb->tb_sb, "vs-8115",
tb                621 fs/reiserfs/fix_node.c static void set_parameters(struct tree_balance *tb, int h, int lnum,
tb                625 fs/reiserfs/fix_node.c 	tb->lnum[h] = lnum;
tb                626 fs/reiserfs/fix_node.c 	tb->rnum[h] = rnum;
tb                627 fs/reiserfs/fix_node.c 	tb->blknum[h] = blk_num;
tb                632 fs/reiserfs/fix_node.c 			tb->s0num = *s012++;
tb                633 fs/reiserfs/fix_node.c 			tb->snum[0] = *s012++;
tb                634 fs/reiserfs/fix_node.c 			tb->snum[1] = *s012++;
tb                635 fs/reiserfs/fix_node.c 			tb->sbytes[0] = *s012++;
tb                636 fs/reiserfs/fix_node.c 			tb->sbytes[1] = *s012;
tb                638 fs/reiserfs/fix_node.c 		tb->lbytes = lb;
tb                639 fs/reiserfs/fix_node.c 		tb->rbytes = rb;
tb                641 fs/reiserfs/fix_node.c 	PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum);
tb                642 fs/reiserfs/fix_node.c 	PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum);
tb                644 fs/reiserfs/fix_node.c 	PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb);
tb                645 fs/reiserfs/fix_node.c 	PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
tb                652 fs/reiserfs/fix_node.c static int is_leaf_removable(struct tree_balance *tb)
tb                654 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb                663 fs/reiserfs/fix_node.c 	to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
tb                664 fs/reiserfs/fix_node.c 	to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
tb                672 fs/reiserfs/fix_node.c 		set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0,
tb                678 fs/reiserfs/fix_node.c 	if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
tb                686 fs/reiserfs/fix_node.c 	if (tb->lbytes + tb->rbytes >= size) {
tb                687 fs/reiserfs/fix_node.c 		set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL,
tb                688 fs/reiserfs/fix_node.c 			       tb->lbytes, -1);
tb                696 fs/reiserfs/fix_node.c static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree)
tb                698 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb                702 fs/reiserfs/fix_node.c 	S0 = PATH_H_PBUFFER(tb->tb_path, 0);
tb                721 fs/reiserfs/fix_node.c 		if (tb->CFR[0]
tb                723 fs/reiserfs/fix_node.c 					   internal_key(tb->CFR[0],
tb                724 fs/reiserfs/fix_node.c 							  tb->rkey[0])))
tb                749 fs/reiserfs/fix_node.c 		set_parameters(tb, 0, -1, -1, -1, NULL, -1, -1);
tb                750 fs/reiserfs/fix_node.c 		PROC_INFO_INC(tb->tb_sb, leaves_removable);
tb                766 fs/reiserfs/fix_node.c 	      set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
tb                771 fs/reiserfs/fix_node.c      set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
tb                772 fs/reiserfs/fix_node.c 		     tb->lbytes, -1);\
tb                774 fs/reiserfs/fix_node.c      set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
tb                785 fs/reiserfs/fix_node.c    set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
tb                790 fs/reiserfs/fix_node.c      set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
tb                791 fs/reiserfs/fix_node.c 		  -1, tb->rbytes);\
tb                793 fs/reiserfs/fix_node.c      set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
tb                797 fs/reiserfs/fix_node.c static void free_buffers_in_tb(struct tree_balance *tb)
tb                801 fs/reiserfs/fix_node.c 	pathrelse(tb->tb_path);
tb                804 fs/reiserfs/fix_node.c 		brelse(tb->L[i]);
tb                805 fs/reiserfs/fix_node.c 		brelse(tb->R[i]);
tb                806 fs/reiserfs/fix_node.c 		brelse(tb->FL[i]);
tb                807 fs/reiserfs/fix_node.c 		brelse(tb->FR[i]);
tb                808 fs/reiserfs/fix_node.c 		brelse(tb->CFL[i]);
tb                809 fs/reiserfs/fix_node.c 		brelse(tb->CFR[i]);
tb                811 fs/reiserfs/fix_node.c 		tb->L[i] = NULL;
tb                812 fs/reiserfs/fix_node.c 		tb->R[i] = NULL;
tb                813 fs/reiserfs/fix_node.c 		tb->FL[i] = NULL;
tb                814 fs/reiserfs/fix_node.c 		tb->FR[i] = NULL;
tb                815 fs/reiserfs/fix_node.c 		tb->CFL[i] = NULL;
tb                816 fs/reiserfs/fix_node.c 		tb->CFR[i] = NULL;
tb                827 fs/reiserfs/fix_node.c static int get_empty_nodes(struct tree_balance *tb, int h)
tb                829 fs/reiserfs/fix_node.c 	struct buffer_head *new_bh, *Sh = PATH_H_PBUFFER(tb->tb_path, h);
tb                834 fs/reiserfs/fix_node.c 	struct super_block *sb = tb->tb_sb;
tb                857 fs/reiserfs/fix_node.c 	for (counter = 0, number_of_freeblk = tb->cur_blknum;
tb                860 fs/reiserfs/fix_node.c 		    (tb->blknum[counter]) ? (tb->blknum[counter] -
tb                865 fs/reiserfs/fix_node.c 	amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
tb                879 fs/reiserfs/fix_node.c 	if (reiserfs_new_form_blocknrs(tb, blocknrs,
tb                898 fs/reiserfs/fix_node.c 		RFALSE(tb->FEB[tb->cur_blknum],
tb                902 fs/reiserfs/fix_node.c 		tb->FEB[tb->cur_blknum++] = new_bh;
tb                905 fs/reiserfs/fix_node.c 	if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
tb                915 fs/reiserfs/fix_node.c static int get_lfree(struct tree_balance *tb, int h)
tb                920 fs/reiserfs/fix_node.c 	if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
tb                921 fs/reiserfs/fix_node.c 	    (l = tb->FL[h]) == NULL)
tb                925 fs/reiserfs/fix_node.c 		order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
tb                938 fs/reiserfs/fix_node.c static int get_rfree(struct tree_balance *tb, int h)
tb                943 fs/reiserfs/fix_node.c 	if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
tb                944 fs/reiserfs/fix_node.c 	    (r = tb->FR[h]) == NULL)
tb                948 fs/reiserfs/fix_node.c 		order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
tb                959 fs/reiserfs/fix_node.c static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
tb                962 fs/reiserfs/fix_node.c 	struct super_block *sb = tb->tb_sb;
tb                967 fs/reiserfs/fix_node.c 	if (!tb->FL[h])
tb                971 fs/reiserfs/fix_node.c 	father = PATH_H_PBUFFER(tb->tb_path, h + 1);
tb                975 fs/reiserfs/fix_node.c 	       !B_IS_IN_TREE(tb->FL[h]) ||
tb                977 fs/reiserfs/fix_node.c 	       !buffer_uptodate(tb->FL[h]),
tb                979 fs/reiserfs/fix_node.c 	       father, tb->FL[h]);
tb                985 fs/reiserfs/fix_node.c 	left_neighbor_position = (father == tb->FL[h]) ?
tb                986 fs/reiserfs/fix_node.c 	    tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
tb                989 fs/reiserfs/fix_node.c 	    B_N_CHILD_NUM(tb->FL[h], left_neighbor_position);
tb               1023 fs/reiserfs/fix_node.c static int get_far_parent(struct tree_balance *tb,
tb               1030 fs/reiserfs/fix_node.c 	struct treepath *path = tb->tb_path;
tb               1092 fs/reiserfs/fix_node.c 		    (tb->tb_path,
tb               1094 fs/reiserfs/fix_node.c 		    SB_ROOT_BLOCK(tb->tb_sb)) {
tb               1110 fs/reiserfs/fix_node.c 		int depth = reiserfs_write_unlock_nested(tb->tb_sb);
tb               1112 fs/reiserfs/fix_node.c 		reiserfs_write_lock_nested(tb->tb_sb, depth);
tb               1113 fs/reiserfs/fix_node.c 		if (FILESYSTEM_CHANGED_TB(tb)) {
tb               1129 fs/reiserfs/fix_node.c 				       LEFT_PARENTS) ? (tb->lkey[h - 1] =
tb               1131 fs/reiserfs/fix_node.c 							1) : (tb->rkey[h -
tb               1139 fs/reiserfs/fix_node.c 	    (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
tb               1144 fs/reiserfs/fix_node.c 	if (FILESYSTEM_CHANGED_TB(tb)) {
tb               1172 fs/reiserfs/fix_node.c static int get_parents(struct tree_balance *tb, int h)
tb               1174 fs/reiserfs/fix_node.c 	struct treepath *path = tb->tb_path;
tb               1177 fs/reiserfs/fix_node.c 	    path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
tb               1187 fs/reiserfs/fix_node.c 		brelse(tb->FL[h]);
tb               1188 fs/reiserfs/fix_node.c 		brelse(tb->CFL[h]);
tb               1189 fs/reiserfs/fix_node.c 		brelse(tb->FR[h]);
tb               1190 fs/reiserfs/fix_node.c 		brelse(tb->CFR[h]);
tb               1191 fs/reiserfs/fix_node.c 		tb->FL[h]  = NULL;
tb               1192 fs/reiserfs/fix_node.c 		tb->CFL[h] = NULL;
tb               1193 fs/reiserfs/fix_node.c 		tb->FR[h]  = NULL;
tb               1194 fs/reiserfs/fix_node.c 		tb->CFR[h] = NULL;
tb               1206 fs/reiserfs/fix_node.c 		tb->lkey[h] = position - 1;
tb               1216 fs/reiserfs/fix_node.c 		if ((ret = get_far_parent(tb, h + 1, &curf,
tb               1222 fs/reiserfs/fix_node.c 	brelse(tb->FL[h]);
tb               1223 fs/reiserfs/fix_node.c 	tb->FL[h] = curf;	/* New initialization of FL[h]. */
tb               1224 fs/reiserfs/fix_node.c 	brelse(tb->CFL[h]);
tb               1225 fs/reiserfs/fix_node.c 	tb->CFL[h] = curcf;	/* New initialization of CFL[h]. */
tb               1242 fs/reiserfs/fix_node.c 		     get_far_parent(tb, h + 1, &curf, &curcf,
tb               1251 fs/reiserfs/fix_node.c 		tb->rkey[h] = position;
tb               1254 fs/reiserfs/fix_node.c 	brelse(tb->FR[h]);
tb               1256 fs/reiserfs/fix_node.c 	tb->FR[h] = curf;
tb               1258 fs/reiserfs/fix_node.c 	brelse(tb->CFR[h]);
tb               1260 fs/reiserfs/fix_node.c 	tb->CFR[h] = curcf;
tb               1274 fs/reiserfs/fix_node.c 				      struct tree_balance *tb, int h)
tb               1276 fs/reiserfs/fix_node.c 	struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h);
tb               1277 fs/reiserfs/fix_node.c 	int levbytes = tb->insert_size[h];
tb               1282 fs/reiserfs/fix_node.c 	if (tb->CFR[h])
tb               1283 fs/reiserfs/fix_node.c 		r_key = internal_key(tb->CFR[h], tb->rkey[h]);
tb               1298 fs/reiserfs/fix_node.c 				tb->s0num =
tb               1301 fs/reiserfs/fix_node.c 			set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
tb               1305 fs/reiserfs/fix_node.c 	PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]);
tb               1324 fs/reiserfs/fix_node.c static int ip_check_balance(struct tree_balance *tb, int h)
tb               1326 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb               1373 fs/reiserfs/fix_node.c 	Sh = PATH_H_PBUFFER(tb->tb_path, h);
tb               1374 fs/reiserfs/fix_node.c 	levbytes = tb->insert_size[h];
tb               1379 fs/reiserfs/fix_node.c 			reiserfs_panic(tb->tb_sb, "vs-8210",
tb               1381 fs/reiserfs/fix_node.c 		switch (ret = get_empty_nodes(tb, h)) {
tb               1384 fs/reiserfs/fix_node.c 			set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
tb               1391 fs/reiserfs/fix_node.c 			reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
tb               1397 fs/reiserfs/fix_node.c 	ret = get_parents(tb, h);
tb               1404 fs/reiserfs/fix_node.c 	rfree = get_rfree(tb, h);
tb               1405 fs/reiserfs/fix_node.c 	lfree = get_lfree(tb, h);
tb               1408 fs/reiserfs/fix_node.c 	if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
tb               1412 fs/reiserfs/fix_node.c 	create_virtual_node(tb, h);
tb               1420 fs/reiserfs/fix_node.c 	check_left(tb, h, lfree);
tb               1428 fs/reiserfs/fix_node.c 	check_right(tb, h, rfree);
tb               1434 fs/reiserfs/fix_node.c 	if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
tb               1446 fs/reiserfs/fix_node.c 		    ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
tb               1448 fs/reiserfs/fix_node.c 						tb->rnum[h]);
tb               1449 fs/reiserfs/fix_node.c 		set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
tb               1459 fs/reiserfs/fix_node.c 	       (tb->lnum[h] >= vn->vn_nr_item + 1 ||
tb               1460 fs/reiserfs/fix_node.c 		tb->rnum[h] >= vn->vn_nr_item + 1),
tb               1462 fs/reiserfs/fix_node.c 	RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
tb               1463 fs/reiserfs/fix_node.c 		      (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
tb               1470 fs/reiserfs/fix_node.c 	if (!h && is_leaf_removable(tb))
tb               1483 fs/reiserfs/fix_node.c 			tb->s0num = vn->vn_nr_item;
tb               1484 fs/reiserfs/fix_node.c 		set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
tb               1512 fs/reiserfs/fix_node.c 		lpar = tb->lnum[h];
tb               1513 fs/reiserfs/fix_node.c 		rpar = tb->rnum[h];
tb               1523 fs/reiserfs/fix_node.c 		nver = get_num_ver(vn->vn_mode, tb, h,
tb               1534 fs/reiserfs/fix_node.c 			nver1 = get_num_ver(vn->vn_mode, tb, h,
tb               1550 fs/reiserfs/fix_node.c 		lnver = get_num_ver(vn->vn_mode, tb, h,
tb               1551 fs/reiserfs/fix_node.c 				    lpar - ((h || tb->lbytes == -1) ? 0 : 1),
tb               1557 fs/reiserfs/fix_node.c 			lnver1 = get_num_ver(vn->vn_mode, tb, h,
tb               1559 fs/reiserfs/fix_node.c 					     ((tb->lbytes != -1) ? 1 : 0),
tb               1560 fs/reiserfs/fix_node.c 					     tb->lbytes, 0, -1,
tb               1575 fs/reiserfs/fix_node.c 		rnver = get_num_ver(vn->vn_mode, tb, h,
tb               1578 fs/reiserfs/fix_node.c 								   ((tb->
tb               1586 fs/reiserfs/fix_node.c 			rnver1 = get_num_ver(vn->vn_mode, tb, h,
tb               1589 fs/reiserfs/fix_node.c 					      ((tb->rbytes != -1) ? 1 : 0)),
tb               1590 fs/reiserfs/fix_node.c 					     tb->rbytes,
tb               1605 fs/reiserfs/fix_node.c 		lrnver = get_num_ver(vn->vn_mode, tb, h,
tb               1606 fs/reiserfs/fix_node.c 				     lpar - ((h || tb->lbytes == -1) ? 0 : 1),
tb               1609 fs/reiserfs/fix_node.c 								    ((tb->
tb               1617 fs/reiserfs/fix_node.c 			lrnver1 = get_num_ver(vn->vn_mode, tb, h,
tb               1619 fs/reiserfs/fix_node.c 					      ((tb->lbytes != -1) ? 1 : 0),
tb               1620 fs/reiserfs/fix_node.c 					      tb->lbytes,
tb               1622 fs/reiserfs/fix_node.c 					       ((tb->rbytes != -1) ? 1 : 0)),
tb               1623 fs/reiserfs/fix_node.c 					      tb->rbytes,
tb               1639 fs/reiserfs/fix_node.c 			       (tb->lnum[h] != 1 ||
tb               1640 fs/reiserfs/fix_node.c 				tb->rnum[h] != 1 ||
tb               1644 fs/reiserfs/fix_node.c 				set_parameters(tb, h, tb->lnum[h], tb->rnum[h],
tb               1646 fs/reiserfs/fix_node.c 					       tb->lbytes, tb->rbytes);
tb               1648 fs/reiserfs/fix_node.c 				set_parameters(tb, h,
tb               1649 fs/reiserfs/fix_node.c 					       tb->lnum[h] -
tb               1650 fs/reiserfs/fix_node.c 					       ((tb->lbytes == -1) ? 0 : 1),
tb               1651 fs/reiserfs/fix_node.c 					       tb->rnum[h] -
tb               1652 fs/reiserfs/fix_node.c 					       ((tb->rbytes == -1) ? 0 : 1),
tb               1663 fs/reiserfs/fix_node.c 			set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
tb               1695 fs/reiserfs/fix_node.c 		if (is_left_neighbor_in_cache(tb, h)) {
tb               1726 fs/reiserfs/fix_node.c static int dc_check_balance_internal(struct tree_balance *tb, int h)
tb               1728 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb               1738 fs/reiserfs/fix_node.c 	Sh = PATH_H_PBUFFER(tb->tb_path, h);
tb               1739 fs/reiserfs/fix_node.c 	Fh = PATH_H_PPARENT(tb->tb_path, h);
tb               1747 fs/reiserfs/fix_node.c 	create_virtual_node(tb, h);
tb               1752 fs/reiserfs/fix_node.c 			set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
tb               1760 fs/reiserfs/fix_node.c 		set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
tb               1764 fs/reiserfs/fix_node.c 	if ((ret = get_parents(tb, h)) != CARRY_ON)
tb               1768 fs/reiserfs/fix_node.c 	rfree = get_rfree(tb, h);
tb               1769 fs/reiserfs/fix_node.c 	lfree = get_lfree(tb, h);
tb               1772 fs/reiserfs/fix_node.c 	check_left(tb, h, lfree);
tb               1773 fs/reiserfs/fix_node.c 	check_right(tb, h, rfree);
tb               1786 fs/reiserfs/fix_node.c 			if (tb->lnum[h] >= vn->vn_nr_item + 1) {
tb               1792 fs/reiserfs/fix_node.c 				      PATH_H_B_ITEM_ORDER(tb->tb_path,
tb               1794 fs/reiserfs/fix_node.c 				     0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
tb               1795 fs/reiserfs/fix_node.c 				n = dc_size(B_N_CHILD(tb->FL[h], order_L)) /
tb               1797 fs/reiserfs/fix_node.c 				set_parameters(tb, h, -n - 1, 0, 0, NULL, -1,
tb               1803 fs/reiserfs/fix_node.c 			if (tb->rnum[h] >= vn->vn_nr_item + 1) {
tb               1809 fs/reiserfs/fix_node.c 				      PATH_H_B_ITEM_ORDER(tb->tb_path,
tb               1812 fs/reiserfs/fix_node.c 				n = dc_size(B_N_CHILD(tb->FR[h], order_R)) /
tb               1814 fs/reiserfs/fix_node.c 				set_parameters(tb, h, 0, -n - 1, 0, NULL, -1,
tb               1824 fs/reiserfs/fix_node.c 		if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
tb               1828 fs/reiserfs/fix_node.c 			    ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] -
tb               1829 fs/reiserfs/fix_node.c 			     tb->rnum[h] + vn->vn_nr_item + 1) / 2 -
tb               1830 fs/reiserfs/fix_node.c 			    (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
tb               1831 fs/reiserfs/fix_node.c 			set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r,
tb               1837 fs/reiserfs/fix_node.c 		set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
tb               1846 fs/reiserfs/fix_node.c 	if (tb->lnum[h] >= vn->vn_nr_item + 1)
tb               1847 fs/reiserfs/fix_node.c 		if (is_left_neighbor_in_cache(tb, h)
tb               1848 fs/reiserfs/fix_node.c 		    || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) {
tb               1854 fs/reiserfs/fix_node.c 			      PATH_H_B_ITEM_ORDER(tb->tb_path,
tb               1856 fs/reiserfs/fix_node.c 			     0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
tb               1857 fs/reiserfs/fix_node.c 			n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE +
tb               1859 fs/reiserfs/fix_node.c 			set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1);
tb               1864 fs/reiserfs/fix_node.c 	if (tb->rnum[h] >= vn->vn_nr_item + 1) {
tb               1870 fs/reiserfs/fix_node.c 		      PATH_H_B_ITEM_ORDER(tb->tb_path,
tb               1872 fs/reiserfs/fix_node.c 		n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE +
tb               1874 fs/reiserfs/fix_node.c 		set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1);
tb               1879 fs/reiserfs/fix_node.c 	if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
tb               1883 fs/reiserfs/fix_node.c 		    ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
tb               1885 fs/reiserfs/fix_node.c 						tb->rnum[h]);
tb               1886 fs/reiserfs/fix_node.c 		set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
tb               1892 fs/reiserfs/fix_node.c 	RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");
tb               1895 fs/reiserfs/fix_node.c 	if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) {
tb               1899 fs/reiserfs/fix_node.c 		    (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item +
tb               1901 fs/reiserfs/fix_node.c 		set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1);
tb               1905 fs/reiserfs/fix_node.c 	set_parameters(tb, h, 0,
tb               1906 fs/reiserfs/fix_node.c 		       -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item +
tb               1925 fs/reiserfs/fix_node.c static int dc_check_balance_leaf(struct tree_balance *tb, int h)
tb               1927 fs/reiserfs/fix_node.c 	struct virtual_node *vn = tb->tb_vn;
tb               1947 fs/reiserfs/fix_node.c 	S0 = PATH_H_PBUFFER(tb->tb_path, 0);
tb               1948 fs/reiserfs/fix_node.c 	F0 = PATH_H_PPARENT(tb->tb_path, 0);
tb               1950 fs/reiserfs/fix_node.c 	levbytes = tb->insert_size[h];
tb               1959 fs/reiserfs/fix_node.c 		set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
tb               1963 fs/reiserfs/fix_node.c 	if ((ret = get_parents(tb, h)) != CARRY_ON)
tb               1967 fs/reiserfs/fix_node.c 	rfree = get_rfree(tb, h);
tb               1968 fs/reiserfs/fix_node.c 	lfree = get_lfree(tb, h);
tb               1970 fs/reiserfs/fix_node.c 	create_virtual_node(tb, h);
tb               1973 fs/reiserfs/fix_node.c 	if (are_leaves_removable(tb, lfree, rfree))
tb               1982 fs/reiserfs/fix_node.c 	check_left(tb, h, lfree);
tb               1983 fs/reiserfs/fix_node.c 	check_right(tb, h, rfree);
tb               1986 fs/reiserfs/fix_node.c 	if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1)
tb               1987 fs/reiserfs/fix_node.c 		if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) ||	/* S can not be merged with R */
tb               1988 fs/reiserfs/fix_node.c 		    !tb->FR[h]) {
tb               1990 fs/reiserfs/fix_node.c 			RFALSE(!tb->FL[h],
tb               1994 fs/reiserfs/fix_node.c 			set_parameters(tb, h, -1, 0, 0, NULL, -1, -1);
tb               1999 fs/reiserfs/fix_node.c 	if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) {
tb               2000 fs/reiserfs/fix_node.c 		set_parameters(tb, h, 0, -1, 0, NULL, -1, -1);
tb               2008 fs/reiserfs/fix_node.c 	if (is_leaf_removable(tb))
tb               2012 fs/reiserfs/fix_node.c 	tb->s0num = vn->vn_nr_item;
tb               2013 fs/reiserfs/fix_node.c 	set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
tb               2031 fs/reiserfs/fix_node.c static int dc_check_balance(struct tree_balance *tb, int h)
tb               2033 fs/reiserfs/fix_node.c 	RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)),
tb               2037 fs/reiserfs/fix_node.c 		return dc_check_balance_internal(tb, h);
tb               2039 fs/reiserfs/fix_node.c 		return dc_check_balance_leaf(tb, h);
tb               2062 fs/reiserfs/fix_node.c 			 struct tree_balance *tb,
tb               2070 fs/reiserfs/fix_node.c 	vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf);
tb               2071 fs/reiserfs/fix_node.c 	vn->vn_free_ptr = (char *)(tb->tb_vn + 1);
tb               2082 fs/reiserfs/fix_node.c 	if (tb->insert_size[h] > 0)
tb               2083 fs/reiserfs/fix_node.c 		return ip_check_balance(tb, h);
tb               2086 fs/reiserfs/fix_node.c 	return dc_check_balance(tb, h);
tb               2090 fs/reiserfs/fix_node.c static int get_direct_parent(struct tree_balance *tb, int h)
tb               2093 fs/reiserfs/fix_node.c 	struct treepath *path = tb->tb_path;
tb               2095 fs/reiserfs/fix_node.c 	    path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
tb               2104 fs/reiserfs/fix_node.c 		    b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
tb               2130 fs/reiserfs/fix_node.c 		int depth = reiserfs_write_unlock_nested(tb->tb_sb);
tb               2132 fs/reiserfs/fix_node.c 		reiserfs_write_lock_nested(tb->tb_sb, depth);
tb               2133 fs/reiserfs/fix_node.c 		if (FILESYSTEM_CHANGED_TB(tb))
tb               2151 fs/reiserfs/fix_node.c static int get_neighbors(struct tree_balance *tb, int h)
tb               2154 fs/reiserfs/fix_node.c 	    path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1);
tb               2156 fs/reiserfs/fix_node.c 	struct super_block *sb = tb->tb_sb;
tb               2162 fs/reiserfs/fix_node.c 	if (tb->lnum[h]) {
tb               2165 fs/reiserfs/fix_node.c 		bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
tb               2167 fs/reiserfs/fix_node.c 		RFALSE(bh == tb->FL[h] &&
tb               2168 fs/reiserfs/fix_node.c 		       !PATH_OFFSET_POSITION(tb->tb_path, path_offset),
tb               2173 fs/reiserfs/fix_node.c 		     tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
tb               2175 fs/reiserfs/fix_node.c 		son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
tb               2176 fs/reiserfs/fix_node.c 		depth = reiserfs_write_unlock_nested(tb->tb_sb);
tb               2178 fs/reiserfs/fix_node.c 		reiserfs_write_lock_nested(tb->tb_sb, depth);
tb               2181 fs/reiserfs/fix_node.c 		if (FILESYSTEM_CHANGED_TB(tb)) {
tb               2187 fs/reiserfs/fix_node.c 		RFALSE(!B_IS_IN_TREE(tb->FL[h]) ||
tb               2188 fs/reiserfs/fix_node.c 		       child_position > B_NR_ITEMS(tb->FL[h]) ||
tb               2189 fs/reiserfs/fix_node.c 		       B_N_CHILD_NUM(tb->FL[h], child_position) !=
tb               2195 fs/reiserfs/fix_node.c 		       dc_size(B_N_CHILD(tb->FL[0], child_position)),
tb               2198 fs/reiserfs/fix_node.c 		brelse(tb->L[h]);
tb               2199 fs/reiserfs/fix_node.c 		tb->L[h] = bh;
tb               2203 fs/reiserfs/fix_node.c 	if (tb->rnum[h]) {
tb               2205 fs/reiserfs/fix_node.c 		bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
tb               2207 fs/reiserfs/fix_node.c 		RFALSE(bh == tb->FR[h] &&
tb               2208 fs/reiserfs/fix_node.c 		       PATH_OFFSET_POSITION(tb->tb_path,
tb               2214 fs/reiserfs/fix_node.c 		    (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
tb               2215 fs/reiserfs/fix_node.c 		son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
tb               2216 fs/reiserfs/fix_node.c 		depth = reiserfs_write_unlock_nested(tb->tb_sb);
tb               2218 fs/reiserfs/fix_node.c 		reiserfs_write_lock_nested(tb->tb_sb, depth);
tb               2221 fs/reiserfs/fix_node.c 		if (FILESYSTEM_CHANGED_TB(tb)) {
tb               2226 fs/reiserfs/fix_node.c 		brelse(tb->R[h]);
tb               2227 fs/reiserfs/fix_node.c 		tb->R[h] = bh;
tb               2232 fs/reiserfs/fix_node.c 		       dc_size(B_N_CHILD(tb->FR[0], child_position)),
tb               2235 fs/reiserfs/fix_node.c 		       dc_size(B_N_CHILD(tb->FR[0], child_position)));
tb               2264 fs/reiserfs/fix_node.c static int get_mem_for_virtual_node(struct tree_balance *tb)
tb               2270 fs/reiserfs/fix_node.c 	size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path));
tb               2273 fs/reiserfs/fix_node.c 	if (size > tb->vn_buf_size) {
tb               2274 fs/reiserfs/fix_node.c 		if (tb->vn_buf) {
tb               2276 fs/reiserfs/fix_node.c 			kfree(tb->vn_buf);
tb               2282 fs/reiserfs/fix_node.c 		tb->vn_buf_size = size;
tb               2293 fs/reiserfs/fix_node.c 			free_buffers_in_tb(tb);
tb               2296 fs/reiserfs/fix_node.c 				tb->vn_buf_size = 0;
tb               2298 fs/reiserfs/fix_node.c 			tb->vn_buf = buf;
tb               2303 fs/reiserfs/fix_node.c 		tb->vn_buf = buf;
tb               2306 fs/reiserfs/fix_node.c 	if (check_fs && FILESYSTEM_CHANGED_TB(tb))
tb               2363 fs/reiserfs/fix_node.c static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
tb               2375 fs/reiserfs/fix_node.c 		for (i = tb->tb_path->path_length;
tb               2377 fs/reiserfs/fix_node.c 			if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
tb               2384 fs/reiserfs/fix_node.c 				if (PATH_PLAST_BUFFER(tb->tb_path) ==
tb               2385 fs/reiserfs/fix_node.c 				    PATH_OFFSET_PBUFFER(tb->tb_path, i))
tb               2386 fs/reiserfs/fix_node.c 					tb_buffer_sanity_check(tb->tb_sb,
tb               2388 fs/reiserfs/fix_node.c 							       (tb->tb_path,
tb               2390 fs/reiserfs/fix_node.c 							       tb->tb_path->
tb               2393 fs/reiserfs/fix_node.c 				if (!clear_all_dirty_bits(tb->tb_sb,
tb               2395 fs/reiserfs/fix_node.c 							  (tb->tb_path,
tb               2398 fs/reiserfs/fix_node.c 					    PATH_OFFSET_PBUFFER(tb->tb_path,
tb               2404 fs/reiserfs/fix_node.c 		for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i];
tb               2407 fs/reiserfs/fix_node.c 			if (tb->lnum[i]) {
tb               2409 fs/reiserfs/fix_node.c 				if (tb->L[i]) {
tb               2410 fs/reiserfs/fix_node.c 					tb_buffer_sanity_check(tb->tb_sb,
tb               2411 fs/reiserfs/fix_node.c 							       tb->L[i],
tb               2414 fs/reiserfs/fix_node.c 					    (tb->tb_sb, tb->L[i]))
tb               2415 fs/reiserfs/fix_node.c 						locked = tb->L[i];
tb               2418 fs/reiserfs/fix_node.c 				if (!locked && tb->FL[i]) {
tb               2419 fs/reiserfs/fix_node.c 					tb_buffer_sanity_check(tb->tb_sb,
tb               2420 fs/reiserfs/fix_node.c 							       tb->FL[i],
tb               2423 fs/reiserfs/fix_node.c 					    (tb->tb_sb, tb->FL[i]))
tb               2424 fs/reiserfs/fix_node.c 						locked = tb->FL[i];
tb               2427 fs/reiserfs/fix_node.c 				if (!locked && tb->CFL[i]) {
tb               2428 fs/reiserfs/fix_node.c 					tb_buffer_sanity_check(tb->tb_sb,
tb               2429 fs/reiserfs/fix_node.c 							       tb->CFL[i],
tb               2432 fs/reiserfs/fix_node.c 					    (tb->tb_sb, tb->CFL[i]))
tb               2433 fs/reiserfs/fix_node.c 						locked = tb->CFL[i];
tb               2438 fs/reiserfs/fix_node.c 			if (!locked && (tb->rnum[i])) {
tb               2440 fs/reiserfs/fix_node.c 				if (tb->R[i]) {
tb               2441 fs/reiserfs/fix_node.c 					tb_buffer_sanity_check(tb->tb_sb,
tb               2442 fs/reiserfs/fix_node.c 							       tb->R[i],
tb               2445 fs/reiserfs/fix_node.c 					    (tb->tb_sb, tb->R[i]))
tb               2446 fs/reiserfs/fix_node.c 						locked = tb->R[i];
tb               2449 fs/reiserfs/fix_node.c 				if (!locked && tb->FR[i]) {
tb               2450 fs/reiserfs/fix_node.c 					tb_buffer_sanity_check(tb->tb_sb,
tb               2451 fs/reiserfs/fix_node.c 							       tb->FR[i],
tb               2454 fs/reiserfs/fix_node.c 					    (tb->tb_sb, tb->FR[i]))
tb               2455 fs/reiserfs/fix_node.c 						locked = tb->FR[i];
tb               2458 fs/reiserfs/fix_node.c 				if (!locked && tb->CFR[i]) {
tb               2459 fs/reiserfs/fix_node.c 					tb_buffer_sanity_check(tb->tb_sb,
tb               2460 fs/reiserfs/fix_node.c 							       tb->CFR[i],
tb               2463 fs/reiserfs/fix_node.c 					    (tb->tb_sb, tb->CFR[i]))
tb               2464 fs/reiserfs/fix_node.c 						locked = tb->CFR[i];
tb               2479 fs/reiserfs/fix_node.c 			if (tb->FEB[i]) {
tb               2481 fs/reiserfs/fix_node.c 				    (tb->tb_sb, tb->FEB[i]))
tb               2482 fs/reiserfs/fix_node.c 					locked = tb->FEB[i];
tb               2491 fs/reiserfs/fix_node.c 				reiserfs_warning(tb->tb_sb, "reiserfs-8200",
tb               2498 fs/reiserfs/fix_node.c 				return (FILESYSTEM_CHANGED_TB(tb)) ?
tb               2502 fs/reiserfs/fix_node.c 			depth = reiserfs_write_unlock_nested(tb->tb_sb);
tb               2504 fs/reiserfs/fix_node.c 			reiserfs_write_lock_nested(tb->tb_sb, depth);
tb               2505 fs/reiserfs/fix_node.c 			if (FILESYSTEM_CHANGED_TB(tb))
tb               2545 fs/reiserfs/fix_node.c int fix_nodes(int op_mode, struct tree_balance *tb,
tb               2548 fs/reiserfs/fix_node.c 	int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
tb               2556 fs/reiserfs/fix_node.c 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
tb               2558 fs/reiserfs/fix_node.c 	++REISERFS_SB(tb->tb_sb)->s_fix_nodes;
tb               2560 fs/reiserfs/fix_node.c 	pos_in_item = tb->tb_path->pos_in_item;
tb               2562 fs/reiserfs/fix_node.c 	tb->fs_gen = get_generation(tb->tb_sb);
tb               2570 fs/reiserfs/fix_node.c 	reiserfs_prepare_for_journal(tb->tb_sb,
tb               2571 fs/reiserfs/fix_node.c 				     SB_BUFFER_WITH_SB(tb->tb_sb), 1);
tb               2572 fs/reiserfs/fix_node.c 	journal_mark_dirty(tb->transaction_handle,
tb               2573 fs/reiserfs/fix_node.c 			   SB_BUFFER_WITH_SB(tb->tb_sb));
tb               2574 fs/reiserfs/fix_node.c 	if (FILESYSTEM_CHANGED_TB(tb))
tb               2579 fs/reiserfs/fix_node.c 		int depth = reiserfs_write_unlock_nested(tb->tb_sb);
tb               2581 fs/reiserfs/fix_node.c 		reiserfs_write_lock_nested(tb->tb_sb, depth);
tb               2582 fs/reiserfs/fix_node.c 		if (FILESYSTEM_CHANGED_TB(tb))
tb               2586 fs/reiserfs/fix_node.c 	if (REISERFS_SB(tb->tb_sb)->cur_tb) {
tb               2588 fs/reiserfs/fix_node.c 		reiserfs_panic(tb->tb_sb, "PAP-8305",
tb               2593 fs/reiserfs/fix_node.c 		reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
tb               2602 fs/reiserfs/fix_node.c 			reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect "
tb               2612 fs/reiserfs/fix_node.c 			reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect "
tb               2616 fs/reiserfs/fix_node.c 				       tb->insert_size[0]);
tb               2620 fs/reiserfs/fix_node.c 		reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode "
tb               2625 fs/reiserfs/fix_node.c 	if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
tb               2630 fs/reiserfs/fix_node.c 	for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) {
tb               2631 fs/reiserfs/fix_node.c 		ret = get_direct_parent(tb, h);
tb               2635 fs/reiserfs/fix_node.c 		ret = check_balance(op_mode, tb, h, item_num,
tb               2640 fs/reiserfs/fix_node.c 				ret = get_neighbors(tb, h);
tb               2644 fs/reiserfs/fix_node.c 					tb->insert_size[h + 1] = 0;
tb               2654 fs/reiserfs/fix_node.c 		ret = get_neighbors(tb, h);
tb               2662 fs/reiserfs/fix_node.c 		ret = get_empty_nodes(tb, h);
tb               2670 fs/reiserfs/fix_node.c 		if (!PATH_H_PBUFFER(tb->tb_path, h)) {
tb               2672 fs/reiserfs/fix_node.c 			RFALSE(tb->blknum[h] != 1,
tb               2676 fs/reiserfs/fix_node.c 				tb->insert_size[h + 1] = 0;
tb               2677 fs/reiserfs/fix_node.c 		} else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
tb               2684 fs/reiserfs/fix_node.c 			if (tb->blknum[h] > 1) {
tb               2689 fs/reiserfs/fix_node.c 				tb->insert_size[h + 1] =
tb               2691 fs/reiserfs/fix_node.c 				     KEY_SIZE) * (tb->blknum[h] - 1) +
tb               2694 fs/reiserfs/fix_node.c 				tb->insert_size[h + 1] = 0;
tb               2696 fs/reiserfs/fix_node.c 			tb->insert_size[h + 1] =
tb               2697 fs/reiserfs/fix_node.c 			    (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1);
tb               2700 fs/reiserfs/fix_node.c 	ret = wait_tb_buffers_until_unlocked(tb);
tb               2702 fs/reiserfs/fix_node.c 		if (FILESYSTEM_CHANGED_TB(tb)) {
tb               2727 fs/reiserfs/fix_node.c 			pathrelse_and_restore(tb->tb_sb, tb->tb_path);
tb               2729 fs/reiserfs/fix_node.c 			pathrelse(tb->tb_path);
tb               2734 fs/reiserfs/fix_node.c 				reiserfs_restore_prepared_buffer(tb->tb_sb,
tb               2735 fs/reiserfs/fix_node.c 								 tb->L[i]);
tb               2736 fs/reiserfs/fix_node.c 				reiserfs_restore_prepared_buffer(tb->tb_sb,
tb               2737 fs/reiserfs/fix_node.c 								 tb->R[i]);
tb               2738 fs/reiserfs/fix_node.c 				reiserfs_restore_prepared_buffer(tb->tb_sb,
tb               2739 fs/reiserfs/fix_node.c 								 tb->FL[i]);
tb               2740 fs/reiserfs/fix_node.c 				reiserfs_restore_prepared_buffer(tb->tb_sb,
tb               2741 fs/reiserfs/fix_node.c 								 tb->FR[i]);
tb               2742 fs/reiserfs/fix_node.c 				reiserfs_restore_prepared_buffer(tb->tb_sb,
tb               2743 fs/reiserfs/fix_node.c 								 tb->
tb               2745 fs/reiserfs/fix_node.c 				reiserfs_restore_prepared_buffer(tb->tb_sb,
tb               2746 fs/reiserfs/fix_node.c 								 tb->
tb               2750 fs/reiserfs/fix_node.c 			brelse(tb->L[i]);
tb               2751 fs/reiserfs/fix_node.c 			brelse(tb->R[i]);
tb               2752 fs/reiserfs/fix_node.c 			brelse(tb->FL[i]);
tb               2753 fs/reiserfs/fix_node.c 			brelse(tb->FR[i]);
tb               2754 fs/reiserfs/fix_node.c 			brelse(tb->CFL[i]);
tb               2755 fs/reiserfs/fix_node.c 			brelse(tb->CFR[i]);
tb               2757 fs/reiserfs/fix_node.c 			tb->L[i] = NULL;
tb               2758 fs/reiserfs/fix_node.c 			tb->R[i] = NULL;
tb               2759 fs/reiserfs/fix_node.c 			tb->FL[i] = NULL;
tb               2760 fs/reiserfs/fix_node.c 			tb->FR[i] = NULL;
tb               2761 fs/reiserfs/fix_node.c 			tb->CFL[i] = NULL;
tb               2762 fs/reiserfs/fix_node.c 			tb->CFR[i] = NULL;
tb               2767 fs/reiserfs/fix_node.c 				if (tb->FEB[i])
tb               2769 fs/reiserfs/fix_node.c 					    (tb->tb_sb, tb->FEB[i]);
tb               2777 fs/reiserfs/fix_node.c void unfix_nodes(struct tree_balance *tb)
tb               2782 fs/reiserfs/fix_node.c 	pathrelse_and_restore(tb->tb_sb, tb->tb_path);
tb               2786 fs/reiserfs/fix_node.c 		reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]);
tb               2787 fs/reiserfs/fix_node.c 		reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]);
tb               2788 fs/reiserfs/fix_node.c 		reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]);
tb               2789 fs/reiserfs/fix_node.c 		reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]);
tb               2790 fs/reiserfs/fix_node.c 		reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFL[i]);
tb               2791 fs/reiserfs/fix_node.c 		reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFR[i]);
tb               2793 fs/reiserfs/fix_node.c 		brelse(tb->L[i]);
tb               2794 fs/reiserfs/fix_node.c 		brelse(tb->R[i]);
tb               2795 fs/reiserfs/fix_node.c 		brelse(tb->FL[i]);
tb               2796 fs/reiserfs/fix_node.c 		brelse(tb->FR[i]);
tb               2797 fs/reiserfs/fix_node.c 		brelse(tb->CFL[i]);
tb               2798 fs/reiserfs/fix_node.c 		brelse(tb->CFR[i]);
tb               2803 fs/reiserfs/fix_node.c 		if (tb->FEB[i]) {
tb               2804 fs/reiserfs/fix_node.c 			b_blocknr_t blocknr = tb->FEB[i]->b_blocknr;
tb               2809 fs/reiserfs/fix_node.c 			brelse(tb->FEB[i]);
tb               2810 fs/reiserfs/fix_node.c 			reiserfs_free_block(tb->transaction_handle, NULL,
tb               2813 fs/reiserfs/fix_node.c 		if (tb->used[i]) {
tb               2815 fs/reiserfs/fix_node.c 			brelse(tb->used[i]);
tb               2819 fs/reiserfs/fix_node.c 	kfree(tb->vn_buf);
tb                 28 fs/reiserfs/ibalance.c 					   struct tree_balance *tb,
tb                 41 fs/reiserfs/ibalance.c 		src_bi->tb = tb;
tb                 42 fs/reiserfs/ibalance.c 		src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
tb                 43 fs/reiserfs/ibalance.c 		src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb                 44 fs/reiserfs/ibalance.c 		src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb                 45 fs/reiserfs/ibalance.c 		dest_bi->tb = tb;
tb                 46 fs/reiserfs/ibalance.c 		dest_bi->bi_bh = tb->L[h];
tb                 47 fs/reiserfs/ibalance.c 		dest_bi->bi_parent = tb->FL[h];
tb                 48 fs/reiserfs/ibalance.c 		dest_bi->bi_position = get_left_neighbor_position(tb, h);
tb                 49 fs/reiserfs/ibalance.c 		*d_key = tb->lkey[h];
tb                 50 fs/reiserfs/ibalance.c 		*cf = tb->CFL[h];
tb                 53 fs/reiserfs/ibalance.c 		src_bi->tb = tb;
tb                 54 fs/reiserfs/ibalance.c 		src_bi->bi_bh = tb->L[h];
tb                 55 fs/reiserfs/ibalance.c 		src_bi->bi_parent = tb->FL[h];
tb                 56 fs/reiserfs/ibalance.c 		src_bi->bi_position = get_left_neighbor_position(tb, h);
tb                 57 fs/reiserfs/ibalance.c 		dest_bi->tb = tb;
tb                 58 fs/reiserfs/ibalance.c 		dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
tb                 59 fs/reiserfs/ibalance.c 		dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb                 61 fs/reiserfs/ibalance.c 		dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb                 62 fs/reiserfs/ibalance.c 		*d_key = tb->lkey[h];
tb                 63 fs/reiserfs/ibalance.c 		*cf = tb->CFL[h];
tb                 68 fs/reiserfs/ibalance.c 		src_bi->tb = tb;
tb                 69 fs/reiserfs/ibalance.c 		src_bi->bi_bh = tb->R[h];
tb                 70 fs/reiserfs/ibalance.c 		src_bi->bi_parent = tb->FR[h];
tb                 71 fs/reiserfs/ibalance.c 		src_bi->bi_position = get_right_neighbor_position(tb, h);
tb                 72 fs/reiserfs/ibalance.c 		dest_bi->tb = tb;
tb                 73 fs/reiserfs/ibalance.c 		dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
tb                 74 fs/reiserfs/ibalance.c 		dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb                 75 fs/reiserfs/ibalance.c 		dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb                 76 fs/reiserfs/ibalance.c 		*d_key = tb->rkey[h];
tb                 77 fs/reiserfs/ibalance.c 		*cf = tb->CFR[h];
tb                 81 fs/reiserfs/ibalance.c 		src_bi->tb = tb;
tb                 82 fs/reiserfs/ibalance.c 		src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
tb                 83 fs/reiserfs/ibalance.c 		src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb                 84 fs/reiserfs/ibalance.c 		src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb                 85 fs/reiserfs/ibalance.c 		dest_bi->tb = tb;
tb                 86 fs/reiserfs/ibalance.c 		dest_bi->bi_bh = tb->R[h];
tb                 87 fs/reiserfs/ibalance.c 		dest_bi->bi_parent = tb->FR[h];
tb                 88 fs/reiserfs/ibalance.c 		dest_bi->bi_position = get_right_neighbor_position(tb, h);
tb                 89 fs/reiserfs/ibalance.c 		*d_key = tb->rkey[h];
tb                 90 fs/reiserfs/ibalance.c 		*cf = tb->CFR[h];
tb                 94 fs/reiserfs/ibalance.c 		dest_bi->tb = tb;
tb                 95 fs/reiserfs/ibalance.c 		dest_bi->bi_bh = tb->L[h];
tb                 96 fs/reiserfs/ibalance.c 		dest_bi->bi_parent = tb->FL[h];
tb                 97 fs/reiserfs/ibalance.c 		dest_bi->bi_position = get_left_neighbor_position(tb, h);
tb                101 fs/reiserfs/ibalance.c 		dest_bi->tb = tb;
tb                102 fs/reiserfs/ibalance.c 		dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
tb                103 fs/reiserfs/ibalance.c 		dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb                104 fs/reiserfs/ibalance.c 		dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb                108 fs/reiserfs/ibalance.c 		dest_bi->tb = tb;
tb                109 fs/reiserfs/ibalance.c 		dest_bi->bi_bh = tb->R[h];
tb                110 fs/reiserfs/ibalance.c 		dest_bi->bi_parent = tb->FR[h];
tb                111 fs/reiserfs/ibalance.c 		dest_bi->bi_position = get_right_neighbor_position(tb, h);
tb                115 fs/reiserfs/ibalance.c 		reiserfs_panic(tb->tb_sb, "ibalance-1",
tb                180 fs/reiserfs/ibalance.c 	do_balance_mark_internal_dirty(cur_bi->tb, cur, 0);
tb                191 fs/reiserfs/ibalance.c 		do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent,
tb                257 fs/reiserfs/ibalance.c 	do_balance_mark_internal_dirty(cur_bi->tb, cur, 0);
tb                268 fs/reiserfs/ibalance.c 		do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent,
tb                365 fs/reiserfs/ibalance.c 	do_balance_mark_internal_dirty(dest_bi->tb, dest, 0);
tb                378 fs/reiserfs/ibalance.c 		do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent,
tb                468 fs/reiserfs/ibalance.c 	do_balance_mark_internal_dirty(dest_bi->tb, dest, 0);
tb                475 fs/reiserfs/ibalance.c 		do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent,
tb                493 fs/reiserfs/ibalance.c 				struct tree_balance *tb,
tb                500 fs/reiserfs/ibalance.c 	internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi,
tb                515 fs/reiserfs/ibalance.c 				replace_key(tb, cf, d_key_position,
tb                519 fs/reiserfs/ibalance.c 			replace_key(tb, cf, d_key_position, src_bi.bi_bh,
tb                534 fs/reiserfs/ibalance.c static void internal_shift1_left(struct tree_balance *tb,
tb                541 fs/reiserfs/ibalance.c 	internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
tb                565 fs/reiserfs/ibalance.c 				 struct tree_balance *tb,
tb                573 fs/reiserfs/ibalance.c 	internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi,
tb                585 fs/reiserfs/ibalance.c 			RFALSE(src_bi.bi_bh != PATH_H_PBUFFER(tb->tb_path, h) /*tb->S[h] */ ||
tb                586 fs/reiserfs/ibalance.c 			       dest_bi.bi_bh != tb->R[h],
tb                588 fs/reiserfs/ibalance.c 			       src_bi.bi_bh, PATH_H_PBUFFER(tb->tb_path, h));
tb                590 fs/reiserfs/ibalance.c 			if (tb->CFL[h])
tb                591 fs/reiserfs/ibalance.c 				replace_key(tb, cf, d_key_position, tb->CFL[h],
tb                592 fs/reiserfs/ibalance.c 					    tb->lkey[h]);
tb                594 fs/reiserfs/ibalance.c 			replace_key(tb, cf, d_key_position, src_bi.bi_bh,
tb                609 fs/reiserfs/ibalance.c static void internal_shift1_right(struct tree_balance *tb,
tb                616 fs/reiserfs/ibalance.c 	internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
tb                632 fs/reiserfs/ibalance.c static void balance_internal_when_delete(struct tree_balance *tb,
tb                637 fs/reiserfs/ibalance.c 	struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
tb                640 fs/reiserfs/ibalance.c 	insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE));
tb                643 fs/reiserfs/ibalance.c 	bi.tb = tb;
tb                645 fs/reiserfs/ibalance.c 	bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb                646 fs/reiserfs/ibalance.c 	bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb                650 fs/reiserfs/ibalance.c 	RFALSE(tb->blknum[h] > 1,
tb                651 fs/reiserfs/ibalance.c 	       "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]);
tb                655 fs/reiserfs/ibalance.c 	if (tb->lnum[h] == 0 && tb->rnum[h] == 0) {
tb                656 fs/reiserfs/ibalance.c 		if (tb->blknum[h] == 0) {
tb                668 fs/reiserfs/ibalance.c 			if (!tb->L[h - 1] || !B_NR_ITEMS(tb->L[h - 1]))
tb                669 fs/reiserfs/ibalance.c 				new_root = tb->R[h - 1];
tb                671 fs/reiserfs/ibalance.c 				new_root = tb->L[h - 1];
tb                675 fs/reiserfs/ibalance.c 			PUT_SB_ROOT_BLOCK(tb->tb_sb, new_root->b_blocknr);
tb                677 fs/reiserfs/ibalance.c 			PUT_SB_TREE_HEIGHT(tb->tb_sb,
tb                678 fs/reiserfs/ibalance.c 					   SB_TREE_HEIGHT(tb->tb_sb) - 1);
tb                680 fs/reiserfs/ibalance.c 			do_balance_mark_sb_dirty(tb,
tb                681 fs/reiserfs/ibalance.c 						 REISERFS_SB(tb->tb_sb)->s_sbh,
tb                690 fs/reiserfs/ibalance.c 			reiserfs_invalidate_buffer(tb, tbSh);
tb                697 fs/reiserfs/ibalance.c 	if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) {
tb                699 fs/reiserfs/ibalance.c 		RFALSE(tb->rnum[h] != 0,
tb                701 fs/reiserfs/ibalance.c 		       h, tb->rnum[h]);
tb                703 fs/reiserfs/ibalance.c 		internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1);
tb                704 fs/reiserfs/ibalance.c 		reiserfs_invalidate_buffer(tb, tbSh);
tb                710 fs/reiserfs/ibalance.c 	if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) {
tb                711 fs/reiserfs/ibalance.c 		RFALSE(tb->lnum[h] != 0,
tb                713 fs/reiserfs/ibalance.c 		       h, tb->lnum[h]);
tb                715 fs/reiserfs/ibalance.c 		internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1);
tb                717 fs/reiserfs/ibalance.c 		reiserfs_invalidate_buffer(tb, tbSh);
tb                722 fs/reiserfs/ibalance.c 	if (tb->lnum[h] < 0) {
tb                723 fs/reiserfs/ibalance.c 		RFALSE(tb->rnum[h] != 0,
tb                725 fs/reiserfs/ibalance.c 		       tb->rnum[h]);
tb                726 fs/reiserfs/ibalance.c 		internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h,
tb                727 fs/reiserfs/ibalance.c 				     -tb->lnum[h]);
tb                732 fs/reiserfs/ibalance.c 	if (tb->rnum[h] < 0) {
tb                733 fs/reiserfs/ibalance.c 		RFALSE(tb->lnum[h] != 0,
tb                735 fs/reiserfs/ibalance.c 		       h, tb->lnum[h]);
tb                736 fs/reiserfs/ibalance.c 		internal_shift_left(INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]);	/*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]); */
tb                741 fs/reiserfs/ibalance.c 	if (tb->lnum[h] > 0) {
tb                742 fs/reiserfs/ibalance.c 		RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
tb                744 fs/reiserfs/ibalance.c 		       h, tb->lnum[h], h, tb->rnum[h], n);
tb                746 fs/reiserfs/ibalance.c 		internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);	/*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]); */
tb                747 fs/reiserfs/ibalance.c 		internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
tb                748 fs/reiserfs/ibalance.c 				     tb->rnum[h]);
tb                750 fs/reiserfs/ibalance.c 		reiserfs_invalidate_buffer(tb, tbSh);
tb                754 fs/reiserfs/ibalance.c 	reiserfs_panic(tb->tb_sb, "ibalance-2",
tb                756 fs/reiserfs/ibalance.c 		       h, tb->lnum[h], h, tb->rnum[h]);
tb                760 fs/reiserfs/ibalance.c static void replace_lkey(struct tree_balance *tb, int h, struct item_head *key)
tb                762 fs/reiserfs/ibalance.c 	RFALSE(tb->L[h] == NULL || tb->CFL[h] == NULL,
tb                764 fs/reiserfs/ibalance.c 	       tb->L[h], tb->CFL[h]);
tb                766 fs/reiserfs/ibalance.c 	if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0)
tb                769 fs/reiserfs/ibalance.c 	memcpy(internal_key(tb->CFL[h], tb->lkey[h]), key, KEY_SIZE);
tb                771 fs/reiserfs/ibalance.c 	do_balance_mark_internal_dirty(tb, tb->CFL[h], 0);
tb                775 fs/reiserfs/ibalance.c static void replace_rkey(struct tree_balance *tb, int h, struct item_head *key)
tb                777 fs/reiserfs/ibalance.c 	RFALSE(tb->R[h] == NULL || tb->CFR[h] == NULL,
tb                779 fs/reiserfs/ibalance.c 	       tb->R[h], tb->CFR[h]);
tb                780 fs/reiserfs/ibalance.c 	RFALSE(B_NR_ITEMS(tb->R[h]) == 0,
tb                782 fs/reiserfs/ibalance.c 	       B_NR_ITEMS(tb->R[h]));
tb                784 fs/reiserfs/ibalance.c 	memcpy(internal_key(tb->CFR[h], tb->rkey[h]), key, KEY_SIZE);
tb                786 fs/reiserfs/ibalance.c 	do_balance_mark_internal_dirty(tb, tb->CFR[h], 0);
tb                803 fs/reiserfs/ibalance.c int balance_internal(struct tree_balance *tb,
tb                811 fs/reiserfs/ibalance.c 	struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
tb                827 fs/reiserfs/ibalance.c 	PROC_INFO_INC(tb->tb_sb, balance_at[h]);
tb                830 fs/reiserfs/ibalance.c 	    (tbSh) ? PATH_H_POSITION(tb->tb_path,
tb                837 fs/reiserfs/ibalance.c 	insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));
tb                849 fs/reiserfs/ibalance.c 		balance_internal_when_delete(tb, h, child_pos);
tb                854 fs/reiserfs/ibalance.c 	if (tb->lnum[h] > 0) {
tb                860 fs/reiserfs/ibalance.c 		n = B_NR_ITEMS(tb->L[h]);	/* number of items in L[h] */
tb                861 fs/reiserfs/ibalance.c 		if (tb->lnum[h] <= child_pos) {
tb                863 fs/reiserfs/ibalance.c 			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
tb                864 fs/reiserfs/ibalance.c 					    tb->lnum[h]);
tb                865 fs/reiserfs/ibalance.c 			child_pos -= tb->lnum[h];
tb                866 fs/reiserfs/ibalance.c 		} else if (tb->lnum[h] > child_pos + insert_num) {
tb                868 fs/reiserfs/ibalance.c 			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
tb                869 fs/reiserfs/ibalance.c 					    tb->lnum[h] - insert_num);
tb                871 fs/reiserfs/ibalance.c 			bi.tb = tb;
tb                872 fs/reiserfs/ibalance.c 			bi.bi_bh = tb->L[h];
tb                873 fs/reiserfs/ibalance.c 			bi.bi_parent = tb->FL[h];
tb                874 fs/reiserfs/ibalance.c 			bi.bi_position = get_left_neighbor_position(tb, h);
tb                889 fs/reiserfs/ibalance.c 			internal_shift1_left(tb, h, child_pos + 1);
tb                891 fs/reiserfs/ibalance.c 			k = tb->lnum[h] - child_pos - 1;
tb                892 fs/reiserfs/ibalance.c 			bi.tb = tb;
tb                893 fs/reiserfs/ibalance.c 			bi.bi_bh = tb->L[h];
tb                894 fs/reiserfs/ibalance.c 			bi.bi_parent = tb->FL[h];
tb                895 fs/reiserfs/ibalance.c 			bi.bi_position = get_left_neighbor_position(tb, h);
tb                901 fs/reiserfs/ibalance.c 			replace_lkey(tb, h, insert_key + k);
tb                913 fs/reiserfs/ibalance.c 			do_balance_mark_internal_dirty(tb, tbSh, 0);
tb                923 fs/reiserfs/ibalance.c 	if (tb->rnum[h] > 0) {
tb                930 fs/reiserfs/ibalance.c 		if (n - tb->rnum[h] >= child_pos)
tb                932 fs/reiserfs/ibalance.c 			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
tb                933 fs/reiserfs/ibalance.c 					     tb->rnum[h]);
tb                934 fs/reiserfs/ibalance.c 		else if (n + insert_num - tb->rnum[h] < child_pos) {
tb                936 fs/reiserfs/ibalance.c 			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
tb                937 fs/reiserfs/ibalance.c 					     tb->rnum[h] - insert_num);
tb                940 fs/reiserfs/ibalance.c 			bi.tb = tb;
tb                941 fs/reiserfs/ibalance.c 			bi.bi_bh = tb->R[h];
tb                942 fs/reiserfs/ibalance.c 			bi.bi_parent = tb->FR[h];
tb                943 fs/reiserfs/ibalance.c 			bi.bi_position = get_right_neighbor_position(tb, h);
tb                947 fs/reiserfs/ibalance.c 					       tb->rnum[h] - 1,
tb                955 fs/reiserfs/ibalance.c 			internal_shift1_right(tb, h, n - child_pos + 1);
tb                957 fs/reiserfs/ibalance.c 			k = tb->rnum[h] - n + child_pos - 1;
tb                958 fs/reiserfs/ibalance.c 			bi.tb = tb;
tb                959 fs/reiserfs/ibalance.c 			bi.bi_bh = tb->R[h];
tb                960 fs/reiserfs/ibalance.c 			bi.bi_parent = tb->FR[h];
tb                961 fs/reiserfs/ibalance.c 			bi.bi_position = get_right_neighbor_position(tb, h);
tb                967 fs/reiserfs/ibalance.c 			replace_rkey(tb, h, insert_key + insert_num - k - 1);
tb                973 fs/reiserfs/ibalance.c 			dc = B_N_CHILD(tb->R[h], 0);
tb                983 fs/reiserfs/ibalance.c 			do_balance_mark_internal_dirty(tb, tb->R[h], 0);
tb                990 fs/reiserfs/ibalance.c 	RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
tb                991 fs/reiserfs/ibalance.c 	RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");
tb                993 fs/reiserfs/ibalance.c 	if (!tb->blknum[h]) {	/* node S[h] is empty now */
tb                997 fs/reiserfs/ibalance.c 		reiserfs_invalidate_buffer(tb, tbSh);
tb               1004 fs/reiserfs/ibalance.c 		struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1);
tb               1007 fs/reiserfs/ibalance.c 		if (tb->blknum[h] != 1)
tb               1011 fs/reiserfs/ibalance.c 		tbSh = get_FEB(tb);
tb               1022 fs/reiserfs/ibalance.c 		tb->insert_size[h] -= DC_SIZE;
tb               1025 fs/reiserfs/ibalance.c 		do_balance_mark_internal_dirty(tb, tbSh, 0);
tb               1032 fs/reiserfs/ibalance.c 		PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) =
tb               1036 fs/reiserfs/ibalance.c 		PUT_SB_ROOT_BLOCK(tb->tb_sb, tbSh->b_blocknr);
tb               1037 fs/reiserfs/ibalance.c 		PUT_SB_TREE_HEIGHT(tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1);
tb               1038 fs/reiserfs/ibalance.c 		do_balance_mark_sb_dirty(tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
tb               1041 fs/reiserfs/ibalance.c 	if (tb->blknum[h] == 2) {
tb               1046 fs/reiserfs/ibalance.c 		S_new = get_FEB(tb);
tb               1050 fs/reiserfs/ibalance.c 		dest_bi.tb = tb;
tb               1054 fs/reiserfs/ibalance.c 		src_bi.tb = tb;
tb               1056 fs/reiserfs/ibalance.c 		src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb               1057 fs/reiserfs/ibalance.c 		src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb               1129 fs/reiserfs/ibalance.c 			do_balance_mark_internal_dirty(tb, S_new, 0);
tb               1146 fs/reiserfs/ibalance.c 		bi.tb = tb;
tb               1148 fs/reiserfs/ibalance.c 		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
tb               1149 fs/reiserfs/ibalance.c 		bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
tb                392 fs/reiserfs/lbalance.c 	do_balance_mark_leaf_dirty(dest_bi->tb, dest, 0);
tb                405 fs/reiserfs/lbalance.c 		do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent,
tb                629 fs/reiserfs/lbalance.c static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb,
tb                641 fs/reiserfs/lbalance.c 		src_bi->tb = tb;
tb                642 fs/reiserfs/lbalance.c 		src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
tb                643 fs/reiserfs/lbalance.c 		src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
tb                646 fs/reiserfs/lbalance.c 		src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);
tb                647 fs/reiserfs/lbalance.c 		dest_bi->tb = tb;
tb                648 fs/reiserfs/lbalance.c 		dest_bi->bi_bh = tb->L[0];
tb                649 fs/reiserfs/lbalance.c 		dest_bi->bi_parent = tb->FL[0];
tb                650 fs/reiserfs/lbalance.c 		dest_bi->bi_position = get_left_neighbor_position(tb, 0);
tb                655 fs/reiserfs/lbalance.c 		src_bi->tb = tb;
tb                656 fs/reiserfs/lbalance.c 		src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
tb                657 fs/reiserfs/lbalance.c 		src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
tb                658 fs/reiserfs/lbalance.c 		src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);
tb                659 fs/reiserfs/lbalance.c 		dest_bi->tb = tb;
tb                660 fs/reiserfs/lbalance.c 		dest_bi->bi_bh = tb->R[0];
tb                661 fs/reiserfs/lbalance.c 		dest_bi->bi_parent = tb->FR[0];
tb                662 fs/reiserfs/lbalance.c 		dest_bi->bi_position = get_right_neighbor_position(tb, 0);
tb                667 fs/reiserfs/lbalance.c 		src_bi->tb = tb;
tb                668 fs/reiserfs/lbalance.c 		src_bi->bi_bh = tb->R[0];
tb                669 fs/reiserfs/lbalance.c 		src_bi->bi_parent = tb->FR[0];
tb                670 fs/reiserfs/lbalance.c 		src_bi->bi_position = get_right_neighbor_position(tb, 0);
tb                671 fs/reiserfs/lbalance.c 		dest_bi->tb = tb;
tb                672 fs/reiserfs/lbalance.c 		dest_bi->bi_bh = tb->L[0];
tb                673 fs/reiserfs/lbalance.c 		dest_bi->bi_parent = tb->FL[0];
tb                674 fs/reiserfs/lbalance.c 		dest_bi->bi_position = get_left_neighbor_position(tb, 0);
tb                679 fs/reiserfs/lbalance.c 		src_bi->tb = tb;
tb                680 fs/reiserfs/lbalance.c 		src_bi->bi_bh = tb->L[0];
tb                681 fs/reiserfs/lbalance.c 		src_bi->bi_parent = tb->FL[0];
tb                682 fs/reiserfs/lbalance.c 		src_bi->bi_position = get_left_neighbor_position(tb, 0);
tb                683 fs/reiserfs/lbalance.c 		dest_bi->tb = tb;
tb                684 fs/reiserfs/lbalance.c 		dest_bi->bi_bh = tb->R[0];
tb                685 fs/reiserfs/lbalance.c 		dest_bi->bi_parent = tb->FR[0];
tb                686 fs/reiserfs/lbalance.c 		dest_bi->bi_position = get_right_neighbor_position(tb, 0);
tb                691 fs/reiserfs/lbalance.c 		src_bi->tb = tb;
tb                692 fs/reiserfs/lbalance.c 		src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
tb                693 fs/reiserfs/lbalance.c 		src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
tb                694 fs/reiserfs/lbalance.c 		src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);
tb                695 fs/reiserfs/lbalance.c 		dest_bi->tb = tb;
tb                715 fs/reiserfs/lbalance.c int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
tb                722 fs/reiserfs/lbalance.c 	leaf_define_dest_src_infos(shift_mode, tb, &dest_bi, &src_bi,
tb                741 fs/reiserfs/lbalance.c int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes)
tb                743 fs/reiserfs/lbalance.c 	struct buffer_head *S0 = PATH_PLAST_BUFFER(tb->tb_path);
tb                750 fs/reiserfs/lbalance.c 	i = leaf_move_items(LEAF_FROM_S_TO_L, tb, shift_num, shift_bytes, NULL);
tb                760 fs/reiserfs/lbalance.c 			if (tb->tb_mode == M_PASTE || tb->tb_mode == M_INSERT) {
tb                762 fs/reiserfs/lbalance.c 				reiserfs_panic(tb->tb_sb, "vs-10275",
tb                764 fs/reiserfs/lbalance.c 					       "(%c)", tb->tb_mode);
tb                768 fs/reiserfs/lbalance.c 			if (PATH_H_POSITION(tb->tb_path, 1) == 0)
tb                769 fs/reiserfs/lbalance.c 				replace_key(tb, tb->CFL[0], tb->lkey[0],
tb                770 fs/reiserfs/lbalance.c 					    PATH_H_PPARENT(tb->tb_path, 0), 0);
tb                774 fs/reiserfs/lbalance.c 			replace_key(tb, tb->CFL[0], tb->lkey[0], S0, 0);
tb                794 fs/reiserfs/lbalance.c int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
tb                803 fs/reiserfs/lbalance.c 	    leaf_move_items(LEAF_FROM_S_TO_R, tb, shift_num, shift_bytes, NULL);
tb                807 fs/reiserfs/lbalance.c 		replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
tb                847 fs/reiserfs/lbalance.c 		do_balance_mark_leaf_dirty(cur_bi->tb, bh, 0);
tb                962 fs/reiserfs/lbalance.c 	do_balance_mark_leaf_dirty(bi->tb, bh, 1);
tb                970 fs/reiserfs/lbalance.c 		do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
tb               1001 fs/reiserfs/lbalance.c 		if (bi && bi->tb)
tb               1002 fs/reiserfs/lbalance.c 			sb = bi->tb->tb_sb;
tb               1056 fs/reiserfs/lbalance.c 	do_balance_mark_leaf_dirty(bi->tb, bh, 0);
tb               1062 fs/reiserfs/lbalance.c 		do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
tb               1229 fs/reiserfs/lbalance.c 	do_balance_mark_leaf_dirty(bi->tb, bh, 0);
tb               1235 fs/reiserfs/lbalance.c 		do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
tb               1267 fs/reiserfs/lbalance.c 		do_balance_mark_leaf_dirty(bi->tb, bh, 0);
tb               1298 fs/reiserfs/lbalance.c 	do_balance_mark_leaf_dirty(bi->tb, bh, 0);
tb               1306 fs/reiserfs/lbalance.c 		do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
tb                640 fs/reiserfs/prints.c void store_print_tb(struct tree_balance *tb)
tb                646 fs/reiserfs/prints.c 	if (!tb)
tb                654 fs/reiserfs/prints.c 		REISERFS_SB(tb->tb_sb)->s_do_balance,
tb                655 fs/reiserfs/prints.c 		tb->tb_mode, PATH_LAST_POSITION(tb->tb_path),
tb                656 fs/reiserfs/prints.c 		tb->tb_path->pos_in_item);
tb                658 fs/reiserfs/prints.c 	for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {
tb                659 fs/reiserfs/prints.c 		if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=
tb                660 fs/reiserfs/prints.c 		    tb->tb_path->path_length
tb                661 fs/reiserfs/prints.c 		    && PATH_H_PATH_OFFSET(tb->tb_path,
tb                663 fs/reiserfs/prints.c 			tbSh = PATH_H_PBUFFER(tb->tb_path, h);
tb                664 fs/reiserfs/prints.c 			tbFh = PATH_H_PPARENT(tb->tb_path, h);
tb                674 fs/reiserfs/prints.c 			(tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL),
tb                675 fs/reiserfs/prints.c 			(tb->L[h]) ? atomic_read(&tb->L[h]->b_count) : -1,
tb                676 fs/reiserfs/prints.c 			(tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL),
tb                677 fs/reiserfs/prints.c 			(tb->R[h]) ? atomic_read(&tb->R[h]->b_count) : -1,
tb                679 fs/reiserfs/prints.c 			(tb->FL[h]) ? (long long)(tb->FL[h]->
tb                681 fs/reiserfs/prints.c 			(tb->FR[h]) ? (long long)(tb->FR[h]->
tb                683 fs/reiserfs/prints.c 			(tb->CFL[h]) ? (long long)(tb->CFL[h]->
tb                685 fs/reiserfs/prints.c 			(tb->CFR[h]) ? (long long)(tb->CFR[h]->
tb                693 fs/reiserfs/prints.c 		tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],
tb                694 fs/reiserfs/prints.c 		tb->rbytes, tb->blknum[0], tb->s0num, tb->snum[0],
tb                695 fs/reiserfs/prints.c 		tb->sbytes[0], tb->snum[1], tb->sbytes[1],
tb                696 fs/reiserfs/prints.c 		tb->cur_blknum, tb->lkey[0], tb->rkey[0]);
tb                704 fs/reiserfs/prints.c 			h, tb->insert_size[h], tb->lnum[h], tb->rnum[h],
tb                705 fs/reiserfs/prints.c 			tb->blknum[h]);
tb                706 fs/reiserfs/prints.c 	} while (tb->insert_size[h]);
tb                714 fs/reiserfs/prints.c 	for (i = 0; i < ARRAY_SIZE(tb->FEB); i++)
tb                716 fs/reiserfs/prints.c 			"%p (%llu %d)%s", tb->FEB[i],
tb                717 fs/reiserfs/prints.c 			tb->FEB[i] ? (unsigned long long)tb->FEB[i]->
tb                719 fs/reiserfs/prints.c 			tb->FEB[i] ? atomic_read(&tb->FEB[i]->b_count) : 0,
tb                720 fs/reiserfs/prints.c 			(i == ARRAY_SIZE(tb->FEB) - 1) ? "\n" : ", ");
tb               2306 fs/reiserfs/reiserfs.h #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
tb               2592 fs/reiserfs/reiserfs.h 	struct tree_balance *tb;
tb               2598 fs/reiserfs/reiserfs.h static inline struct super_block *sb_from_tb(struct tree_balance *tb)
tb               2600 fs/reiserfs/reiserfs.h 	return tb ? tb->tb_sb : NULL;
tb               2605 fs/reiserfs/reiserfs.h 	return bi ? sb_from_tb(bi->tb) : NULL;
tb               3184 fs/reiserfs/reiserfs.h int fix_nodes(int n_op_mode, struct tree_balance *tb,
tb               3201 fs/reiserfs/reiserfs.h void store_print_tb(struct tree_balance *tb);
tb               3220 fs/reiserfs/reiserfs.h int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
tb               3222 fs/reiserfs/reiserfs.h int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes);
tb               3223 fs/reiserfs/reiserfs.h int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
tb               3243 fs/reiserfs/reiserfs.h void do_balance_mark_leaf_dirty(struct tree_balance *tb,
tb               3248 fs/reiserfs/reiserfs.h void do_balance(struct tree_balance *tb, struct item_head *ih,
tb               3250 fs/reiserfs/reiserfs.h void reiserfs_invalidate_buffer(struct tree_balance *tb,
tb               3253 fs/reiserfs/reiserfs.h int get_left_neighbor_position(struct tree_balance *tb, int h);
tb               3254 fs/reiserfs/reiserfs.h int get_right_neighbor_position(struct tree_balance *tb, int h);
tb               3255 fs/reiserfs/reiserfs.h void replace_key(struct tree_balance *tb, struct buffer_head *, int,
tb               3330 fs/reiserfs/reiserfs.h static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
tb               3335 fs/reiserfs/reiserfs.h 		.th = tb->transaction_handle,
tb               3336 fs/reiserfs/reiserfs.h 		.path = tb->tb_path,
tb               3338 fs/reiserfs/reiserfs.h 		.key = tb->key,
tb               1143 fs/reiserfs/stree.c static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
tb               1146 fs/reiserfs/stree.c 	struct item_head *p_le_ih = tp_item_head(tb->tb_path);
tb               1153 fs/reiserfs/stree.c 	     M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];
tb               1165 fs/reiserfs/stree.c 				(PATH_PLAST_BUFFER(tb->tb_path)->b_size);
tb               1170 fs/reiserfs/stree.c 			   struct tree_balance *tb,
tb               1177 fs/reiserfs/stree.c 	memset(tb, '\0', sizeof(struct tree_balance));
tb               1178 fs/reiserfs/stree.c 	tb->transaction_handle = th;
tb               1179 fs/reiserfs/stree.c 	tb->tb_sb = sb;
tb               1180 fs/reiserfs/stree.c 	tb->tb_path = path;
tb               1183 fs/reiserfs/stree.c 	tb->insert_size[0] = size;
tb               1387 fs/reiserfs/stree.c 	struct tree_balance tb;
tb               1429 fs/reiserfs/stree.c 			init_tb_struct(th, &tb, th->t_super, &path,
tb               1434 fs/reiserfs/stree.c 		retval = fix_nodes(M_DELETE, &tb, NULL, NULL);
tb               1441 fs/reiserfs/stree.c 			do_balance(&tb, NULL, NULL, M_DELETE);
tb               1466 fs/reiserfs/stree.c 		unfix_nodes(&tb);
tb               2432 fs/unicode/mkutf8data.c 	unsigned int tb = 0x11a7;
tb               2458 fs/unicode/mkutf8data.c 			mapping[i++] = tb + ti;
tb                 60 include/crypto/algapi.h 	struct crypto_instance *(*alloc)(struct rtattr **tb);
tb                 62 include/crypto/algapi.h 	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
tb                169 include/crypto/algapi.h struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
tb                170 include/crypto/algapi.h int crypto_check_attr_type(struct rtattr **tb, u32 type);
tb                373 include/crypto/algapi.h static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
tb                376 include/crypto/algapi.h 	return crypto_attr_alg(tb[1], type, mask);
tb                 23 include/crypto/internal/geniv.h 				       struct rtattr **tb, u32 type, u32 mask);
tb                250 include/crypto/internal/skcipher.h skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
tb                 25 include/keys/trusted.h #define INIT_BUF(tb) (tb->len = 0)
tb                 49 include/keys/trusted.h int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
tb                 60 include/linux/if_macvlan.h 				  struct nlattr *tb[], struct nlattr *data[],
tb               1376 include/linux/netdevice.h 					       struct nlattr *tb[],
tb               1383 include/linux/netdevice.h 					       struct nlattr *tb[],
tb               1393 include/linux/netdevice.h 					       struct nlattr *tb[],
tb                165 include/linux/netfilter/ipset/ip_set.h 	int (*uadt)(struct ip_set *set, struct nlattr *tb[],
tb                223 include/linux/netfilter/ipset/ip_set.h 		      struct nlattr *tb[], u32 flags);
tb                344 include/linux/netfilter/ipset/ip_set.h extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
tb                346 include/linux/netfilter/ipset/ip_set.h extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
tb                386 include/linux/netfilter/ipset/ip_set.h ip_set_attr_netorder(struct nlattr *tb[], int type)
tb                388 include/linux/netfilter/ipset/ip_set.h 	return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
tb                392 include/linux/netfilter/ipset/ip_set.h ip_set_optattr_netorder(struct nlattr *tb[], int type)
tb                394 include/linux/netfilter/ipset/ip_set.h 	return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
tb                477 include/linux/netfilter/ipset/ip_set.h ip_set_timeout_uget(struct nlattr *tb)
tb                479 include/linux/netfilter/ipset/ip_set.h 	unsigned int timeout = ip_set_get_h32(tb);
tb                525 include/linux/netfilter/ipset/ip_set.h ip_set_comment_uget(struct nlattr *tb)
tb                527 include/linux/netfilter/ipset/ip_set.h 	return nla_data(tb);
tb                119 include/linux/rtnetlink.h 			    struct nlattr *tb[],
tb                125 include/linux/rtnetlink.h 			    struct nlattr *tb[],
tb                217 include/linux/thunderbolt.h 	struct tb *tb;
tb                250 include/linux/thunderbolt.h struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
tb                251 include/linux/thunderbolt.h struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
tb                254 include/linux/thunderbolt.h tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
tb                258 include/linux/thunderbolt.h 	mutex_lock(&tb->lock);
tb                259 include/linux/thunderbolt.h 	xd = tb_xdomain_find_by_uuid(tb, uuid);
tb                260 include/linux/thunderbolt.h 	mutex_unlock(&tb->lock);
tb                266 include/linux/thunderbolt.h tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
tb                270 include/linux/thunderbolt.h 	mutex_lock(&tb->lock);
tb                271 include/linux/thunderbolt.h 	xd = tb_xdomain_find_by_route(tb, route);
tb                272 include/linux/thunderbolt.h 	mutex_unlock(&tb->lock);
tb                 21 include/linux/tty_flip.h 	struct tty_buffer *tb = port->buf.tail;
tb                 24 include/linux/tty_flip.h 	change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL);
tb                 25 include/linux/tty_flip.h 	if (!change && tb->used < tb->size) {
tb                 26 include/linux/tty_flip.h 		if (~tb->flags & TTYB_NORMAL)
tb                 27 include/linux/tty_flip.h 			*flag_buf_ptr(tb, tb->used) = flag;
tb                 28 include/linux/tty_flip.h 		*char_buf_ptr(tb, tb->used++) = ch;
tb                185 include/net/genetlink.h 					   struct nlattr *tb[], int maxtype,
tb                189 include/net/genetlink.h 	return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
tb                204 include/net/genetlink.h 				struct nlattr *tb[], int maxtype,
tb                208 include/net/genetlink.h 	return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
tb                 98 include/net/inet_hashtables.h #define inet_bind_bucket_for_each(tb, head) \
tb                 99 include/net/inet_hashtables.h 	hlist_for_each_entry(tb, head, node)
tb                216 include/net/inet_hashtables.h 			      struct inet_bind_bucket *tb);
tb                224 include/net/inet_hashtables.h void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
tb                255 include/net/ip_fib.h int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
tb                266 include/net/ip_fib.h void fib_free_table(struct fib_table *tb);
tb                295 include/net/ip_fib.h 	struct fib_table *tb;
tb                300 include/net/ip_fib.h 	tb = fib_get_table(net, RT_TABLE_MAIN);
tb                301 include/net/ip_fib.h 	if (tb)
tb                302 include/net/ip_fib.h 		err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);
tb                347 include/net/ip_fib.h 	struct fib_table *tb;
tb                358 include/net/ip_fib.h 	tb = rcu_dereference_rtnl(net->ipv4.fib_main);
tb                359 include/net/ip_fib.h 	if (tb)
tb                360 include/net/ip_fib.h 		err = fib_table_lookup(tb, flp, res, flags);
tb                365 include/net/ip_fib.h 	tb = rcu_dereference_rtnl(net->ipv4.fib_default);
tb                366 include/net/ip_fib.h 	if (tb)
tb                367 include/net/ip_fib.h 		err = fib_table_lookup(tb, flp, res, flags);
tb                286 include/net/ip_tunnels.h int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
tb                288 include/net/ip_tunnels.h int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
tb                 38 include/net/netfilter/nf_conntrack_l4proto.h 	int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
tb                 44 include/net/netfilter/nf_conntrack_l4proto.h 	int (*nlattr_to_tuple)(struct nlattr *tb[],
tb                 49 include/net/netfilter/nf_conntrack_l4proto.h 		int (*nlattr_to_obj)(struct nlattr *tb[],
tb                154 include/net/netfilter/nf_conntrack_l4proto.h int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
tb                736 include/net/netfilter/nf_tables.h 						       const struct nlattr * const tb[]);
tb                786 include/net/netfilter/nf_tables.h 						const struct nlattr * const tb[]);
tb               1110 include/net/netfilter/nf_tables.h 						       const struct nlattr * const tb[]);
tb               1135 include/net/netfilter/nf_tables.h 						const struct nlattr *const tb[],
tb                 23 include/net/netfilter/nft_fib.h 		 const struct nlattr * const tb[]);
tb                 19 include/net/netfilter/nft_meta.h 		      const struct nlattr * const tb[]);
tb                 23 include/net/netfilter/nft_meta.h 		      const struct nlattr * const tb[]);
tb                 23 include/net/netfilter/nft_reject.h 		    const struct nlattr * const tb[]);
tb                437 include/net/netlink.h int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
tb                583 include/net/netlink.h static inline int nla_parse(struct nlattr **tb, int maxtype,
tb                588 include/net/netlink.h 	return __nla_parse(tb, maxtype, head, len, policy,
tb                608 include/net/netlink.h static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
tb                613 include/net/netlink.h 	return __nla_parse(tb, maxtype, head, len, policy,
tb                633 include/net/netlink.h static inline int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype,
tb                639 include/net/netlink.h 	return __nla_parse(tb, maxtype, head, len, policy,
tb                656 include/net/netlink.h 				struct nlattr *tb[], int maxtype,
tb                666 include/net/netlink.h 	return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
tb                683 include/net/netlink.h 			      struct nlattr *tb[], int maxtype,
tb                687 include/net/netlink.h 	return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
tb                702 include/net/netlink.h 					 struct nlattr *tb[], int maxtype,
tb                706 include/net/netlink.h 	return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
tb                722 include/net/netlink.h 			      struct nlattr *tb[], int maxtype,
tb                726 include/net/netlink.h 	return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
tb               1137 include/net/netlink.h static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
tb               1147 include/net/netlink.h 	return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
tb               1161 include/net/netlink.h static inline int nla_parse_nested_deprecated(struct nlattr *tb[], int maxtype,
tb               1166 include/net/netlink.h 	return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
tb                306 include/net/pkt_cls.h 		      struct nlattr **tb, struct nlattr *rate_tlv,
tb                455 include/net/pkt_cls.h #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
tb                 69 include/net/rtnetlink.h 	int			(*validate)(struct nlattr *tb[],
tb                 75 include/net/rtnetlink.h 					   struct nlattr *tb[],
tb                 79 include/net/rtnetlink.h 					      struct nlattr *tb[],
tb                 99 include/net/rtnetlink.h 						    struct nlattr *tb[],
tb                158 include/net/rtnetlink.h struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
tb                162 include/net/rtnetlink.h 				    struct nlattr *tb[],
tb                167 include/net/rtnetlink.h int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
tb                968 kernel/rcu/tree_plugin.h 	struct list_head *tb;
tb                992 kernel/rcu/tree_plugin.h 		tb = rnp->exp_tasks;
tb                994 kernel/rcu/tree_plugin.h 		tb = rnp->boost_tasks;
tb               1012 kernel/rcu/tree_plugin.h 	t = container_of(tb, struct task_struct, rcu_node_entry);
tb                 41 lib/crypto/arc4.c 	u32 ty, ta, tb;
tb                 60 lib/crypto/arc4.c 		tb = S[ty];
tb                 66 lib/crypto/arc4.c 		b = tb;
tb                361 lib/nlattr.c   				struct nlattr **tb)
tb                366 lib/nlattr.c   	if (tb)
tb                367 lib/nlattr.c   		memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
tb                388 lib/nlattr.c   		if (tb)
tb                389 lib/nlattr.c   			tb[type] = (struct nlattr *)nla;
tb                473 lib/nlattr.c   int __nla_parse(struct nlattr **tb, int maxtype,
tb                479 lib/nlattr.c   				    extack, tb);
tb                 39 net/8021q/vlan_netlink.c static int vlan_validate(struct nlattr *tb[], struct nlattr *data[],
tb                 46 net/8021q/vlan_netlink.c 	if (tb[IFLA_ADDRESS]) {
tb                 47 net/8021q/vlan_netlink.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
tb                 51 net/8021q/vlan_netlink.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
tb                104 net/8021q/vlan_netlink.c static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
tb                137 net/8021q/vlan_netlink.c 			struct nlattr *tb[], struct nlattr *data[],
tb                151 net/8021q/vlan_netlink.c 	if (!tb[IFLA_LINK]) {
tb                156 net/8021q/vlan_netlink.c 	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
tb                180 net/8021q/vlan_netlink.c 	if (!tb[IFLA_MTU])
tb                185 net/8021q/vlan_netlink.c 	err = vlan_changelink(dev, tb, data, extack);
tb                772 net/bridge/br_fdb.c 	       struct nlattr *tb[],
tb                911 net/bridge/br_fdb.c int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb               1008 net/bridge/br_fdb.c int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
tb                558 net/bridge/br_mdb.c 	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
tb                562 net/bridge/br_mdb.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
tb                586 net/bridge/br_mdb.c 	if (!tb[MDBA_SET_ENTRY] ||
tb                587 net/bridge/br_mdb.c 	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
tb                592 net/bridge/br_mdb.c 	entry = nla_data(tb[MDBA_SET_ENTRY]);
tb                703 net/bridge/br_netlink.c static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
tb                709 net/bridge/br_netlink.c 	if (!tb[attrtype])
tb                712 net/bridge/br_netlink.c 	if (nla_get_u8(tb[attrtype]))
tb                726 net/bridge/br_netlink.c static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
tb                732 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
tb                736 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
tb                740 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
tb                744 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
tb                748 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
tb                752 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
tb                756 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
tb                760 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
tb                764 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
tb                768 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
tb                772 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
tb                777 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
tb                784 net/bridge/br_netlink.c 	if (tb[IFLA_BRPORT_COST]) {
tb                785 net/bridge/br_netlink.c 		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
tb                790 net/bridge/br_netlink.c 	if (tb[IFLA_BRPORT_PRIORITY]) {
tb                791 net/bridge/br_netlink.c 		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
tb                796 net/bridge/br_netlink.c 	if (tb[IFLA_BRPORT_STATE]) {
tb                797 net/bridge/br_netlink.c 		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
tb                802 net/bridge/br_netlink.c 	if (tb[IFLA_BRPORT_FLUSH])
tb                806 net/bridge/br_netlink.c 	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
tb                807 net/bridge/br_netlink.c 		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
tb                815 net/bridge/br_netlink.c 	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
tb                816 net/bridge/br_netlink.c 		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);
tb                823 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
tb                828 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
tb                832 net/bridge/br_netlink.c 	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
tb                836 net/bridge/br_netlink.c 		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
tb                858 net/bridge/br_netlink.c 	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
tb                879 net/bridge/br_netlink.c 			err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
tb                887 net/bridge/br_netlink.c 			err = br_setport(p, tb);
tb                940 net/bridge/br_netlink.c static int br_validate(struct nlattr *tb[], struct nlattr *data[],
tb                943 net/bridge/br_netlink.c 	if (tb[IFLA_ADDRESS]) {
tb                944 net/bridge/br_netlink.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb                946 net/bridge/br_netlink.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb                977 net/bridge/br_netlink.c 				    struct nlattr *tb[],
tb               1046 net/bridge/br_netlink.c static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
tb               1309 net/bridge/br_netlink.c 			  struct nlattr *tb[], struct nlattr *data[],
tb               1319 net/bridge/br_netlink.c 	if (tb[IFLA_ADDRESS]) {
tb               1321 net/bridge/br_netlink.c 		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
tb               1325 net/bridge/br_netlink.c 	err = br_changelink(dev, tb, data, extack);
tb                222 net/bridge/br_netlink_tunnel.c 	struct nlattr *tb[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1];
tb                229 net/bridge/br_netlink_tunnel.c 	err = nla_parse_nested_deprecated(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX,
tb                234 net/bridge/br_netlink_tunnel.c 	if (!tb[IFLA_BRIDGE_VLAN_TUNNEL_ID] ||
tb                235 net/bridge/br_netlink_tunnel.c 	    !tb[IFLA_BRIDGE_VLAN_TUNNEL_VID])
tb                238 net/bridge/br_netlink_tunnel.c 	tun_id = nla_get_u32(tb[IFLA_BRIDGE_VLAN_TUNNEL_ID]);
tb                239 net/bridge/br_netlink_tunnel.c 	vid = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]);
tb                243 net/bridge/br_netlink_tunnel.c 	if (tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS])
tb                244 net/bridge/br_netlink_tunnel.c 		flags = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS]);
tb                571 net/bridge/br_private.h int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
tb                573 net/bridge/br_private.h int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
tb                578 net/bridge/br_private.h int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
tb                 71 net/bridge/netfilter/nft_meta_bridge.c 				    const struct nlattr * const tb[])
tb                 76 net/bridge/netfilter/nft_meta_bridge.c 	priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
tb                 87 net/bridge/netfilter/nft_meta_bridge.c 		return nft_meta_get_init(ctx, expr, tb);
tb                 90 net/bridge/netfilter/nft_meta_bridge.c 	priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
tb                116 net/bridge/netfilter/nft_meta_bridge.c 			   const struct nlattr * const tb[])
tb                118 net/bridge/netfilter/nft_meta_bridge.c 	if (tb[NFTA_META_KEY] == NULL)
tb                121 net/bridge/netfilter/nft_meta_bridge.c 	if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
tb                124 net/bridge/netfilter/nft_meta_bridge.c 	if (tb[NFTA_META_DREG])
tb                127 net/bridge/netfilter/nft_meta_bridge.c 	if (tb[NFTA_META_SREG])
tb                369 net/bridge/netfilter/nft_reject_bridge.c 				  const struct nlattr * const tb[])
tb                374 net/bridge/netfilter/nft_reject_bridge.c 	if (tb[NFTA_REJECT_TYPE] == NULL)
tb                377 net/bridge/netfilter/nft_reject_bridge.c 	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
tb                381 net/bridge/netfilter/nft_reject_bridge.c 		if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
tb                384 net/bridge/netfilter/nft_reject_bridge.c 		icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
tb                463 net/caif/chnl_net.c 			  struct nlattr *tb[], struct nlattr *data[],
tb                486 net/caif/chnl_net.c static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
tb                735 net/can/gw.c   	struct nlattr *tb[CGW_MAX + 1];
tb                743 net/can/gw.c   	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb,
tb                748 net/can/gw.c   	if (tb[CGW_LIM_HOPS]) {
tb                749 net/can/gw.c   		*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);
tb                759 net/can/gw.c   		if (tb[CGW_FDMOD_AND]) {
tb                760 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN);
tb                778 net/can/gw.c   		if (tb[CGW_FDMOD_OR]) {
tb                779 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN);
tb                797 net/can/gw.c   		if (tb[CGW_FDMOD_XOR]) {
tb                798 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN);
tb                816 net/can/gw.c   		if (tb[CGW_FDMOD_SET]) {
tb                817 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN);
tb                837 net/can/gw.c   		if (tb[CGW_MOD_AND]) {
tb                838 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
tb                853 net/can/gw.c   		if (tb[CGW_MOD_OR]) {
tb                854 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
tb                869 net/can/gw.c   		if (tb[CGW_MOD_XOR]) {
tb                870 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
tb                885 net/can/gw.c   		if (tb[CGW_MOD_SET]) {
tb                886 net/can/gw.c   			nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
tb                904 net/can/gw.c   		if (tb[CGW_CS_CRC8]) {
tb                905 net/can/gw.c   			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);
tb                912 net/can/gw.c   			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
tb                927 net/can/gw.c   		if (tb[CGW_CS_XOR]) {
tb                928 net/can/gw.c   			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);
tb                935 net/can/gw.c   			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
tb                950 net/can/gw.c   		if (tb[CGW_MOD_UID])
tb                951 net/can/gw.c   			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
tb                961 net/can/gw.c   		if (tb[CGW_FILTER])
tb                962 net/can/gw.c   			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
tb                968 net/can/gw.c   		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
tb                971 net/can/gw.c   		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
tb                972 net/can/gw.c   		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);
tb                202 net/core/fib_rules.c static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
tb                207 net/core/fib_rules.c 	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);
tb                389 net/core/fib_rules.c 				  struct nlattr **tb,
tb                457 net/core/fib_rules.c 		if (!ops->compare(r, frh, tb))
tb                489 net/core/fib_rules.c 		       struct nlattr *tb[],
tb                499 net/core/fib_rules.c 		if (!tb[FRA_SRC] ||
tb                501 net/core/fib_rules.c 		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
tb                507 net/core/fib_rules.c 		if (!tb[FRA_DST] ||
tb                509 net/core/fib_rules.c 		    nla_len(tb[FRA_DST]) != ops->addr_size) {
tb                522 net/core/fib_rules.c 	if (tb[FRA_PRIORITY]) {
tb                523 net/core/fib_rules.c 		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
tb                529 net/core/fib_rules.c 	nlrule->proto = tb[FRA_PROTOCOL] ?
tb                530 net/core/fib_rules.c 		nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;
tb                532 net/core/fib_rules.c 	if (tb[FRA_IIFNAME]) {
tb                536 net/core/fib_rules.c 		nla_strlcpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
tb                542 net/core/fib_rules.c 	if (tb[FRA_OIFNAME]) {
tb                546 net/core/fib_rules.c 		nla_strlcpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
tb                552 net/core/fib_rules.c 	if (tb[FRA_FWMARK]) {
tb                553 net/core/fib_rules.c 		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
tb                561 net/core/fib_rules.c 	if (tb[FRA_FWMASK])
tb                562 net/core/fib_rules.c 		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
tb                564 net/core/fib_rules.c 	if (tb[FRA_TUN_ID])
tb                565 net/core/fib_rules.c 		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
tb                568 net/core/fib_rules.c 	if (tb[FRA_L3MDEV] &&
tb                569 net/core/fib_rules.c 	    fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0)
tb                574 net/core/fib_rules.c 	nlrule->table = frh_get_table(frh, tb);
tb                575 net/core/fib_rules.c 	if (tb[FRA_SUPPRESS_PREFIXLEN])
tb                576 net/core/fib_rules.c 		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
tb                580 net/core/fib_rules.c 	if (tb[FRA_SUPPRESS_IFGROUP])
tb                581 net/core/fib_rules.c 		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
tb                585 net/core/fib_rules.c 	if (tb[FRA_GOTO]) {
tb                591 net/core/fib_rules.c 		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
tb                607 net/core/fib_rules.c 	if (tb[FRA_UID_RANGE]) {
tb                614 net/core/fib_rules.c 		nlrule->uid_range = nla_get_kuid_range(tb);
tb                625 net/core/fib_rules.c 	if (tb[FRA_IP_PROTO])
tb                626 net/core/fib_rules.c 		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);
tb                628 net/core/fib_rules.c 	if (tb[FRA_SPORT_RANGE]) {
tb                629 net/core/fib_rules.c 		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
tb                637 net/core/fib_rules.c 	if (tb[FRA_DPORT_RANGE]) {
tb                638 net/core/fib_rules.c 		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
tb                657 net/core/fib_rules.c 		       struct nlattr **tb, struct fib_rule *rule)
tb                716 net/core/fib_rules.c 		if (!ops->compare(r, frh, tb))
tb                730 net/core/fib_rules.c 	struct nlattr *tb[FRA_MAX + 1];
tb                746 net/core/fib_rules.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
tb                753 net/core/fib_rules.c 	err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
tb                758 net/core/fib_rules.c 	    rule_exists(ops, frh, tb, rule)) {
tb                763 net/core/fib_rules.c 	err = ops->configure(rule, skb, frh, tb, extack);
tb                838 net/core/fib_rules.c 	struct nlattr *tb[FRA_MAX+1];
tb                854 net/core/fib_rules.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
tb                861 net/core/fib_rules.c 	err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
tb                865 net/core/fib_rules.c 	rule = rule_find(ops, frh, tb, nlrule, user_priority);
tb               4654 net/core/filter.c 		struct fib_table *tb;
tb               4656 net/core/filter.c 		tb = fib_get_table(net, tbid);
tb               4657 net/core/filter.c 		if (unlikely(!tb))
tb               4660 net/core/filter.c 		err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
tb               4776 net/core/filter.c 		struct fib6_table *tb;
tb               4778 net/core/filter.c 		tb = ipv6_stub->fib6_get_table(net, tbid);
tb               4779 net/core/filter.c 		if (unlikely(!tb))
tb               4782 net/core/filter.c 		err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
tb                336 net/core/lwt_bpf.c 	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
tb                341 net/core/lwt_bpf.c 	ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
tb                346 net/core/lwt_bpf.c 	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
tb                349 net/core/lwt_bpf.c 	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
tb                353 net/core/lwt_bpf.c 	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
tb                375 net/core/lwt_bpf.c 	struct nlattr *tb[LWT_BPF_MAX + 1];
tb                383 net/core/lwt_bpf.c 	ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
tb                388 net/core/lwt_bpf.c 	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
tb                398 net/core/lwt_bpf.c 	if (tb[LWT_BPF_IN]) {
tb                400 net/core/lwt_bpf.c 		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
tb                406 net/core/lwt_bpf.c 	if (tb[LWT_BPF_OUT]) {
tb                408 net/core/lwt_bpf.c 		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
tb                414 net/core/lwt_bpf.c 	if (tb[LWT_BPF_XMIT]) {
tb                416 net/core/lwt_bpf.c 		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
tb                422 net/core/lwt_bpf.c 	if (tb[LWT_BPF_XMIT_HEADROOM]) {
tb                423 net/core/lwt_bpf.c 		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);
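
The lwt_bpf.c entries above parse at two levels: the outer message fills tb[LWT_BPF_*], and each per-hook attribute (LWT_BPF_IN, LWT_BPF_OUT, LWT_BPF_XMIT) is handed to bpf_parse_prog(), which re-parses its payload into a second table of LWT_BPF_PROG_* attributes. Here is the nesting in miniature, again with invented types (tattr, tparse, the P_*/B_* constants) rather than the kernel's struct nlattr and nla_parse_nested().

#include <stdio.h>
#include <string.h>

enum { P_UNSPEC, P_FD, P_NAME, __P_MAX };	/* inner: per-program */
enum { B_UNSPEC, B_IN, B_OUT, __B_MAX };	/* outer: hook points */

struct tattr {
	int type;
	const struct tattr *nested;	/* payload when the attr nests */
	int n_nested;
	int value;			/* payload for leaf attrs */
};

static void tparse(const struct tattr **tb, int maxtype,
		   const struct tattr *a, int n)
{
	memset(tb, 0, (maxtype + 1) * sizeof(*tb));
	for (int i = 0; i < n; i++)
		if (a[i].type <= maxtype)
			tb[a[i].type] = &a[i];
}

int main(void)
{
	const struct tattr prog[] = { { P_FD, NULL, 0, 42 } };
	const struct tattr msg[]  = { { B_IN, prog, 1, 0 } };
	const struct tattr *tb[__B_MAX], *ptb[__P_MAX];

	tparse(tb, __B_MAX - 1, msg, 1);
	if (tb[B_IN]) {
		/* second-level parse, as bpf_parse_prog() does */
		tparse(ptb, __P_MAX - 1, tb[B_IN]->nested,
		       tb[B_IN]->n_nested);
		if (!ptb[P_FD])		/* required inside the nest */
			return 1;
		printf("ingress prog fd %d\n", ptb[P_FD]->value);
	}
	return 0;
}
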
tb               1856 net/core/neighbour.c 	struct nlattr *tb[NDA_MAX+1];
tb               1865 net/core/neighbour.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
tb               1871 net/core/neighbour.c 	if (!tb[NDA_DST]) {
tb               1884 net/core/neighbour.c 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
tb               1894 net/core/neighbour.c 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
tb               1899 net/core/neighbour.c 	dst = nla_data(tb[NDA_DST]);
tb               1900 net/core/neighbour.c 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
tb               1902 net/core/neighbour.c 	if (tb[NDA_PROTOCOL])
tb               1903 net/core/neighbour.c 		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
tb               2181 net/core/neighbour.c 	struct nlattr *tb[NDTA_MAX+1];
tb               2185 net/core/neighbour.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
tb               2190 net/core/neighbour.c 	if (tb[NDTA_NAME] == NULL) {
tb               2203 net/core/neighbour.c 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
tb               2218 net/core/neighbour.c 	if (tb[NDTA_PARMS]) {
tb               2224 net/core/neighbour.c 						  tb[NDTA_PARMS],
tb               2312 net/core/neighbour.c 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
tb               2313 net/core/neighbour.c 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
tb               2317 net/core/neighbour.c 	if (tb[NDTA_THRESH1])
tb               2318 net/core/neighbour.c 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
tb               2320 net/core/neighbour.c 	if (tb[NDTA_THRESH2])
tb               2321 net/core/neighbour.c 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
tb               2323 net/core/neighbour.c 	if (tb[NDTA_THRESH3])
tb               2324 net/core/neighbour.c 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
tb               2326 net/core/neighbour.c 	if (tb[NDTA_GC_INTERVAL])
tb               2327 net/core/neighbour.c 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
tb               2642 net/core/neighbour.c 	struct nlattr *tb[NDA_MAX + 1];
tb               2666 net/core/neighbour.c 						    tb, NDA_MAX, nda_policy,
tb               2669 net/core/neighbour.c 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
tb               2676 net/core/neighbour.c 		if (!tb[i])
tb               2682 net/core/neighbour.c 			filter->dev_idx = nla_get_u32(tb[i]);
tb               2685 net/core/neighbour.c 			filter->master_idx = nla_get_u32(tb[i]);
tb               2749 net/core/neighbour.c 	struct nlattr *tb[NDA_MAX + 1];
tb               2770 net/core/neighbour.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
tb               2784 net/core/neighbour.c 		if (!tb[i])
tb               2789 net/core/neighbour.c 			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
tb               2793 net/core/neighbour.c 			*dst = nla_data(tb[i]);
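
Both neighbour.c paths above compare nla_len() against the table's key_len before ever dereferencing nla_data(): a successful parse only proves an attribute exists, not that its payload is large enough for the caller's type. The same guard in isolation, with a hypothetical blob_attr standing in for struct nlattr:

#include <stdio.h>
#include <string.h>

struct blob_attr {
	int len;
	const unsigned char *data;
};

/* Copy a fixed-size key out of an attribute, refusing short payloads
 * so the memcpy() can never read past the attribute. */
static int get_key(unsigned char *key, int key_len,
		   const struct blob_attr *attr)
{
	if (!attr)
		return -1;		/* attribute missing */
	if (attr->len < key_len)
		return -1;		/* too short: would overread */
	memcpy(key, attr->data, key_len);
	return 0;
}

int main(void)
{
	const unsigned char dst[4] = { 192, 0, 2, 1 };
	const struct blob_attr a = { sizeof(dst), dst };
	unsigned char key[4];

	if (get_key(key, sizeof(key), &a))
		return 1;
	printf("key %u.%u.%u.%u\n", key[0], key[1], key[2], key[3]);
	return 0;
}

Skipping that length check would turn a malformed message into an out-of-bounds read, which is why the pattern appears wherever tb[] payloads are copied rather than merely probed.
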
tb                726 net/core/net_namespace.c 	struct nlattr *tb[NETNSA_MAX + 1];
tb                731 net/core/net_namespace.c 	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
tb                735 net/core/net_namespace.c 	if (!tb[NETNSA_NSID]) {
tb                739 net/core/net_namespace.c 	nsid = nla_get_s32(tb[NETNSA_NSID]);
tb                741 net/core/net_namespace.c 	if (tb[NETNSA_PID]) {
tb                742 net/core/net_namespace.c 		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
tb                743 net/core/net_namespace.c 		nla = tb[NETNSA_PID];
tb                744 net/core/net_namespace.c 	} else if (tb[NETNSA_FD]) {
tb                745 net/core/net_namespace.c 		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
tb                746 net/core/net_namespace.c 		nla = tb[NETNSA_FD];
tb                775 net/core/net_namespace.c 		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
tb                831 net/core/net_namespace.c 				    struct nlattr **tb,
tb                838 net/core/net_namespace.c 					      tb, NETNSA_MAX, rtnl_net_policy,
tb                841 net/core/net_namespace.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
tb                848 net/core/net_namespace.c 		if (!tb[i])
tb                870 net/core/net_namespace.c 	struct nlattr *tb[NETNSA_MAX + 1];
tb                881 net/core/net_namespace.c 	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
tb                884 net/core/net_namespace.c 	if (tb[NETNSA_PID]) {
tb                885 net/core/net_namespace.c 		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
tb                886 net/core/net_namespace.c 		nla = tb[NETNSA_PID];
tb                887 net/core/net_namespace.c 	} else if (tb[NETNSA_FD]) {
tb                888 net/core/net_namespace.c 		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
tb                889 net/core/net_namespace.c 		nla = tb[NETNSA_FD];
tb                890 net/core/net_namespace.c 	} else if (tb[NETNSA_NSID]) {
tb                891 net/core/net_namespace.c 		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
tb                894 net/core/net_namespace.c 		nla = tb[NETNSA_NSID];
tb                906 net/core/net_namespace.c 	if (tb[NETNSA_TARGET_NSID]) {
tb                907 net/core/net_namespace.c 		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
tb                911 net/core/net_namespace.c 			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
tb                978 net/core/net_namespace.c 	struct nlattr *tb[NETNSA_MAX + 1];
tb                981 net/core/net_namespace.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
tb                988 net/core/net_namespace.c 		if (!tb[i])
tb                994 net/core/net_namespace.c 			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
tb                996 net/core/net_namespace.c 				NL_SET_BAD_ATTR(extack, tb[i]);
tb               1005 net/core/net_namespace.c 			NL_SET_BAD_ATTR(extack, tb[i]);
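
rtnl_net_getid() above accepts NETNSA_PID, NETNSA_FD or NETNSA_NSID as alternative ways to name the peer namespace, and rtnl_ensure_unique_netns() in the rtnetlink.c block that follows rejects requests naming more than one of IFLA_NET_NS_PID, IFLA_NET_NS_FD and IFLA_TARGET_NETNSID. Either way the rule reduces to "at most one selector present in tb[]"; a sketch of that check, with the SEL_* attribute types invented for illustration:

#include <stdio.h>

enum { SEL_UNSPEC, SEL_PID, SEL_FD, SEL_NSID, __SEL_MAX };

/* tb[] stands for a parsed attribute table as in the sketches above;
 * a non-null slot means that attribute was present in the message. */
static int pick_selector(const void *const tb[__SEL_MAX])
{
	int found = 0, which = -1;

	for (int t = SEL_PID; t <= SEL_NSID; t++) {
		if (tb[t]) {
			found++;
			which = t;
		}
	}
	return found == 1 ? which : -1;	/* none or several: reject */
}

int main(void)
{
	int pid = 1234;
	const void *tb[__SEL_MAX] = { 0 };

	tb[SEL_PID] = &pid;
	printf("selector %d\n", pick_selector(tb));	/* 1 == SEL_PID */
	tb[SEL_FD] = &pid;
	printf("selector %d\n", pick_selector(tb));	/* -1: ambiguous */
	return 0;
}
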
tb               1884 net/core/rtnetlink.c 				      bool strict_check, struct nlattr **tb,
tb               1908 net/core/rtnetlink.c 		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
tb               1923 net/core/rtnetlink.c 	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
tb               1937 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX+1];
tb               1948 net/core/rtnetlink.c 	err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
tb               1957 net/core/rtnetlink.c 		if (!tb[i])
tb               1963 net/core/rtnetlink.c 			netnsid = nla_get_s32(tb[i]);
tb               1971 net/core/rtnetlink.c 			ext_filter_mask = nla_get_u32(tb[i]);
tb               1974 net/core/rtnetlink.c 			master_idx = nla_get_u32(tb[i]);
tb               1977 net/core/rtnetlink.c 			kind_ops = linkinfo_to_kind_ops(tb[i]);
tb               2029 net/core/rtnetlink.c int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
tb               2032 net/core/rtnetlink.c 	return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
tb               2037 net/core/rtnetlink.c struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
tb               2043 net/core/rtnetlink.c 	if (tb[IFLA_NET_NS_PID])
tb               2044 net/core/rtnetlink.c 		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
tb               2045 net/core/rtnetlink.c 	else if (tb[IFLA_NET_NS_FD])
tb               2046 net/core/rtnetlink.c 		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
tb               2061 net/core/rtnetlink.c 					       struct nlattr *tb[])
tb               2065 net/core/rtnetlink.c 	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
tb               2066 net/core/rtnetlink.c 		return rtnl_link_get_net(src_net, tb);
tb               2068 net/core/rtnetlink.c 	if (!tb[IFLA_TARGET_NETNSID])
tb               2071 net/core/rtnetlink.c 	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
tb               2080 net/core/rtnetlink.c 					     struct nlattr *tb[], int cap)
tb               2084 net/core/rtnetlink.c 	net = rtnl_link_get_net_by_nlattr(src_net, tb);
tb               2099 net/core/rtnetlink.c static int rtnl_ensure_unique_netns(struct nlattr *tb[],
tb               2105 net/core/rtnetlink.c 		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
tb               2112 net/core/rtnetlink.c 	if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
tb               2115 net/core/rtnetlink.c 	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
tb               2118 net/core/rtnetlink.c 	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
tb               2128 net/core/rtnetlink.c static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
tb               2131 net/core/rtnetlink.c 		if (tb[IFLA_ADDRESS] &&
tb               2132 net/core/rtnetlink.c 		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
tb               2135 net/core/rtnetlink.c 		if (tb[IFLA_BROADCAST] &&
tb               2136 net/core/rtnetlink.c 		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
tb               2140 net/core/rtnetlink.c 	if (tb[IFLA_AF_SPEC]) {
tb               2144 net/core/rtnetlink.c 		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
tb               2190 net/core/rtnetlink.c static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
tb               2195 net/core/rtnetlink.c 	if (tb[IFLA_VF_MAC]) {
tb               2196 net/core/rtnetlink.c 		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
tb               2208 net/core/rtnetlink.c 	if (tb[IFLA_VF_VLAN]) {
tb               2209 net/core/rtnetlink.c 		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
tb               2222 net/core/rtnetlink.c 	if (tb[IFLA_VF_VLAN_LIST]) {
tb               2231 net/core/rtnetlink.c 		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
tb               2253 net/core/rtnetlink.c 	if (tb[IFLA_VF_TX_RATE]) {
tb               2254 net/core/rtnetlink.c 		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
tb               2274 net/core/rtnetlink.c 	if (tb[IFLA_VF_RATE]) {
tb               2275 net/core/rtnetlink.c 		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
tb               2288 net/core/rtnetlink.c 	if (tb[IFLA_VF_SPOOFCHK]) {
tb               2289 net/core/rtnetlink.c 		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
tb               2301 net/core/rtnetlink.c 	if (tb[IFLA_VF_LINK_STATE]) {
tb               2302 net/core/rtnetlink.c 		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
tb               2314 net/core/rtnetlink.c 	if (tb[IFLA_VF_RSS_QUERY_EN]) {
tb               2318 net/core/rtnetlink.c 		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
tb               2328 net/core/rtnetlink.c 	if (tb[IFLA_VF_TRUST]) {
tb               2329 net/core/rtnetlink.c 		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
tb               2340 net/core/rtnetlink.c 	if (tb[IFLA_VF_IB_NODE_GUID]) {
tb               2341 net/core/rtnetlink.c 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
tb               2350 net/core/rtnetlink.c 	if (tb[IFLA_VF_IB_PORT_GUID]) {
tb               2351 net/core/rtnetlink.c 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
tb               2407 net/core/rtnetlink.c 		      struct nlattr **tb, char *ifname, int status)
tb               2412 net/core/rtnetlink.c 	err = validate_linkmsg(dev, tb);
tb               2416 net/core/rtnetlink.c 	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
tb               2418 net/core/rtnetlink.c 							    tb, CAP_NET_ADMIN);
tb               2431 net/core/rtnetlink.c 	if (tb[IFLA_MAP]) {
tb               2445 net/core/rtnetlink.c 		u_map = nla_data(tb[IFLA_MAP]);
tb               2460 net/core/rtnetlink.c 	if (tb[IFLA_ADDRESS]) {
tb               2472 net/core/rtnetlink.c 		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
tb               2481 net/core/rtnetlink.c 	if (tb[IFLA_MTU]) {
tb               2482 net/core/rtnetlink.c 		err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
tb               2488 net/core/rtnetlink.c 	if (tb[IFLA_GROUP]) {
tb               2489 net/core/rtnetlink.c 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
tb               2505 net/core/rtnetlink.c 	if (tb[IFLA_IFALIAS]) {
tb               2506 net/core/rtnetlink.c 		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
tb               2507 net/core/rtnetlink.c 				    nla_len(tb[IFLA_IFALIAS]));
tb               2513 net/core/rtnetlink.c 	if (tb[IFLA_BROADCAST]) {
tb               2514 net/core/rtnetlink.c 		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
tb               2525 net/core/rtnetlink.c 	if (tb[IFLA_MASTER]) {
tb               2526 net/core/rtnetlink.c 		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
tb               2532 net/core/rtnetlink.c 	if (tb[IFLA_CARRIER]) {
tb               2533 net/core/rtnetlink.c 		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
tb               2539 net/core/rtnetlink.c 	if (tb[IFLA_TXQLEN]) {
tb               2540 net/core/rtnetlink.c 		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
tb               2548 net/core/rtnetlink.c 	if (tb[IFLA_GSO_MAX_SIZE]) {
tb               2549 net/core/rtnetlink.c 		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
tb               2562 net/core/rtnetlink.c 	if (tb[IFLA_GSO_MAX_SEGS]) {
tb               2563 net/core/rtnetlink.c 		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
tb               2576 net/core/rtnetlink.c 	if (tb[IFLA_OPERSTATE])
tb               2577 net/core/rtnetlink.c 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
tb               2579 net/core/rtnetlink.c 	if (tb[IFLA_LINKMODE]) {
tb               2580 net/core/rtnetlink.c 		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
tb               2589 net/core/rtnetlink.c 	if (tb[IFLA_VFINFO_LIST]) {
tb               2594 net/core/rtnetlink.c 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
tb               2614 net/core/rtnetlink.c 	if (tb[IFLA_VF_PORTS]) {
tb               2624 net/core/rtnetlink.c 		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
tb               2649 net/core/rtnetlink.c 	if (tb[IFLA_PORT_SELF]) {
tb               2653 net/core/rtnetlink.c 						  tb[IFLA_PORT_SELF],
tb               2666 net/core/rtnetlink.c 	if (tb[IFLA_AF_SPEC]) {
tb               2670 net/core/rtnetlink.c 		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
tb               2689 net/core/rtnetlink.c 	if (tb[IFLA_PROTO_DOWN]) {
tb               2691 net/core/rtnetlink.c 					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
tb               2697 net/core/rtnetlink.c 	if (tb[IFLA_XDP]) {
tb               2702 net/core/rtnetlink.c 						  tb[IFLA_XDP],
tb               2754 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX+1];
tb               2757 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
tb               2762 net/core/rtnetlink.c 	err = rtnl_ensure_unique_netns(tb, extack, false);
tb               2766 net/core/rtnetlink.c 	if (tb[IFLA_IFNAME])
tb               2767 net/core/rtnetlink.c 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
tb               2775 net/core/rtnetlink.c 	else if (tb[IFLA_IFNAME])
tb               2785 net/core/rtnetlink.c 	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
tb               2850 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX+1];
tb               2854 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
tb               2859 net/core/rtnetlink.c 	err = rtnl_ensure_unique_netns(tb, extack, true);
tb               2863 net/core/rtnetlink.c 	if (tb[IFLA_IFNAME])
tb               2864 net/core/rtnetlink.c 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
tb               2866 net/core/rtnetlink.c 	if (tb[IFLA_TARGET_NETNSID]) {
tb               2867 net/core/rtnetlink.c 		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
tb               2877 net/core/rtnetlink.c 	else if (tb[IFLA_IFNAME])
tb               2879 net/core/rtnetlink.c 	else if (tb[IFLA_GROUP])
tb               2880 net/core/rtnetlink.c 		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
tb               2885 net/core/rtnetlink.c 		if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
tb               2926 net/core/rtnetlink.c 				    struct nlattr *tb[],
tb               2933 net/core/rtnetlink.c 	if (tb[IFLA_NUM_TX_QUEUES])
tb               2934 net/core/rtnetlink.c 		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
tb               2938 net/core/rtnetlink.c 	if (tb[IFLA_NUM_RX_QUEUES])
tb               2939 net/core/rtnetlink.c 		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
tb               2962 net/core/rtnetlink.c 	if (tb[IFLA_MTU]) {
tb               2963 net/core/rtnetlink.c 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
tb               2973 net/core/rtnetlink.c 	if (tb[IFLA_ADDRESS]) {
tb               2974 net/core/rtnetlink.c 		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
tb               2975 net/core/rtnetlink.c 				nla_len(tb[IFLA_ADDRESS]));
tb               2978 net/core/rtnetlink.c 	if (tb[IFLA_BROADCAST])
tb               2979 net/core/rtnetlink.c 		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
tb               2980 net/core/rtnetlink.c 				nla_len(tb[IFLA_BROADCAST]));
tb               2981 net/core/rtnetlink.c 	if (tb[IFLA_TXQLEN])
tb               2982 net/core/rtnetlink.c 		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
tb               2983 net/core/rtnetlink.c 	if (tb[IFLA_OPERSTATE])
tb               2984 net/core/rtnetlink.c 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
tb               2985 net/core/rtnetlink.c 	if (tb[IFLA_LINKMODE])
tb               2986 net/core/rtnetlink.c 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
tb               2987 net/core/rtnetlink.c 	if (tb[IFLA_GROUP])
tb               2988 net/core/rtnetlink.c 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
tb               2989 net/core/rtnetlink.c 	if (tb[IFLA_GSO_MAX_SIZE])
tb               2990 net/core/rtnetlink.c 		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
tb               2991 net/core/rtnetlink.c 	if (tb[IFLA_GSO_MAX_SEGS])
tb               2992 net/core/rtnetlink.c 		dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
tb               3002 net/core/rtnetlink.c 		struct nlattr **tb)
tb               3009 net/core/rtnetlink.c 			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
tb               3028 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX + 1];
tb               3041 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
tb               3046 net/core/rtnetlink.c 	err = rtnl_ensure_unique_netns(tb, extack, false);
tb               3050 net/core/rtnetlink.c 	if (tb[IFLA_IFNAME])
tb               3051 net/core/rtnetlink.c 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
tb               3071 net/core/rtnetlink.c 	err = validate_linkmsg(dev, tb);
tb               3075 net/core/rtnetlink.c 	if (tb[IFLA_LINKINFO]) {
tb               3077 net/core/rtnetlink.c 						  tb[IFLA_LINKINFO],
tb               3106 net/core/rtnetlink.c 			err = ops->validate(tb, data, extack);
tb               3143 net/core/rtnetlink.c 			err = ops->changelink(dev, tb, data, extack);
tb               3153 net/core/rtnetlink.c 			err = m_ops->slave_changelink(master_dev, dev, tb,
tb               3160 net/core/rtnetlink.c 		return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
tb               3164 net/core/rtnetlink.c 		if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
tb               3166 net/core/rtnetlink.c 						nla_get_u32(tb[IFLA_GROUP]),
tb               3167 net/core/rtnetlink.c 						ifm, extack, tb);
tb               3171 net/core/rtnetlink.c 	if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
tb               3197 net/core/rtnetlink.c 	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
tb               3201 net/core/rtnetlink.c 	if (tb[IFLA_LINK_NETNSID]) {
tb               3202 net/core/rtnetlink.c 		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
tb               3218 net/core/rtnetlink.c 			       name_assign_type, ops, tb, extack);
tb               3227 net/core/rtnetlink.c 		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
tb               3253 net/core/rtnetlink.c 	if (tb[IFLA_MASTER]) {
tb               3254 net/core/rtnetlink.c 		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
tb               3292 net/core/rtnetlink.c 				  struct nlattr **tb,
tb               3304 net/core/rtnetlink.c 		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
tb               3314 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
tb               3320 net/core/rtnetlink.c 		if (!tb[i])
tb               3344 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX+1];
tb               3351 net/core/rtnetlink.c 	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
tb               3355 net/core/rtnetlink.c 	err = rtnl_ensure_unique_netns(tb, extack, true);
tb               3359 net/core/rtnetlink.c 	if (tb[IFLA_TARGET_NETNSID]) {
tb               3360 net/core/rtnetlink.c 		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
tb               3366 net/core/rtnetlink.c 	if (tb[IFLA_IFNAME])
tb               3367 net/core/rtnetlink.c 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
tb               3369 net/core/rtnetlink.c 	if (tb[IFLA_EXT_MASK])
tb               3370 net/core/rtnetlink.c 		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
tb               3376 net/core/rtnetlink.c 	else if (tb[IFLA_IFNAME])
tb               3411 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX+1];
tb               3420 net/core/rtnetlink.c 	if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
tb               3421 net/core/rtnetlink.c 		if (tb[IFLA_EXT_MASK])
tb               3422 net/core/rtnetlink.c 			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
tb               3627 net/core/rtnetlink.c 		     struct nlattr *tb[],
tb               3687 net/core/rtnetlink.c 	struct nlattr *tb[NDA_MAX+1];
tb               3693 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
tb               3710 net/core/rtnetlink.c 	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
tb               3720 net/core/rtnetlink.c 	addr = nla_data(tb[NDA_LLADDR]);
tb               3722 net/core/rtnetlink.c 	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
tb               3734 net/core/rtnetlink.c 		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
tb               3745 net/core/rtnetlink.c 			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
tb               3750 net/core/rtnetlink.c 			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
tb               3767 net/core/rtnetlink.c 		     struct nlattr *tb[],
tb               3795 net/core/rtnetlink.c 	struct nlattr *tb[NDA_MAX+1];
tb               3804 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
tb               3821 net/core/rtnetlink.c 	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
tb               3831 net/core/rtnetlink.c 	addr = nla_data(tb[NDA_LLADDR]);
tb               3833 net/core/rtnetlink.c 	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
tb               3846 net/core/rtnetlink.c 			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
tb               3857 net/core/rtnetlink.c 			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
tb               3860 net/core/rtnetlink.c 			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
tb               3938 net/core/rtnetlink.c 	struct nlattr *tb[NDA_MAX + 1];
tb               3954 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
tb               3961 net/core/rtnetlink.c 		if (!tb[i])
tb               3966 net/core/rtnetlink.c 			if (nla_len(tb[i]) != sizeof(u32)) {
tb               3970 net/core/rtnetlink.c 			*brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
tb               3973 net/core/rtnetlink.c 			if (nla_len(tb[i]) != sizeof(u32)) {
tb               3977 net/core/rtnetlink.c 			*br_idx = nla_get_u32(tb[NDA_MASTER]);
tb               3992 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX+1];
tb               4008 net/core/rtnetlink.c 					     tb, IFLA_MAX, ifla_policy,
tb               4013 net/core/rtnetlink.c 			if (tb[IFLA_MASTER])
tb               4014 net/core/rtnetlink.c 				*br_idx = nla_get_u32(tb[IFLA_MASTER]);
tb               4124 net/core/rtnetlink.c 				struct nlattr **tb, u8 *ndm_flags,
tb               4148 net/core/rtnetlink.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
tb               4156 net/core/rtnetlink.c 		if (!tb[i])
tb               4161 net/core/rtnetlink.c 			*br_idx = nla_get_u32(tb[i]);
tb               4164 net/core/rtnetlink.c 			if (nla_len(tb[i]) != ETH_ALEN) {
tb               4168 net/core/rtnetlink.c 			*addr = nla_data(tb[i]);
tb               4171 net/core/rtnetlink.c 			err = fdb_vid_parse(tb[i], vid, extack);
tb               4192 net/core/rtnetlink.c 	struct nlattr *tb[NDA_MAX + 1];
tb               4201 net/core/rtnetlink.c 	err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
tb               4270 net/core/rtnetlink.c 	err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
tb               4393 net/core/rtnetlink.c 	struct nlattr *tb[IFLA_MAX+1];
tb               4413 net/core/rtnetlink.c 						    tb, IFLA_MAX, ifla_policy,
tb               4417 net/core/rtnetlink.c 					     tb, IFLA_MAX, ifla_policy,
tb               4425 net/core/rtnetlink.c 		if (!tb[i])
tb               4430 net/core/rtnetlink.c 			*filter_mask = nla_get_u32(tb[i]);
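
The rtnetlink request validators above (rtnl_valid_dump_ifinfo_req, valid_fdb_dump_strict, valid_bridge_getlink_req) switch between nlmsg_parse_deprecated_strict() and plain nlmsg_parse_deprecated() on strict_check: strict parsing fails on attributes the handler does not recognise, while the legacy path skips them for compatibility with old userspace. The behavioural difference, modelled with the same invented toy_attr as earlier:

#include <stdio.h>
#include <string.h>

struct toy_attr { int type; int value; };

/* Fill tb[] from a stream of attributes.  In strict mode an unknown
 * type is an error (what the *_strict parsers do); in legacy mode it
 * is skipped (what the deprecated parsers do). */
static int toy_parse(const struct toy_attr **tb, int maxtype,
		     const struct toy_attr *a, int n, int strict)
{
	memset(tb, 0, (maxtype + 1) * sizeof(*tb));
	for (int i = 0; i < n; i++) {
		if (a[i].type > maxtype) {
			if (strict)
				return -1;	/* unsupported attribute */
			continue;		/* old behaviour: ignore */
		}
		tb[a[i].type] = &a[i];
	}
	return 0;
}

int main(void)
{
	const struct toy_attr msg[] = { { 1, 7 }, { 99, 0 } }; /* 99 unknown */
	const struct toy_attr *tb[2];

	printf("legacy: %d\n", toy_parse(tb, 1, msg, 2, 0));	/* 0: ok */
	printf("strict: %d\n", toy_parse(tb, 1, msg, 2, 1));	/* -1 */
	return 0;
}
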
tb                208 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                219 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                227 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_PFC_CFG])
tb                234 net/dcb/dcbnl.c 					  tb[DCB_ATTR_PFC_CFG],
tb                264 net/dcb/dcbnl.c 				u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                278 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                286 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_CAP])
tb                293 net/dcb/dcbnl.c 					  tb[DCB_ATTR_CAP], dcbnl_cap_nest,
tb                323 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                331 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_NUMTCS])
tb                338 net/dcb/dcbnl.c 					  tb[DCB_ATTR_NUMTCS],
tb                370 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                377 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_NUMTCS])
tb                384 net/dcb/dcbnl.c 					  tb[DCB_ATTR_NUMTCS],
tb                404 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                414 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                418 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_PFC_STATE])
tb                424 net/dcb/dcbnl.c 	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
tb                432 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                440 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_APP])
tb                444 net/dcb/dcbnl.c 					  tb[DCB_ATTR_APP], dcbnl_app_nest,
tb                502 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                509 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_APP])
tb                513 net/dcb/dcbnl.c 					  tb[DCB_ATTR_APP], dcbnl_app_nest,
tb                552 net/dcb/dcbnl.c 			     struct nlattr **tb, struct sk_buff *skb, int dir)
tb                562 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_PG_CFG])
tb                572 net/dcb/dcbnl.c 					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
tb                688 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                690 net/dcb/dcbnl.c 	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
tb                694 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                696 net/dcb/dcbnl.c 	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
tb                700 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                704 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_STATE])
tb                710 net/dcb/dcbnl.c 	value = nla_get_u8(tb[DCB_ATTR_STATE]);
tb                717 net/dcb/dcbnl.c 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                724 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_PFC_CFG])
tb                731 net/dcb/dcbnl.c 					  tb[DCB_ATTR_PFC_CFG],
tb                748 net/dcb/dcbnl.c 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                752 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_SET_ALL])
tb                766 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
tb                778 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_PG_CFG])
tb                788 net/dcb/dcbnl.c 					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
tb                859 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                861 net/dcb/dcbnl.c 	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
tb                865 net/dcb/dcbnl.c 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                867 net/dcb/dcbnl.c 	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
tb                871 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                881 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_BCN])
tb                889 net/dcb/dcbnl.c 					  tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
tb                933 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb                941 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_BCN])
tb                949 net/dcb/dcbnl.c 					  tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest,
tb               1425 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1434 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_IEEE])
tb               1438 net/dcb/dcbnl.c 					  tb[DCB_ATTR_IEEE],
tb               1515 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1526 net/dcb/dcbnl.c 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1535 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_IEEE])
tb               1539 net/dcb/dcbnl.c 					  tb[DCB_ATTR_IEEE],
tb               1572 net/dcb/dcbnl.c 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1582 net/dcb/dcbnl.c 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1589 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_DCBX])
tb               1592 net/dcb/dcbnl.c 	value = nla_get_u8(tb[DCB_ATTR_DCBX]);
tb               1599 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1609 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_FEATCFG])
tb               1613 net/dcb/dcbnl.c 					  tb[DCB_ATTR_FEATCFG],
tb               1645 net/dcb/dcbnl.c 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1654 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_FEATCFG])
tb               1658 net/dcb/dcbnl.c 					  tb[DCB_ATTR_FEATCFG],
tb               1683 net/dcb/dcbnl.c 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
tb               1738 net/dcb/dcbnl.c 	struct nlattr *tb[DCB_ATTR_MAX + 1];
tb               1748 net/dcb/dcbnl.c 	ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
tb               1761 net/dcb/dcbnl.c 	if (!tb[DCB_ATTR_IFNAME])
tb               1764 net/dcb/dcbnl.c 	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
tb               1776 net/dcb/dcbnl.c 	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
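
dcb_doit() at the end of the dcbnl.c block funnels every DCB command through a callback table, each handler sharing one signature and receiving the already-parsed tb (ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb)). The dispatch shape, reduced to a toy with an invented cmd_cb signature rather than the kernel's:

#include <stdio.h>

#define CMD_MAX 2

/* Every handler takes the parsed attribute table, as the dcbnl
 * getstate/setstate/... callbacks above all do. */
typedef int (*cmd_cb)(const void **tb);

static int cmd_get(const void **tb) { (void)tb; return puts("get"), 0; }
static int cmd_set(const void **tb) { (void)tb; return puts("set"), 0; }

static const struct { cmd_cb cb; } handlers[CMD_MAX + 1] = {
	[1] = { cmd_get },
	[2] = { cmd_set },
};

static int dispatch(int cmd, const void **tb)
{
	if (cmd < 0 || cmd > CMD_MAX || !handlers[cmd].cb)
		return -1;		/* unknown command */
	return handlers[cmd].cb(tb);
}

int main(void)
{
	const void *tb[4] = { 0 };

	return dispatch(2, tb);
}
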
tb                573 net/decnet/dn_dev.c 	struct nlattr *tb[IFA_MAX+1];
tb                586 net/decnet/dn_dev.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb                600 net/decnet/dn_dev.c 		if (tb[IFA_LOCAL] &&
tb                601 net/decnet/dn_dev.c 		    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
tb                604 net/decnet/dn_dev.c 		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
tb                619 net/decnet/dn_dev.c 	struct nlattr *tb[IFA_MAX+1];
tb                632 net/decnet/dn_dev.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb                637 net/decnet/dn_dev.c 	if (tb[IFA_LOCAL] == NULL)
tb                653 net/decnet/dn_dev.c 	if (tb[IFA_ADDRESS] == NULL)
tb                654 net/decnet/dn_dev.c 		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
tb                656 net/decnet/dn_dev.c 	ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
tb                657 net/decnet/dn_dev.c 	ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
tb                658 net/decnet/dn_dev.c 	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
tb                663 net/decnet/dn_dev.c 	if (tb[IFA_LABEL])
tb                664 net/decnet/dn_dev.c 		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
tb                509 net/decnet/dn_fib.c 	struct dn_fib_table *tb;
tb                525 net/decnet/dn_fib.c 	tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 0);
tb                526 net/decnet/dn_fib.c 	if (!tb)
tb                529 net/decnet/dn_fib.c 	return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb));
tb                536 net/decnet/dn_fib.c 	struct dn_fib_table *tb;
tb                552 net/decnet/dn_fib.c 	tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 1);
tb                553 net/decnet/dn_fib.c 	if (!tb)
tb                556 net/decnet/dn_fib.c 	return tb->insert(tb, r, attrs, nlh, &NETLINK_CB(skb));
tb                561 net/decnet/dn_fib.c 	struct dn_fib_table *tb;
tb                593 net/decnet/dn_fib.c 		tb = dn_fib_get_table(RT_MIN_TABLE, 1);
tb                595 net/decnet/dn_fib.c 		tb = dn_fib_get_table(RT_TABLE_LOCAL, 1);
tb                597 net/decnet/dn_fib.c 	if (tb == NULL)
tb                607 net/decnet/dn_fib.c 	req.rtm.rtm_table = tb->n;
tb                613 net/decnet/dn_fib.c 		tb->insert(tb, &req.rtm, attrs, &req.nlh, NULL);
tb                615 net/decnet/dn_fib.c 		tb->delete(tb, &req.rtm, attrs, &req.nlh, NULL);
tb               1643 net/decnet/dn_route.c 	struct nlattr *tb[RTA_MAX+1];
tb               1648 net/decnet/dn_route.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               1662 net/decnet/dn_route.c 	if (tb[RTA_SRC])
tb               1663 net/decnet/dn_route.c 		fld.saddr = nla_get_le16(tb[RTA_SRC]);
tb               1665 net/decnet/dn_route.c 	if (tb[RTA_DST])
tb               1666 net/decnet/dn_route.c 		fld.daddr = nla_get_le16(tb[RTA_DST]);
tb               1668 net/decnet/dn_route.c 	if (tb[RTA_IIF])
tb               1669 net/decnet/dn_route.c 		fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]);
tb               1690 net/decnet/dn_route.c 		if (tb[RTA_OIF])
tb               1691 net/decnet/dn_route.c 			fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]);
tb                124 net/decnet/dn_rules.c 				 struct nlattr **tb,
tb                150 net/decnet/dn_rules.c 		r->src = nla_get_le16(tb[FRA_SRC]);
tb                153 net/decnet/dn_rules.c 		r->dst = nla_get_le16(tb[FRA_DST]);
tb                165 net/decnet/dn_rules.c 			       struct nlattr **tb)
tb                175 net/decnet/dn_rules.c 	if (frh->src_len && (r->src != nla_get_le16(tb[FRA_SRC])))
tb                178 net/decnet/dn_rules.c 	if (frh->dst_len && (r->dst != nla_get_le16(tb[FRA_DST])))
tb                189 net/decnet/dn_rules.c 	struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
tb                193 net/decnet/dn_rules.c 	if (tb) {
tb                194 net/decnet/dn_rules.c 		if (!tb->lookup(tb, &fld, &res)) {
tb                411 net/decnet/dn_table.c 				struct dn_fib_table *tb,
tb                426 net/decnet/dn_table.c 				tb->n,
tb                440 net/decnet/dn_table.c 				struct dn_fib_table *tb,
tb                453 net/decnet/dn_table.c 		if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
tb                462 net/decnet/dn_table.c static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
tb                467 net/decnet/dn_table.c 	struct dn_hash *table = (struct dn_hash *)tb->data;
tb                477 net/decnet/dn_table.c 		if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
tb                494 net/decnet/dn_table.c 	struct dn_fib_table *tb;
tb                509 net/decnet/dn_table.c 		hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
tb                515 net/decnet/dn_table.c 			if (tb->dump(tb, skb, cb) < 0)
tb                529 net/decnet/dn_table.c static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
tb                532 net/decnet/dn_table.c 	struct dn_hash *table = (struct dn_hash *)tb->data;
tb                648 net/decnet/dn_table.c 			dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
tb                657 net/decnet/dn_table.c 	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);
tb                666 net/decnet/dn_table.c static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
tb                669 net/decnet/dn_table.c 	struct dn_hash *table = (struct dn_hash*)tb->data;
tb                721 net/decnet/dn_table.c 		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
tb                771 net/decnet/dn_table.c static int dn_fib_table_flush(struct dn_fib_table *tb)
tb                773 net/decnet/dn_table.c 	struct dn_hash *table = (struct dn_hash *)tb->data;
tb                790 net/decnet/dn_table.c static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowidn *flp, struct dn_fib_res *res)
tb                794 net/decnet/dn_table.c 	struct dn_hash *t = (struct dn_hash *)tb->data;
tb                894 net/decnet/dn_table.c 	struct dn_fib_table *tb;
tb                898 net/decnet/dn_table.c 		hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
tb                899 net/decnet/dn_table.c 			flushed += tb->flush(tb);
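
In the decnet entries tb is no longer an attribute array: it is a struct dn_fib_table *, and insert, delete, lookup, flush and dump are function pointers invoked through the object itself (tb->insert(tb, ...), tb->flush(tb)). A compact model of that ops-style second sense of tb, with toy_table and its methods invented for illustration:

#include <stdio.h>

/* Ops-style table object: behaviour lives in function pointers and
 * every call passes the object back in, C's manual vtable. */
struct toy_table {
	int n;					/* table id, like tb->n */
	int (*insert)(struct toy_table *tb, int key);
	int (*flush)(struct toy_table *tb);
	int nkeys;
};

static int hash_insert(struct toy_table *tb, int key)
{
	printf("table %d: insert %d\n", tb->n, key);
	tb->nkeys++;
	return 0;
}

static int hash_flush(struct toy_table *tb)
{
	int n = tb->nkeys;

	tb->nkeys = 0;
	return n;	/* how many entries went away */
}

int main(void)
{
	struct toy_table tb = { 255, hash_insert, hash_flush, 0 };

	tb.insert(&tb, 7);
	printf("flushed %d\n", tb.flush(&tb));
	return 0;
}
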
tb                 89 net/dsa/dsa_priv.h int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb                 94 net/dsa/dsa_priv.h int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
tb               1197 net/dsa/slave.c int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
tb               1208 net/dsa/slave.c int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
tb                 31 net/hsr/hsr_netlink.c 		       struct nlattr *tb[], struct nlattr *data[],
tb                113 net/ieee802154/6lowpan/core.c static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[],
tb                116 net/ieee802154/6lowpan/core.c 	if (tb[IFLA_ADDRESS]) {
tb                117 net/ieee802154/6lowpan/core.c 		if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
tb                124 net/ieee802154/6lowpan/core.c 			  struct nlattr *tb[], struct nlattr *data[],
tb                134 net/ieee802154/6lowpan/core.c 	if (!tb[IFLA_LINK])
tb                137 net/ieee802154/6lowpan/core.c 	wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK]));
tb                560 net/ieee802154/nl802154.c 	struct nlattr **tb = genl_family_attrbuf(&nl802154_fam);
tb                563 net/ieee802154/nl802154.c 					 tb, nl802154_fam.maxattr,
tb                572 net/ieee802154/nl802154.c 	if (tb[NL802154_ATTR_WPAN_PHY])
tb                573 net/ieee802154/nl802154.c 		state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]);
tb                574 net/ieee802154/nl802154.c 	if (tb[NL802154_ATTR_WPAN_DEV])
tb                575 net/ieee802154/nl802154.c 		state->filter_wpan_phy = nla_get_u64(tb[NL802154_ATTR_WPAN_DEV]) >> 32;
tb                576 net/ieee802154/nl802154.c 	if (tb[NL802154_ATTR_IFINDEX]) {
tb                579 net/ieee802154/nl802154.c 		int ifidx = nla_get_u32(tb[NL802154_ATTR_IFINDEX]);
tb                649 net/ipv4/devinet.c 	struct nlattr *tb[IFA_MAX+1];
tb                658 net/ipv4/devinet.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb                672 net/ipv4/devinet.c 		if (tb[IFA_LOCAL] &&
tb                673 net/ipv4/devinet.c 		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
tb                676 net/ipv4/devinet.c 		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
tb                679 net/ipv4/devinet.c 		if (tb[IFA_ADDRESS] &&
tb                681 net/ipv4/devinet.c 		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
tb                827 net/ipv4/devinet.c 	struct nlattr *tb[IFA_MAX+1];
tb                834 net/ipv4/devinet.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb                841 net/ipv4/devinet.c 	if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
tb                866 net/ipv4/devinet.c 	if (!tb[IFA_ADDRESS])
tb                867 net/ipv4/devinet.c 		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
tb                872 net/ipv4/devinet.c 	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
tb                877 net/ipv4/devinet.c 	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
tb                878 net/ipv4/devinet.c 	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
tb                880 net/ipv4/devinet.c 	if (tb[IFA_BROADCAST])
tb                881 net/ipv4/devinet.c 		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
tb                883 net/ipv4/devinet.c 	if (tb[IFA_LABEL])
tb                884 net/ipv4/devinet.c 		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
tb                888 net/ipv4/devinet.c 	if (tb[IFA_RT_PRIORITY])
tb                889 net/ipv4/devinet.c 		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
tb                891 net/ipv4/devinet.c 	if (tb[IFA_CACHEINFO]) {
tb                894 net/ipv4/devinet.c 		ci = nla_data(tb[IFA_CACHEINFO]);
tb               1722 net/ipv4/devinet.c 	struct nlattr *tb[IFA_MAX+1];
tb               1743 net/ipv4/devinet.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
tb               1749 net/ipv4/devinet.c 		if (!tb[i])
tb               1755 net/ipv4/devinet.c 			fillargs->netnsid = nla_get_s32(tb[i]);
tb               1956 net/ipv4/devinet.c 	struct nlattr *a, *tb[IFLA_INET_MAX+1];
tb               1962 net/ipv4/devinet.c 	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
tb               1967 net/ipv4/devinet.c 	if (tb[IFLA_INET_CONF]) {
tb               1968 net/ipv4/devinet.c 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
tb               1985 net/ipv4/devinet.c 	struct nlattr *a, *tb[IFLA_INET_MAX+1];
tb               1991 net/ipv4/devinet.c 	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
tb               1994 net/ipv4/devinet.c 	if (tb[IFLA_INET_CONF]) {
tb               1995 net/ipv4/devinet.c 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
tb               2122 net/ipv4/devinet.c 				      struct nlattr **tb,
tb               2134 net/ipv4/devinet.c 					      tb, NETCONFA_MAX,
tb               2138 net/ipv4/devinet.c 					    tb, NETCONFA_MAX,
tb               2144 net/ipv4/devinet.c 		if (!tb[i])
tb               2164 net/ipv4/devinet.c 	struct nlattr *tb[NETCONFA_MAX+1];
tb               2172 net/ipv4/devinet.c 	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
tb               2177 net/ipv4/devinet.c 	if (!tb[NETCONFA_IFINDEX])
tb               2180 net/ipv4/devinet.c 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
tb                 82 net/ipv4/fib_frontend.c 	struct fib_table *tb, *alias = NULL;
tb                 87 net/ipv4/fib_frontend.c 	tb = fib_get_table(net, id);
tb                 88 net/ipv4/fib_frontend.c 	if (tb)
tb                 89 net/ipv4/fib_frontend.c 		return tb;
tb                 94 net/ipv4/fib_frontend.c 	tb = fib_trie_table(id, alias);
tb                 95 net/ipv4/fib_frontend.c 	if (!tb)
tb                100 net/ipv4/fib_frontend.c 		rcu_assign_pointer(net->ipv4.fib_main, tb);
tb                103 net/ipv4/fib_frontend.c 		rcu_assign_pointer(net->ipv4.fib_default, tb);
tb                110 net/ipv4/fib_frontend.c 	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
tb                111 net/ipv4/fib_frontend.c 	return tb;
tb                118 net/ipv4/fib_frontend.c 	struct fib_table *tb;
tb                127 net/ipv4/fib_frontend.c 	hlist_for_each_entry_rcu(tb, head, tb_hlist,
tb                129 net/ipv4/fib_frontend.c 		if (tb->tb_id == id)
tb                130 net/ipv4/fib_frontend.c 			return tb;
tb                201 net/ipv4/fib_frontend.c 		struct fib_table *tb;
tb                203 net/ipv4/fib_frontend.c 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
tb                204 net/ipv4/fib_frontend.c 			flushed += fib_table_flush(net, tb, false);
tb                634 net/ipv4/fib_frontend.c 			struct fib_table *tb;
tb                637 net/ipv4/fib_frontend.c 				tb = fib_get_table(net, cfg.fc_table);
tb                638 net/ipv4/fib_frontend.c 				if (tb)
tb                639 net/ipv4/fib_frontend.c 					err = fib_table_delete(net, tb, &cfg,
tb                644 net/ipv4/fib_frontend.c 				tb = fib_new_table(net, cfg.fc_table);
tb                645 net/ipv4/fib_frontend.c 				if (tb)
tb                646 net/ipv4/fib_frontend.c 					err = fib_table_insert(net, tb,
tb                850 net/ipv4/fib_frontend.c 	struct fib_table *tb;
tb                863 net/ipv4/fib_frontend.c 	tb = fib_get_table(net, cfg.fc_table);
tb                864 net/ipv4/fib_frontend.c 	if (!tb) {
tb                870 net/ipv4/fib_frontend.c 	err = fib_table_delete(net, tb, &cfg, extack);
tb                880 net/ipv4/fib_frontend.c 	struct fib_table *tb;
tb                887 net/ipv4/fib_frontend.c 	tb = fib_new_table(net, cfg.fc_table);
tb                888 net/ipv4/fib_frontend.c 	if (!tb) {
tb                893 net/ipv4/fib_frontend.c 	err = fib_table_insert(net, tb, &cfg, extack);
tb                905 net/ipv4/fib_frontend.c 	struct nlattr *tb[RTA_MAX + 1];
tb                937 net/ipv4/fib_frontend.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
tb                945 net/ipv4/fib_frontend.c 		if (!tb[i])
tb                950 net/ipv4/fib_frontend.c 			filter->table_id = nla_get_u32(tb[i]);
tb                953 net/ipv4/fib_frontend.c 			ifindex = nla_get_u32(tb[i]);
tb                982 net/ipv4/fib_frontend.c 	struct fib_table *tb;
tb               1001 net/ipv4/fib_frontend.c 		tb = fib_get_table(net, filter.table_id);
tb               1002 net/ipv4/fib_frontend.c 		if (!tb) {
tb               1011 net/ipv4/fib_frontend.c 		err = fib_table_dump(tb, skb, cb, &filter);
tb               1024 net/ipv4/fib_frontend.c 		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
tb               1030 net/ipv4/fib_frontend.c 			err = fib_table_dump(tb, skb, cb, &filter);
tb               1064 net/ipv4/fib_frontend.c 	struct fib_table *tb;
tb               1082 net/ipv4/fib_frontend.c 	tb = fib_new_table(net, tb_id);
tb               1083 net/ipv4/fib_frontend.c 	if (!tb)
tb               1086 net/ipv4/fib_frontend.c 	cfg.fc_table = tb->tb_id;
tb               1094 net/ipv4/fib_frontend.c 		fib_table_insert(net, tb, &cfg, NULL);
tb               1096 net/ipv4/fib_frontend.c 		fib_table_delete(net, tb, &cfg, NULL);
tb               1338 net/ipv4/fib_frontend.c 	struct fib_table *tb;
tb               1342 net/ipv4/fib_frontend.c 	tb = fib_get_table(net, frn->tb_id_in);
tb               1345 net/ipv4/fib_frontend.c 	if (tb) {
tb               1348 net/ipv4/fib_frontend.c 		frn->tb_id = tb->tb_id;
tb               1349 net/ipv4/fib_frontend.c 		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
tb               1566 net/ipv4/fib_frontend.c 		struct fib_table *tb;
tb               1568 net/ipv4/fib_frontend.c 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
tb               1569 net/ipv4/fib_frontend.c 			hlist_del(&tb->tb_hlist);
tb               1570 net/ipv4/fib_frontend.c 			fib_table_flush(net, tb, true);
tb               1571 net/ipv4/fib_frontend.c 			fib_free_table(tb);
tb                220 net/ipv4/fib_rules.c 			       struct nlattr **tb,
tb                252 net/ipv4/fib_rules.c 		rule4->src = nla_get_in_addr(tb[FRA_SRC]);
tb                255 net/ipv4/fib_rules.c 		rule4->dst = nla_get_in_addr(tb[FRA_DST]);
tb                258 net/ipv4/fib_rules.c 	if (tb[FRA_FLOW]) {
tb                259 net/ipv4/fib_rules.c 		rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
tb                305 net/ipv4/fib_rules.c 			     struct nlattr **tb)
tb                319 net/ipv4/fib_rules.c 	if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
tb                323 net/ipv4/fib_rules.c 	if (frh->src_len && (rule4->src != nla_get_in_addr(tb[FRA_SRC])))
tb                326 net/ipv4/fib_rules.c 	if (frh->dst_len && (rule4->dst != nla_get_in_addr(tb[FRA_DST])))
tb               1993 net/ipv4/fib_semantics.c 	struct fib_table *tb = res->table;
tb               2008 net/ipv4/fib_semantics.c 		if (fa->tb_id != tb->tb_id)
tb               1120 net/ipv4/fib_trie.c int fib_table_insert(struct net *net, struct fib_table *tb,
tb               1124 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               1140 net/ipv4/fib_trie.c 	pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
tb               1150 net/ipv4/fib_trie.c 				tb->tb_id) : NULL;
tb               1180 net/ipv4/fib_trie.c 			    (fa->tb_id != tb->tb_id) ||
tb               1215 net/ipv4/fib_trie.c 			new_fa->tb_id = tb->tb_id;
tb               1226 net/ipv4/fib_trie.c 				  tb->tb_id, &cfg->fc_nlinfo, nlflags);
tb               1267 net/ipv4/fib_trie.c 	new_fa->tb_id = tb->tb_id;
tb               1280 net/ipv4/fib_trie.c 		tb->tb_num_default++;
tb               1312 net/ipv4/fib_trie.c int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
tb               1315 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *) tb->tb_data;
tb               1330 net/ipv4/fib_trie.c 		trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN);
tb               1416 net/ipv4/fib_trie.c 					trace_fib_table_lookup(tb->tb_id, flp,
tb               1462 net/ipv4/fib_trie.c 			trace_fib_table_lookup(tb->tb_id, flp, NULL, err);
tb               1498 net/ipv4/fib_trie.c 			res->table = tb;
tb               1503 net/ipv4/fib_trie.c 			trace_fib_table_lookup(tb->tb_id, flp, nhc, err);
tb               1547 net/ipv4/fib_trie.c int fib_table_delete(struct net *net, struct fib_table *tb,
tb               1550 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *) tb->tb_data;
tb               1567 net/ipv4/fib_trie.c 	fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id);
tb               1578 net/ipv4/fib_trie.c 		    (fa->tb_id != tb->tb_id) ||
tb               1601 net/ipv4/fib_trie.c 	rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
tb               1605 net/ipv4/fib_trie.c 		tb->tb_num_default--;
tb               1675 net/ipv4/fib_trie.c static void fib_trie_free(struct fib_table *tb)
tb               1677 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               1730 net/ipv4/fib_trie.c 	kfree(tb);
tb               1792 net/ipv4/fib_trie.c void fib_table_flush_external(struct fib_table *tb)
tb               1794 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               1840 net/ipv4/fib_trie.c 			if (tb->tb_id != fa->tb_id) {
tb               1861 net/ipv4/fib_trie.c int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
tb               1863 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               1909 net/ipv4/fib_trie.c 			if (!fi || tb->tb_id != fa->tb_id ||
tb               1948 net/ipv4/fib_trie.c static void __fib_info_notify_update(struct net *net, struct fib_table *tb,
tb               1951 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               1986 net/ipv4/fib_trie.c 			if (!fi || !fi->nh_updated || fa->tb_id != tb->tb_id)
tb               1990 net/ipv4/fib_trie.c 				  KEYLENGTH - fa->fa_slen, tb->tb_id,
tb               2011 net/ipv4/fib_trie.c 		struct fib_table *tb;
tb               2013 net/ipv4/fib_trie.c 		hlist_for_each_entry_rcu(tb, head, tb_hlist)
tb               2014 net/ipv4/fib_trie.c 			__fib_info_notify_update(net, tb, info);
tb               2019 net/ipv4/fib_trie.c 			    struct fib_table *tb, struct notifier_block *nb)
tb               2032 net/ipv4/fib_trie.c 		if (tb->tb_id != fa->tb_id)
tb               2040 net/ipv4/fib_trie.c static void fib_table_notify(struct net *net, struct fib_table *tb,
tb               2043 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               2048 net/ipv4/fib_trie.c 		fib_leaf_notify(net, l, tb, nb);
tb               2063 net/ipv4/fib_trie.c 		struct fib_table *tb;
tb               2065 net/ipv4/fib_trie.c 		hlist_for_each_entry_rcu(tb, head, tb_hlist)
tb               2066 net/ipv4/fib_trie.c 			fib_table_notify(net, tb, nb);
tb               2072 net/ipv4/fib_trie.c 	struct fib_table *tb = container_of(head, struct fib_table, rcu);
tb               2074 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               2076 net/ipv4/fib_trie.c 	if (tb->tb_data == tb->__data)
tb               2079 net/ipv4/fib_trie.c 	kfree(tb);
tb               2082 net/ipv4/fib_trie.c void fib_free_table(struct fib_table *tb)
tb               2084 net/ipv4/fib_trie.c 	call_rcu(&tb->rcu, __trie_free_rcu);
tb               2087 net/ipv4/fib_trie.c static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
tb               2113 net/ipv4/fib_trie.c 		if (tb->tb_id != fa->tb_id)
tb               2135 net/ipv4/fib_trie.c 						    tb->tb_id, fa->fa_type,
tb               2147 net/ipv4/fib_trie.c 			err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
tb               2167 net/ipv4/fib_trie.c int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
tb               2170 net/ipv4/fib_trie.c 	struct trie *t = (struct trie *)tb->tb_data;
tb               2187 net/ipv4/fib_trie.c 		err = fn_trie_dump_leaf(l, tb, skb, cb, filter);
tb               2224 net/ipv4/fib_trie.c 	struct fib_table *tb;
tb               2226 net/ipv4/fib_trie.c 	size_t sz = sizeof(*tb);
tb               2231 net/ipv4/fib_trie.c 	tb = kzalloc(sz, GFP_KERNEL);
tb               2232 net/ipv4/fib_trie.c 	if (!tb)
tb               2235 net/ipv4/fib_trie.c 	tb->tb_id = id;
tb               2236 net/ipv4/fib_trie.c 	tb->tb_num_default = 0;
tb               2237 net/ipv4/fib_trie.c 	tb->tb_data = (alias ? alias->__data : tb->__data);
tb               2240 net/ipv4/fib_trie.c 		return tb;
tb               2242 net/ipv4/fib_trie.c 	t = (struct trie *) tb->tb_data;
tb               2248 net/ipv4/fib_trie.c 		kfree(tb);
tb               2249 net/ipv4/fib_trie.c 		tb = NULL;
tb               2253 net/ipv4/fib_trie.c 	return tb;
tb               2260 net/ipv4/fib_trie.c 	struct fib_table *tb;
tb               2437 net/ipv4/fib_trie.c static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
tb               2439 net/ipv4/fib_trie.c 	if (tb->tb_id == RT_TABLE_LOCAL)
tb               2441 net/ipv4/fib_trie.c 	else if (tb->tb_id == RT_TABLE_MAIN)
tb               2444 net/ipv4/fib_trie.c 		seq_printf(seq, "Id %d:\n", tb->tb_id);
tb               2461 net/ipv4/fib_trie.c 		struct fib_table *tb;
tb               2463 net/ipv4/fib_trie.c 		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
tb               2464 net/ipv4/fib_trie.c 			struct trie *t = (struct trie *) tb->tb_data;
tb               2470 net/ipv4/fib_trie.c 			fib_table_print(seq, tb);
tb               2494 net/ipv4/fib_trie.c 		struct fib_table *tb;
tb               2496 net/ipv4/fib_trie.c 		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
tb               2500 net/ipv4/fib_trie.c 						    (struct trie *) tb->tb_data);
tb               2503 net/ipv4/fib_trie.c 					iter->tb = tb;
tb               2523 net/ipv4/fib_trie.c 	struct fib_table *tb = iter->tb;
tb               2535 net/ipv4/fib_trie.c 	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
tb               2536 net/ipv4/fib_trie.c 	while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
tb               2537 net/ipv4/fib_trie.c 		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
tb               2538 net/ipv4/fib_trie.c 		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
tb               2546 net/ipv4/fib_trie.c 		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
tb               2547 net/ipv4/fib_trie.c 			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
tb               2555 net/ipv4/fib_trie.c 	iter->tb = tb;
tb               2615 net/ipv4/fib_trie.c 		fib_table_print(seq, iter->tb);
tb               2704 net/ipv4/fib_trie.c 	struct fib_table *tb;
tb               2709 net/ipv4/fib_trie.c 	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
tb               2710 net/ipv4/fib_trie.c 	if (!tb)
tb               2713 net/ipv4/fib_trie.c 	iter->main_tb = tb;
tb               2714 net/ipv4/fib_trie.c 	t = (struct trie *)tb->tb_data;
tb               2781 net/ipv4/fib_trie.c 	struct fib_table *tb = iter->main_tb;
tb               2804 net/ipv4/fib_trie.c 		if (fa->tb_id != tb->tb_id)
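
The fib_trie.c block above is the implementation behind the struct fib_table handle: fib_table_insert(), fib_table_lookup(), fib_table_delete(), flush, notify and dump all take tb and operate on the trie stored in tb->tb_data. A sketch of a hypothetical caller wiring those entry points together, following the fib_frontend.c usage (error handling trimmed):

	struct fib_table *tb = fib_new_table(net, tb_id);	/* get or create */
	if (!tb)
		return -ENOBUFS;

	cfg.fc_table = tb->tb_id;
	err = fib_table_insert(net, tb, &cfg, NULL);		/* add a route */
	err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); /* resolve */
	err = fib_table_delete(net, tb, &cfg, NULL);		/* remove it */
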
tb                134 net/ipv4/inet_connection_sock.c 				  const struct inet_bind_bucket *tb,
tb                149 net/ipv4/inet_connection_sock.c 	sk_for_each_bound(sk2, &tb->owners) {
tb                185 net/ipv4/inet_connection_sock.c 	struct inet_bind_bucket *tb;
tb                224 net/ipv4/inet_connection_sock.c 		inet_bind_bucket_for_each(tb, &head->chain)
tb                225 net/ipv4/inet_connection_sock.c 			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
tb                226 net/ipv4/inet_connection_sock.c 			    tb->port == port) {
tb                227 net/ipv4/inet_connection_sock.c 				if (!inet_csk_bind_conflict(sk, tb, false, false))
tb                231 net/ipv4/inet_connection_sock.c 		tb = NULL;
tb                250 net/ipv4/inet_connection_sock.c 	*tb_ret = tb;
tb                254 net/ipv4/inet_connection_sock.c static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
tb                259 net/ipv4/inet_connection_sock.c 	if (tb->fastreuseport <= 0)
tb                265 net/ipv4/inet_connection_sock.c 	if (!uid_eq(tb->fastuid, uid))
tb                272 net/ipv4/inet_connection_sock.c 	if (tb->fastreuseport == FASTREUSEPORT_ANY)
tb                275 net/ipv4/inet_connection_sock.c 	if (tb->fast_sk_family == AF_INET6)
tb                276 net/ipv4/inet_connection_sock.c 		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
tb                278 net/ipv4/inet_connection_sock.c 					    tb->fast_rcv_saddr,
tb                280 net/ipv4/inet_connection_sock.c 					    tb->fast_ipv6_only,
tb                283 net/ipv4/inet_connection_sock.c 	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
tb                298 net/ipv4/inet_connection_sock.c 	struct inet_bind_bucket *tb = NULL;
tb                305 net/ipv4/inet_connection_sock.c 		head = inet_csk_find_open_port(sk, &tb, &port);
tb                308 net/ipv4/inet_connection_sock.c 		if (!tb)
tb                315 net/ipv4/inet_connection_sock.c 	inet_bind_bucket_for_each(tb, &head->chain)
tb                316 net/ipv4/inet_connection_sock.c 		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
tb                317 net/ipv4/inet_connection_sock.c 		    tb->port == port)
tb                320 net/ipv4/inet_connection_sock.c 	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
tb                322 net/ipv4/inet_connection_sock.c 	if (!tb)
tb                325 net/ipv4/inet_connection_sock.c 	if (!hlist_empty(&tb->owners)) {
tb                329 net/ipv4/inet_connection_sock.c 		if ((tb->fastreuse > 0 && reuse) ||
tb                330 net/ipv4/inet_connection_sock.c 		    sk_reuseport_match(tb, sk))
tb                332 net/ipv4/inet_connection_sock.c 		if (inet_csk_bind_conflict(sk, tb, true, true))
tb                336 net/ipv4/inet_connection_sock.c 	if (hlist_empty(&tb->owners)) {
tb                337 net/ipv4/inet_connection_sock.c 		tb->fastreuse = reuse;
tb                339 net/ipv4/inet_connection_sock.c 			tb->fastreuseport = FASTREUSEPORT_ANY;
tb                340 net/ipv4/inet_connection_sock.c 			tb->fastuid = uid;
tb                341 net/ipv4/inet_connection_sock.c 			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb                342 net/ipv4/inet_connection_sock.c 			tb->fast_ipv6_only = ipv6_only_sock(sk);
tb                343 net/ipv4/inet_connection_sock.c 			tb->fast_sk_family = sk->sk_family;
tb                345 net/ipv4/inet_connection_sock.c 			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
tb                348 net/ipv4/inet_connection_sock.c 			tb->fastreuseport = 0;
tb                352 net/ipv4/inet_connection_sock.c 			tb->fastreuse = 0;
tb                365 net/ipv4/inet_connection_sock.c 			if (!sk_reuseport_match(tb, sk)) {
tb                366 net/ipv4/inet_connection_sock.c 				tb->fastreuseport = FASTREUSEPORT_STRICT;
tb                367 net/ipv4/inet_connection_sock.c 				tb->fastuid = uid;
tb                368 net/ipv4/inet_connection_sock.c 				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb                369 net/ipv4/inet_connection_sock.c 				tb->fast_ipv6_only = ipv6_only_sock(sk);
tb                370 net/ipv4/inet_connection_sock.c 				tb->fast_sk_family = sk->sk_family;
tb                372 net/ipv4/inet_connection_sock.c 				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
tb                376 net/ipv4/inet_connection_sock.c 			tb->fastreuseport = 0;
tb                380 net/ipv4/inet_connection_sock.c 		inet_bind_hash(sk, tb, port);
tb                381 net/ipv4/inet_connection_sock.c 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
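
In inet_connection_sock.c, tb is a struct inet_bind_bucket pointer: one bucket per (net, l3mdev, port), with tb->owners listing every socket bound there and the fastreuse/fastreuseport fields caching whether a new SO_REUSEADDR/SO_REUSEPORT bind may skip the full conflict scan. The lookup-or-create step reduces to the sketch below (argument order follows the excerpted tree; the exact control flow in the source differs slightly):

	struct inet_bind_bucket *tb;

	inet_bind_bucket_for_each(tb, &head->chain)
		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
		    tb->port == port)
			break;			/* reuse the existing bucket */
	if (!tb)				/* first bind to this port */
		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
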
tb                 67 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
tb                 69 net/ipv4/inet_hashtables.c 	if (tb) {
tb                 70 net/ipv4/inet_hashtables.c 		write_pnet(&tb->ib_net, net);
tb                 71 net/ipv4/inet_hashtables.c 		tb->l3mdev    = l3mdev;
tb                 72 net/ipv4/inet_hashtables.c 		tb->port      = snum;
tb                 73 net/ipv4/inet_hashtables.c 		tb->fastreuse = 0;
tb                 74 net/ipv4/inet_hashtables.c 		tb->fastreuseport = 0;
tb                 75 net/ipv4/inet_hashtables.c 		INIT_HLIST_HEAD(&tb->owners);
tb                 76 net/ipv4/inet_hashtables.c 		hlist_add_head(&tb->node, &head->chain);
tb                 78 net/ipv4/inet_hashtables.c 	return tb;
tb                 84 net/ipv4/inet_hashtables.c void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
tb                 86 net/ipv4/inet_hashtables.c 	if (hlist_empty(&tb->owners)) {
tb                 87 net/ipv4/inet_hashtables.c 		__hlist_del(&tb->node);
tb                 88 net/ipv4/inet_hashtables.c 		kmem_cache_free(cachep, tb);
tb                 92 net/ipv4/inet_hashtables.c void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
tb                 96 net/ipv4/inet_hashtables.c 	sk_add_bind_node(sk, &tb->owners);
tb                 97 net/ipv4/inet_hashtables.c 	inet_csk(sk)->icsk_bind_hash = tb;
tb                109 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb;
tb                112 net/ipv4/inet_hashtables.c 	tb = inet_csk(sk)->icsk_bind_hash;
tb                116 net/ipv4/inet_hashtables.c 	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
tb                135 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb;
tb                139 net/ipv4/inet_hashtables.c 	tb = inet_csk(sk)->icsk_bind_hash;
tb                140 net/ipv4/inet_hashtables.c 	if (unlikely(!tb)) {
tb                144 net/ipv4/inet_hashtables.c 	if (tb->port != port) {
tb                152 net/ipv4/inet_hashtables.c 		inet_bind_bucket_for_each(tb, &head->chain) {
tb                153 net/ipv4/inet_hashtables.c 			if (net_eq(ib_net(tb), sock_net(sk)) &&
tb                154 net/ipv4/inet_hashtables.c 			    tb->l3mdev == l3mdev && tb->port == port)
tb                157 net/ipv4/inet_hashtables.c 		if (!tb) {
tb                158 net/ipv4/inet_hashtables.c 			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
tb                161 net/ipv4/inet_hashtables.c 			if (!tb) {
tb                167 net/ipv4/inet_hashtables.c 	inet_bind_hash(child, tb, port);
tb                518 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
tb                528 net/ipv4/inet_hashtables.c 		    inet_csk(sk2)->icsk_bind_hash == tb &&
tb                629 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb;
tb                638 net/ipv4/inet_hashtables.c 		tb = inet_csk(sk)->icsk_bind_hash;
tb                640 net/ipv4/inet_hashtables.c 		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
tb                679 net/ipv4/inet_hashtables.c 		inet_bind_bucket_for_each(tb, &head->chain) {
tb                680 net/ipv4/inet_hashtables.c 			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
tb                681 net/ipv4/inet_hashtables.c 			    tb->port == port) {
tb                682 net/ipv4/inet_hashtables.c 				if (tb->fastreuse >= 0 ||
tb                683 net/ipv4/inet_hashtables.c 				    tb->fastreuseport >= 0)
tb                685 net/ipv4/inet_hashtables.c 				WARN_ON(hlist_empty(&tb->owners));
tb                693 net/ipv4/inet_hashtables.c 		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
tb                695 net/ipv4/inet_hashtables.c 		if (!tb) {
tb                699 net/ipv4/inet_hashtables.c 		tb->fastreuse = -1;
tb                700 net/ipv4/inet_hashtables.c 		tb->fastreuseport = -1;
tb                717 net/ipv4/inet_hashtables.c 	inet_bind_hash(sk, tb, port);
tb                 32 net/ipv4/inet_timewait_sock.c 	struct inet_bind_bucket *tb = tw->tw_tb;
tb                 34 net/ipv4/inet_timewait_sock.c 	if (!tb)
tb                 39 net/ipv4/inet_timewait_sock.c 	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
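
inet_hashtables.c and inet_timewait_sock.c show the bucket's lifecycle: created on first bind, joined via inet_bind_hash(), and freed by inet_bind_bucket_destroy() only once its owners list is empty, so the list itself acts as the reference count. A sketch of the unbind side, condensed from the inet_put_port-style teardown (helper names assumed from context):

	/* Sketch: drop one owner; the bucket dies with its last owner. */
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
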
tb               1003 net/ipv4/ip_gre.c static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1027 net/ipv4/ip_gre.c static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1032 net/ipv4/ip_gre.c 	if (tb[IFLA_ADDRESS]) {
tb               1033 net/ipv4/ip_gre.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb               1035 net/ipv4/ip_gre.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb               1049 net/ipv4/ip_gre.c 	return ipgre_tunnel_validate(tb, data, extack);
tb               1052 net/ipv4/ip_gre.c static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1061 net/ipv4/ip_gre.c 	ret = ipgre_tap_validate(tb, data, extack);
tb               1090 net/ipv4/ip_gre.c 				struct nlattr *tb[],
tb               1157 net/ipv4/ip_gre.c 				struct nlattr *tb[],
tb               1164 net/ipv4/ip_gre.c 	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
tb               1310 net/ipv4/ip_gre.c 			 struct nlattr *tb[], struct nlattr *data[],
tb               1321 net/ipv4/ip_gre.c 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
tb               1324 net/ipv4/ip_gre.c 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
tb               1328 net/ipv4/ip_gre.c 			  struct nlattr *tb[], struct nlattr *data[],
tb               1339 net/ipv4/ip_gre.c 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
tb               1342 net/ipv4/ip_gre.c 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
tb               1345 net/ipv4/ip_gre.c static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1358 net/ipv4/ip_gre.c 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
tb               1362 net/ipv4/ip_gre.c 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
tb               1369 net/ipv4/ip_gre.c 	ipgre_link_update(dev, !tb[IFLA_MTU]);
tb               1374 net/ipv4/ip_gre.c static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1387 net/ipv4/ip_gre.c 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
tb               1391 net/ipv4/ip_gre.c 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
tb               1598 net/ipv4/ip_gre.c 	struct nlattr *tb[IFLA_MAX + 1];
tb               1604 net/ipv4/ip_gre.c 	memset(&tb, 0, sizeof(tb));
tb               1607 net/ipv4/ip_gre.c 			       &ipgre_tap_ops, tb, NULL);
tb               1615 net/ipv4/ip_gre.c 	err = ipgre_newlink(net, dev, tb, NULL, NULL);
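
In the ip_gre.c hits, tb[] carries the generic IFLA_* attributes handed to every rtnl_link_ops callback, while data[] carries the tunnel-specific ones; validate, newlink and changelink all receive both. The tap validate shown above amounts to the following (the function name here is illustrative):

	static int tap_validate(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
	{
		if (tb[IFLA_ADDRESS]) {
			if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
				return -EINVAL;
			if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
				return -EADDRNOTAVAIL;
		}
		return 0;
	}
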
tb               1114 net/ipv4/ip_tunnel.c int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
tb               1141 net/ipv4/ip_tunnel.c 	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
tb               1145 net/ipv4/ip_tunnel.c 	if (tb[IFLA_MTU]) {
tb               1166 net/ipv4/ip_tunnel.c int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1199 net/ipv4/ip_tunnel.c 	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
tb                229 net/ipv4/ip_tunnel_core.c 	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
tb                232 net/ipv4/ip_tunnel_core.c 	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
tb                253 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP_ID])
tb                254 net/ipv4/ip_tunnel_core.c 		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);
tb                256 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP_DST])
tb                257 net/ipv4/ip_tunnel_core.c 		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);
tb                259 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP_SRC])
tb                260 net/ipv4/ip_tunnel_core.c 		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);
tb                262 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP_TTL])
tb                263 net/ipv4/ip_tunnel_core.c 		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
tb                265 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP_TOS])
tb                266 net/ipv4/ip_tunnel_core.c 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
tb                268 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP_FLAGS])
tb                269 net/ipv4/ip_tunnel_core.c 		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);
tb                346 net/ipv4/ip_tunnel_core.c 	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
tb                349 net/ipv4/ip_tunnel_core.c 	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
tb                362 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP6_ID])
tb                363 net/ipv4/ip_tunnel_core.c 		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);
tb                365 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP6_DST])
tb                366 net/ipv4/ip_tunnel_core.c 		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
tb                368 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP6_SRC])
tb                369 net/ipv4/ip_tunnel_core.c 		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
tb                371 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP6_HOPLIMIT])
tb                372 net/ipv4/ip_tunnel_core.c 		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
tb                374 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP6_TC])
tb                375 net/ipv4/ip_tunnel_core.c 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
tb                377 net/ipv4/ip_tunnel_core.c 	if (tb[LWTUNNEL_IP6_FLAGS])
tb                378 net/ipv4/ip_tunnel_core.c 		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);
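
ip_tunnel_core.c builds lightweight-tunnel metadata the same way for IPv4 and IPv6: one nested parse into a local tb[], then each optional LWTUNNEL_IP(6)_* attribute fills a field of the tunnel key. Condensed sketch (the policy name ip_tun_policy is assumed, as it is cut off in the excerpt; error paths trimmed):

	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
					  ip_tun_policy, extack);
	if (err < 0)
		return err;

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);
	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
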
tb                536 net/ipv4/ip_vti.c static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
tb                575 net/ipv4/ip_vti.c 		       struct nlattr *tb[], struct nlattr *data[],
tb                582 net/ipv4/ip_vti.c 	return ip_tunnel_newlink(dev, tb, &parms, fwmark);
tb                585 net/ipv4/ip_vti.c static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
tb                594 net/ipv4/ip_vti.c 	return ip_tunnel_changelink(dev, tb, &p, fwmark);
tb                400 net/ipv4/ipip.c static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
tb                495 net/ipv4/ipip.c 			struct nlattr *tb[], struct nlattr *data[],
tb                511 net/ipv4/ipip.c 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
tb                514 net/ipv4/ipip.c static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
tb                539 net/ipv4/ipip.c 	return ip_tunnel_changelink(dev, tb, &p, fwmark);
tb                201 net/ipv4/ipmr.c 			       struct fib_rule_hdr *frh, struct nlattr **tb,
tb                208 net/ipv4/ipmr.c 			     struct nlattr **tb)
tb               2485 net/ipv4/ipmr.c 				       struct nlattr **tb,
tb               2497 net/ipv4/ipmr.c 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               2509 net/ipv4/ipmr.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               2514 net/ipv4/ipmr.c 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
tb               2515 net/ipv4/ipmr.c 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
tb               2521 net/ipv4/ipmr.c 		if (!tb[i])
tb               2542 net/ipv4/ipmr.c 	struct nlattr *tb[RTA_MAX + 1];
tb               2550 net/ipv4/ipmr.c 	err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
tb               2554 net/ipv4/ipmr.c 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
tb               2555 net/ipv4/ipmr.c 	grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
tb               2556 net/ipv4/ipmr.c 	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
tb                 35 net/ipv4/netfilter/nft_dup_ipv4.c 			     const struct nlattr * const tb[])
tb                 40 net/ipv4/netfilter/nft_dup_ipv4.c 	if (tb[NFTA_DUP_SREG_ADDR] == NULL)
tb                 43 net/ipv4/netfilter/nft_dup_ipv4.c 	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
tb                 48 net/ipv4/netfilter/nft_dup_ipv4.c 	if (tb[NFTA_DUP_SREG_DEV] != NULL) {
tb                 49 net/ipv4/netfilter/nft_dup_ipv4.c 		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
tb                168 net/ipv4/netfilter/nft_fib_ipv4.c 		    const struct nlattr * const tb[])
tb                172 net/ipv4/netfilter/nft_fib_ipv4.c 	if (!tb[NFTA_FIB_RESULT])
tb                175 net/ipv4/netfilter/nft_fib_ipv4.c 	result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
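
The nft_dup_ipv4/nft_fib_ipv4 hits show the nf_tables expression-init shape: required attributes are presence-checked, then parsed into source registers. In sketch form:

	if (!tb[NFTA_DUP_SREG_ADDR])
		return -EINVAL;
	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);

	if (tb[NFTA_DUP_SREG_DEV])	/* optional output device register */
		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
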
tb                399 net/ipv4/nexthop.c static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
tb                402 net/ipv4/nexthop.c 	unsigned int len = nla_len(tb[NHA_GROUP]);
tb                415 net/ipv4/nexthop.c 	nhg = nla_data(tb[NHA_GROUP]);
tb                433 net/ipv4/nexthop.c 	nhg = nla_data(tb[NHA_GROUP]);
tb                446 net/ipv4/nexthop.c 		if (!tb[i])
tb               1331 net/ipv4/nexthop.c 	struct nlattr *tb[NHA_MAX + 1];
tb               1334 net/ipv4/nexthop.c 	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
tb               1354 net/ipv4/nexthop.c 		if (tb[NHA_GROUP])
tb               1362 net/ipv4/nexthop.c 	if (tb[NHA_GROUPS] || tb[NHA_MASTER]) {
tb               1377 net/ipv4/nexthop.c 	if (tb[NHA_ID])
tb               1378 net/ipv4/nexthop.c 		cfg->nh_id = nla_get_u32(tb[NHA_ID]);
tb               1380 net/ipv4/nexthop.c 	if (tb[NHA_GROUP]) {
tb               1385 net/ipv4/nexthop.c 		cfg->nh_grp = tb[NHA_GROUP];
tb               1388 net/ipv4/nexthop.c 		if (tb[NHA_GROUP_TYPE])
tb               1389 net/ipv4/nexthop.c 			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
tb               1395 net/ipv4/nexthop.c 		err = nh_check_attr_group(net, tb, extack);
tb               1401 net/ipv4/nexthop.c 	if (tb[NHA_BLACKHOLE]) {
tb               1402 net/ipv4/nexthop.c 		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
tb               1403 net/ipv4/nexthop.c 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
tb               1413 net/ipv4/nexthop.c 	if (!tb[NHA_OIF]) {
tb               1418 net/ipv4/nexthop.c 	cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
tb               1436 net/ipv4/nexthop.c 	if (tb[NHA_GATEWAY]) {
tb               1437 net/ipv4/nexthop.c 		struct nlattr *gwa = tb[NHA_GATEWAY];
tb               1468 net/ipv4/nexthop.c 	if (tb[NHA_ENCAP]) {
tb               1469 net/ipv4/nexthop.c 		cfg->nh_encap = tb[NHA_ENCAP];
tb               1471 net/ipv4/nexthop.c 		if (!tb[NHA_ENCAP_TYPE]) {
tb               1476 net/ipv4/nexthop.c 		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
tb               1481 net/ipv4/nexthop.c 	} else if (tb[NHA_ENCAP_TYPE]) {
tb               1515 net/ipv4/nexthop.c 	struct nlattr *tb[NHA_MAX + 1];
tb               1518 net/ipv4/nexthop.c 	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
tb               1525 net/ipv4/nexthop.c 		if (!tb[i])
tb               1532 net/ipv4/nexthop.c 			NL_SET_ERR_MSG_ATTR(extack, tb[i],
tb               1542 net/ipv4/nexthop.c 	if (!tb[NHA_ID]) {
tb               1547 net/ipv4/nexthop.c 	*id = nla_get_u32(tb[NHA_ID]);
tb               1664 net/ipv4/nexthop.c 	struct nlattr *tb[NHA_MAX + 1];
tb               1669 net/ipv4/nexthop.c 	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
tb               1675 net/ipv4/nexthop.c 		if (!tb[i])
tb               1680 net/ipv4/nexthop.c 			idx = nla_get_u32(tb[i]);
tb               1688 net/ipv4/nexthop.c 			idx = nla_get_u32(tb[i]);
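
nexthop.c parses NHA_* attributes into a nexthop config, and much of the work is mutual-exclusion policy: a nexthop is either a group (NHA_GROUP) or a device nexthop, and a blackhole may not carry gateway, device or encap attributes. Condensed from the checks above (not the exact control flow):

	if (tb[NHA_BLACKHOLE] &&
	    (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
	     tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]))
		return -EINVAL;		/* blackhole excludes the rest */

	if (!tb[NHA_GROUP] && !tb[NHA_OIF])
		return -EINVAL;		/* device nexthops need NHA_OIF */
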
tb               3008 net/ipv4/route.c 				       struct nlattr **tb,
tb               3021 net/ipv4/route.c 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               3040 net/ipv4/route.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               3045 net/ipv4/route.c 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
tb               3046 net/ipv4/route.c 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
tb               3052 net/ipv4/route.c 		if (!tb[i])
tb               3079 net/ipv4/route.c 	struct nlattr *tb[RTA_MAX+1];
tb               3095 net/ipv4/route.c 	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
tb               3100 net/ipv4/route.c 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
tb               3101 net/ipv4/route.c 	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
tb               3102 net/ipv4/route.c 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
tb               3103 net/ipv4/route.c 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
tb               3104 net/ipv4/route.c 	if (tb[RTA_UID])
tb               3105 net/ipv4/route.c 		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
tb               3109 net/ipv4/route.c 	if (tb[RTA_IP_PROTO]) {
tb               3110 net/ipv4/route.c 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
tb               3116 net/ipv4/route.c 	if (tb[RTA_SPORT])
tb               3117 net/ipv4/route.c 		sport = nla_get_be16(tb[RTA_SPORT]);
tb               3119 net/ipv4/route.c 	if (tb[RTA_DPORT])
tb               3120 net/ipv4/route.c 		dport = nla_get_be16(tb[RTA_DPORT]);
tb               3129 net/ipv4/route.c 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
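
In route.c the getroute handler validates strictly, then lets absent tb[RTA_*] entries default to zero while present ones feed the flowi4 used for the lookup; in sketch form:

	fl4.daddr = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	fl4.saddr = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
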
tb                599 net/ipv6/addrconf.c 				       struct nlattr **tb,
tb                611 net/ipv6/addrconf.c 					      tb, NETCONFA_MAX,
tb                615 net/ipv6/addrconf.c 					    tb, NETCONFA_MAX,
tb                621 net/ipv6/addrconf.c 		if (!tb[i])
tb                641 net/ipv6/addrconf.c 	struct nlattr *tb[NETCONFA_MAX+1];
tb                649 net/ipv6/addrconf.c 	err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
tb                653 net/ipv6/addrconf.c 	if (!tb[NETCONFA_IFINDEX])
tb                657 net/ipv6/addrconf.c 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
tb               4574 net/ipv6/addrconf.c 	struct nlattr *tb[IFA_MAX+1];
tb               4579 net/ipv6/addrconf.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb               4585 net/ipv6/addrconf.c 	pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
tb               4589 net/ipv6/addrconf.c 	ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
tb               4758 net/ipv6/addrconf.c 	struct nlattr *tb[IFA_MAX+1];
tb               4766 net/ipv6/addrconf.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb               4774 net/ipv6/addrconf.c 	cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
tb               4780 net/ipv6/addrconf.c 	if (tb[IFA_RT_PRIORITY])
tb               4781 net/ipv6/addrconf.c 		cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
tb               4786 net/ipv6/addrconf.c 	if (tb[IFA_CACHEINFO]) {
tb               4789 net/ipv6/addrconf.c 		ci = nla_data(tb[IFA_CACHEINFO]);
tb               4798 net/ipv6/addrconf.c 	if (tb[IFA_FLAGS])
tb               4799 net/ipv6/addrconf.c 		cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
tb               5102 net/ipv6/addrconf.c 	struct nlattr *tb[IFA_MAX+1];
tb               5123 net/ipv6/addrconf.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
tb               5129 net/ipv6/addrconf.c 		if (!tb[i])
tb               5135 net/ipv6/addrconf.c 			fillargs->netnsid = nla_get_s32(tb[i]);
tb               5256 net/ipv6/addrconf.c 				       struct nlattr **tb,
tb               5268 net/ipv6/addrconf.c 		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb               5277 net/ipv6/addrconf.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
tb               5283 net/ipv6/addrconf.c 		if (!tb[i])
tb               5313 net/ipv6/addrconf.c 	struct nlattr *tb[IFA_MAX+1];
tb               5320 net/ipv6/addrconf.c 	err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
tb               5324 net/ipv6/addrconf.c 	if (tb[IFA_TARGET_NETNSID]) {
tb               5325 net/ipv6/addrconf.c 		fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
tb               5333 net/ipv6/addrconf.c 	addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
tb               5719 net/ipv6/addrconf.c 	struct nlattr *tb[IFLA_INET6_MAX + 1];
tb               5729 net/ipv6/addrconf.c 	err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
tb               5734 net/ipv6/addrconf.c 	if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
tb               5737 net/ipv6/addrconf.c 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
tb               5738 net/ipv6/addrconf.c 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
tb               5752 net/ipv6/addrconf.c 	struct nlattr *tb[IFLA_INET6_MAX + 1];
tb               5758 net/ipv6/addrconf.c 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
tb               5761 net/ipv6/addrconf.c 	if (tb[IFLA_INET6_TOKEN]) {
tb               5762 net/ipv6/addrconf.c 		err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
tb               5767 net/ipv6/addrconf.c 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
tb               5768 net/ipv6/addrconf.c 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
tb                381 net/ipv6/addrlabel.c 	struct nlattr *tb[IFAL_MAX+1];
tb                386 net/ipv6/addrlabel.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifal), tb, IFAL_MAX,
tb                397 net/ipv6/addrlabel.c 	if (!tb[IFAL_ADDRESS])
tb                399 net/ipv6/addrlabel.c 	pfx = nla_data(tb[IFAL_ADDRESS]);
tb                401 net/ipv6/addrlabel.c 	if (!tb[IFAL_LABEL])
tb                403 net/ipv6/addrlabel.c 	label = nla_get_u32(tb[IFAL_LABEL]);
tb                528 net/ipv6/addrlabel.c 				    struct nlattr **tb,
tb                540 net/ipv6/addrlabel.c 		return nlmsg_parse_deprecated(nlh, sizeof(*ifal), tb,
tb                549 net/ipv6/addrlabel.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifal), tb, IFAL_MAX,
tb                555 net/ipv6/addrlabel.c 		if (!tb[i])
tb                575 net/ipv6/addrlabel.c 	struct nlattr *tb[IFAL_MAX+1];
tb                582 net/ipv6/addrlabel.c 	err = ip6addrlbl_valid_get_req(in_skb, nlh, tb, extack);
tb                596 net/ipv6/addrlabel.c 	if (!tb[IFAL_ADDRESS])
tb                598 net/ipv6/addrlabel.c 	addr = nla_data(tb[IFAL_ADDRESS]);
tb                341 net/ipv6/fib6_rules.c 			       struct nlattr **tb,
tb                361 net/ipv6/fib6_rules.c 		rule6->src.addr = nla_get_in6_addr(tb[FRA_SRC]);
tb                364 net/ipv6/fib6_rules.c 		rule6->dst.addr = nla_get_in6_addr(tb[FRA_DST]);
tb                391 net/ipv6/fib6_rules.c 			     struct nlattr **tb)
tb                405 net/ipv6/fib6_rules.c 	    nla_memcmp(tb[FRA_SRC], &rule6->src.addr, sizeof(struct in6_addr)))
tb                409 net/ipv6/fib6_rules.c 	    nla_memcmp(tb[FRA_DST], &rule6->dst.addr, sizeof(struct in6_addr)))
tb                135 net/ipv6/ila/ila_lwt.c 	struct nlattr *tb[ILA_ATTR_MAX + 1];
tb                149 net/ipv6/ila/ila_lwt.c 	ret = nla_parse_nested_deprecated(tb, ILA_ATTR_MAX, nla,
tb                154 net/ipv6/ila/ila_lwt.c 	if (!tb[ILA_ATTR_LOCATOR])
tb                159 net/ipv6/ila/ila_lwt.c 	if (tb[ILA_ATTR_IDENT_TYPE])
tb                160 net/ipv6/ila/ila_lwt.c 		ident_type = nla_get_u8(tb[ILA_ATTR_IDENT_TYPE]);
tb                194 net/ipv6/ila/ila_lwt.c 	if (tb[ILA_ATTR_HOOK_TYPE])
tb                195 net/ipv6/ila/ila_lwt.c 		hook_type = nla_get_u8(tb[ILA_ATTR_HOOK_TYPE]);
tb                208 net/ipv6/ila/ila_lwt.c 	if (tb[ILA_ATTR_CSUM_MODE])
tb                209 net/ipv6/ila/ila_lwt.c 		csum_mode = nla_get_u8(tb[ILA_ATTR_CSUM_MODE]);
tb                236 net/ipv6/ila/ila_lwt.c 	p->locator.v64 = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
tb                217 net/ipv6/ip6_fib.c static void fib6_link_table(struct net *net, struct fib6_table *tb)
tb                225 net/ipv6/ip6_fib.c 	spin_lock_init(&tb->tb6_lock);
tb                226 net/ipv6/ip6_fib.c 	h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);
tb                232 net/ipv6/ip6_fib.c 	hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
tb                255 net/ipv6/ip6_fib.c 	struct fib6_table *tb;
tb                259 net/ipv6/ip6_fib.c 	tb = fib6_get_table(net, id);
tb                260 net/ipv6/ip6_fib.c 	if (tb)
tb                261 net/ipv6/ip6_fib.c 		return tb;
tb                263 net/ipv6/ip6_fib.c 	tb = fib6_alloc_table(net, id);
tb                264 net/ipv6/ip6_fib.c 	if (tb)
tb                265 net/ipv6/ip6_fib.c 		fib6_link_table(net, tb);
tb                267 net/ipv6/ip6_fib.c 	return tb;
tb                273 net/ipv6/ip6_fib.c 	struct fib6_table *tb;
tb                282 net/ipv6/ip6_fib.c 	hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
tb                283 net/ipv6/ip6_fib.c 		if (tb->tb6_id == id) {
tb                285 net/ipv6/ip6_fib.c 			return tb;
tb                350 net/ipv6/ip6_fib.c 		struct fib6_table *tb;
tb                352 net/ipv6/ip6_fib.c 		hlist_for_each_entry_rcu(tb, head, tb6_hlist)
tb                353 net/ipv6/ip6_fib.c 			fib_seq += tb->fib_seq;
tb                423 net/ipv6/ip6_fib.c static void fib6_table_dump(struct net *net, struct fib6_table *tb,
tb                426 net/ipv6/ip6_fib.c 	w->root = &tb->tb6_root;
tb                427 net/ipv6/ip6_fib.c 	spin_lock_bh(&tb->tb6_lock);
tb                429 net/ipv6/ip6_fib.c 	spin_unlock_bh(&tb->tb6_lock);
tb                450 net/ipv6/ip6_fib.c 		struct fib6_table *tb;
tb                452 net/ipv6/ip6_fib.c 		hlist_for_each_entry_rcu(tb, head, tb6_hlist)
tb                453 net/ipv6/ip6_fib.c 			fib6_table_dump(net, tb, w);
tb                572 net/ipv6/ip6_fib.c 	struct fib6_table *tb;
tb                614 net/ipv6/ip6_fib.c 		tb = fib6_get_table(net, arg.filter.table_id);
tb                615 net/ipv6/ip6_fib.c 		if (!tb) {
tb                624 net/ipv6/ip6_fib.c 			res = fib6_dump_table(tb, skb, cb);
tb                638 net/ipv6/ip6_fib.c 		hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
tb                641 net/ipv6/ip6_fib.c 			res = fib6_dump_table(tb, skb, cb);
tb               2316 net/ipv6/ip6_fib.c 		struct fib6_table *tb;
tb               2318 net/ipv6/ip6_fib.c 		hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
tb               2319 net/ipv6/ip6_fib.c 			hlist_del(&tb->tb6_hlist);
tb               2320 net/ipv6/ip6_fib.c 			fib6_free_table(tb);
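
The ip6_fib.c hits mirror the IPv4 table machinery for struct fib6_table: tables hash on tb6_id into FIB6_TABLE_HASHSZ buckets, dumps walk the chains under RCU, and fib6_new_table() is a get-or-create wrapper, per the excerpt:

	struct fib6_table *tb;

	tb = fib6_get_table(net, id);		/* RCU hash lookup on tb6_id */
	if (tb)
		return tb;

	tb = fib6_alloc_table(net, id);
	if (tb)
		fib6_link_table(net, tb);	/* add to fib_table_hash */
	return tb;
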
tb               1614 net/ipv6/ip6_gre.c static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1633 net/ipv6/ip6_gre.c static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1638 net/ipv6/ip6_gre.c 	if (tb[IFLA_ADDRESS]) {
tb               1639 net/ipv6/ip6_gre.c 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
tb               1641 net/ipv6/ip6_gre.c 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
tb               1655 net/ipv6/ip6_gre.c 	return ip6gre_tunnel_validate(tb, data, extack);
tb               1658 net/ipv6/ip6_gre.c static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1667 net/ipv6/ip6_gre.c 	ret = ip6gre_tap_validate(tb, data, extack);
tb               1935 net/ipv6/ip6_gre.c 				 struct nlattr *tb[], struct nlattr *data[],
tb               1951 net/ipv6/ip6_gre.c 	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
tb               1961 net/ipv6/ip6_gre.c 	if (tb[IFLA_MTU])
tb               1962 net/ipv6/ip6_gre.c 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
tb               1971 net/ipv6/ip6_gre.c 			  struct nlattr *tb[], struct nlattr *data[],
tb               1990 net/ipv6/ip6_gre.c 	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
tb               1992 net/ipv6/ip6_gre.c 		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
tb               2000 net/ipv6/ip6_gre.c ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
tb               2033 net/ipv6/ip6_gre.c static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
tb               2041 net/ipv6/ip6_gre.c 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
tb               2047 net/ipv6/ip6_gre.c 	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
tb               2204 net/ipv6/ip6_gre.c 			     struct nlattr *tb[], struct nlattr *data[],
tb               2224 net/ipv6/ip6_gre.c 	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
tb               2226 net/ipv6/ip6_gre.c 		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
tb               2247 net/ipv6/ip6_gre.c static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
tb               2255 net/ipv6/ip6_gre.c 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
tb               2262 net/ipv6/ip6_gre.c 	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
tb               1905 net/ipv6/ip6_tunnel.c static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1995 net/ipv6/ip6_tunnel.c 			   struct nlattr *tb[], struct nlattr *data[],
tb               2024 net/ipv6/ip6_tunnel.c 	if (!err && tb[IFLA_MTU])
tb               2025 net/ipv6/ip6_tunnel.c 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
tb               2030 net/ipv6/ip6_tunnel.c static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
tb                961 net/ipv6/ip6_vti.c static int vti6_validate(struct nlattr *tb[], struct nlattr *data[],
tb                995 net/ipv6/ip6_vti.c 			struct nlattr *tb[], struct nlattr *data[],
tb               1021 net/ipv6/ip6_vti.c static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1043 net/ipv6/ip6_vti.c 	return vti6_update(t, &p, tb && tb[IFLA_MTU]);
tb                188 net/ipv6/ip6mr.c 				struct fib_rule_hdr *frh, struct nlattr **tb,
tb                195 net/ipv6/ip6mr.c 			      struct nlattr **tb)
tb                 33 net/ipv6/netfilter/nft_dup_ipv6.c 			     const struct nlattr * const tb[])
tb                 38 net/ipv6/netfilter/nft_dup_ipv6.c 	if (tb[NFTA_DUP_SREG_ADDR] == NULL)
tb                 41 net/ipv6/netfilter/nft_dup_ipv6.c 	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
tb                 46 net/ipv6/netfilter/nft_dup_ipv6.c 	if (tb[NFTA_DUP_SREG_DEV] != NULL) {
tb                 47 net/ipv6/netfilter/nft_dup_ipv6.c 		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
tb                213 net/ipv6/netfilter/nft_fib_ipv6.c 		    const struct nlattr * const tb[])
tb                217 net/ipv6/netfilter/nft_fib_ipv6.c 	if (!tb[NFTA_FIB_RESULT])
tb                220 net/ipv6/netfilter/nft_fib_ipv6.c 	result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
tb               4871 net/ipv6/route.c 	struct nlattr *tb[RTA_MAX+1];
tb               4875 net/ipv6/route.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               4910 net/ipv6/route.c 	if (tb[RTA_NH_ID]) {
tb               4911 net/ipv6/route.c 		if (tb[RTA_GATEWAY]   || tb[RTA_OIF] ||
tb               4912 net/ipv6/route.c 		    tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
tb               4917 net/ipv6/route.c 		cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
tb               4920 net/ipv6/route.c 	if (tb[RTA_GATEWAY]) {
tb               4921 net/ipv6/route.c 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
tb               4924 net/ipv6/route.c 	if (tb[RTA_VIA]) {
tb               4929 net/ipv6/route.c 	if (tb[RTA_DST]) {
tb               4932 net/ipv6/route.c 		if (nla_len(tb[RTA_DST]) < plen)
tb               4935 net/ipv6/route.c 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
tb               4938 net/ipv6/route.c 	if (tb[RTA_SRC]) {
tb               4941 net/ipv6/route.c 		if (nla_len(tb[RTA_SRC]) < plen)
tb               4944 net/ipv6/route.c 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
tb               4947 net/ipv6/route.c 	if (tb[RTA_PREFSRC])
tb               4948 net/ipv6/route.c 		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
tb               4950 net/ipv6/route.c 	if (tb[RTA_OIF])
tb               4951 net/ipv6/route.c 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
tb               4953 net/ipv6/route.c 	if (tb[RTA_PRIORITY])
tb               4954 net/ipv6/route.c 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
tb               4956 net/ipv6/route.c 	if (tb[RTA_METRICS]) {
tb               4957 net/ipv6/route.c 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
tb               4958 net/ipv6/route.c 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
tb               4961 net/ipv6/route.c 	if (tb[RTA_TABLE])
tb               4962 net/ipv6/route.c 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
tb               4964 net/ipv6/route.c 	if (tb[RTA_MULTIPATH]) {
tb               4965 net/ipv6/route.c 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
tb               4966 net/ipv6/route.c 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
tb               4974 net/ipv6/route.c 	if (tb[RTA_PREF]) {
tb               4975 net/ipv6/route.c 		pref = nla_get_u8(tb[RTA_PREF]);
tb               4982 net/ipv6/route.c 	if (tb[RTA_ENCAP])
tb               4983 net/ipv6/route.c 		cfg->fc_encap = tb[RTA_ENCAP];
tb               4985 net/ipv6/route.c 	if (tb[RTA_ENCAP_TYPE]) {
tb               4986 net/ipv6/route.c 		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
tb               4993 net/ipv6/route.c 	if (tb[RTA_EXPIRES]) {
tb               4994 net/ipv6/route.c 		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
tb               5731 net/ipv6/route.c 					struct nlattr **tb,
tb               5744 net/ipv6/route.c 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               5761 net/ipv6/route.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               5766 net/ipv6/route.c 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
tb               5767 net/ipv6/route.c 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
tb               5773 net/ipv6/route.c 		if (!tb[i])
tb               5800 net/ipv6/route.c 	struct nlattr *tb[RTA_MAX+1];
tb               5810 net/ipv6/route.c 	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
tb               5819 net/ipv6/route.c 	if (tb[RTA_SRC]) {
tb               5820 net/ipv6/route.c 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
tb               5823 net/ipv6/route.c 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
tb               5826 net/ipv6/route.c 	if (tb[RTA_DST]) {
tb               5827 net/ipv6/route.c 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
tb               5830 net/ipv6/route.c 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
tb               5833 net/ipv6/route.c 	if (tb[RTA_IIF])
tb               5834 net/ipv6/route.c 		iif = nla_get_u32(tb[RTA_IIF]);
tb               5836 net/ipv6/route.c 	if (tb[RTA_OIF])
tb               5837 net/ipv6/route.c 		oif = nla_get_u32(tb[RTA_OIF]);
tb               5839 net/ipv6/route.c 	if (tb[RTA_MARK])
tb               5840 net/ipv6/route.c 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
tb               5842 net/ipv6/route.c 	if (tb[RTA_UID])
tb               5844 net/ipv6/route.c 					   nla_get_u32(tb[RTA_UID]));
tb               5848 net/ipv6/route.c 	if (tb[RTA_SPORT])
tb               5849 net/ipv6/route.c 		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
tb               5851 net/ipv6/route.c 	if (tb[RTA_DPORT])
tb               5852 net/ipv6/route.c 		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
tb               5854 net/ipv6/route.c 	if (tb[RTA_IP_PROTO]) {
tb               5855 net/ipv6/route.c 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
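
ipv6/route.c uses the same optional-attribute idiom to turn RTA_* into a fib6_config on the way in and a flowi6 on the way out; two representative fills, following the excerpt (the RTF_GATEWAY line continues the truncated branch above):

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}
	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
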
tb                384 net/ipv6/seg6_iptunnel.c 	struct nlattr *tb[SEG6_IPTUNNEL_MAX + 1];
tb                394 net/ipv6/seg6_iptunnel.c 	err = nla_parse_nested_deprecated(tb, SEG6_IPTUNNEL_MAX, nla,
tb                400 net/ipv6/seg6_iptunnel.c 	if (!tb[SEG6_IPTUNNEL_SRH])
tb                403 net/ipv6/seg6_iptunnel.c 	tuninfo = nla_data(tb[SEG6_IPTUNNEL_SRH]);
tb                404 net/ipv6/seg6_iptunnel.c 	tuninfo_len = nla_len(tb[SEG6_IPTUNNEL_SRH]);
tb                829 net/ipv6/seg6_local.c 	struct nlattr *tb[SEG6_LOCAL_BPF_PROG_MAX + 1];
tb                834 net/ipv6/seg6_local.c 	ret = nla_parse_nested_deprecated(tb, SEG6_LOCAL_BPF_PROG_MAX,
tb                840 net/ipv6/seg6_local.c 	if (!tb[SEG6_LOCAL_BPF_PROG] || !tb[SEG6_LOCAL_BPF_PROG_NAME])
tb                843 net/ipv6/seg6_local.c 	slwt->bpf.name = nla_memdup(tb[SEG6_LOCAL_BPF_PROG_NAME], GFP_KERNEL);
tb                847 net/ipv6/seg6_local.c 	fd = nla_get_u32(tb[SEG6_LOCAL_BPF_PROG]);
tb                963 net/ipv6/seg6_local.c 	struct nlattr *tb[SEG6_LOCAL_MAX + 1];
tb                971 net/ipv6/seg6_local.c 	err = nla_parse_nested_deprecated(tb, SEG6_LOCAL_MAX, nla,
tb                977 net/ipv6/seg6_local.c 	if (!tb[SEG6_LOCAL_ACTION])
tb                985 net/ipv6/seg6_local.c 	slwt->action = nla_get_u32(tb[SEG6_LOCAL_ACTION]);
tb                987 net/ipv6/seg6_local.c 	err = parse_nla_action(tb, slwt);
tb               1433 net/ipv6/sit.c static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[],
tb               1565 net/ipv6/sit.c 			 struct nlattr *tb[], struct nlattr *data[],
tb               1593 net/ipv6/sit.c 	if (tb[IFLA_MTU]) {
tb               1594 net/ipv6/sit.c 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
tb               1609 net/ipv6/sit.c static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
tb               1212 net/mpls/af_mpls.c 				      struct nlattr **tb,
tb               1225 net/mpls/af_mpls.c 					      tb, NETCONFA_MAX,
tb               1229 net/mpls/af_mpls.c 					    tb, NETCONFA_MAX,
tb               1235 net/mpls/af_mpls.c 		if (!tb[i])
tb               1255 net/mpls/af_mpls.c 	struct nlattr *tb[NETCONFA_MAX + 1];
tb               1262 net/mpls/af_mpls.c 	err = mpls_netconf_valid_get_req(in_skb, nlh, tb, extack);
tb               1267 net/mpls/af_mpls.c 	if (!tb[NETCONFA_IFINDEX])
tb               1270 net/mpls/af_mpls.c 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
tb               1787 net/mpls/af_mpls.c 	struct nlattr *tb[RTA_MAX+1];
tb               1791 net/mpls/af_mpls.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               1851 net/mpls/af_mpls.c 		struct nlattr *nla = tb[index];
tb               2086 net/mpls/af_mpls.c 	struct nlattr *tb[RTA_MAX + 1];
tb               2109 net/mpls/af_mpls.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               2118 net/mpls/af_mpls.c 			ifindex = nla_get_u32(tb[i]);
tb               2123 net/mpls/af_mpls.c 		} else if (tb[i]) {
tb               2280 net/mpls/af_mpls.c 				   struct nlattr **tb,
tb               2293 net/mpls/af_mpls.c 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               2309 net/mpls/af_mpls.c 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
tb               2314 net/mpls/af_mpls.c 	if ((tb[RTA_DST] || tb[RTA_NEWDST]) && !rtm->rtm_dst_len) {
tb               2320 net/mpls/af_mpls.c 		if (!tb[i])
tb               2342 net/mpls/af_mpls.c 	struct nlattr *tb[RTA_MAX + 1];
tb               2355 net/mpls/af_mpls.c 	err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack);
tb               2361 net/mpls/af_mpls.c 	if (tb[RTA_DST]) {
tb               2364 net/mpls/af_mpls.c 		if (nla_get_labels(tb[RTA_DST], 1, &label_count,
tb               2400 net/mpls/af_mpls.c 	if (tb[RTA_NEWDST]) {
tb               2401 net/mpls/af_mpls.c 		if (nla_get_labels(tb[RTA_NEWDST], MAX_NEW_LABELS, &n_labels,
tb                171 net/mpls/mpls_iptunnel.c 	struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
tb                176 net/mpls/mpls_iptunnel.c 	ret = nla_parse_nested_deprecated(tb, MPLS_IPTUNNEL_MAX, nla,
tb                181 net/mpls/mpls_iptunnel.c 	if (!tb[MPLS_IPTUNNEL_DST]) {
tb                187 net/mpls/mpls_iptunnel.c 	if (nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
tb                197 net/mpls/mpls_iptunnel.c 	ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], n_labels,
tb                205 net/mpls/mpls_iptunnel.c 	if (tb[MPLS_IPTUNNEL_TTL]) {
tb                206 net/mpls/mpls_iptunnel.c 		tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]);
tb                130 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
tb                140 net/netfilter/ipset/ip_set_bitmap_ip.c 	if (tb[IPSET_ATTR_LINENO])
tb                141 net/netfilter/ipset/ip_set_bitmap_ip.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                143 net/netfilter/ipset/ip_set_bitmap_ip.c 	if (unlikely(!tb[IPSET_ATTR_IP]))
tb                146 net/netfilter/ipset/ip_set_bitmap_ip.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                150 net/netfilter/ipset/ip_set_bitmap_ip.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                162 net/netfilter/ipset/ip_set_bitmap_ip.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                163 net/netfilter/ipset/ip_set_bitmap_ip.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                171 net/netfilter/ipset/ip_set_bitmap_ip.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                172 net/netfilter/ipset/ip_set_bitmap_ip.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                241 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
tb                250 net/netfilter/ipset/ip_set_bitmap_ip.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                251 net/netfilter/ipset/ip_set_bitmap_ip.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb                252 net/netfilter/ipset/ip_set_bitmap_ip.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                255 net/netfilter/ipset/ip_set_bitmap_ip.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
tb                259 net/netfilter/ipset/ip_set_bitmap_ip.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                260 net/netfilter/ipset/ip_set_bitmap_ip.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
tb                265 net/netfilter/ipset/ip_set_bitmap_ip.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                266 net/netfilter/ipset/ip_set_bitmap_ip.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                275 net/netfilter/ipset/ip_set_bitmap_ip.c 	if (tb[IPSET_ATTR_NETMASK]) {
tb                276 net/netfilter/ipset/ip_set_bitmap_ip.c 		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
tb                308 net/netfilter/ipset/ip_set_bitmap_ip.c 	set->dsize = ip_set_elem_len(set, tb, 0, 0);
tb                320 net/netfilter/ipset/ip_set_bitmap_ip.c 	if (tb[IPSET_ATTR_TIMEOUT]) {
tb                321 net/netfilter/ipset/ip_set_bitmap_ip.c 		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
tb                241 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
tb                251 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (tb[IPSET_ATTR_LINENO])
tb                252 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                254 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (unlikely(!tb[IPSET_ATTR_IP]))
tb                257 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                261 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                269 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (tb[IPSET_ATTR_ETHER]) {
tb                270 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		if (nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN)
tb                272 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
tb                318 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
tb                326 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                327 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb                328 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                331 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
tb                335 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                336 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
tb                341 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                342 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                356 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	set->dsize = ip_set_elem_len(set, tb,
tb                369 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (tb[IPSET_ATTR_TIMEOUT]) {
tb                370 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
tb                126 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
tb                137 net/netfilter/ipset/ip_set_bitmap_port.c 	if (tb[IPSET_ATTR_LINENO])
tb                138 net/netfilter/ipset/ip_set_bitmap_port.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                140 net/netfilter/ipset/ip_set_bitmap_port.c 	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                141 net/netfilter/ipset/ip_set_bitmap_port.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
tb                144 net/netfilter/ipset/ip_set_bitmap_port.c 	port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
tb                147 net/netfilter/ipset/ip_set_bitmap_port.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                156 net/netfilter/ipset/ip_set_bitmap_port.c 	if (tb[IPSET_ATTR_PORT_TO]) {
tb                157 net/netfilter/ipset/ip_set_bitmap_port.c 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                222 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
tb                229 net/netfilter/ipset/ip_set_bitmap_port.c 	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                230 net/netfilter/ipset/ip_set_bitmap_port.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
tb                231 net/netfilter/ipset/ip_set_bitmap_port.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb                232 net/netfilter/ipset/ip_set_bitmap_port.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                235 net/netfilter/ipset/ip_set_bitmap_port.c 	first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
tb                236 net/netfilter/ipset/ip_set_bitmap_port.c 	last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                241 net/netfilter/ipset/ip_set_bitmap_port.c 	set->dsize = ip_set_elem_len(set, tb, 0, 0);
tb                253 net/netfilter/ipset/ip_set_bitmap_port.c 	if (tb[IPSET_ATTR_TIMEOUT]) {
tb                254 net/netfilter/ipset/ip_set_bitmap_port.c 		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
tb                295 net/netfilter/ipset/ip_set_core.c 	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
tb                299 net/netfilter/ipset/ip_set_core.c 	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
tb                302 net/netfilter/ipset/ip_set_core.c 	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
tb                305 net/netfilter/ipset/ip_set_core.c 	*ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
tb                313 net/netfilter/ipset/ip_set_core.c 	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
tb                318 net/netfilter/ipset/ip_set_core.c 	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
tb                321 net/netfilter/ipset/ip_set_core.c 	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
tb                324 net/netfilter/ipset/ip_set_core.c 	memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
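
The two ip_set_core.c getters above extract an address from a nested attribute: the IPv4 variant reads a be32 payload (nla_get_be32), the IPv6 variant memcpy()s 16 bytes. The host-order convenience wrapper seen elsewhere in the listing (ip_set_get_hostipaddr4) then applies ntohl. A sketch of that byte-order step in plain C, with hostipaddr4() as a hypothetical stand-in for the kernel helper:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* The attribute payload is network byte order; convert to host
     * order the way ip_set_get_hostipaddr4() does after extraction. */
    static uint32_t hostipaddr4(const void *payload)
    {
        uint32_t be;

        memcpy(&be, payload, sizeof(be)); /* nla_data() equivalent */
        return ntohl(be);
    }

    int main(void)
    {
        uint32_t wire = htonl(0xc0a80001u); /* 192.168.0.1 on the wire */

        printf("0x%08x\n", hostipaddr4(&wire));
        return 0;
    }
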
tb                362 net/netfilter/ipset/ip_set_core.c add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
tb                366 net/netfilter/ipset/ip_set_core.c 		!!tb[IPSET_ATTR_TIMEOUT];
tb                370 net/netfilter/ipset/ip_set_core.c ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
tb                376 net/netfilter/ipset/ip_set_core.c 	if (tb[IPSET_ATTR_CADT_FLAGS])
tb                377 net/netfilter/ipset/ip_set_core.c 		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                383 net/netfilter/ipset/ip_set_core.c 		if (!add_extension(id, cadt_flags, tb))
tb                395 net/netfilter/ipset/ip_set_core.c ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
tb                400 net/netfilter/ipset/ip_set_core.c 	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb                401 net/netfilter/ipset/ip_set_core.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
tb                402 net/netfilter/ipset/ip_set_core.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
tb                403 net/netfilter/ipset/ip_set_core.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
tb                404 net/netfilter/ipset/ip_set_core.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
tb                405 net/netfilter/ipset/ip_set_core.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
tb                408 net/netfilter/ipset/ip_set_core.c 	if (tb[IPSET_ATTR_TIMEOUT]) {
tb                411 net/netfilter/ipset/ip_set_core.c 		ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
tb                413 net/netfilter/ipset/ip_set_core.c 	if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
tb                416 net/netfilter/ipset/ip_set_core.c 		if (tb[IPSET_ATTR_BYTES])
tb                418 net/netfilter/ipset/ip_set_core.c 						 tb[IPSET_ATTR_BYTES]));
tb                419 net/netfilter/ipset/ip_set_core.c 		if (tb[IPSET_ATTR_PACKETS])
tb                421 net/netfilter/ipset/ip_set_core.c 						   tb[IPSET_ATTR_PACKETS]));
tb                423 net/netfilter/ipset/ip_set_core.c 	if (tb[IPSET_ATTR_COMMENT]) {
tb                426 net/netfilter/ipset/ip_set_core.c 		ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
tb                428 net/netfilter/ipset/ip_set_core.c 	if (tb[IPSET_ATTR_SKBMARK]) {
tb                431 net/netfilter/ipset/ip_set_core.c 		fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
tb                435 net/netfilter/ipset/ip_set_core.c 	if (tb[IPSET_ATTR_SKBPRIO]) {
tb                439 net/netfilter/ipset/ip_set_core.c 			be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO]));
tb                441 net/netfilter/ipset/ip_set_core.c 	if (tb[IPSET_ATTR_SKBQUEUE]) {
tb                445 net/netfilter/ipset/ip_set_core.c 			be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE]));
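
Among the extension attributes parsed above, IPSET_ATTR_SKBMARK is the only one packed: mark and mask travel in a single be64, and ip_set_get_extensions() splits it after be64_to_cpu() into the high 32 bits (mark) and low 32 bits (mask). The split, reproduced as compilable C:

    #include <stdint.h>
    #include <stdio.h>

    /* fullmark layout: mark in bits 63..32, mask in bits 31..0. */
    static void split_skbmark(uint64_t fullmark, uint32_t *mark, uint32_t *mask)
    {
        *mark = (uint32_t)(fullmark >> 32);
        *mask = (uint32_t)(fullmark & 0xffffffffULL);
    }

    int main(void)
    {
        uint32_t mark, mask;

        split_skbmark(0x0000002a00000fffULL, &mark, &mask);
        printf("mark=0x%x mask=0x%x\n", mark, mask); /* mark=0x2a mask=0xfff */
        return 0;
    }
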
tb                785 net/netfilter/ipset/ip_set_core.c static inline u8 protocol(const struct nlattr * const tb[])
tb                787 net/netfilter/ipset/ip_set_core.c 	return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]);
tb                791 net/netfilter/ipset/ip_set_core.c protocol_failed(const struct nlattr * const tb[])
tb                793 net/netfilter/ipset/ip_set_core.c 	return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL;
tb                797 net/netfilter/ipset/ip_set_core.c protocol_min_failed(const struct nlattr * const tb[])
tb                799 net/netfilter/ipset/ip_set_core.c 	return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN;
tb                907 net/netfilter/ipset/ip_set_core.c 	struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {};
tb                953 net/netfilter/ipset/ip_set_core.c 	    nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
tb                959 net/netfilter/ipset/ip_set_core.c 	ret = set->type->create(net, set, tb, flags);
tb               1532 net/netfilter/ipset/ip_set_core.c 	struct nlattr *tb[], enum ipset_adt adt,
tb               1541 net/netfilter/ipset/ip_set_core.c 		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
tb               1602 net/netfilter/ipset/ip_set_core.c 	struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
tb               1625 net/netfilter/ipset/ip_set_core.c 		if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
tb               1629 net/netfilter/ipset/ip_set_core.c 		ret = call_ad(ctnl, skb, set, tb, adt, flags,
tb               1637 net/netfilter/ipset/ip_set_core.c 			    nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
tb               1640 net/netfilter/ipset/ip_set_core.c 			ret = call_ad(ctnl, skb, set, tb, adt,
tb               1674 net/netfilter/ipset/ip_set_core.c 	struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
tb               1688 net/netfilter/ipset/ip_set_core.c 	if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
tb               1693 net/netfilter/ipset/ip_set_core.c 	ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
tb               1427 net/netfilter/ipset/ip_set_hash_gen.h IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
tb               1452 net/netfilter/ipset/ip_set_hash_gen.h 			    struct nlattr *tb[], u32 flags)
tb               1478 net/netfilter/ipset/ip_set_hash_gen.h 	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
tb               1479 net/netfilter/ipset/ip_set_hash_gen.h 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
tb               1480 net/netfilter/ipset/ip_set_hash_gen.h 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb               1481 net/netfilter/ipset/ip_set_hash_gen.h 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb               1486 net/netfilter/ipset/ip_set_hash_gen.h 	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
tb               1490 net/netfilter/ipset/ip_set_hash_gen.h 	if (tb[IPSET_ATTR_MARKMASK]) {
tb               1491 net/netfilter/ipset/ip_set_hash_gen.h 		markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
tb               1499 net/netfilter/ipset/ip_set_hash_gen.h 	if (tb[IPSET_ATTR_NETMASK]) {
tb               1500 net/netfilter/ipset/ip_set_hash_gen.h 		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
tb               1509 net/netfilter/ipset/ip_set_hash_gen.h 	if (tb[IPSET_ATTR_HASHSIZE]) {
tb               1510 net/netfilter/ipset/ip_set_hash_gen.h 		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
tb               1515 net/netfilter/ipset/ip_set_hash_gen.h 	if (tb[IPSET_ATTR_MAXELEM])
tb               1516 net/netfilter/ipset/ip_set_hash_gen.h 		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
tb               1562 net/netfilter/ipset/ip_set_hash_gen.h 		set->dsize = ip_set_elem_len(set, tb,
tb               1568 net/netfilter/ipset/ip_set_hash_gen.h 		set->dsize = ip_set_elem_len(set, tb,
tb               1574 net/netfilter/ipset/ip_set_hash_gen.h 	if (tb[IPSET_ATTR_TIMEOUT]) {
tb               1575 net/netfilter/ipset/ip_set_hash_gen.h 		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
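
The hash create path above takes IPSET_ATTR_HASHSIZE as a plain u32, but the hash types size their bucket table as a power of two, so the requested value is effectively clamped to a minimum and rounded up. A sketch of that normalization, assuming a minimum of 64; the exact minimum and the rounding site are assumptions here, not taken from the listing:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t normalize_hashsize(uint32_t requested)
    {
        uint32_t size = requested < 64 ? 64 : requested;
        uint32_t pow2 = 1;

        /* Round up to the next power of two, capped below 2^32. */
        while (pow2 < size && pow2 < (1U << 31))
            pow2 <<= 1;
        return pow2;
    }

    int main(void)
    {
        printf("%u %u %u\n", normalize_hashsize(0),
               normalize_hashsize(100), normalize_hashsize(1024));
        /* -> 64 128 1024 */
        return 0;
    }
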
tb                 97 net/netfilter/ipset/ip_set_hash_ip.c hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                107 net/netfilter/ipset/ip_set_hash_ip.c 	if (tb[IPSET_ATTR_LINENO])
tb                108 net/netfilter/ipset/ip_set_hash_ip.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                110 net/netfilter/ipset/ip_set_hash_ip.c 	if (unlikely(!tb[IPSET_ATTR_IP]))
tb                113 net/netfilter/ipset/ip_set_hash_ip.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                117 net/netfilter/ipset/ip_set_hash_ip.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                130 net/netfilter/ipset/ip_set_hash_ip.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                131 net/netfilter/ipset/ip_set_hash_ip.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                136 net/netfilter/ipset/ip_set_hash_ip.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                137 net/netfilter/ipset/ip_set_hash_ip.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                232 net/netfilter/ipset/ip_set_hash_ip.c hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                241 net/netfilter/ipset/ip_set_hash_ip.c 	if (tb[IPSET_ATTR_LINENO])
tb                242 net/netfilter/ipset/ip_set_hash_ip.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                244 net/netfilter/ipset/ip_set_hash_ip.c 	if (unlikely(!tb[IPSET_ATTR_IP]))
tb                246 net/netfilter/ipset/ip_set_hash_ip.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                248 net/netfilter/ipset/ip_set_hash_ip.c 	if (unlikely(tb[IPSET_ATTR_CIDR])) {
tb                249 net/netfilter/ipset/ip_set_hash_ip.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                255 net/netfilter/ipset/ip_set_hash_ip.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                259 net/netfilter/ipset/ip_set_hash_ip.c 	ret = ip_set_get_extensions(set, tb, &ext);
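
In the hash_ip4 add path above, a lone IPSET_ATTR_CIDR is expanded into a host-order [from, to] address range before iterating (ipset has a small helper for this). The underlying arithmetic, as a self-contained sketch with range_from_cidr() as an illustrative name:

    #include <stdint.h>
    #include <stdio.h>

    static void range_from_cidr(uint32_t ip, uint8_t cidr,
                                uint32_t *from, uint32_t *to)
    {
        /* cidr == 0 would shift by 32 (undefined), so special-case it. */
        uint32_t mask = cidr ? ~((1U << (32 - cidr)) - 1) : 0;

        *from = ip & mask;
        *to = *from | ~mask;
    }

    int main(void)
    {
        uint32_t from, to;

        range_from_cidr(0xc0a80042u, 24, &from, &to);
        printf("0x%08x..0x%08x\n", from, to); /* 0xc0a80000..0xc0a800ff */
        return 0;
    }
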
tb                110 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                118 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                119 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !tb[IPSET_ATTR_ETHER] ||
tb                120 net/netfilter/ipset/ip_set_hash_ipmac.c 		     nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN ||
tb                121 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb                122 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
tb                123 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)   ||
tb                124 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
tb                125 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
tb                126 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
tb                129 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (tb[IPSET_ATTR_LINENO])
tb                130 net/netfilter/ipset/ip_set_hash_ipmac.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                132 net/netfilter/ipset/ip_set_hash_ipmac.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
tb                133 net/netfilter/ipset/ip_set_hash_ipmac.c 		ip_set_get_extensions(set, tb, &ext);
tb                136 net/netfilter/ipset/ip_set_hash_ipmac.c 	memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
tb                226 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                237 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                238 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !tb[IPSET_ATTR_ETHER] ||
tb                239 net/netfilter/ipset/ip_set_hash_ipmac.c 		     nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN ||
tb                240 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb                241 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
tb                242 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)   ||
tb                243 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
tb                244 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
tb                245 net/netfilter/ipset/ip_set_hash_ipmac.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
tb                248 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (tb[IPSET_ATTR_LINENO])
tb                249 net/netfilter/ipset/ip_set_hash_ipmac.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                251 net/netfilter/ipset/ip_set_hash_ipmac.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
tb                252 net/netfilter/ipset/ip_set_hash_ipmac.c 		ip_set_get_extensions(set, tb, &ext);
tb                256 net/netfilter/ipset/ip_set_hash_ipmac.c 	memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
tb                 96 net/netfilter/ipset/ip_set_hash_ipmark.c hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                106 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (tb[IPSET_ATTR_LINENO])
tb                107 net/netfilter/ipset/ip_set_hash_ipmark.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                109 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                110 net/netfilter/ipset/ip_set_hash_ipmark.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
tb                113 net/netfilter/ipset/ip_set_hash_ipmark.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
tb                117 net/netfilter/ipset/ip_set_hash_ipmark.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                121 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
tb                125 net/netfilter/ipset/ip_set_hash_ipmark.c 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
tb                131 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                132 net/netfilter/ipset/ip_set_hash_ipmark.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                137 net/netfilter/ipset/ip_set_hash_ipmark.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                138 net/netfilter/ipset/ip_set_hash_ipmark.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                222 net/netfilter/ipset/ip_set_hash_ipmark.c hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                231 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (tb[IPSET_ATTR_LINENO])
tb                232 net/netfilter/ipset/ip_set_hash_ipmark.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                234 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                235 net/netfilter/ipset/ip_set_hash_ipmark.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))
tb                237 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                239 net/netfilter/ipset/ip_set_hash_ipmark.c 	if (unlikely(tb[IPSET_ATTR_CIDR])) {
tb                240 net/netfilter/ipset/ip_set_hash_ipmark.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                246 net/netfilter/ipset/ip_set_hash_ipmark.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                250 net/netfilter/ipset/ip_set_hash_ipmark.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                254 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
tb                104 net/netfilter/ipset/ip_set_hash_ipport.c hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                115 net/netfilter/ipset/ip_set_hash_ipport.c 	if (tb[IPSET_ATTR_LINENO])
tb                116 net/netfilter/ipset/ip_set_hash_ipport.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                118 net/netfilter/ipset/ip_set_hash_ipport.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                119 net/netfilter/ipset/ip_set_hash_ipport.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                120 net/netfilter/ipset/ip_set_hash_ipport.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
tb                123 net/netfilter/ipset/ip_set_hash_ipport.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
tb                127 net/netfilter/ipset/ip_set_hash_ipport.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                131 net/netfilter/ipset/ip_set_hash_ipport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                133 net/netfilter/ipset/ip_set_hash_ipport.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                134 net/netfilter/ipset/ip_set_hash_ipport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                147 net/netfilter/ipset/ip_set_hash_ipport.c 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
tb                148 net/netfilter/ipset/ip_set_hash_ipport.c 	      tb[IPSET_ATTR_PORT_TO])) {
tb                154 net/netfilter/ipset/ip_set_hash_ipport.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                155 net/netfilter/ipset/ip_set_hash_ipport.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                160 net/netfilter/ipset/ip_set_hash_ipport.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                161 net/netfilter/ipset/ip_set_hash_ipport.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                169 net/netfilter/ipset/ip_set_hash_ipport.c 	if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
tb                170 net/netfilter/ipset/ip_set_hash_ipport.c 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                262 net/netfilter/ipset/ip_set_hash_ipport.c hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                273 net/netfilter/ipset/ip_set_hash_ipport.c 	if (tb[IPSET_ATTR_LINENO])
tb                274 net/netfilter/ipset/ip_set_hash_ipport.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                276 net/netfilter/ipset/ip_set_hash_ipport.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                277 net/netfilter/ipset/ip_set_hash_ipport.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                278 net/netfilter/ipset/ip_set_hash_ipport.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
tb                280 net/netfilter/ipset/ip_set_hash_ipport.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                282 net/netfilter/ipset/ip_set_hash_ipport.c 	if (unlikely(tb[IPSET_ATTR_CIDR])) {
tb                283 net/netfilter/ipset/ip_set_hash_ipport.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                289 net/netfilter/ipset/ip_set_hash_ipport.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                293 net/netfilter/ipset/ip_set_hash_ipport.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                297 net/netfilter/ipset/ip_set_hash_ipport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                299 net/netfilter/ipset/ip_set_hash_ipport.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                300 net/netfilter/ipset/ip_set_hash_ipport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                312 net/netfilter/ipset/ip_set_hash_ipport.c 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
tb                318 net/netfilter/ipset/ip_set_hash_ipport.c 	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
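
When IPSET_ATTR_PORT_TO is present in the ipport handlers above, the add path walks the entire [port, port_to] range, swapping the endpoints first if the user supplied them reversed (the kernel uses swap() for this). The same normalization in plain C:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int ports_in_range(uint16_t port, uint16_t port_to)
    {
        if (port > port_to) { /* reversed input: swap endpoints */
            uint16_t tmp = port;

            port = port_to;
            port_to = tmp;
        }
        return (unsigned int)(port_to - port) + 1;
    }

    int main(void)
    {
        printf("%u\n", ports_in_range(80, 80)); /* 1 */
        printf("%u\n", ports_in_range(90, 80)); /* 11, reversed input */
        return 0;
    }
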
tb                107 net/netfilter/ipset/ip_set_hash_ipportip.c hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                118 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (tb[IPSET_ATTR_LINENO])
tb                119 net/netfilter/ipset/ip_set_hash_ipportip.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                121 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                122 net/netfilter/ipset/ip_set_hash_ipportip.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                123 net/netfilter/ipset/ip_set_hash_ipportip.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
tb                126 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
tb                130 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                134 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2);
tb                138 net/netfilter/ipset/ip_set_hash_ipportip.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                140 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                141 net/netfilter/ipset/ip_set_hash_ipportip.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                154 net/netfilter/ipset/ip_set_hash_ipportip.c 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
tb                155 net/netfilter/ipset/ip_set_hash_ipportip.c 	      tb[IPSET_ATTR_PORT_TO])) {
tb                161 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                162 net/netfilter/ipset/ip_set_hash_ipportip.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                167 net/netfilter/ipset/ip_set_hash_ipportip.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                168 net/netfilter/ipset/ip_set_hash_ipportip.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                176 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
tb                177 net/netfilter/ipset/ip_set_hash_ipportip.c 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                273 net/netfilter/ipset/ip_set_hash_ipportip.c hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                284 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (tb[IPSET_ATTR_LINENO])
tb                285 net/netfilter/ipset/ip_set_hash_ipportip.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                287 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                288 net/netfilter/ipset/ip_set_hash_ipportip.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                289 net/netfilter/ipset/ip_set_hash_ipportip.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
tb                291 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                293 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (unlikely(tb[IPSET_ATTR_CIDR])) {
tb                294 net/netfilter/ipset/ip_set_hash_ipportip.c 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                300 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                304 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                308 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
tb                312 net/netfilter/ipset/ip_set_hash_ipportip.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                314 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                315 net/netfilter/ipset/ip_set_hash_ipportip.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                327 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
tb                333 net/netfilter/ipset/ip_set_hash_ipportip.c 	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                159 net/netfilter/ipset/ip_set_hash_ipportnet.c hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                172 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_LINENO])
tb                173 net/netfilter/ipset/ip_set_hash_ipportnet.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                175 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                176 net/netfilter/ipset/ip_set_hash_ipportnet.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                177 net/netfilter/ipset/ip_set_hash_ipportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
tb                178 net/netfilter/ipset/ip_set_hash_ipportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                181 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                185 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                189 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
tb                193 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_CIDR2]) {
tb                194 net/netfilter/ipset/ip_set_hash_ipportnet.c 		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
tb                200 net/netfilter/ipset/ip_set_hash_ipportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                202 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                203 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                215 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                216 net/netfilter/ipset/ip_set_hash_ipportnet.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                222 net/netfilter/ipset/ip_set_hash_ipportnet.c 	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
tb                224 net/netfilter/ipset/ip_set_hash_ipportnet.c 	    !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
tb                225 net/netfilter/ipset/ip_set_hash_ipportnet.c 	      tb[IPSET_ATTR_IP2_TO])) {
tb                234 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                235 net/netfilter/ipset/ip_set_hash_ipportnet.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                240 net/netfilter/ipset/ip_set_hash_ipportnet.c 	} else if (tb[IPSET_ATTR_CIDR]) {
tb                241 net/netfilter/ipset/ip_set_hash_ipportnet.c 		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                249 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_PORT_TO]) {
tb                250 net/netfilter/ipset/ip_set_hash_ipportnet.c 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                256 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_IP2_TO]) {
tb                257 net/netfilter/ipset/ip_set_hash_ipportnet.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
tb                410 net/netfilter/ipset/ip_set_hash_ipportnet.c hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                422 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_LINENO])
tb                423 net/netfilter/ipset/ip_set_hash_ipportnet.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                425 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                426 net/netfilter/ipset/ip_set_hash_ipportnet.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                427 net/netfilter/ipset/ip_set_hash_ipportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
tb                428 net/netfilter/ipset/ip_set_hash_ipportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                430 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                432 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (unlikely(tb[IPSET_ATTR_CIDR])) {
tb                433 net/netfilter/ipset/ip_set_hash_ipportnet.c 		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                439 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                443 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                447 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
tb                451 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_CIDR2]) {
tb                452 net/netfilter/ipset/ip_set_hash_ipportnet.c 		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
tb                460 net/netfilter/ipset/ip_set_hash_ipportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                462 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                463 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                475 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                476 net/netfilter/ipset/ip_set_hash_ipportnet.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                482 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
tb                489 net/netfilter/ipset/ip_set_hash_ipportnet.c 	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                 95 net/netfilter/ipset/ip_set_hash_mac.c hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                103 net/netfilter/ipset/ip_set_hash_mac.c 	if (tb[IPSET_ATTR_LINENO])
tb                104 net/netfilter/ipset/ip_set_hash_mac.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                106 net/netfilter/ipset/ip_set_hash_mac.c 	if (unlikely(!tb[IPSET_ATTR_ETHER] ||
tb                107 net/netfilter/ipset/ip_set_hash_mac.c 		     nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN))
tb                110 net/netfilter/ipset/ip_set_hash_mac.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                113 net/netfilter/ipset/ip_set_hash_mac.c 	ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]));
tb                135 net/netfilter/ipset/ip_set_hash_net.c hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                145 net/netfilter/ipset/ip_set_hash_net.c 	if (tb[IPSET_ATTR_LINENO])
tb                146 net/netfilter/ipset/ip_set_hash_net.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                148 net/netfilter/ipset/ip_set_hash_net.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                149 net/netfilter/ipset/ip_set_hash_net.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                152 net/netfilter/ipset/ip_set_hash_net.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                156 net/netfilter/ipset/ip_set_hash_net.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                160 net/netfilter/ipset/ip_set_hash_net.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                161 net/netfilter/ipset/ip_set_hash_net.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                166 net/netfilter/ipset/ip_set_hash_net.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                167 net/netfilter/ipset/ip_set_hash_net.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                173 net/netfilter/ipset/ip_set_hash_net.c 	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
tb                181 net/netfilter/ipset/ip_set_hash_net.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                182 net/netfilter/ipset/ip_set_hash_net.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                303 net/netfilter/ipset/ip_set_hash_net.c hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                311 net/netfilter/ipset/ip_set_hash_net.c 	if (tb[IPSET_ATTR_LINENO])
tb                312 net/netfilter/ipset/ip_set_hash_net.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                314 net/netfilter/ipset/ip_set_hash_net.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                315 net/netfilter/ipset/ip_set_hash_net.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                317 net/netfilter/ipset/ip_set_hash_net.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                320 net/netfilter/ipset/ip_set_hash_net.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                324 net/netfilter/ipset/ip_set_hash_net.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                328 net/netfilter/ipset/ip_set_hash_net.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                329 net/netfilter/ipset/ip_set_hash_net.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                336 net/netfilter/ipset/ip_set_hash_net.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                337 net/netfilter/ipset/ip_set_hash_net.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                192 net/netfilter/ipset/ip_set_hash_netiface.c hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                202 net/netfilter/ipset/ip_set_hash_netiface.c 	if (tb[IPSET_ATTR_LINENO])
tb                203 net/netfilter/ipset/ip_set_hash_netiface.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                205 net/netfilter/ipset/ip_set_hash_netiface.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                206 net/netfilter/ipset/ip_set_hash_netiface.c 		     !tb[IPSET_ATTR_IFACE] ||
tb                207 net/netfilter/ipset/ip_set_hash_netiface.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                210 net/netfilter/ipset/ip_set_hash_netiface.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                214 net/netfilter/ipset/ip_set_hash_netiface.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                218 net/netfilter/ipset/ip_set_hash_netiface.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                219 net/netfilter/ipset/ip_set_hash_netiface.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                223 net/netfilter/ipset/ip_set_hash_netiface.c 	nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
tb                225 net/netfilter/ipset/ip_set_hash_netiface.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                226 net/netfilter/ipset/ip_set_hash_netiface.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                233 net/netfilter/ipset/ip_set_hash_netiface.c 	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
tb                240 net/netfilter/ipset/ip_set_hash_netiface.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                241 net/netfilter/ipset/ip_set_hash_netiface.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                400 net/netfilter/ipset/ip_set_hash_netiface.c hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                408 net/netfilter/ipset/ip_set_hash_netiface.c 	if (tb[IPSET_ATTR_LINENO])
tb                409 net/netfilter/ipset/ip_set_hash_netiface.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                411 net/netfilter/ipset/ip_set_hash_netiface.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                412 net/netfilter/ipset/ip_set_hash_netiface.c 		     !tb[IPSET_ATTR_IFACE] ||
tb                413 net/netfilter/ipset/ip_set_hash_netiface.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                415 net/netfilter/ipset/ip_set_hash_netiface.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                418 net/netfilter/ipset/ip_set_hash_netiface.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                422 net/netfilter/ipset/ip_set_hash_netiface.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                426 net/netfilter/ipset/ip_set_hash_netiface.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                427 net/netfilter/ipset/ip_set_hash_netiface.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                434 net/netfilter/ipset/ip_set_hash_netiface.c 	nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
tb                436 net/netfilter/ipset/ip_set_hash_netiface.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                437 net/netfilter/ipset/ip_set_hash_netiface.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                162 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                173 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_LINENO])
tb                174 net/netfilter/ipset/ip_set_hash_netnet.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                177 net/netfilter/ipset/ip_set_hash_netnet.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                178 net/netfilter/ipset/ip_set_hash_netnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                181 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                185 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
tb                189 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                193 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                194 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                199 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_CIDR2]) {
tb                200 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
tb                205 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                206 net/netfilter/ipset/ip_set_hash_netnet.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                212 net/netfilter/ipset/ip_set_hash_netnet.c 	if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] ||
tb                213 net/netfilter/ipset/ip_set_hash_netnet.c 				   tb[IPSET_ATTR_IP2_TO])) {
tb                222 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                223 net/netfilter/ipset/ip_set_hash_netnet.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                235 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_IP2_TO]) {
tb                236 net/netfilter/ipset/ip_set_hash_netnet.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
tb                396 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                404 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_LINENO])
tb                405 net/netfilter/ipset/ip_set_hash_netnet.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                408 net/netfilter/ipset/ip_set_hash_netnet.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                409 net/netfilter/ipset/ip_set_hash_netnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                411 net/netfilter/ipset/ip_set_hash_netnet.c 	if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
tb                414 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
tb                418 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
tb                422 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                426 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                427 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                432 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_CIDR2]) {
tb                433 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
tb                441 net/netfilter/ipset/ip_set_hash_netnet.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                442 net/netfilter/ipset/ip_set_hash_netnet.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                153 net/netfilter/ipset/ip_set_hash_netport.c hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                165 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_LINENO])
tb                166 net/netfilter/ipset/ip_set_hash_netport.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                168 net/netfilter/ipset/ip_set_hash_netport.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                169 net/netfilter/ipset/ip_set_hash_netport.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                170 net/netfilter/ipset/ip_set_hash_netport.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
tb                171 net/netfilter/ipset/ip_set_hash_netport.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                174 net/netfilter/ipset/ip_set_hash_netport.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                178 net/netfilter/ipset/ip_set_hash_netport.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                182 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                183 net/netfilter/ipset/ip_set_hash_netport.c 		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                189 net/netfilter/ipset/ip_set_hash_netport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                191 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                192 net/netfilter/ipset/ip_set_hash_netport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                204 net/netfilter/ipset/ip_set_hash_netport.c 	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
tb                206 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                207 net/netfilter/ipset/ip_set_hash_netport.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                213 net/netfilter/ipset/ip_set_hash_netport.c 	if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
tb                221 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_PORT_TO]) {
tb                222 net/netfilter/ipset/ip_set_hash_netport.c 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                226 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                227 net/netfilter/ipset/ip_set_hash_netport.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                369 net/netfilter/ipset/ip_set_hash_netport.c hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                381 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_LINENO])
tb                382 net/netfilter/ipset/ip_set_hash_netport.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                384 net/netfilter/ipset/ip_set_hash_netport.c 	if (unlikely(!tb[IPSET_ATTR_IP] ||
tb                385 net/netfilter/ipset/ip_set_hash_netport.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                386 net/netfilter/ipset/ip_set_hash_netport.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
tb                387 net/netfilter/ipset/ip_set_hash_netport.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                389 net/netfilter/ipset/ip_set_hash_netport.c 	if (unlikely(tb[IPSET_ATTR_IP_TO]))
tb                392 net/netfilter/ipset/ip_set_hash_netport.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
tb                396 net/netfilter/ipset/ip_set_hash_netport.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                400 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                401 net/netfilter/ipset/ip_set_hash_netport.c 		cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                408 net/netfilter/ipset/ip_set_hash_netport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                410 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                411 net/netfilter/ipset/ip_set_hash_netport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                423 net/netfilter/ipset/ip_set_hash_netport.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                424 net/netfilter/ipset/ip_set_hash_netport.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                430 net/netfilter/ipset/ip_set_hash_netport.c 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
tb                437 net/netfilter/ipset/ip_set_hash_netport.c 	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
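
The with_ports flag threaded through the netport entries above comes from checking IPSET_ATTR_PROTO: only port-bearing protocols may carry a port range. A userspace rendering of that test (the kernel's helper covers TCP, SCTP, UDP and UDPLITE; IPPROTO_UDPLITE is assumed available in the system headers):

    #include <netinet/in.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool proto_with_ports(uint8_t proto)
    {
        switch (proto) {
        case IPPROTO_TCP:
        case IPPROTO_SCTP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
            return true;
        }
        return false;
    }

    int main(void)
    {
        printf("tcp:%d icmp:%d\n",
               proto_with_ports(IPPROTO_TCP), proto_with_ports(IPPROTO_ICMP));
        return 0;
    }
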
tb                176 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
tb                188 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_LINENO])
tb                189 net/netfilter/ipset/ip_set_hash_netportnet.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                192 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                193 net/netfilter/ipset/ip_set_hash_netportnet.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                194 net/netfilter/ipset/ip_set_hash_netportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
tb                195 net/netfilter/ipset/ip_set_hash_netportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                198 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
tb                202 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
tb                206 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                210 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                211 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                216 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_CIDR2]) {
tb                217 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
tb                222 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                224 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                225 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                237 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                238 net/netfilter/ipset/ip_set_hash_netportnet.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                244 net/netfilter/ipset/ip_set_hash_netportnet.c 	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
tb                246 net/netfilter/ipset/ip_set_hash_netportnet.c 	    !(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) {
tb                255 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_IP_TO]) {
tb                256 net/netfilter/ipset/ip_set_hash_netportnet.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
tb                268 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_PORT_TO]) {
tb                269 net/netfilter/ipset/ip_set_hash_netportnet.c 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                275 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_IP2_TO]) {
tb                276 net/netfilter/ipset/ip_set_hash_netportnet.c 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
tb                455 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
tb                466 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_LINENO])
tb                467 net/netfilter/ipset/ip_set_hash_netportnet.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                470 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
tb                471 net/netfilter/ipset/ip_set_hash_netportnet.c 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
tb                472 net/netfilter/ipset/ip_set_hash_netportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
tb                473 net/netfilter/ipset/ip_set_hash_netportnet.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                475 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
tb                478 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
tb                482 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
tb                486 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                490 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_CIDR]) {
tb                491 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
tb                496 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_CIDR2]) {
tb                497 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
tb                505 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
tb                507 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_PROTO]) {
tb                508 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
tb                520 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                521 net/netfilter/ipset/ip_set_hash_netportnet.c 		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                527 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
tb                534 net/netfilter/ipset/ip_set_hash_netportnet.c 	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
tb                346 net/netfilter/ipset/ip_set_list_set.c list_set_uadt(struct ip_set *set, struct nlattr *tb[],
tb                356 net/netfilter/ipset/ip_set_list_set.c 	if (tb[IPSET_ATTR_LINENO])
tb                357 net/netfilter/ipset/ip_set_list_set.c 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
tb                359 net/netfilter/ipset/ip_set_list_set.c 	if (unlikely(!tb[IPSET_ATTR_NAME] ||
tb                360 net/netfilter/ipset/ip_set_list_set.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                363 net/netfilter/ipset/ip_set_list_set.c 	ret = ip_set_get_extensions(set, tb, &ext);
tb                366 net/netfilter/ipset/ip_set_list_set.c 	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
tb                375 net/netfilter/ipset/ip_set_list_set.c 	if (tb[IPSET_ATTR_CADT_FLAGS]) {
tb                376 net/netfilter/ipset/ip_set_list_set.c 		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
tb                381 net/netfilter/ipset/ip_set_list_set.c 	if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
tb                386 net/netfilter/ipset/ip_set_list_set.c 	if (tb[IPSET_ATTR_NAMEREF]) {
tb                388 net/netfilter/ipset/ip_set_list_set.c 					    nla_data(tb[IPSET_ATTR_NAMEREF]),
tb                607 net/netfilter/ipset/ip_set_list_set.c list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
tb                612 net/netfilter/ipset/ip_set_list_set.c 	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
tb                613 net/netfilter/ipset/ip_set_list_set.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
tb                614 net/netfilter/ipset/ip_set_list_set.c 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
tb                617 net/netfilter/ipset/ip_set_list_set.c 	if (tb[IPSET_ATTR_SIZE])
tb                618 net/netfilter/ipset/ip_set_list_set.c 		size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
tb                623 net/netfilter/ipset/ip_set_list_set.c 	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
tb                627 net/netfilter/ipset/ip_set_list_set.c 	if (tb[IPSET_ATTR_TIMEOUT]) {
tb                628 net/netfilter/ipset/ip_set_list_set.c 		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
tb               1839 net/netfilter/nf_conntrack_core.c int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
tb               1842 net/netfilter/nf_conntrack_core.c 	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
tb               1845 net/netfilter/nf_conntrack_core.c 	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
tb               1846 net/netfilter/nf_conntrack_core.c 	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
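
Note the absence of byte swapping in nf_ct_port_nlattr_to_tuple() above: both the CTA_PROTO_*_PORT payloads and the tuple port fields are network byte order (__be16), so the values are stored as-is. The same store-as-is pattern, sketched with a hypothetical tuple_ports type:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tuple_ports { uint16_t src_be; uint16_t dst_be; };

    /* Copy big-endian attribute payloads straight into big-endian
     * tuple fields; no ntohs()/htons() round trip is needed. */
    static void ports_to_tuple(const void *src_attr, const void *dst_attr,
                               struct tuple_ports *t)
    {
        memcpy(&t->src_be, src_attr, sizeof(t->src_be));
        memcpy(&t->dst_be, dst_attr, sizeof(t->dst_be));
    }

    int main(void)
    {
        uint16_t src = htons(12345), dst = htons(80);
        struct tuple_ports t;

        ports_to_tuple(&src, &dst, &t);
        printf("src=%u dst=%u\n", ntohs(t.src_be), ntohs(t.dst_be));
        return 0;
    }
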
tb                996 net/netfilter/nf_conntrack_netlink.c static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
tb                999 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
tb               1002 net/netfilter/nf_conntrack_netlink.c 	t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
tb               1003 net/netfilter/nf_conntrack_netlink.c 	t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
tb               1008 net/netfilter/nf_conntrack_netlink.c static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
tb               1011 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
tb               1014 net/netfilter/nf_conntrack_netlink.c 	t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
tb               1015 net/netfilter/nf_conntrack_netlink.c 	t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
tb               1023 net/netfilter/nf_conntrack_netlink.c 	struct nlattr *tb[CTA_IP_MAX+1];
tb               1026 net/netfilter/nf_conntrack_netlink.c 	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, NULL, NULL);
tb               1037 net/netfilter/nf_conntrack_netlink.c 		ret = ipv4_nlattr_to_tuple(tb, tuple);
tb               1040 net/netfilter/nf_conntrack_netlink.c 		ret = ipv6_nlattr_to_tuple(tb, tuple);
tb               1055 net/netfilter/nf_conntrack_netlink.c 	struct nlattr *tb[CTA_PROTO_MAX+1];
tb               1058 net/netfilter/nf_conntrack_netlink.c 	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
tb               1063 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_PROTO_NUM])
tb               1065 net/netfilter/nf_conntrack_netlink.c 	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
tb               1075 net/netfilter/nf_conntrack_netlink.c 			ret = l4proto->nlattr_to_tuple(tb, tuple);
tb               1131 net/netfilter/nf_conntrack_netlink.c 	struct nlattr *tb[CTA_TUPLE_MAX+1];
tb               1136 net/netfilter/nf_conntrack_netlink.c 	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
tb               1141 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_TUPLE_IP])
tb               1146 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
tb               1150 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_TUPLE_PROTO])
tb               1153 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
tb               1157 net/netfilter/nf_conntrack_netlink.c 	if (tb[CTA_TUPLE_ZONE]) {
tb               1161 net/netfilter/nf_conntrack_netlink.c 		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
tb               1185 net/netfilter/nf_conntrack_netlink.c 	struct nlattr *tb[CTA_HELP_MAX+1];
tb               1187 net/netfilter/nf_conntrack_netlink.c 	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
tb               1192 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_HELP_NAME])
tb               1195 net/netfilter/nf_conntrack_netlink.c 	*helper_name = nla_data(tb[CTA_HELP_NAME]);
tb               1197 net/netfilter/nf_conntrack_netlink.c 	if (tb[CTA_HELP_INFO])
tb               1198 net/netfilter/nf_conntrack_netlink.c 		*helpinfo = tb[CTA_HELP_INFO];
tb               1727 net/netfilter/nf_conntrack_netlink.c 	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
tb               1730 net/netfilter/nf_conntrack_netlink.c 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
tb               1737 net/netfilter/nf_conntrack_netlink.c 		err = l4proto->from_nlattr(tb, ct);
tb               1826 net/netfilter/nf_conntrack_netlink.c 	struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
tb               1832 net/netfilter/nf_conntrack_netlink.c 	err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
tb               1838 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_SYNPROXY_ISN] ||
tb               1839 net/netfilter/nf_conntrack_netlink.c 	    !tb[CTA_SYNPROXY_ITS] ||
tb               1840 net/netfilter/nf_conntrack_netlink.c 	    !tb[CTA_SYNPROXY_TSOFF])
tb               1843 net/netfilter/nf_conntrack_netlink.c 	synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
tb               1844 net/netfilter/nf_conntrack_netlink.c 	synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
tb               1845 net/netfilter/nf_conntrack_netlink.c 	synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
tb               3217 net/netfilter/nf_conntrack_netlink.c 	struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
tb               3221 net/netfilter/nf_conntrack_netlink.c 	err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr,
tb               3226 net/netfilter/nf_conntrack_netlink.c 	if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
tb               3229 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
tb               3237 net/netfilter/nf_conntrack_netlink.c 	exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
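
Every ctnetlink parser in the block above follows one two-level scheme: the outer attribute is a nested container, nla_parse_nested_deprecated() re-indexes it into a stack-allocated tb[CTA_..._MAX + 1] table, and only then are individual fields required or read. A sketch based on the CTA_SYNPROXY lines; the policy argument is elided by the index, so NULL here is a simplification, not what the real call passes:

        static int parse_synproxy(const struct nlattr *attr,
                                  struct nf_conn_synproxy *synproxy)
        {
                struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
                int err;

                /* re-index the nested container into a fresh table */
                err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX, attr,
                                                  NULL, NULL);
                if (err < 0)
                        return err;

                if (!tb[CTA_SYNPROXY_ISN] || !tb[CTA_SYNPROXY_ITS] ||
                    !tb[CTA_SYNPROXY_TSOFF])
                        return -EINVAL;

                synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
                synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
                synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
                return 0;
        }
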
tb                632 net/netfilter/nf_conntrack_proto_dccp.c 	struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];
tb                638 net/netfilter/nf_conntrack_proto_dccp.c 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_DCCP_MAX, attr,
tb                643 net/netfilter/nf_conntrack_proto_dccp.c 	if (!tb[CTA_PROTOINFO_DCCP_STATE] ||
tb                644 net/netfilter/nf_conntrack_proto_dccp.c 	    !tb[CTA_PROTOINFO_DCCP_ROLE] ||
tb                645 net/netfilter/nf_conntrack_proto_dccp.c 	    nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX ||
tb                646 net/netfilter/nf_conntrack_proto_dccp.c 	    nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) {
tb                651 net/netfilter/nf_conntrack_proto_dccp.c 	ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
tb                652 net/netfilter/nf_conntrack_proto_dccp.c 	if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) {
tb                659 net/netfilter/nf_conntrack_proto_dccp.c 	if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) {
tb                661 net/netfilter/nf_conntrack_proto_dccp.c 		be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));
tb                673 net/netfilter/nf_conntrack_proto_dccp.c static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
tb                689 net/netfilter/nf_conntrack_proto_dccp.c 		if (tb[i]) {
tb                690 net/netfilter/nf_conntrack_proto_dccp.c 			timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
tb                 20 net/netfilter/nf_conntrack_proto_generic.c static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
tb                 29 net/netfilter/nf_conntrack_proto_generic.c 	if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
tb                 31 net/netfilter/nf_conntrack_proto_generic.c 		    ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
tb                256 net/netfilter/nf_conntrack_proto_gre.c static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
tb                268 net/netfilter/nf_conntrack_proto_gre.c 	if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
tb                270 net/netfilter/nf_conntrack_proto_gre.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
tb                272 net/netfilter/nf_conntrack_proto_gre.c 	if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
tb                274 net/netfilter/nf_conntrack_proto_gre.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
tb                277 net/netfilter/nf_conntrack_proto_icmp.c static int icmp_nlattr_to_tuple(struct nlattr *tb[],
tb                280 net/netfilter/nf_conntrack_proto_icmp.c 	if (!tb[CTA_PROTO_ICMP_TYPE] ||
tb                281 net/netfilter/nf_conntrack_proto_icmp.c 	    !tb[CTA_PROTO_ICMP_CODE] ||
tb                282 net/netfilter/nf_conntrack_proto_icmp.c 	    !tb[CTA_PROTO_ICMP_ID])
tb                285 net/netfilter/nf_conntrack_proto_icmp.c 	tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]);
tb                286 net/netfilter/nf_conntrack_proto_icmp.c 	tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]);
tb                287 net/netfilter/nf_conntrack_proto_icmp.c 	tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]);
tb                312 net/netfilter/nf_conntrack_proto_icmp.c static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
tb                318 net/netfilter/nf_conntrack_proto_icmp.c 	if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
tb                322 net/netfilter/nf_conntrack_proto_icmp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
tb                195 net/netfilter/nf_conntrack_proto_icmpv6.c static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
tb                198 net/netfilter/nf_conntrack_proto_icmpv6.c 	if (!tb[CTA_PROTO_ICMPV6_TYPE] ||
tb                199 net/netfilter/nf_conntrack_proto_icmpv6.c 	    !tb[CTA_PROTO_ICMPV6_CODE] ||
tb                200 net/netfilter/nf_conntrack_proto_icmpv6.c 	    !tb[CTA_PROTO_ICMPV6_ID])
tb                203 net/netfilter/nf_conntrack_proto_icmpv6.c 	tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]);
tb                204 net/netfilter/nf_conntrack_proto_icmpv6.c 	tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]);
tb                205 net/netfilter/nf_conntrack_proto_icmpv6.c 	tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]);
tb                231 net/netfilter/nf_conntrack_proto_icmpv6.c static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
tb                239 net/netfilter/nf_conntrack_proto_icmpv6.c 	if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
tb                241 net/netfilter/nf_conntrack_proto_icmpv6.c 		    ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
tb                556 net/netfilter/nf_conntrack_proto_sctp.c 	struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1];
tb                563 net/netfilter/nf_conntrack_proto_sctp.c 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_SCTP_MAX, attr,
tb                568 net/netfilter/nf_conntrack_proto_sctp.c 	if (!tb[CTA_PROTOINFO_SCTP_STATE] ||
tb                569 net/netfilter/nf_conntrack_proto_sctp.c 	    !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] ||
tb                570 net/netfilter/nf_conntrack_proto_sctp.c 	    !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])
tb                574 net/netfilter/nf_conntrack_proto_sctp.c 	ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
tb                576 net/netfilter/nf_conntrack_proto_sctp.c 		nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]);
tb                578 net/netfilter/nf_conntrack_proto_sctp.c 		nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]);
tb                590 net/netfilter/nf_conntrack_proto_sctp.c static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
tb                606 net/netfilter/nf_conntrack_proto_sctp.c 		if (tb[i]) {
tb                607 net/netfilter/nf_conntrack_proto_sctp.c 			timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
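
The DCCP and SCTP timeout translators ending here share one loop: walk a contiguous range of CTA_TIMEOUT_* attributes and convert each one present from big-endian seconds to jiffies, leaving absent entries at their defaults. The TCP translator below spells the same conversion out one named attribute at a time. A sketch of the loop form, with hypothetical bounds standing in for each protocol's real attribute range:

        enum { TIMEOUT_FIRST = 1, TIMEOUT_LAST = 8 };   /* hypothetical */

        static void timeouts_from_nlattrs(struct nlattr *tb[],
                                          unsigned int *timeouts)
        {
                int i;

                for (i = TIMEOUT_FIRST; i <= TIMEOUT_LAST; i++) {
                        if (tb[i])      /* absent attrs keep defaults */
                                timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
                }
        }
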
tb               1242 net/netfilter/nf_conntrack_proto_tcp.c 	struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
tb               1250 net/netfilter/nf_conntrack_proto_tcp.c 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
tb               1255 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_PROTOINFO_TCP_STATE] &&
tb               1256 net/netfilter/nf_conntrack_proto_tcp.c 	    nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
tb               1260 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_PROTOINFO_TCP_STATE])
tb               1261 net/netfilter/nf_conntrack_proto_tcp.c 		ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
tb               1263 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
tb               1265 net/netfilter/nf_conntrack_proto_tcp.c 			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
tb               1270 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
tb               1272 net/netfilter/nf_conntrack_proto_tcp.c 			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
tb               1277 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
tb               1278 net/netfilter/nf_conntrack_proto_tcp.c 	    tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
tb               1282 net/netfilter/nf_conntrack_proto_tcp.c 			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
tb               1284 net/netfilter/nf_conntrack_proto_tcp.c 			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
tb               1307 net/netfilter/nf_conntrack_proto_tcp.c static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
tb               1320 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
tb               1322 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
tb               1325 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
tb               1327 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
tb               1329 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
tb               1331 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
tb               1333 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
tb               1335 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
tb               1337 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
tb               1339 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
tb               1341 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
tb               1343 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
tb               1345 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
tb               1347 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
tb               1349 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
tb               1351 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
tb               1353 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
tb               1355 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
tb               1357 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
tb               1359 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
tb               1361 net/netfilter/nf_conntrack_proto_tcp.c 	if (tb[CTA_TIMEOUT_TCP_UNACK]) {
tb               1363 net/netfilter/nf_conntrack_proto_tcp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
tb                213 net/netfilter/nf_conntrack_proto_udp.c static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
tb                226 net/netfilter/nf_conntrack_proto_udp.c 	if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
tb                228 net/netfilter/nf_conntrack_proto_udp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;
tb                230 net/netfilter/nf_conntrack_proto_udp.c 	if (tb[CTA_TIMEOUT_UDP_REPLIED]) {
tb                232 net/netfilter/nf_conntrack_proto_udp.c 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
tb                868 net/netfilter/nf_nat_core.c static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
tb                871 net/netfilter/nf_nat_core.c 	if (tb[CTA_PROTONAT_PORT_MIN]) {
tb                872 net/netfilter/nf_nat_core.c 		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
tb                876 net/netfilter/nf_nat_core.c 	if (tb[CTA_PROTONAT_PORT_MAX]) {
tb                877 net/netfilter/nf_nat_core.c 		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
tb                887 net/netfilter/nf_nat_core.c 	struct nlattr *tb[CTA_PROTONAT_MAX+1];
tb                890 net/netfilter/nf_nat_core.c 	err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr,
tb                895 net/netfilter/nf_nat_core.c 	return nf_nat_l4proto_nlattr_to_range(tb, range);
tb                906 net/netfilter/nf_nat_core.c static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
tb                909 net/netfilter/nf_nat_core.c 	if (tb[CTA_NAT_V4_MINIP]) {
tb                910 net/netfilter/nf_nat_core.c 		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
tb                914 net/netfilter/nf_nat_core.c 	if (tb[CTA_NAT_V4_MAXIP])
tb                915 net/netfilter/nf_nat_core.c 		range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
tb                922 net/netfilter/nf_nat_core.c static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
tb                925 net/netfilter/nf_nat_core.c 	if (tb[CTA_NAT_V6_MINIP]) {
tb                926 net/netfilter/nf_nat_core.c 		nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
tb                931 net/netfilter/nf_nat_core.c 	if (tb[CTA_NAT_V6_MAXIP])
tb                932 net/netfilter/nf_nat_core.c 		nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
tb                944 net/netfilter/nf_nat_core.c 	struct nlattr *tb[CTA_NAT_MAX+1];
tb                949 net/netfilter/nf_nat_core.c 	err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat,
tb                956 net/netfilter/nf_nat_core.c 		err = nf_nat_ipv4_nlattr_to_range(tb, range);
tb                959 net/netfilter/nf_nat_core.c 		err = nf_nat_ipv6_nlattr_to_range(tb, range);
tb                969 net/netfilter/nf_nat_core.c 	if (!tb[CTA_NAT_PROTO])
tb                972 net/netfilter/nf_nat_core.c 	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
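
The nf_nat_core.c block is a two-stage parse: fill tb[CTA_NAT_MAX + 1], hand the address attributes to a per-family converter, then recurse into the nested CTA_NAT_PROTO container for the port range. A reconstruction of the IPv4 converter; the flag update and the max-defaults-to-min branch do not contain the string "tb" and are therefore invisible in this index, so both are assumptions here:

        static int ipv4_nlattr_to_range(struct nlattr *tb[],
                                        struct nf_nat_range2 *range)
        {
                if (tb[CTA_NAT_V4_MINIP]) {
                        range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
                        range->flags |= NF_NAT_RANGE_MAP_IPS;   /* assumed */
                }
                if (tb[CTA_NAT_V4_MAXIP])
                        range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
                else
                        range->max_addr.ip = range->min_addr.ip; /* assumed */
                return 0;
        }
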
tb               1462 net/netfilter/nf_tables_api.c 	struct nlattr *tb[NFTA_COUNTER_MAX+1];
tb               1467 net/netfilter/nf_tables_api.c 	err = nla_parse_nested_deprecated(tb, NFTA_COUNTER_MAX, attr,
tb               1472 net/netfilter/nf_tables_api.c 	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
tb               1484 net/netfilter/nf_tables_api.c 	stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
tb               1485 net/netfilter/nf_tables_api.c 	stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
tb               2173 net/netfilter/nf_tables_api.c 	struct nlattr			*tb[NFT_EXPR_MAXATTR + 1];
tb               2182 net/netfilter/nf_tables_api.c 	struct nlattr *tb[NFTA_EXPR_MAX + 1];
tb               2185 net/netfilter/nf_tables_api.c 	err = nla_parse_nested_deprecated(tb, NFTA_EXPR_MAX, nla,
tb               2190 net/netfilter/nf_tables_api.c 	type = nft_expr_type_get(ctx->net, ctx->family, tb[NFTA_EXPR_NAME]);
tb               2194 net/netfilter/nf_tables_api.c 	if (tb[NFTA_EXPR_DATA]) {
tb               2195 net/netfilter/nf_tables_api.c 		err = nla_parse_nested_deprecated(info->tb, type->maxattr,
tb               2196 net/netfilter/nf_tables_api.c 						  tb[NFTA_EXPR_DATA],
tb               2201 net/netfilter/nf_tables_api.c 		memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));
tb               2205 net/netfilter/nf_tables_api.c 				       (const struct nlattr * const *)info->tb);
tb               2212 net/netfilter/nf_tables_api.c 								 tb[NFTA_EXPR_NAME]) != -EAGAIN)
tb               2237 net/netfilter/nf_tables_api.c 		err = ops->init(ctx, expr, (const struct nlattr **)info->tb);
tb               5087 net/netfilter/nf_tables_api.c 	struct nlattr **tb;
tb               5092 net/netfilter/nf_tables_api.c 	tb = kmalloc_array(type->maxattr + 1, sizeof(*tb), GFP_KERNEL);
tb               5093 net/netfilter/nf_tables_api.c 	if (!tb)
tb               5097 net/netfilter/nf_tables_api.c 		err = nla_parse_nested_deprecated(tb, type->maxattr, attr,
tb               5102 net/netfilter/nf_tables_api.c 		memset(tb, 0, sizeof(tb[0]) * (type->maxattr + 1));
tb               5106 net/netfilter/nf_tables_api.c 		ops = type->select_ops(ctx, (const struct nlattr * const *)tb);
tb               5120 net/netfilter/nf_tables_api.c 	err = ops->init(ctx, (const struct nlattr * const *)tb, obj);
tb               5126 net/netfilter/nf_tables_api.c 	kfree(tb);
tb               5131 net/netfilter/nf_tables_api.c 	kfree(tb);
tb               5722 net/netfilter/nf_tables_api.c 	struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
tb               5727 net/netfilter/nf_tables_api.c 	err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
tb               5732 net/netfilter/nf_tables_api.c 	if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
tb               5733 net/netfilter/nf_tables_api.c 	    !tb[NFTA_FLOWTABLE_HOOK_PRIORITY] ||
tb               5734 net/netfilter/nf_tables_api.c 	    !tb[NFTA_FLOWTABLE_HOOK_DEVS])
tb               5737 net/netfilter/nf_tables_api.c 	hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
tb               5741 net/netfilter/nf_tables_api.c 	priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
tb               5743 net/netfilter/nf_tables_api.c 	err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS],
tb               7469 net/netfilter/nf_tables_api.c 	struct nlattr *tb[NFTA_VERDICT_MAX + 1];
tb               7473 net/netfilter/nf_tables_api.c 	err = nla_parse_nested_deprecated(tb, NFTA_VERDICT_MAX, nla,
tb               7478 net/netfilter/nf_tables_api.c 	if (!tb[NFTA_VERDICT_CODE])
tb               7480 net/netfilter/nf_tables_api.c 	data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
tb               7499 net/netfilter/nf_tables_api.c 		if (!tb[NFTA_VERDICT_CHAIN])
tb               7502 net/netfilter/nf_tables_api.c 					 tb[NFTA_VERDICT_CHAIN], genmask);
tb               7601 net/netfilter/nf_tables_api.c 	struct nlattr *tb[NFTA_DATA_MAX + 1];
tb               7604 net/netfilter/nf_tables_api.c 	err = nla_parse_nested_deprecated(tb, NFTA_DATA_MAX, nla,
tb               7609 net/netfilter/nf_tables_api.c 	if (tb[NFTA_DATA_VALUE])
tb               7611 net/netfilter/nf_tables_api.c 				      tb[NFTA_DATA_VALUE]);
tb               7612 net/netfilter/nf_tables_api.c 	if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
tb               7613 net/netfilter/nf_tables_api.c 		return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
tb                 49 net/netfilter/nfnetlink_acct.c 			 const struct nlattr * const tb[],
tb                 57 net/netfilter/nfnetlink_acct.c 	if (!tb[NFACCT_NAME])
tb                 60 net/netfilter/nfnetlink_acct.c 	acct_name = nla_data(tb[NFACCT_NAME]);
tb                 90 net/netfilter/nfnetlink_acct.c 	if (tb[NFACCT_FLAGS]) {
tb                 91 net/netfilter/nfnetlink_acct.c 		flags = ntohl(nla_get_be32(tb[NFACCT_FLAGS]));
tb                 98 net/netfilter/nfnetlink_acct.c 		if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
tb                111 net/netfilter/nfnetlink_acct.c 		*quota = be64_to_cpu(nla_get_be64(tb[NFACCT_QUOTA]));
tb                115 net/netfilter/nfnetlink_acct.c 	nla_strlcpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
tb                117 net/netfilter/nfnetlink_acct.c 	if (tb[NFACCT_BYTES]) {
tb                119 net/netfilter/nfnetlink_acct.c 			     be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
tb                121 net/netfilter/nfnetlink_acct.c 	if (tb[NFACCT_PKTS]) {
tb                123 net/netfilter/nfnetlink_acct.c 			     be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
tb                241 net/netfilter/nfnetlink_acct.c 	struct nlattr *tb[NFACCT_FILTER_MAX + 1];
tb                248 net/netfilter/nfnetlink_acct.c 	err = nla_parse_nested_deprecated(tb, NFACCT_FILTER_MAX, attr,
tb                253 net/netfilter/nfnetlink_acct.c 	if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
tb                260 net/netfilter/nfnetlink_acct.c 	filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
tb                261 net/netfilter/nfnetlink_acct.c 	filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
tb                269 net/netfilter/nfnetlink_acct.c 			 const struct nlattr * const tb[],
tb                281 net/netfilter/nfnetlink_acct.c 			.data = (void *)tb[NFACCT_FILTER],
tb                287 net/netfilter/nfnetlink_acct.c 	if (!tb[NFACCT_NAME])
tb                289 net/netfilter/nfnetlink_acct.c 	acct_name = nla_data(tb[NFACCT_NAME]);
tb                342 net/netfilter/nfnetlink_acct.c 			 const struct nlattr * const tb[],
tb                349 net/netfilter/nfnetlink_acct.c 	if (!tb[NFACCT_NAME]) {
tb                355 net/netfilter/nfnetlink_acct.c 	acct_name = nla_data(tb[NFACCT_NAME]);
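
The nfnetlink_acct.c hits pair two accessor families: bounded string copy via nla_strlcpy() for the object name, and 64-bit big-endian counters via be64_to_cpu(nla_get_be64()). A sketch combining them; struct my_acct is hypothetical, and it assumes the caller has already rejected a missing NFACCT_NAME, as the excerpts at lines 57, 287 and 349 do:

        struct my_acct {                /* hypothetical container */
                char name[NFACCT_NAME_MAX];
                u64 bytes, pkts;
        };

        static void acct_from_nlattrs(struct nlattr *tb[], struct my_acct *a)
        {
                nla_strlcpy(a->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
                if (tb[NFACCT_BYTES])
                        a->bytes = be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES]));
                if (tb[NFACCT_PKTS])
                        a->pkts = be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS]));
        }
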
tb                 76 net/netfilter/nfnetlink_cthelper.c 	struct nlattr *tb[NFCTH_TUPLE_MAX+1];
tb                 78 net/netfilter/nfnetlink_cthelper.c 	err = nla_parse_nested_deprecated(tb, NFCTH_TUPLE_MAX, attr,
tb                 83 net/netfilter/nfnetlink_cthelper.c 	if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
tb                 89 net/netfilter/nfnetlink_cthelper.c 	tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
tb                 90 net/netfilter/nfnetlink_cthelper.c 	tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
tb                137 net/netfilter/nfnetlink_cthelper.c 	struct nlattr *tb[NFCTH_POLICY_MAX+1];
tb                139 net/netfilter/nfnetlink_cthelper.c 	err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_MAX, attr,
tb                144 net/netfilter/nfnetlink_cthelper.c 	if (!tb[NFCTH_POLICY_NAME] ||
tb                145 net/netfilter/nfnetlink_cthelper.c 	    !tb[NFCTH_POLICY_EXPECT_MAX] ||
tb                146 net/netfilter/nfnetlink_cthelper.c 	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
tb                150 net/netfilter/nfnetlink_cthelper.c 		    tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN);
tb                152 net/netfilter/nfnetlink_cthelper.c 		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
tb                157 net/netfilter/nfnetlink_cthelper.c 		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
tb                173 net/netfilter/nfnetlink_cthelper.c 	struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
tb                176 net/netfilter/nfnetlink_cthelper.c 	ret = nla_parse_nested_deprecated(tb, NFCTH_POLICY_SET_MAX, attr,
tb                182 net/netfilter/nfnetlink_cthelper.c 	if (!tb[NFCTH_POLICY_SET_NUM])
tb                185 net/netfilter/nfnetlink_cthelper.c 	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
tb                198 net/netfilter/nfnetlink_cthelper.c 		if (!tb[NFCTH_POLICY_SET+i])
tb                202 net/netfilter/nfnetlink_cthelper.c 						  tb[NFCTH_POLICY_SET+i]);
tb                216 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_create(const struct nlattr * const tb[],
tb                224 net/netfilter/nfnetlink_cthelper.c 	if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
tb                232 net/netfilter/nfnetlink_cthelper.c 	ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
tb                237 net/netfilter/nfnetlink_cthelper.c 		    tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
tb                238 net/netfilter/nfnetlink_cthelper.c 	size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
tb                254 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_QUEUE_NUM])
tb                255 net/netfilter/nfnetlink_cthelper.c 		helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
tb                257 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_STATUS]) {
tb                258 net/netfilter/nfnetlink_cthelper.c 		int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
tb                288 net/netfilter/nfnetlink_cthelper.c 	struct nlattr *tb[NFCTH_POLICY_MAX + 1];
tb                291 net/netfilter/nfnetlink_cthelper.c 	err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_MAX, attr,
tb                296 net/netfilter/nfnetlink_cthelper.c 	if (!tb[NFCTH_POLICY_NAME] ||
tb                297 net/netfilter/nfnetlink_cthelper.c 	    !tb[NFCTH_POLICY_EXPECT_MAX] ||
tb                298 net/netfilter/nfnetlink_cthelper.c 	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
tb                301 net/netfilter/nfnetlink_cthelper.c 	if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
tb                305 net/netfilter/nfnetlink_cthelper.c 		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
tb                310 net/netfilter/nfnetlink_cthelper.c 		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
tb                315 net/netfilter/nfnetlink_cthelper.c static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
tb                332 net/netfilter/nfnetlink_cthelper.c 		if (!tb[NFCTH_POLICY_SET + i]) {
tb                339 net/netfilter/nfnetlink_cthelper.c 						      tb[NFCTH_POLICY_SET + i]);
tb                359 net/netfilter/nfnetlink_cthelper.c 	struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
tb                363 net/netfilter/nfnetlink_cthelper.c 	err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_SET_MAX, attr,
tb                369 net/netfilter/nfnetlink_cthelper.c 	if (!tb[NFCTH_POLICY_SET_NUM])
tb                372 net/netfilter/nfnetlink_cthelper.c 	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
tb                376 net/netfilter/nfnetlink_cthelper.c 	return nfnl_cthelper_update_policy_all(tb, helper);
tb                380 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_update(const struct nlattr * const tb[],
tb                385 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_PRIV_DATA_LEN])
tb                388 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_POLICY]) {
tb                389 net/netfilter/nfnetlink_cthelper.c 		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
tb                393 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_QUEUE_NUM])
tb                394 net/netfilter/nfnetlink_cthelper.c 		helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
tb                396 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_STATUS]) {
tb                397 net/netfilter/nfnetlink_cthelper.c 		int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
tb                413 net/netfilter/nfnetlink_cthelper.c 			     const struct nlattr * const tb[],
tb                425 net/netfilter/nfnetlink_cthelper.c 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
tb                428 net/netfilter/nfnetlink_cthelper.c 	helper_name = nla_data(tb[NFCTH_NAME]);
tb                430 net/netfilter/nfnetlink_cthelper.c 	ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
tb                452 net/netfilter/nfnetlink_cthelper.c 		ret = nfnl_cthelper_create(tb, &tuple);
tb                454 net/netfilter/nfnetlink_cthelper.c 		ret = nfnl_cthelper_update(tb, helper);
tb                617 net/netfilter/nfnetlink_cthelper.c 			     const struct nlattr * const tb[],
tb                638 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_NAME])
tb                639 net/netfilter/nfnetlink_cthelper.c 		helper_name = nla_data(tb[NFCTH_NAME]);
tb                641 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_TUPLE]) {
tb                642 net/netfilter/nfnetlink_cthelper.c 		ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
tb                688 net/netfilter/nfnetlink_cthelper.c 			     const struct nlattr * const tb[],
tb                701 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_NAME])
tb                702 net/netfilter/nfnetlink_cthelper.c 		helper_name = nla_data(tb[NFCTH_NAME]);
tb                704 net/netfilter/nfnetlink_cthelper.c 	if (tb[NFCTH_TUPLE]) {
tb                705 net/netfilter/nfnetlink_cthelper.c 		ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
tb                 50 net/netfilter/nfnetlink_cttimeout.c 	struct nlattr **tb;
tb                 53 net/netfilter/nfnetlink_cttimeout.c 	tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
tb                 56 net/netfilter/nfnetlink_cttimeout.c 	if (!tb)
tb                 59 net/netfilter/nfnetlink_cttimeout.c 	ret = nla_parse_nested_deprecated(tb,
tb                 67 net/netfilter/nfnetlink_cttimeout.c 	ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeout);
tb                 70 net/netfilter/nfnetlink_cttimeout.c 	kfree(tb);
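
nfnetlink_cttimeout.c (lines 50-70), like nft_ct.c further down (lines 797-817), cannot size tb[] on the stack: the attribute maximum comes from the l4proto at run time, so the table is kcalloc()'d and kfree()'d on every exit path. A sketch of that shape, with the protocol's nlattr_to_obj() conversion step elided:

        static int parse_l4_timeouts(const struct nlattr *attr, int nlattr_max,
                                     const struct nla_policy *policy)
        {
                struct nlattr **tb;
                int ret;

                tb = kcalloc(nlattr_max + 1, sizeof(*tb), GFP_KERNEL);
                if (!tb)
                        return -ENOMEM;

                ret = nla_parse_nested_deprecated(tb, nlattr_max, attr,
                                                  policy, NULL);
                /* on success, hand tb to the protocol's nlattr_to_obj() */
                kfree(tb);      /* freed on every path, as in the excerpts */
                return ret;
        }
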
tb               1135 net/netfilter/nfnetlink_queue.c 		struct nlattr *tb[NFQA_VLAN_MAX + 1];
tb               1138 net/netfilter/nfnetlink_queue.c 		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
tb               1144 net/netfilter/nfnetlink_queue.c 		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
tb               1148 net/netfilter/nfnetlink_queue.c 			nla_get_be16(tb[NFQA_VLAN_PROTO]),
tb               1149 net/netfilter/nfnetlink_queue.c 			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
tb                 48 net/netfilter/nft_bitwise.c 			    const struct nlattr * const tb[])
tb                 55 net/netfilter/nft_bitwise.c 	if (tb[NFTA_BITWISE_SREG] == NULL ||
tb                 56 net/netfilter/nft_bitwise.c 	    tb[NFTA_BITWISE_DREG] == NULL ||
tb                 57 net/netfilter/nft_bitwise.c 	    tb[NFTA_BITWISE_LEN] == NULL ||
tb                 58 net/netfilter/nft_bitwise.c 	    tb[NFTA_BITWISE_MASK] == NULL ||
tb                 59 net/netfilter/nft_bitwise.c 	    tb[NFTA_BITWISE_XOR] == NULL)
tb                 62 net/netfilter/nft_bitwise.c 	err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
tb                 68 net/netfilter/nft_bitwise.c 	priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
tb                 73 net/netfilter/nft_bitwise.c 	priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
tb                 80 net/netfilter/nft_bitwise.c 			    tb[NFTA_BITWISE_MASK]);
tb                 89 net/netfilter/nft_bitwise.c 			    tb[NFTA_BITWISE_XOR]);
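
From here on, the hits are nf_tables expression init functions, and nft_bitwise_init() shows their shared skeleton: require the register and length attributes, range-check lengths with nft_parse_u32_check(), then translate register attributes with nft_parse_register(). The register-validation step the real code performs between parse and assignment does not contain "tb" and so never appears in this index; it is omitted from the sketch as well. struct example_priv stands in for the expression's private area:

        struct example_priv {
                u8 sreg, dreg, len;     /* hypothetical private area */
        };

        static int example_expr_init(const struct nft_ctx *ctx,
                                     const struct nft_expr *expr,
                                     const struct nlattr * const tb[])
        {
                struct example_priv *priv = nft_expr_priv(expr);
                u32 len;
                int err;

                if (!tb[NFTA_BITWISE_SREG] || !tb[NFTA_BITWISE_DREG] ||
                    !tb[NFTA_BITWISE_LEN])
                        return -EINVAL;

                err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
                if (err < 0)
                        return err;

                priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
                priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
                priv->len  = len;
                return 0;
        }
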
tb                 97 net/netfilter/nft_byteorder.c 			      const struct nlattr * const tb[])
tb                103 net/netfilter/nft_byteorder.c 	if (tb[NFTA_BYTEORDER_SREG] == NULL ||
tb                104 net/netfilter/nft_byteorder.c 	    tb[NFTA_BYTEORDER_DREG] == NULL ||
tb                105 net/netfilter/nft_byteorder.c 	    tb[NFTA_BYTEORDER_LEN] == NULL ||
tb                106 net/netfilter/nft_byteorder.c 	    tb[NFTA_BYTEORDER_SIZE] == NULL ||
tb                107 net/netfilter/nft_byteorder.c 	    tb[NFTA_BYTEORDER_OP] == NULL)
tb                110 net/netfilter/nft_byteorder.c 	priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
tb                119 net/netfilter/nft_byteorder.c 	err = nft_parse_u32_check(tb[NFTA_BYTEORDER_SIZE], U8_MAX, &size);
tb                134 net/netfilter/nft_byteorder.c 	priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]);
tb                135 net/netfilter/nft_byteorder.c 	err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len);
tb                145 net/netfilter/nft_byteorder.c 	priv->dreg = nft_parse_register(tb[NFTA_BYTEORDER_DREG]);
tb                 72 net/netfilter/nft_cmp.c 			const struct nlattr * const tb[])
tb                 79 net/netfilter/nft_cmp.c 			    tb[NFTA_CMP_DATA]);
tb                 89 net/netfilter/nft_cmp.c 	priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
tb                 94 net/netfilter/nft_cmp.c 	priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
tb                159 net/netfilter/nft_cmp.c 			     const struct nlattr * const tb[])
tb                168 net/netfilter/nft_cmp.c 			    tb[NFTA_CMP_DATA]);
tb                172 net/netfilter/nft_cmp.c 	priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
tb                234 net/netfilter/nft_cmp.c nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
tb                241 net/netfilter/nft_cmp.c 	if (tb[NFTA_CMP_SREG] == NULL ||
tb                242 net/netfilter/nft_cmp.c 	    tb[NFTA_CMP_OP] == NULL ||
tb                243 net/netfilter/nft_cmp.c 	    tb[NFTA_CMP_DATA] == NULL)
tb                246 net/netfilter/nft_cmp.c 	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
tb                260 net/netfilter/nft_cmp.c 			    tb[NFTA_CMP_DATA]);
tb                194 net/netfilter/nft_compat.c 	struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
tb                198 net/netfilter/nft_compat.c 	err = nla_parse_nested_deprecated(tb, NFTA_RULE_COMPAT_MAX, attr,
tb                203 net/netfilter/nft_compat.c 	if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS])
tb                206 net/netfilter/nft_compat.c 	flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
tb                212 net/netfilter/nft_compat.c 	*proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
tb                218 net/netfilter/nft_compat.c 		const struct nlattr * const tb[])
tb                223 net/netfilter/nft_compat.c 	size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
tb                229 net/netfilter/nft_compat.c 	target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
tb                433 net/netfilter/nft_compat.c 		 const struct nlattr * const tb[],
tb                438 net/netfilter/nft_compat.c 	size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
tb                444 net/netfilter/nft_compat.c 	match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
tb                459 net/netfilter/nft_compat.c 	       const struct nlattr * const tb[])
tb                461 net/netfilter/nft_compat.c 	return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr));
tb                466 net/netfilter/nft_compat.c 		     const struct nlattr * const tb[])
tb                476 net/netfilter/nft_compat.c 	ret = __nft_match_init(ctx, expr, tb, priv->info);
tb                604 net/netfilter/nft_compat.c 			       const struct nlattr * const tb[],
tb                614 net/netfilter/nft_compat.c 	if (tb[NFTA_COMPAT_NAME] == NULL ||
tb                615 net/netfilter/nft_compat.c 	    tb[NFTA_COMPAT_REV] == NULL ||
tb                616 net/netfilter/nft_compat.c 	    tb[NFTA_COMPAT_TYPE] == NULL)
tb                619 net/netfilter/nft_compat.c 	name = nla_data(tb[NFTA_COMPAT_NAME]);
tb                620 net/netfilter/nft_compat.c 	rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
tb                621 net/netfilter/nft_compat.c 	target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));
tb                705 net/netfilter/nft_compat.c 		     const struct nlattr * const tb[])
tb                714 net/netfilter/nft_compat.c 	if (tb[NFTA_MATCH_NAME] == NULL ||
tb                715 net/netfilter/nft_compat.c 	    tb[NFTA_MATCH_REV] == NULL ||
tb                716 net/netfilter/nft_compat.c 	    tb[NFTA_MATCH_INFO] == NULL)
tb                719 net/netfilter/nft_compat.c 	mt_name = nla_data(tb[NFTA_MATCH_NAME]);
tb                720 net/netfilter/nft_compat.c 	rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
tb                727 net/netfilter/nft_compat.c 	if (match->matchsize > nla_len(tb[NFTA_MATCH_INFO])) {
tb                785 net/netfilter/nft_compat.c 		      const struct nlattr * const tb[])
tb                793 net/netfilter/nft_compat.c 	if (tb[NFTA_TARGET_NAME] == NULL ||
tb                794 net/netfilter/nft_compat.c 	    tb[NFTA_TARGET_REV] == NULL ||
tb                795 net/netfilter/nft_compat.c 	    tb[NFTA_TARGET_INFO] == NULL)
tb                798 net/netfilter/nft_compat.c 	tg_name = nla_data(tb[NFTA_TARGET_NAME]);
tb                799 net/netfilter/nft_compat.c 	rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
tb                816 net/netfilter/nft_compat.c 	if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
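
nft_compat.c bridges xtables matches and targets into nf_tables, and its recurring safety check compares the xt module's declared info size against the attribute payload before copying anything: XT_ALIGN(nla_len(...)) sizes the copy, and an oversized declaration is rejected. A sketch of that check; -EINVAL is an assumption, the excerpts cut off before the error path:

        static int check_info_len(const struct xt_match *match,
                                  const struct nlattr *attr)
        {
                /* never trust a matchsize larger than the payload */
                if (match->matchsize > nla_len(attr))
                        return -EINVAL;         /* assumed error code */
                return 0;
        }
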
tb                 60 net/netfilter/nft_connlimit.c 				 const struct nlattr * const tb[],
tb                 66 net/netfilter/nft_connlimit.c 	if (!tb[NFTA_CONNLIMIT_COUNT])
tb                 69 net/netfilter/nft_connlimit.c 	limit = ntohl(nla_get_be32(tb[NFTA_CONNLIMIT_COUNT]));
tb                 71 net/netfilter/nft_connlimit.c 	if (tb[NFTA_CONNLIMIT_FLAGS]) {
tb                 72 net/netfilter/nft_connlimit.c 		flags = ntohl(nla_get_be32(tb[NFTA_CONNLIMIT_FLAGS]));
tb                118 net/netfilter/nft_connlimit.c 				const struct nlattr * const tb[],
tb                123 net/netfilter/nft_connlimit.c 	return nft_connlimit_do_init(ctx, tb, priv);
tb                183 net/netfilter/nft_connlimit.c 			      const struct nlattr * const tb[])
tb                187 net/netfilter/nft_connlimit.c 	return nft_connlimit_do_init(ctx, tb, priv);
tb                 57 net/netfilter/nft_counter.c static int nft_counter_do_init(const struct nlattr * const tb[],
tb                 69 net/netfilter/nft_counter.c 	if (tb[NFTA_COUNTER_PACKETS]) {
tb                 71 net/netfilter/nft_counter.c 			be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
tb                 73 net/netfilter/nft_counter.c 	if (tb[NFTA_COUNTER_BYTES]) {
tb                 75 net/netfilter/nft_counter.c 			be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
tb                 83 net/netfilter/nft_counter.c 				const struct nlattr * const tb[],
tb                 88 net/netfilter/nft_counter.c 	return nft_counter_do_init(tb, priv);
tb                212 net/netfilter/nft_counter.c 			    const struct nlattr * const tb[])
tb                216 net/netfilter/nft_counter.c 	return nft_counter_do_init(tb, priv);
tb                389 net/netfilter/nft_ct.c 			   const struct nlattr * const tb[])
tb                395 net/netfilter/nft_ct.c 	priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
tb                399 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] != NULL)
tb                412 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] != NULL)
tb                418 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] != NULL)
tb                424 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] != NULL)
tb                438 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] == NULL)
tb                457 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] == NULL)
tb                464 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] == NULL)
tb                471 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION] == NULL)
tb                492 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_DIRECTION] != NULL) {
tb                493 net/netfilter/nft_ct.c 		priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
tb                503 net/netfilter/nft_ct.c 	priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]);
tb                541 net/netfilter/nft_ct.c 			   const struct nlattr * const tb[])
tb                548 net/netfilter/nft_ct.c 	priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
tb                552 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION])
tb                559 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION])
tb                577 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION])
tb                584 net/netfilter/nft_ct.c 		if (tb[NFTA_CT_DIRECTION])
tb                593 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_DIRECTION]) {
tb                594 net/netfilter/nft_ct.c 		priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
tb                605 net/netfilter/nft_ct.c 	priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]);
tb                732 net/netfilter/nft_ct.c 		    const struct nlattr * const tb[])
tb                734 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_KEY] == NULL)
tb                737 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG])
tb                740 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_DREG])
tb                743 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_SREG]) {
tb                745 net/netfilter/nft_ct.c 		if (nla_get_be32(tb[NFTA_CT_KEY]) == htonl(NFT_CT_ZONE))
tb                797 net/netfilter/nft_ct.c 	struct nlattr **tb;
tb                800 net/netfilter/nft_ct.c 	tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
tb                803 net/netfilter/nft_ct.c 	if (!tb)
tb                806 net/netfilter/nft_ct.c 	ret = nla_parse_nested_deprecated(tb,
tb                814 net/netfilter/nft_ct.c 	ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
tb                817 net/netfilter/nft_ct.c 	kfree(tb);
tb                861 net/netfilter/nft_ct.c 				   const struct nlattr * const tb[],
tb                871 net/netfilter/nft_ct.c 	if (!tb[NFTA_CT_TIMEOUT_L4PROTO] ||
tb                872 net/netfilter/nft_ct.c 	    !tb[NFTA_CT_TIMEOUT_DATA])
tb                875 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_TIMEOUT_L3PROTO])
tb                876 net/netfilter/nft_ct.c 		l3num = ntohs(nla_get_be16(tb[NFTA_CT_TIMEOUT_L3PROTO]));
tb                878 net/netfilter/nft_ct.c 	l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
tb                896 net/netfilter/nft_ct.c 					  tb[NFTA_CT_TIMEOUT_DATA]);
tb                977 net/netfilter/nft_ct.c 				  const struct nlattr * const tb[],
tb                986 net/netfilter/nft_ct.c 	if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO])
tb                989 net/netfilter/nft_ct.c 	priv->l4proto = nla_get_u8(tb[NFTA_CT_HELPER_L4PROTO]);
tb                993 net/netfilter/nft_ct.c 	nla_strlcpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name));
tb                995 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_HELPER_L3PROTO])
tb                996 net/netfilter/nft_ct.c 		family = ntohs(nla_get_be16(tb[NFTA_CT_HELPER_L3PROTO]));
tb               1166 net/netfilter/nft_ct.c 				  const struct nlattr * const tb[],
tb               1171 net/netfilter/nft_ct.c 	if (!tb[NFTA_CT_EXPECT_L4PROTO] ||
tb               1172 net/netfilter/nft_ct.c 	    !tb[NFTA_CT_EXPECT_DPORT] ||
tb               1173 net/netfilter/nft_ct.c 	    !tb[NFTA_CT_EXPECT_TIMEOUT] ||
tb               1174 net/netfilter/nft_ct.c 	    !tb[NFTA_CT_EXPECT_SIZE])
tb               1178 net/netfilter/nft_ct.c 	if (tb[NFTA_CT_EXPECT_L3PROTO])
tb               1179 net/netfilter/nft_ct.c 		priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
tb               1181 net/netfilter/nft_ct.c 	priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
tb               1182 net/netfilter/nft_ct.c 	priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
tb               1183 net/netfilter/nft_ct.c 	priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
tb               1184 net/netfilter/nft_ct.c 	priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
tb                 36 net/netfilter/nft_dup_netdev.c 			       const struct nlattr * const tb[])
tb                 40 net/netfilter/nft_dup_netdev.c 	if (tb[NFTA_DUP_SREG_DEV] == NULL)
tb                 43 net/netfilter/nft_dup_netdev.c 	priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
tb                130 net/netfilter/nft_dynset.c 			   const struct nlattr * const tb[])
tb                140 net/netfilter/nft_dynset.c 	if (tb[NFTA_DYNSET_SET_NAME] == NULL ||
tb                141 net/netfilter/nft_dynset.c 	    tb[NFTA_DYNSET_OP] == NULL ||
tb                142 net/netfilter/nft_dynset.c 	    tb[NFTA_DYNSET_SREG_KEY] == NULL)
tb                145 net/netfilter/nft_dynset.c 	if (tb[NFTA_DYNSET_FLAGS]) {
tb                146 net/netfilter/nft_dynset.c 		u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS]));
tb                155 net/netfilter/nft_dynset.c 				    tb[NFTA_DYNSET_SET_NAME],
tb                156 net/netfilter/nft_dynset.c 				    tb[NFTA_DYNSET_SET_ID], genmask);
tb                166 net/netfilter/nft_dynset.c 	priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP]));
tb                180 net/netfilter/nft_dynset.c 	if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
tb                184 net/netfilter/nft_dynset.c 						tb[NFTA_DYNSET_TIMEOUT])));
tb                187 net/netfilter/nft_dynset.c 	priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
tb                192 net/netfilter/nft_dynset.c 	if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
tb                198 net/netfilter/nft_dynset.c 		priv->sreg_data = nft_parse_register(tb[NFTA_DYNSET_SREG_DATA]);
tb                205 net/netfilter/nft_dynset.c 	if (tb[NFTA_DYNSET_EXPR] != NULL) {
tb                209 net/netfilter/nft_dynset.c 		priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]);
tb                317 net/netfilter/nft_exthdr.c 			   const struct nlattr * const tb[])
tb                323 net/netfilter/nft_exthdr.c 	if (!tb[NFTA_EXTHDR_DREG] ||
tb                324 net/netfilter/nft_exthdr.c 	    !tb[NFTA_EXTHDR_TYPE] ||
tb                325 net/netfilter/nft_exthdr.c 	    !tb[NFTA_EXTHDR_OFFSET] ||
tb                326 net/netfilter/nft_exthdr.c 	    !tb[NFTA_EXTHDR_LEN])
tb                329 net/netfilter/nft_exthdr.c 	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
tb                333 net/netfilter/nft_exthdr.c 	err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
tb                337 net/netfilter/nft_exthdr.c 	if (tb[NFTA_EXTHDR_FLAGS]) {
tb                338 net/netfilter/nft_exthdr.c 		err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
tb                346 net/netfilter/nft_exthdr.c 	if (tb[NFTA_EXTHDR_OP]) {
tb                347 net/netfilter/nft_exthdr.c 		err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
tb                352 net/netfilter/nft_exthdr.c 	priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
tb                355 net/netfilter/nft_exthdr.c 	priv->dreg   = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
tb                365 net/netfilter/nft_exthdr.c 				   const struct nlattr * const tb[])
tb                371 net/netfilter/nft_exthdr.c 	if (!tb[NFTA_EXTHDR_SREG] ||
tb                372 net/netfilter/nft_exthdr.c 	    !tb[NFTA_EXTHDR_TYPE] ||
tb                373 net/netfilter/nft_exthdr.c 	    !tb[NFTA_EXTHDR_OFFSET] ||
tb                374 net/netfilter/nft_exthdr.c 	    !tb[NFTA_EXTHDR_LEN])
tb                377 net/netfilter/nft_exthdr.c 	if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS])
tb                380 net/netfilter/nft_exthdr.c 	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
tb                384 net/netfilter/nft_exthdr.c 	err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
tb                398 net/netfilter/nft_exthdr.c 	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
tb                402 net/netfilter/nft_exthdr.c 	priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
tb                405 net/netfilter/nft_exthdr.c 	priv->sreg   = nft_parse_register(tb[NFTA_EXTHDR_SREG]);
tb                414 net/netfilter/nft_exthdr.c 				const struct nlattr * const tb[])
tb                417 net/netfilter/nft_exthdr.c 	int err = nft_exthdr_init(ctx, expr, tb);
tb                506 net/netfilter/nft_exthdr.c 		      const struct nlattr * const tb[])
tb                510 net/netfilter/nft_exthdr.c 	if (!tb[NFTA_EXTHDR_OP])
tb                513 net/netfilter/nft_exthdr.c 	if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG])
tb                516 net/netfilter/nft_exthdr.c 	op = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OP]));
tb                519 net/netfilter/nft_exthdr.c 		if (tb[NFTA_EXTHDR_SREG])
tb                521 net/netfilter/nft_exthdr.c 		if (tb[NFTA_EXTHDR_DREG])
tb                525 net/netfilter/nft_exthdr.c 		if (tb[NFTA_EXTHDR_DREG])
tb                530 net/netfilter/nft_exthdr.c 			if (tb[NFTA_EXTHDR_DREG])
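
When one expression name backs several operations, ->select_ops() inspects the attribute table before init runs and picks the ops table; nft_exthdr_select_ops() above branches on NFTA_EXTHDR_OP and on whether SREG or DREG is present, and nft_cmp, nft_hash and nft_limit below dispatch the same way. A generic sketch of the pattern; the ops tables are placeholders and the error codes are assumptions, since the index hides the return values:

        static const struct nft_expr_ops example_set_ops;  /* placeholder */
        static const struct nft_expr_ops example_get_ops;  /* placeholder */

        static const struct nft_expr_ops *
        example_select_ops(const struct nft_ctx *ctx,
                           const struct nlattr * const tb[])
        {
                if (!tb[NFTA_EXTHDR_OP])
                        return ERR_PTR(-EINVAL);        /* assumed */
                if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG])
                        return ERR_PTR(-EOPNOTSUPP);    /* mutually exclusive */

                /* the real function also switches on the OP value here */
                if (tb[NFTA_EXTHDR_SREG])
                        return &example_set_ops;
                if (tb[NFTA_EXTHDR_DREG])
                        return &example_get_ops;
                return ERR_PTR(-EOPNOTSUPP);
        }
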
tb                 65 net/netfilter/nft_fib.c 		 const struct nlattr * const tb[])
tb                 71 net/netfilter/nft_fib.c 	if (!tb[NFTA_FIB_DREG] || !tb[NFTA_FIB_RESULT] || !tb[NFTA_FIB_FLAGS])
tb                 74 net/netfilter/nft_fib.c 	priv->flags = ntohl(nla_get_be32(tb[NFTA_FIB_FLAGS]));
tb                 88 net/netfilter/nft_fib.c 	priv->result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
tb                 89 net/netfilter/nft_fib.c 	priv->dreg = nft_parse_register(tb[NFTA_FIB_DREG]);
tb                160 net/netfilter/nft_flow_offload.c 				 const struct nlattr * const tb[])
tb                166 net/netfilter/nft_flow_offload.c 	if (!tb[NFTA_FLOW_TABLE_NAME])
tb                169 net/netfilter/nft_flow_offload.c 	flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
tb                 46 net/netfilter/nft_fwd_netdev.c 			       const struct nlattr * const tb[])
tb                 50 net/netfilter/nft_fwd_netdev.c 	if (tb[NFTA_FWD_SREG_DEV] == NULL)
tb                 53 net/netfilter/nft_fwd_netdev.c 	priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
tb                148 net/netfilter/nft_fwd_netdev.c 			      const struct nlattr * const tb[])
tb                154 net/netfilter/nft_fwd_netdev.c 	if (!tb[NFTA_FWD_SREG_DEV] ||
tb                155 net/netfilter/nft_fwd_netdev.c 	    !tb[NFTA_FWD_SREG_ADDR] ||
tb                156 net/netfilter/nft_fwd_netdev.c 	    !tb[NFTA_FWD_NFPROTO])
tb                159 net/netfilter/nft_fwd_netdev.c 	priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
tb                160 net/netfilter/nft_fwd_netdev.c 	priv->sreg_addr = nft_parse_register(tb[NFTA_FWD_SREG_ADDR]);
tb                161 net/netfilter/nft_fwd_netdev.c 	priv->nfproto = ntohl(nla_get_be32(tb[NFTA_FWD_NFPROTO]));
tb                225 net/netfilter/nft_fwd_netdev.c 		   const struct nlattr * const tb[])
tb                227 net/netfilter/nft_fwd_netdev.c 	if (tb[NFTA_FWD_SREG_ADDR])
tb                229 net/netfilter/nft_fwd_netdev.c 	if (tb[NFTA_FWD_SREG_DEV])
tb                 71 net/netfilter/nft_hash.c 			  const struct nlattr * const tb[])
tb                 77 net/netfilter/nft_hash.c 	if (!tb[NFTA_HASH_SREG] ||
tb                 78 net/netfilter/nft_hash.c 	    !tb[NFTA_HASH_DREG] ||
tb                 79 net/netfilter/nft_hash.c 	    !tb[NFTA_HASH_LEN]  ||
tb                 80 net/netfilter/nft_hash.c 	    !tb[NFTA_HASH_MODULUS])
tb                 83 net/netfilter/nft_hash.c 	if (tb[NFTA_HASH_OFFSET])
tb                 84 net/netfilter/nft_hash.c 		priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
tb                 86 net/netfilter/nft_hash.c 	priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
tb                 87 net/netfilter/nft_hash.c 	priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
tb                 89 net/netfilter/nft_hash.c 	err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
tb                 97 net/netfilter/nft_hash.c 	priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
tb                104 net/netfilter/nft_hash.c 	if (tb[NFTA_HASH_SEED]) {
tb                105 net/netfilter/nft_hash.c 		priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
tb                118 net/netfilter/nft_hash.c 			    const struct nlattr * const tb[])
tb                122 net/netfilter/nft_hash.c 	if (!tb[NFTA_HASH_DREG]    ||
tb                123 net/netfilter/nft_hash.c 	    !tb[NFTA_HASH_MODULUS])
tb                126 net/netfilter/nft_hash.c 	if (tb[NFTA_HASH_OFFSET])
tb                127 net/netfilter/nft_hash.c 		priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
tb                129 net/netfilter/nft_hash.c 	priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
tb                131 net/netfilter/nft_hash.c 	priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
tb                208 net/netfilter/nft_hash.c 		    const struct nlattr * const tb[])
tb                212 net/netfilter/nft_hash.c 	if (!tb[NFTA_HASH_TYPE])
tb                215 net/netfilter/nft_hash.c 	type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE]));
tb                 34 net/netfilter/nft_immediate.c 			      const struct nlattr * const tb[])
tb                 40 net/netfilter/nft_immediate.c 	if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
tb                 41 net/netfilter/nft_immediate.c 	    tb[NFTA_IMMEDIATE_DATA] == NULL)
tb                 45 net/netfilter/nft_immediate.c 			    tb[NFTA_IMMEDIATE_DATA]);
tb                 51 net/netfilter/nft_immediate.c 	priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]);
tb                 55 net/netfilter/nft_limit.c 			  const struct nlattr * const tb[], bool pkts)
tb                 59 net/netfilter/nft_limit.c 	if (tb[NFTA_LIMIT_RATE] == NULL ||
tb                 60 net/netfilter/nft_limit.c 	    tb[NFTA_LIMIT_UNIT] == NULL)
tb                 63 net/netfilter/nft_limit.c 	limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
tb                 64 net/netfilter/nft_limit.c 	unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
tb                 69 net/netfilter/nft_limit.c 	if (tb[NFTA_LIMIT_BURST])
tb                 70 net/netfilter/nft_limit.c 		limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
tb                 92 net/netfilter/nft_limit.c 	if (tb[NFTA_LIMIT_FLAGS]) {
tb                 93 net/netfilter/nft_limit.c 		u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
tb                149 net/netfilter/nft_limit.c 			       const struct nlattr * const tb[])
tb                154 net/netfilter/nft_limit.c 	err = nft_limit_init(&priv->limit, tb, true);
tb                191 net/netfilter/nft_limit.c 				const struct nlattr * const tb[])
tb                195 net/netfilter/nft_limit.c 	return nft_limit_init(priv, tb, false);
tb                216 net/netfilter/nft_limit.c 		     const struct nlattr * const tb[])
tb                218 net/netfilter/nft_limit.c 	if (tb[NFTA_LIMIT_TYPE] == NULL)
tb                221 net/netfilter/nft_limit.c 	switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
tb                250 net/netfilter/nft_limit.c 				   const struct nlattr * const tb[],
tb                256 net/netfilter/nft_limit.c 	err = nft_limit_init(&priv->limit, tb, true);
tb                294 net/netfilter/nft_limit.c 				    const struct nlattr * const tb[],
tb                299 net/netfilter/nft_limit.c 	return nft_limit_init(priv, tb, false);
tb                322 net/netfilter/nft_limit.c 			 const struct nlattr * const tb[])
tb                324 net/netfilter/nft_limit.c 	if (!tb[NFTA_LIMIT_TYPE])
tb                327 net/netfilter/nft_limit.c 	switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
tb                133 net/netfilter/nft_log.c 			const struct nlattr * const tb[])
tb                141 net/netfilter/nft_log.c 	if (tb[NFTA_LOG_LEVEL] != NULL &&
tb                142 net/netfilter/nft_log.c 	    tb[NFTA_LOG_GROUP] != NULL)
tb                144 net/netfilter/nft_log.c 	if (tb[NFTA_LOG_GROUP] != NULL) {
tb                146 net/netfilter/nft_log.c 		if (tb[NFTA_LOG_FLAGS] != NULL)
tb                150 net/netfilter/nft_log.c 	nla = tb[NFTA_LOG_PREFIX];
tb                162 net/netfilter/nft_log.c 		if (tb[NFTA_LOG_LEVEL] != NULL) {
tb                164 net/netfilter/nft_log.c 				ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
tb                173 net/netfilter/nft_log.c 		if (tb[NFTA_LOG_FLAGS] != NULL) {
tb                175 net/netfilter/nft_log.c 				ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
tb                183 net/netfilter/nft_log.c 		li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
tb                184 net/netfilter/nft_log.c 		if (tb[NFTA_LOG_SNAPLEN] != NULL) {
tb                187 net/netfilter/nft_log.c 				ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
tb                189 net/netfilter/nft_log.c 		if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
tb                191 net/netfilter/nft_log.c 				ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
tb                 59 net/netfilter/nft_lookup.c 			   const struct nlattr * const tb[])
tb                 67 net/netfilter/nft_lookup.c 	if (tb[NFTA_LOOKUP_SET] == NULL ||
tb                 68 net/netfilter/nft_lookup.c 	    tb[NFTA_LOOKUP_SREG] == NULL)
tb                 71 net/netfilter/nft_lookup.c 	set = nft_set_lookup_global(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],
tb                 72 net/netfilter/nft_lookup.c 				    tb[NFTA_LOOKUP_SET_ID], genmask);
tb                 76 net/netfilter/nft_lookup.c 	priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
tb                 81 net/netfilter/nft_lookup.c 	if (tb[NFTA_LOOKUP_FLAGS]) {
tb                 82 net/netfilter/nft_lookup.c 		flags = ntohl(nla_get_be32(tb[NFTA_LOOKUP_FLAGS]));
tb                 94 net/netfilter/nft_lookup.c 	if (tb[NFTA_LOOKUP_DREG] != NULL) {
tb                100 net/netfilter/nft_lookup.c 		priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]);
tb                 44 net/netfilter/nft_masq.c 			 const struct nlattr * const tb[])
tb                 50 net/netfilter/nft_masq.c 	if (tb[NFTA_MASQ_FLAGS]) {
tb                 51 net/netfilter/nft_masq.c 		priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));
tb                 56 net/netfilter/nft_masq.c 	if (tb[NFTA_MASQ_REG_PROTO_MIN]) {
tb                 58 net/netfilter/nft_masq.c 			nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MIN]);
tb                 64 net/netfilter/nft_masq.c 		if (tb[NFTA_MASQ_REG_PROTO_MAX]) {
tb                 66 net/netfilter/nft_masq.c 				nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MAX]);
tb                319 net/netfilter/nft_meta.c 		      const struct nlattr * const tb[])
tb                324 net/netfilter/nft_meta.c 	priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
tb                383 net/netfilter/nft_meta.c 	priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
tb                453 net/netfilter/nft_meta.c 		      const struct nlattr * const tb[])
tb                459 net/netfilter/nft_meta.c 	priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
tb                478 net/netfilter/nft_meta.c 	priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
tb                579 net/netfilter/nft_meta.c 		    const struct nlattr * const tb[])
tb                581 net/netfilter/nft_meta.c 	if (tb[NFTA_META_KEY] == NULL)
tb                584 net/netfilter/nft_meta.c 	if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
tb                591 net/netfilter/nft_meta.c 	if (tb[NFTA_META_DREG])
tb                594 net/netfilter/nft_meta.c 	if (tb[NFTA_META_SREG])
tb                648 net/netfilter/nft_meta.c 				const struct nlattr * const tb[],
tb                654 net/netfilter/nft_meta.c 	if (tb[NFTA_SECMARK_CTX] == NULL)
tb                657 net/netfilter/nft_meta.c 	priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
tb                112 net/netfilter/nft_nat.c 			const struct nlattr * const tb[])
tb                119 net/netfilter/nft_nat.c 	if (tb[NFTA_NAT_TYPE] == NULL ||
tb                120 net/netfilter/nft_nat.c 	    (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
tb                121 net/netfilter/nft_nat.c 	     tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
tb                124 net/netfilter/nft_nat.c 	switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
tb                135 net/netfilter/nft_nat.c 	if (tb[NFTA_NAT_FAMILY] == NULL)
tb                138 net/netfilter/nft_nat.c 	family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
tb                154 net/netfilter/nft_nat.c 	if (tb[NFTA_NAT_REG_ADDR_MIN]) {
tb                156 net/netfilter/nft_nat.c 			nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]);
tb                161 net/netfilter/nft_nat.c 		if (tb[NFTA_NAT_REG_ADDR_MAX]) {
tb                163 net/netfilter/nft_nat.c 				nft_parse_register(tb[NFTA_NAT_REG_ADDR_MAX]);
tb                175 net/netfilter/nft_nat.c 	if (tb[NFTA_NAT_REG_PROTO_MIN]) {
tb                177 net/netfilter/nft_nat.c 			nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
tb                183 net/netfilter/nft_nat.c 		if (tb[NFTA_NAT_REG_PROTO_MAX]) {
tb                185 net/netfilter/nft_nat.c 				nft_parse_register(tb[NFTA_NAT_REG_PROTO_MAX]);
tb                196 net/netfilter/nft_nat.c 	if (tb[NFTA_NAT_FLAGS]) {
tb                197 net/netfilter/nft_nat.c 		priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS]));
tb                 55 net/netfilter/nft_numgen.c 			   const struct nlattr * const tb[])
tb                 59 net/netfilter/nft_numgen.c 	if (tb[NFTA_NG_OFFSET])
tb                 60 net/netfilter/nft_numgen.c 		priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));
tb                 62 net/netfilter/nft_numgen.c 	priv->modulus = ntohl(nla_get_be32(tb[NFTA_NG_MODULUS]));
tb                 69 net/netfilter/nft_numgen.c 	priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
tb                127 net/netfilter/nft_numgen.c 			      const struct nlattr * const tb[])
tb                131 net/netfilter/nft_numgen.c 	if (tb[NFTA_NG_OFFSET])
tb                132 net/netfilter/nft_numgen.c 		priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));
tb                134 net/netfilter/nft_numgen.c 	priv->modulus = ntohl(nla_get_be32(tb[NFTA_NG_MODULUS]));
tb                143 net/netfilter/nft_numgen.c 	priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
tb                175 net/netfilter/nft_numgen.c nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
tb                179 net/netfilter/nft_numgen.c 	if (!tb[NFTA_NG_DREG]	 ||
tb                180 net/netfilter/nft_numgen.c 	    !tb[NFTA_NG_MODULUS] ||
tb                181 net/netfilter/nft_numgen.c 	    !tb[NFTA_NG_TYPE])
tb                184 net/netfilter/nft_numgen.c 	type = ntohl(nla_get_be32(tb[NFTA_NG_TYPE]));
tb                 27 net/netfilter/nft_objref.c 			   const struct nlattr * const tb[])
tb                 33 net/netfilter/nft_objref.c 	if (!tb[NFTA_OBJREF_IMM_NAME] ||
tb                 34 net/netfilter/nft_objref.c 	    !tb[NFTA_OBJREF_IMM_TYPE])
tb                 37 net/netfilter/nft_objref.c 	objtype = ntohl(nla_get_be32(tb[NFTA_OBJREF_IMM_TYPE]));
tb                 39 net/netfilter/nft_objref.c 			     tb[NFTA_OBJREF_IMM_NAME], objtype,
tb                124 net/netfilter/nft_objref.c 			       const struct nlattr * const tb[])
tb                132 net/netfilter/nft_objref.c 				    tb[NFTA_OBJREF_SET_NAME],
tb                133 net/netfilter/nft_objref.c 				    tb[NFTA_OBJREF_SET_ID], genmask);
tb                140 net/netfilter/nft_objref.c 	priv->sreg = nft_parse_register(tb[NFTA_OBJREF_SET_SREG]);
tb                208 net/netfilter/nft_objref.c                       const struct nlattr * const tb[])
tb                210 net/netfilter/nft_objref.c 	if (tb[NFTA_OBJREF_SET_SREG] &&
tb                211 net/netfilter/nft_objref.c 	    (tb[NFTA_OBJREF_SET_NAME] ||
tb                212 net/netfilter/nft_objref.c 	     tb[NFTA_OBJREF_SET_ID]))
tb                214 net/netfilter/nft_objref.c 	else if (tb[NFTA_OBJREF_IMM_NAME] &&
tb                215 net/netfilter/nft_objref.c 		 tb[NFTA_OBJREF_IMM_TYPE])
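The nft_limit, nft_meta, nft_numgen and nft_objref hits also illustrate the ->select_ops() pattern: one expression name maps to different ops structures depending on which attributes userspace supplied. A hedged sketch, reusing the NFTA_META_* register attributes shown above; the two ops tables are hypothetical:

static const struct nft_expr_ops example_get_ops;	/* hypothetical */
static const struct nft_expr_ops example_set_ops;	/* hypothetical */

static const struct nft_expr_ops *
example_select_ops(const struct nft_ctx *ctx,
		   const struct nlattr * const tb[])
{
	if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
		return ERR_PTR(-EINVAL);	/* mutually exclusive */
	if (tb[NFTA_META_DREG])
		return &example_get_ops;	/* "get" variant writes a register */
	if (tb[NFTA_META_SREG])
		return &example_set_ops;	/* "set" variant reads a register */
	return ERR_PTR(-EINVAL);
}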
tb                 57 net/netfilter/nft_osf.c 			const struct nlattr * const tb[])
tb                 64 net/netfilter/nft_osf.c 	if (!tb[NFTA_OSF_DREG])
tb                 67 net/netfilter/nft_osf.c 	if (tb[NFTA_OSF_TTL]) {
tb                 68 net/netfilter/nft_osf.c 		ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
tb                 74 net/netfilter/nft_osf.c 	if (tb[NFTA_OSF_FLAGS]) {
tb                 75 net/netfilter/nft_osf.c 		flags = ntohl(nla_get_be32(tb[NFTA_OSF_FLAGS]));
tb                 81 net/netfilter/nft_osf.c 	priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
tb                129 net/netfilter/nft_payload.c 			    const struct nlattr * const tb[])
tb                133 net/netfilter/nft_payload.c 	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
tb                134 net/netfilter/nft_payload.c 	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
tb                135 net/netfilter/nft_payload.c 	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
tb                136 net/netfilter/nft_payload.c 	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
tb                549 net/netfilter/nft_payload.c 				const struct nlattr * const tb[])
tb                553 net/netfilter/nft_payload.c 	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
tb                554 net/netfilter/nft_payload.c 	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
tb                555 net/netfilter/nft_payload.c 	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
tb                556 net/netfilter/nft_payload.c 	priv->sreg        = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
tb                558 net/netfilter/nft_payload.c 	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
tb                560 net/netfilter/nft_payload.c 			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
tb                561 net/netfilter/nft_payload.c 	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
tb                563 net/netfilter/nft_payload.c 			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
tb                564 net/netfilter/nft_payload.c 	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
tb                567 net/netfilter/nft_payload.c 		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
tb                614 net/netfilter/nft_payload.c 		       const struct nlattr * const tb[])
tb                619 net/netfilter/nft_payload.c 	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
tb                620 net/netfilter/nft_payload.c 	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
tb                621 net/netfilter/nft_payload.c 	    tb[NFTA_PAYLOAD_LEN] == NULL)
tb                624 net/netfilter/nft_payload.c 	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
tb                634 net/netfilter/nft_payload.c 	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
tb                635 net/netfilter/nft_payload.c 		if (tb[NFTA_PAYLOAD_DREG] != NULL)
tb                640 net/netfilter/nft_payload.c 	if (tb[NFTA_PAYLOAD_DREG] == NULL)
tb                643 net/netfilter/nft_payload.c 	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
tb                644 net/netfilter/nft_payload.c 	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
tb                 80 net/netfilter/nft_queue.c 			  const struct nlattr * const tb[])
tb                 85 net/netfilter/nft_queue.c 	priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
tb                 87 net/netfilter/nft_queue.c 	if (tb[NFTA_QUEUE_TOTAL])
tb                 88 net/netfilter/nft_queue.c 		priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL]));
tb                 99 net/netfilter/nft_queue.c 	if (tb[NFTA_QUEUE_FLAGS]) {
tb                100 net/netfilter/nft_queue.c 		priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
tb                109 net/netfilter/nft_queue.c 			       const struct nlattr * const tb[])
tb                114 net/netfilter/nft_queue.c 	priv->sreg_qnum = nft_parse_register(tb[NFTA_QUEUE_SREG_QNUM]);
tb                119 net/netfilter/nft_queue.c 	if (tb[NFTA_QUEUE_FLAGS]) {
tb                120 net/netfilter/nft_queue.c 		priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
tb                179 net/netfilter/nft_queue.c 		     const struct nlattr * const tb[])
tb                181 net/netfilter/nft_queue.c 	if (tb[NFTA_QUEUE_NUM] && tb[NFTA_QUEUE_SREG_QNUM])
tb                186 net/netfilter/nft_queue.c 	if (tb[NFTA_QUEUE_NUM])
tb                189 net/netfilter/nft_queue.c 	if (tb[NFTA_QUEUE_SREG_QNUM])
tb                 66 net/netfilter/nft_quota.c static int nft_quota_do_init(const struct nlattr * const tb[],
tb                 72 net/netfilter/nft_quota.c 	if (!tb[NFTA_QUOTA_BYTES])
tb                 75 net/netfilter/nft_quota.c 	quota = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_BYTES]));
tb                 79 net/netfilter/nft_quota.c 	if (tb[NFTA_QUOTA_CONSUMED]) {
tb                 80 net/netfilter/nft_quota.c 		consumed = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_CONSUMED]));
tb                 85 net/netfilter/nft_quota.c 	if (tb[NFTA_QUOTA_FLAGS]) {
tb                 86 net/netfilter/nft_quota.c 		flags = ntohl(nla_get_be32(tb[NFTA_QUOTA_FLAGS]));
tb                101 net/netfilter/nft_quota.c 			      const struct nlattr * const tb[],
tb                106 net/netfilter/nft_quota.c 	return nft_quota_do_init(tb, priv);
tb                194 net/netfilter/nft_quota.c 			  const struct nlattr * const tb[])
tb                198 net/netfilter/nft_quota.c 	return nft_quota_do_init(tb, priv);
tb                 51 net/netfilter/nft_range.c 			const struct nlattr * const tb[])
tb                 58 net/netfilter/nft_range.c 	if (!tb[NFTA_RANGE_SREG]      ||
tb                 59 net/netfilter/nft_range.c 	    !tb[NFTA_RANGE_OP]	      ||
tb                 60 net/netfilter/nft_range.c 	    !tb[NFTA_RANGE_FROM_DATA] ||
tb                 61 net/netfilter/nft_range.c 	    !tb[NFTA_RANGE_TO_DATA])
tb                 65 net/netfilter/nft_range.c 			    &desc_from, tb[NFTA_RANGE_FROM_DATA]);
tb                 75 net/netfilter/nft_range.c 			    &desc_to, tb[NFTA_RANGE_TO_DATA]);
tb                 89 net/netfilter/nft_range.c 	priv->sreg = nft_parse_register(tb[NFTA_RANGE_SREG]);
tb                 94 net/netfilter/nft_range.c 	err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
tb                 45 net/netfilter/nft_redir.c 			  const struct nlattr * const tb[])
tb                 52 net/netfilter/nft_redir.c 	if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
tb                 54 net/netfilter/nft_redir.c 			nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
tb                 60 net/netfilter/nft_redir.c 		if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
tb                 62 net/netfilter/nft_redir.c 				nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]);
tb                 73 net/netfilter/nft_redir.c 	if (tb[NFTA_REDIR_FLAGS]) {
tb                 74 net/netfilter/nft_redir.c 		priv->flags = ntohl(nla_get_be32(tb[NFTA_REDIR_FLAGS]));
tb                 39 net/netfilter/nft_reject.c 		    const struct nlattr * const tb[])
tb                 43 net/netfilter/nft_reject.c 	if (tb[NFTA_REJECT_TYPE] == NULL)
tb                 46 net/netfilter/nft_reject.c 	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
tb                 49 net/netfilter/nft_reject.c 		if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
tb                 51 net/netfilter/nft_reject.c 		priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
tb                 63 net/netfilter/nft_reject_inet.c 				const struct nlattr * const tb[])
tb                 68 net/netfilter/nft_reject_inet.c 	if (tb[NFTA_REJECT_TYPE] == NULL)
tb                 71 net/netfilter/nft_reject_inet.c 	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
tb                 75 net/netfilter/nft_reject_inet.c 		if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
tb                 78 net/netfilter/nft_reject_inet.c 		icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
tb                112 net/netfilter/nft_rt.c 			   const struct nlattr * const tb[])
tb                117 net/netfilter/nft_rt.c 	if (tb[NFTA_RT_KEY] == NULL ||
tb                118 net/netfilter/nft_rt.c 	    tb[NFTA_RT_DREG] == NULL)
tb                121 net/netfilter/nft_rt.c 	priv->key = ntohl(nla_get_be32(tb[NFTA_RT_KEY]));
tb                144 net/netfilter/nft_rt.c 	priv->dreg = nft_parse_register(tb[NFTA_RT_DREG]);
tb                361 net/netfilter/nft_set_hash.c 			  const struct nlattr * const tb[])
tb                604 net/netfilter/nft_set_hash.c 			 const struct nlattr * const tb[])
tb                 78 net/netfilter/nft_socket.c 			   const struct nlattr * const tb[])
tb                 83 net/netfilter/nft_socket.c 	if (!tb[NFTA_SOCKET_DREG] || !tb[NFTA_SOCKET_KEY])
tb                 97 net/netfilter/nft_socket.c 	priv->key = ntohl(nla_get_u32(tb[NFTA_SOCKET_KEY]));
tb                109 net/netfilter/nft_socket.c 	priv->dreg = nft_parse_register(tb[NFTA_SOCKET_DREG]);
tb                153 net/netfilter/nft_synproxy.c 				const struct nlattr * const tb[],
tb                160 net/netfilter/nft_synproxy.c 	if (tb[NFTA_SYNPROXY_MSS])
tb                161 net/netfilter/nft_synproxy.c 		priv->info.mss = ntohs(nla_get_be16(tb[NFTA_SYNPROXY_MSS]));
tb                162 net/netfilter/nft_synproxy.c 	if (tb[NFTA_SYNPROXY_WSCALE])
tb                163 net/netfilter/nft_synproxy.c 		priv->info.wscale = nla_get_u8(tb[NFTA_SYNPROXY_WSCALE]);
tb                164 net/netfilter/nft_synproxy.c 	if (tb[NFTA_SYNPROXY_FLAGS]) {
tb                165 net/netfilter/nft_synproxy.c 		flags = ntohl(nla_get_be32(tb[NFTA_SYNPROXY_FLAGS]));
tb                260 net/netfilter/nft_synproxy.c 			     const struct nlattr * const tb[])
tb                264 net/netfilter/nft_synproxy.c 	return nft_synproxy_do_init(ctx, tb, priv);
tb                300 net/netfilter/nft_synproxy.c 				 const struct nlattr * const tb[],
tb                305 net/netfilter/nft_synproxy.c 	return nft_synproxy_do_init(ctx, tb, priv);
tb                186 net/netfilter/nft_tproxy.c 			   const struct nlattr * const tb[])
tb                192 net/netfilter/nft_tproxy.c 	if (!tb[NFTA_TPROXY_FAMILY] ||
tb                193 net/netfilter/nft_tproxy.c 	    (!tb[NFTA_TPROXY_REG_ADDR] && !tb[NFTA_TPROXY_REG_PORT]))
tb                196 net/netfilter/nft_tproxy.c 	priv->family = ntohl(nla_get_be32(tb[NFTA_TPROXY_FAMILY]));
tb                216 net/netfilter/nft_tproxy.c 	if (priv->family == NFPROTO_UNSPEC && tb[NFTA_TPROXY_REG_ADDR])
tb                249 net/netfilter/nft_tproxy.c 	if (tb[NFTA_TPROXY_REG_ADDR]) {
tb                250 net/netfilter/nft_tproxy.c 		priv->sreg_addr = nft_parse_register(tb[NFTA_TPROXY_REG_ADDR]);
tb                256 net/netfilter/nft_tproxy.c 	if (tb[NFTA_TPROXY_REG_PORT]) {
tb                257 net/netfilter/nft_tproxy.c 		priv->sreg_port = nft_parse_register(tb[NFTA_TPROXY_REG_PORT]);
tb                 74 net/netfilter/nft_tunnel.c 			       const struct nlattr * const tb[])
tb                 79 net/netfilter/nft_tunnel.c 	if (!tb[NFTA_TUNNEL_KEY] ||
tb                 80 net/netfilter/nft_tunnel.c 	    !tb[NFTA_TUNNEL_DREG])
tb                 83 net/netfilter/nft_tunnel.c 	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
tb                 95 net/netfilter/nft_tunnel.c 	priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);
tb                 97 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_MODE]) {
tb                 98 net/netfilter/nft_tunnel.c 		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
tb                166 net/netfilter/nft_tunnel.c 	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
tb                169 net/netfilter/nft_tunnel.c 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
tb                174 net/netfilter/nft_tunnel.c 	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
tb                177 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
tb                178 net/netfilter/nft_tunnel.c 		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
tb                179 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_IP_DST])
tb                180 net/netfilter/nft_tunnel.c 		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
tb                195 net/netfilter/nft_tunnel.c 	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
tb                198 net/netfilter/nft_tunnel.c 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
tb                203 net/netfilter/nft_tunnel.c 	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
tb                206 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
tb                208 net/netfilter/nft_tunnel.c 		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
tb                211 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
tb                213 net/netfilter/nft_tunnel.c 		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
tb                216 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
tb                217 net/netfilter/nft_tunnel.c 		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
tb                231 net/netfilter/nft_tunnel.c 	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
tb                234 net/netfilter/nft_tunnel.c 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
tb                239 net/netfilter/nft_tunnel.c 	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
tb                242 net/netfilter/nft_tunnel.c 	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
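The nft_tunnel.c hits from here on show the nested-parse idiom: a local tb[] sized MAX + 1 is filled from a single enclosing attribute via nla_parse_nested_deprecated() before the inner fields are read. A minimal sketch using the NFTA_TUNNEL_KEY_VXLAN_* attributes shown above; the NULL policy (the real code passes a per-option nla_policy) and the function name are simplifications:

static int example_parse_vxlan(const struct nlattr *attr, u32 *gbp)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX,
					  attr, NULL, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])	/* required inner attribute */
		return -EINVAL;

	*gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
	return 0;
}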
tb                260 net/netfilter/nft_tunnel.c 	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
tb                264 net/netfilter/nft_tunnel.c 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
tb                270 net/netfilter/nft_tunnel.c 	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
tb                273 net/netfilter/nft_tunnel.c 	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
tb                276 net/netfilter/nft_tunnel.c 		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
tb                280 net/netfilter/nft_tunnel.c 			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
tb                283 net/netfilter/nft_tunnel.c 		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
tb                284 net/netfilter/nft_tunnel.c 		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
tb                287 net/netfilter/nft_tunnel.c 		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
tb                288 net/netfilter/nft_tunnel.c 		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
tb                314 net/netfilter/nft_tunnel.c 	struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1];
tb                317 net/netfilter/nft_tunnel.c 	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr,
tb                322 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) {
tb                323 net/netfilter/nft_tunnel.c 		err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN],
tb                325 net/netfilter/nft_tunnel.c 	} else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) {
tb                326 net/netfilter/nft_tunnel.c 		err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN],
tb                348 net/netfilter/nft_tunnel.c 			       const struct nlattr * const tb[],
tb                356 net/netfilter/nft_tunnel.c 	if (!tb[NFTA_TUNNEL_KEY_ID])
tb                361 net/netfilter/nft_tunnel.c 	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
tb                364 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_IP]) {
tb                365 net/netfilter/nft_tunnel.c 		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
tb                368 net/netfilter/nft_tunnel.c 	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
tb                369 net/netfilter/nft_tunnel.c 		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
tb                376 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
tb                377 net/netfilter/nft_tunnel.c 		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
tb                379 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
tb                380 net/netfilter/nft_tunnel.c 		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
tb                383 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
tb                386 net/netfilter/nft_tunnel.c 		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
tb                397 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_TOS])
tb                398 net/netfilter/nft_tunnel.c 		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
tb                399 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_TTL])
tb                400 net/netfilter/nft_tunnel.c 		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
tb                404 net/netfilter/nft_tunnel.c 	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
tb                405 net/netfilter/nft_tunnel.c 		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
tb                 34 net/netfilter/nft_xfrm.c 			     const struct nlattr * const tb[])
tb                 41 net/netfilter/nft_xfrm.c 	if (!tb[NFTA_XFRM_KEY] || !tb[NFTA_XFRM_DIR] || !tb[NFTA_XFRM_DREG])
tb                 53 net/netfilter/nft_xfrm.c 	priv->key = ntohl(nla_get_u32(tb[NFTA_XFRM_KEY]));
tb                 71 net/netfilter/nft_xfrm.c 	dir = nla_get_u8(tb[NFTA_XFRM_DIR]);
tb                 81 net/netfilter/nft_xfrm.c 	if (tb[NFTA_XFRM_SPNUM])
tb                 82 net/netfilter/nft_xfrm.c 		spnum = ntohl(nla_get_be32(tb[NFTA_XFRM_SPNUM]));
tb                 89 net/netfilter/nft_xfrm.c 	priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]);
tb                 55 net/phonet/pn_netlink.c 	struct nlattr *tb[IFA_MAX+1];
tb                 69 net/phonet/pn_netlink.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb                 75 net/phonet/pn_netlink.c 	if (tb[IFA_LOCAL] == NULL)
tb                 77 net/phonet/pn_netlink.c 	pnaddr = nla_get_u8(tb[IFA_LOCAL]);
tb                222 net/phonet/pn_netlink.c 	struct nlattr *tb[RTA_MAX+1];
tb                236 net/phonet/pn_netlink.c 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
tb                244 net/phonet/pn_netlink.c 	if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL)
tb                246 net/phonet/pn_netlink.c 	dst = nla_get_u8(tb[RTA_DST]);
tb                250 net/phonet/pn_netlink.c 	dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF]));
tb               1077 net/qrtr/qrtr.c 	struct nlattr *tb[IFA_MAX + 1];
tb               1089 net/qrtr/qrtr.c 	rc = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
tb               1095 net/qrtr/qrtr.c 	if (!tb[IFA_LOCAL])
tb               1098 net/qrtr/qrtr.c 	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
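The phonet and qrtr hits show the rtnetlink doit variant of the same table: nlmsg_parse_deprecated() skips the fixed family header (struct ifaddrmsg here) and fills tb[] up to IFA_MAX. A sketch under those assumptions; the NULL policy and the unused result are simplifications of what the real handlers do:

static int example_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct ifaddrmsg *ifm = nlmsg_data(nlh);
	struct nlattr *tb[IFA_MAX + 1];
	u32 local;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
				     NULL, extack);
	if (err < 0)
		return err;

	if (!tb[IFA_LOCAL])
		return -EINVAL;

	/* qrtr reads IFA_LOCAL as a u32 node id; phonet reads it as a u8 */
	local = nla_get_u32(tb[IFA_LOCAL]);
	(void)local;	/* sketch only: a real handler would apply it */
	return 0;
}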
tb                818 net/sched/act_api.c static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
tb                824 net/sched/act_api.c 	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
tb                829 net/sched/act_api.c 	c->len = nla_len(tb[TCA_ACT_COOKIE]);
tb                852 net/sched/act_api.c 	struct nlattr *tb[TCA_ACT_MAX + 1];
tb                857 net/sched/act_api.c 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
tb                862 net/sched/act_api.c 		kind = tb[TCA_ACT_KIND];
tb                871 net/sched/act_api.c 		if (tb[TCA_ACT_COOKIE]) {
tb                872 net/sched/act_api.c 			cookie = nla_memdup_cookie(tb);
tb                916 net/sched/act_api.c 		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
tb                924 net/sched/act_api.c 	if (!name && tb[TCA_ACT_COOKIE])
tb                960 net/sched/act_api.c 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
tb                966 net/sched/act_api.c 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
tb                971 net/sched/act_api.c 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
tb                972 net/sched/act_api.c 		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
tb               1096 net/sched/act_api.c 	struct nlattr *tb[TCA_ACT_MAX + 1];
tb               1102 net/sched/act_api.c 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
tb               1108 net/sched/act_api.c 	if (tb[TCA_ACT_INDEX] == NULL ||
tb               1109 net/sched/act_api.c 	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
tb               1113 net/sched/act_api.c 	index = nla_get_u32(tb[TCA_ACT_INDEX]);
tb               1116 net/sched/act_api.c 	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
tb               1146 net/sched/act_api.c 	struct nlattr *tb[TCA_ACT_MAX + 1];
tb               1157 net/sched/act_api.c 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
tb               1163 net/sched/act_api.c 	kind = tb[TCA_ACT_KIND];
tb               1282 net/sched/act_api.c 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
tb               1287 net/sched/act_api.c 	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
tb               1293 net/sched/act_api.c 		if (tb[1])
tb               1294 net/sched/act_api.c 			return tca_action_flush(net, tb[1], n, portid, extack);
tb               1300 net/sched/act_api.c 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
tb               1301 net/sched/act_api.c 		act = tcf_action_get_1(net, tb[i], n, portid, extack);
tb               1436 net/sched/act_api.c 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
tb               1443 net/sched/act_api.c 	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
tb               1446 net/sched/act_api.c 	if (tb[1] == NULL)
tb               1448 net/sched/act_api.c 	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
tb               1464 net/sched/act_api.c 	struct nlattr *tb[TCA_ROOT_MAX + 1];
tb               1472 net/sched/act_api.c 	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
tb               1477 net/sched/act_api.c 	kind = find_dump_kind(tb);
tb               1488 net/sched/act_api.c 	if (tb[TCA_ROOT_FLAGS]) {
tb               1489 net/sched/act_api.c 		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
tb               1493 net/sched/act_api.c 	if (tb[TCA_ROOT_TIME_DELTA]) {
tb               1494 net/sched/act_api.c 		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
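act_api.c is the hub for the net/sched entries that follow: each action arrives as a nested TLV in which TCA_ACT_KIND names the action and TCA_ACT_OPTIONS carries the attributes that the action's own ->init() re-parses into a fresh tb[]. A minimal sketch of the outer step; the function name is hypothetical and the NULL policy stands in for the real tcf_action_policy:

static int example_action_kind(struct nlattr *nla, char *kind,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  NULL, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ACT_KIND])		/* every action must be named */
		return -EINVAL;
	/* truncation check, same idiom as the act_ipt.c hit below */
	if (nla_strlcpy(kind, tb[TCA_ACT_KIND], IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	return 0;
}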
tb                183 net/sched/act_bpf.c static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
tb                191 net/sched/act_bpf.c 	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
tb                196 net/sched/act_bpf.c 	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
tb                199 net/sched/act_bpf.c 	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
tb                220 net/sched/act_bpf.c static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
tb                226 net/sched/act_bpf.c 	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
tb                232 net/sched/act_bpf.c 	if (tb[TCA_ACT_BPF_NAME]) {
tb                233 net/sched/act_bpf.c 		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
tb                281 net/sched/act_bpf.c 	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
tb                293 net/sched/act_bpf.c 	ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
tb                298 net/sched/act_bpf.c 	if (!tb[TCA_ACT_BPF_PARMS])
tb                301 net/sched/act_bpf.c 	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
tb                330 net/sched/act_bpf.c 	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
tb                331 net/sched/act_bpf.c 	is_ebpf = tb[TCA_ACT_BPF_FD];
tb                340 net/sched/act_bpf.c 	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
tb                341 net/sched/act_bpf.c 		       tcf_bpf_init_from_efd(tb, &cfg);
tb                101 net/sched/act_connmark.c 	struct nlattr *tb[TCA_CONNMARK_MAX + 1];
tb                111 net/sched/act_connmark.c 	ret = nla_parse_nested_deprecated(tb, TCA_CONNMARK_MAX, nla,
tb                116 net/sched/act_connmark.c 	if (!tb[TCA_CONNMARK_PARMS])
tb                119 net/sched/act_connmark.c 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
tb                 50 net/sched/act_csum.c 	struct nlattr *tb[TCA_CSUM_MAX + 1];
tb                 60 net/sched/act_csum.c 	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
tb                 65 net/sched/act_csum.c 	if (tb[TCA_CSUM_PARMS] == NULL)
tb                 67 net/sched/act_csum.c 	parm = nla_data(tb[TCA_CSUM_PARMS]);
tb                510 net/sched/act_ct.c 				  struct nlattr **tb,
tb                533 net/sched/act_ct.c 	if (tb[TCA_CT_NAT_IPV4_MIN]) {
tb                534 net/sched/act_ct.c 		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];
tb                539 net/sched/act_ct.c 			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);
tb                544 net/sched/act_ct.c 	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
tb                545 net/sched/act_ct.c 		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];
tb                550 net/sched/act_ct.c 			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);
tb                557 net/sched/act_ct.c 	if (tb[TCA_CT_NAT_PORT_MIN]) {
tb                559 net/sched/act_ct.c 		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);
tb                561 net/sched/act_ct.c 		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
tb                562 net/sched/act_ct.c 				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
tb                569 net/sched/act_ct.c static void tcf_ct_set_key_val(struct nlattr **tb,
tb                574 net/sched/act_ct.c 	if (!tb[val_type])
tb                576 net/sched/act_ct.c 	nla_memcpy(val, tb[val_type], len);
tb                581 net/sched/act_ct.c 	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
tb                584 net/sched/act_ct.c 		nla_memcpy(mask, tb[mask_type], len);
tb                590 net/sched/act_ct.c 			      struct nlattr **tb,
tb                600 net/sched/act_ct.c 	tcf_ct_set_key_val(tb,
tb                608 net/sched/act_ct.c 	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
tb                612 net/sched/act_ct.c 	if (tb[TCA_CT_MARK]) {
tb                617 net/sched/act_ct.c 		tcf_ct_set_key_val(tb,
tb                623 net/sched/act_ct.c 	if (tb[TCA_CT_LABELS]) {
tb                633 net/sched/act_ct.c 		tcf_ct_set_key_val(tb,
tb                639 net/sched/act_ct.c 	if (tb[TCA_CT_ZONE]) {
tb                645 net/sched/act_ct.c 		tcf_ct_set_key_val(tb,
tb                675 net/sched/act_ct.c 	struct nlattr *tb[TCA_CT_MAX + 1];
tb                687 net/sched/act_ct.c 	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
tb                691 net/sched/act_ct.c 	if (!tb[TCA_CT_PARMS]) {
tb                695 net/sched/act_ct.c 	parm = nla_data(tb[TCA_CT_PARMS]);
tb                730 net/sched/act_ct.c 	err = tcf_ct_fill_params(net, params, parm, tb, extack);
tb                161 net/sched/act_ctinfo.c 	struct nlattr *tb[TCA_CTINFO_MAX + 1];
tb                174 net/sched/act_ctinfo.c 	err = nla_parse_nested(tb, TCA_CTINFO_MAX, nla, ctinfo_policy, extack);
tb                178 net/sched/act_ctinfo.c 	if (!tb[TCA_CTINFO_ACT]) {
tb                183 net/sched/act_ctinfo.c 	actparm = nla_data(tb[TCA_CTINFO_ACT]);
tb                187 net/sched/act_ctinfo.c 	if (tb[TCA_CTINFO_PARMS_DSCP_MASK]) {
tb                188 net/sched/act_ctinfo.c 		dscpmask = nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_MASK]);
tb                193 net/sched/act_ctinfo.c 					    tb[TCA_CTINFO_PARMS_DSCP_MASK],
tb                197 net/sched/act_ctinfo.c 		dscpstatemask = tb[TCA_CTINFO_PARMS_DSCP_STATEMASK] ?
tb                198 net/sched/act_ctinfo.c 			nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK]) : 0;
tb                202 net/sched/act_ctinfo.c 					    tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
tb                243 net/sched/act_ctinfo.c 	cp_new->zone = tb[TCA_CTINFO_ZONE] ?
tb                244 net/sched/act_ctinfo.c 			nla_get_u16(tb[TCA_CTINFO_ZONE]) : 0;
tb                252 net/sched/act_ctinfo.c 	if (tb[TCA_CTINFO_PARMS_CPMARK_MASK]) {
tb                254 net/sched/act_ctinfo.c 				nla_get_u32(tb[TCA_CTINFO_PARMS_CPMARK_MASK]);
tb                 59 net/sched/act_gact.c 	struct nlattr *tb[TCA_GACT_MAX + 1];
tb                 73 net/sched/act_gact.c 	err = nla_parse_nested_deprecated(tb, TCA_GACT_MAX, nla, gact_policy,
tb                 78 net/sched/act_gact.c 	if (tb[TCA_GACT_PARMS] == NULL)
tb                 80 net/sched/act_gact.c 	parm = nla_data(tb[TCA_GACT_PARMS]);
tb                 84 net/sched/act_gact.c 	if (tb[TCA_GACT_PROB] != NULL)
tb                 87 net/sched/act_gact.c 	if (tb[TCA_GACT_PROB]) {
tb                 88 net/sched/act_gact.c 		p_parm = nla_data(tb[TCA_GACT_PROB]);
tb                439 net/sched/act_ife.c static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
tb                448 net/sched/act_ife.c 		if (tb[i]) {
tb                449 net/sched/act_ife.c 			val = nla_data(tb[i]);
tb                450 net/sched/act_ife.c 			len = nla_len(tb[i]);
tb                471 net/sched/act_ife.c 	struct nlattr *tb[TCA_IFE_MAX + 1];
tb                490 net/sched/act_ife.c 	err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
tb                495 net/sched/act_ife.c 	if (!tb[TCA_IFE_PARMS])
tb                498 net/sched/act_ife.c 	parm = nla_data(tb[TCA_IFE_PARMS]);
tb                549 net/sched/act_ife.c 		if (tb[TCA_IFE_TYPE])
tb                550 net/sched/act_ife.c 			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
tb                551 net/sched/act_ife.c 		if (tb[TCA_IFE_DMAC])
tb                552 net/sched/act_ife.c 			daddr = nla_data(tb[TCA_IFE_DMAC]);
tb                553 net/sched/act_ife.c 		if (tb[TCA_IFE_SMAC])
tb                554 net/sched/act_ife.c 			saddr = nla_data(tb[TCA_IFE_SMAC]);
tb                571 net/sched/act_ife.c 	if (tb[TCA_IFE_METALST]) {
tb                573 net/sched/act_ife.c 						  tb[TCA_IFE_METALST], NULL,
tb                101 net/sched/act_ipt.c 	struct nlattr *tb[TCA_IPT_MAX + 1];
tb                113 net/sched/act_ipt.c 	err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
tb                118 net/sched/act_ipt.c 	if (tb[TCA_IPT_INDEX] != NULL)
tb                119 net/sched/act_ipt.c 		index = nla_get_u32(tb[TCA_IPT_INDEX]);
tb                128 net/sched/act_ipt.c 	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
tb                136 net/sched/act_ipt.c 	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
tb                137 net/sched/act_ipt.c 	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
tb                162 net/sched/act_ipt.c 	hook = nla_get_u32(tb[TCA_IPT_HOOK]);
tb                168 net/sched/act_ipt.c 	if (tb[TCA_IPT_TABLE] == NULL ||
tb                169 net/sched/act_ipt.c 	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
tb                 99 net/sched/act_mirred.c 	struct nlattr *tb[TCA_MIRRED_MAX + 1];
tb                113 net/sched/act_mirred.c 	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
tb                117 net/sched/act_mirred.c 	if (!tb[TCA_MIRRED_PARMS]) {
tb                121 net/sched/act_mirred.c 	parm = nla_data(tb[TCA_MIRRED_PARMS]);
tb                140 net/sched/act_mpls.c 	struct nlattr *tb[TCA_MPLS_MAX + 1];
tb                155 net/sched/act_mpls.c 	err = nla_parse_nested(tb, TCA_MPLS_MAX, nla, mpls_policy, extack);
tb                159 net/sched/act_mpls.c 	if (!tb[TCA_MPLS_PARMS]) {
tb                163 net/sched/act_mpls.c 	parm = nla_data(tb[TCA_MPLS_PARMS]);
tb                169 net/sched/act_mpls.c 		if (!tb[TCA_MPLS_PROTO]) {
tb                173 net/sched/act_mpls.c 		if (!eth_proto_is_802_3(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
tb                177 net/sched/act_mpls.c 		if (tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] ||
tb                178 net/sched/act_mpls.c 		    tb[TCA_MPLS_BOS]) {
tb                184 net/sched/act_mpls.c 		if (tb[TCA_MPLS_PROTO] || tb[TCA_MPLS_LABEL] ||
tb                185 net/sched/act_mpls.c 		    tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) {
tb                191 net/sched/act_mpls.c 		if (!tb[TCA_MPLS_LABEL]) {
tb                195 net/sched/act_mpls.c 		if (tb[TCA_MPLS_PROTO] &&
tb                196 net/sched/act_mpls.c 		    !eth_p_mpls(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
tb                201 net/sched/act_mpls.c 		if (!tb[TCA_MPLS_TTL]) {
tb                211 net/sched/act_mpls.c 		if (tb[TCA_MPLS_PROTO]) {
tb                255 net/sched/act_mpls.c 	p->tcfm_label = tb[TCA_MPLS_LABEL] ? nla_get_u32(tb[TCA_MPLS_LABEL]) :
tb                257 net/sched/act_mpls.c 	p->tcfm_tc = tb[TCA_MPLS_TC] ? nla_get_u8(tb[TCA_MPLS_TC]) :
tb                259 net/sched/act_mpls.c 	p->tcfm_ttl = tb[TCA_MPLS_TTL] ? nla_get_u8(tb[TCA_MPLS_TTL]) :
tb                261 net/sched/act_mpls.c 	p->tcfm_bos = tb[TCA_MPLS_BOS] ? nla_get_u8(tb[TCA_MPLS_BOS]) :
tb                263 net/sched/act_mpls.c 	p->tcfm_proto = tb[TCA_MPLS_PROTO] ? nla_get_be16(tb[TCA_MPLS_PROTO]) :
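The act_mpls.c hits above (and act_police.c below) repeat a ternary default pattern: an optional attribute falls back to an explicit default when absent. Where that recurs, a tiny helper (hypothetical, not in the tree) states the intent once:

static u32 example_get_u32_default(struct nlattr **tb, int type, u32 def)
{
	return tb[type] ? nla_get_u32(tb[type]) : def;
}

/* usage sketch: rshift = example_get_u32_default(tb, TCA_FLOW_RSHIFT, 0); */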
tb                 42 net/sched/act_nat.c 	struct nlattr *tb[TCA_NAT_MAX + 1];
tb                 52 net/sched/act_nat.c 	err = nla_parse_nested_deprecated(tb, TCA_NAT_MAX, nla, nat_policy,
tb                 57 net/sched/act_nat.c 	if (tb[TCA_NAT_PARMS] == NULL)
tb                 59 net/sched/act_nat.c 	parm = nla_data(tb[TCA_NAT_PARMS]);
tb                 56 net/sched/act_pedit.c 		struct nlattr *tb[TCA_PEDIT_KEY_EX_MAX + 1];
tb                 69 net/sched/act_pedit.c 		err = nla_parse_nested_deprecated(tb, TCA_PEDIT_KEY_EX_MAX,
tb                 75 net/sched/act_pedit.c 		if (!tb[TCA_PEDIT_KEY_EX_HTYPE] ||
tb                 76 net/sched/act_pedit.c 		    !tb[TCA_PEDIT_KEY_EX_CMD]) {
tb                 81 net/sched/act_pedit.c 		k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]);
tb                 82 net/sched/act_pedit.c 		k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]);
tb                143 net/sched/act_pedit.c 	struct nlattr *tb[TCA_PEDIT_MAX + 1];
tb                159 net/sched/act_pedit.c 	err = nla_parse_nested_deprecated(tb, TCA_PEDIT_MAX, nla,
tb                164 net/sched/act_pedit.c 	pattr = tb[TCA_PEDIT_PARMS];
tb                166 net/sched/act_pedit.c 		pattr = tb[TCA_PEDIT_PARMS_EX];
tb                183 net/sched/act_pedit.c 	keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
tb                 54 net/sched/act_police.c 	struct nlattr *tb[TCA_POLICE_MAX + 1];
tb                 68 net/sched/act_police.c 	err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
tb                 73 net/sched/act_police.c 	if (tb[TCA_POLICE_TBF] == NULL)
tb                 75 net/sched/act_police.c 	size = nla_len(tb[TCA_POLICE_TBF]);
tb                 79 net/sched/act_police.c 	parm = nla_data(tb[TCA_POLICE_TBF]);
tb                108 net/sched/act_police.c 		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
tb                114 net/sched/act_police.c 					       tb[TCA_POLICE_PEAKRATE], NULL);
tb                128 net/sched/act_police.c 	} else if (tb[TCA_POLICE_AVRATE] &&
tb                135 net/sched/act_police.c 	if (tb[TCA_POLICE_RESULT]) {
tb                136 net/sched/act_police.c 		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
tb                161 net/sched/act_police.c 		rate64 = tb[TCA_POLICE_RATE64] ?
tb                162 net/sched/act_police.c 			 nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
tb                170 net/sched/act_police.c 		prate64 = tb[TCA_POLICE_PEAKRATE64] ?
tb                171 net/sched/act_police.c 			  nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
tb                183 net/sched/act_police.c 	if (tb[TCA_POLICE_AVRATE])
tb                184 net/sched/act_police.c 		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
tb                 42 net/sched/act_sample.c 	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
tb                 53 net/sched/act_sample.c 	ret = nla_parse_nested_deprecated(tb, TCA_SAMPLE_MAX, nla,
tb                 57 net/sched/act_sample.c 	if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
tb                 58 net/sched/act_sample.c 	    !tb[TCA_SAMPLE_PSAMPLE_GROUP])
tb                 61 net/sched/act_sample.c 	parm = nla_data(tb[TCA_SAMPLE_PARMS]);
tb                 86 net/sched/act_sample.c 	rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
tb                 92 net/sched/act_sample.c 	psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
tb                108 net/sched/act_sample.c 	if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
tb                110 net/sched/act_sample.c 		s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
tb                 92 net/sched/act_simple.c 	struct nlattr *tb[TCA_DEF_MAX + 1];
tb                103 net/sched/act_simple.c 	err = nla_parse_nested_deprecated(tb, TCA_DEF_MAX, nla, simple_policy,
tb                108 net/sched/act_simple.c 	if (tb[TCA_DEF_PARMS] == NULL)
tb                111 net/sched/act_simple.c 	parm = nla_data(tb[TCA_DEF_PARMS]);
tb                120 net/sched/act_simple.c 	if (tb[TCA_DEF_DATA] == NULL) {
tb                142 net/sched/act_simple.c 		err = alloc_defdata(d, tb[TCA_DEF_DATA]);
tb                154 net/sched/act_simple.c 		err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack);
tb                 94 net/sched/act_skbedit.c 	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
tb                107 net/sched/act_skbedit.c 	err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
tb                112 net/sched/act_skbedit.c 	if (tb[TCA_SKBEDIT_PARMS] == NULL)
tb                115 net/sched/act_skbedit.c 	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
tb                117 net/sched/act_skbedit.c 		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
tb                120 net/sched/act_skbedit.c 	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
tb                122 net/sched/act_skbedit.c 		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
tb                125 net/sched/act_skbedit.c 	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
tb                126 net/sched/act_skbedit.c 		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
tb                132 net/sched/act_skbedit.c 	if (tb[TCA_SKBEDIT_MARK] != NULL) {
tb                134 net/sched/act_skbedit.c 		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
tb                137 net/sched/act_skbedit.c 	if (tb[TCA_SKBEDIT_MASK] != NULL) {
tb                139 net/sched/act_skbedit.c 		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
tb                142 net/sched/act_skbedit.c 	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
tb                143 net/sched/act_skbedit.c 		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);
tb                149 net/sched/act_skbedit.c 	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
tb                 86 net/sched/act_skbmod.c 	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
tb                101 net/sched/act_skbmod.c 	err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla,
tb                106 net/sched/act_skbmod.c 	if (!tb[TCA_SKBMOD_PARMS])
tb                109 net/sched/act_skbmod.c 	if (tb[TCA_SKBMOD_DMAC]) {
tb                110 net/sched/act_skbmod.c 		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
tb                114 net/sched/act_skbmod.c 	if (tb[TCA_SKBMOD_SMAC]) {
tb                115 net/sched/act_skbmod.c 		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
tb                119 net/sched/act_skbmod.c 	if (tb[TCA_SKBMOD_ETYPE]) {
tb                120 net/sched/act_skbmod.c 		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
tb                124 net/sched/act_skbmod.c 	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
tb                 71 net/sched/act_tunnel_key.c 	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
tb                 75 net/sched/act_tunnel_key.c 	err = nla_parse_nested_deprecated(tb,
tb                 81 net/sched/act_tunnel_key.c 	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
tb                 82 net/sched/act_tunnel_key.c 	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
tb                 83 net/sched/act_tunnel_key.c 	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
tb                 88 net/sched/act_tunnel_key.c 	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
tb                 89 net/sched/act_tunnel_key.c 	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
tb                106 net/sched/act_tunnel_key.c 			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
tb                107 net/sched/act_tunnel_key.c 		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
tb                219 net/sched/act_tunnel_key.c 	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
tb                240 net/sched/act_tunnel_key.c 	err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
tb                247 net/sched/act_tunnel_key.c 	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
tb                252 net/sched/act_tunnel_key.c 	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
tb                265 net/sched/act_tunnel_key.c 		if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
tb                268 net/sched/act_tunnel_key.c 			key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
tb                274 net/sched/act_tunnel_key.c 		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
tb                275 net/sched/act_tunnel_key.c 		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
tb                278 net/sched/act_tunnel_key.c 		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
tb                279 net/sched/act_tunnel_key.c 			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
tb                281 net/sched/act_tunnel_key.c 		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
tb                282 net/sched/act_tunnel_key.c 			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
tb                291 net/sched/act_tunnel_key.c 		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
tb                292 net/sched/act_tunnel_key.c 			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
tb                294 net/sched/act_tunnel_key.c 		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
tb                295 net/sched/act_tunnel_key.c 			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
tb                297 net/sched/act_tunnel_key.c 		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
tb                298 net/sched/act_tunnel_key.c 		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
tb                302 net/sched/act_tunnel_key.c 			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
tb                303 net/sched/act_tunnel_key.c 			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
tb                308 net/sched/act_tunnel_key.c 		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
tb                309 net/sched/act_tunnel_key.c 			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
tb                313 net/sched/act_tunnel_key.c 			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
tb                314 net/sched/act_tunnel_key.c 			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
tb                338 net/sched/act_tunnel_key.c 			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
tb                108 net/sched/act_vlan.c 	struct nlattr *tb[TCA_VLAN_MAX + 1];
tb                124 net/sched/act_vlan.c 	err = nla_parse_nested_deprecated(tb, TCA_VLAN_MAX, nla, vlan_policy,
tb                129 net/sched/act_vlan.c 	if (!tb[TCA_VLAN_PARMS])
tb                131 net/sched/act_vlan.c 	parm = nla_data(tb[TCA_VLAN_PARMS]);
tb                145 net/sched/act_vlan.c 		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
tb                152 net/sched/act_vlan.c 		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
tb                161 net/sched/act_vlan.c 		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
tb                162 net/sched/act_vlan.c 			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
tb                178 net/sched/act_vlan.c 		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
tb                179 net/sched/act_vlan.c 			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
tb               3009 net/sched/cls_api.c int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
tb               3018 net/sched/cls_api.c 		if (exts->police && tb[exts->police]) {
tb               3019 net/sched/cls_api.c 			act = tcf_action_init_1(net, tp, tb[exts->police],
tb               3029 net/sched/cls_api.c 		} else if (exts->action && tb[exts->action]) {
tb               3032 net/sched/cls_api.c 			err = tcf_action_init(net, tp, tb[exts->action],
tb               3042 net/sched/cls_api.c 	if ((exts->action && tb[exts->action]) ||
tb               3043 net/sched/cls_api.c 	    (exts->police && tb[exts->police])) {
tb                147 net/sched/cls_basic.c 			   struct nlattr **tb,
tb                153 net/sched/cls_basic.c 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
tb                157 net/sched/cls_basic.c 	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
tb                161 net/sched/cls_basic.c 	if (tb[TCA_BASIC_CLASSID]) {
tb                162 net/sched/cls_basic.c 		f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]);
tb                177 net/sched/cls_basic.c 	struct nlattr *tb[TCA_BASIC_MAX + 1];
tb                184 net/sched/cls_basic.c 	err = nla_parse_nested_deprecated(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS],
tb                219 net/sched/cls_basic.c 	err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
tb                338 net/sched/cls_bpf.c static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
tb                346 net/sched/cls_bpf.c 	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
tb                351 net/sched/cls_bpf.c 	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
tb                354 net/sched/cls_bpf.c 	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
tb                375 net/sched/cls_bpf.c static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
tb                383 net/sched/cls_bpf.c 	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
tb                390 net/sched/cls_bpf.c 	if (tb[TCA_BPF_NAME]) {
tb                391 net/sched/cls_bpf.c 		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
tb                410 net/sched/cls_bpf.c 			     struct nlattr **tb, struct nlattr *est, bool ovr,
tb                417 net/sched/cls_bpf.c 	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
tb                418 net/sched/cls_bpf.c 	is_ebpf = tb[TCA_BPF_FD];
tb                422 net/sched/cls_bpf.c 	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
tb                427 net/sched/cls_bpf.c 	if (tb[TCA_BPF_FLAGS]) {
tb                428 net/sched/cls_bpf.c 		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
tb                435 net/sched/cls_bpf.c 	if (tb[TCA_BPF_FLAGS_GEN]) {
tb                436 net/sched/cls_bpf.c 		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
tb                445 net/sched/cls_bpf.c 	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
tb                446 net/sched/cls_bpf.c 		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
tb                450 net/sched/cls_bpf.c 	if (tb[TCA_BPF_CLASSID]) {
tb                451 net/sched/cls_bpf.c 		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
tb                466 net/sched/cls_bpf.c 	struct nlattr *tb[TCA_BPF_MAX + 1];
tb                473 net/sched/cls_bpf.c 	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
tb                506 net/sched/cls_bpf.c 	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
tb                 82 net/sched/cls_cgroup.c 	struct nlattr *tb[TCA_CGROUP_MAX + 1];
tb                105 net/sched/cls_cgroup.c 	err = nla_parse_nested_deprecated(tb, TCA_CGROUP_MAX,
tb                111 net/sched/cls_cgroup.c 	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr,
tb                116 net/sched/cls_cgroup.c 	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches);
tb                396 net/sched/cls_flow.c 	struct nlattr *tb[TCA_FLOW_MAX + 1];
tb                407 net/sched/cls_flow.c 	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
tb                412 net/sched/cls_flow.c 	if (tb[TCA_FLOW_BASECLASS]) {
tb                413 net/sched/cls_flow.c 		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
tb                418 net/sched/cls_flow.c 	if (tb[TCA_FLOW_KEYS]) {
tb                419 net/sched/cls_flow.c 		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);
tb                437 net/sched/cls_flow.c 	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
tb                445 net/sched/cls_flow.c 	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
tb                471 net/sched/cls_flow.c 		if (tb[TCA_FLOW_MODE])
tb                472 net/sched/cls_flow.c 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
tb                478 net/sched/cls_flow.c 		if (tb[TCA_FLOW_PERTURB]) {
tb                481 net/sched/cls_flow.c 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
tb                487 net/sched/cls_flow.c 		if (!tb[TCA_FLOW_KEYS])
tb                491 net/sched/cls_flow.c 		if (tb[TCA_FLOW_MODE])
tb                492 net/sched/cls_flow.c 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
tb                496 net/sched/cls_flow.c 		if (tb[TCA_FLOW_PERTURB]) {
tb                499 net/sched/cls_flow.c 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
tb                520 net/sched/cls_flow.c 	if (tb[TCA_FLOW_KEYS]) {
tb                527 net/sched/cls_flow.c 	if (tb[TCA_FLOW_MASK])
tb                528 net/sched/cls_flow.c 		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
tb                529 net/sched/cls_flow.c 	if (tb[TCA_FLOW_XOR])
tb                530 net/sched/cls_flow.c 		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
tb                531 net/sched/cls_flow.c 	if (tb[TCA_FLOW_RSHIFT])
tb                532 net/sched/cls_flow.c 		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
tb                533 net/sched/cls_flow.c 	if (tb[TCA_FLOW_ADDEND])
tb                534 net/sched/cls_flow.c 		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
tb                536 net/sched/cls_flow.c 	if (tb[TCA_FLOW_DIVISOR])
tb                537 net/sched/cls_flow.c 		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
tb                709 net/sched/cls_flower.c static void fl_set_key_val(struct nlattr **tb,
tb                713 net/sched/cls_flower.c 	if (!tb[val_type])
tb                715 net/sched/cls_flower.c 	nla_memcpy(val, tb[val_type], len);
tb                716 net/sched/cls_flower.c 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
tb                719 net/sched/cls_flower.c 		nla_memcpy(mask, tb[mask_type], len);
tb                722 net/sched/cls_flower.c static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
tb                725 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
tb                728 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
tb                731 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
tb                734 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
tb                749 net/sched/cls_flower.c static int fl_set_key_mpls(struct nlattr **tb,
tb                753 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
tb                754 net/sched/cls_flower.c 		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
tb                757 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
tb                758 net/sched/cls_flower.c 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
tb                765 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
tb                766 net/sched/cls_flower.c 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
tb                773 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
tb                774 net/sched/cls_flower.c 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
tb                784 net/sched/cls_flower.c static void fl_set_key_vlan(struct nlattr **tb,
tb                792 net/sched/cls_flower.c 	if (tb[vlan_id_key]) {
tb                794 net/sched/cls_flower.c 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
tb                797 net/sched/cls_flower.c 	if (tb[vlan_prio_key]) {
tb                799 net/sched/cls_flower.c 			nla_get_u8(tb[vlan_prio_key]) &
tb                818 net/sched/cls_flower.c static int fl_set_key_flags(struct nlattr **tb,
tb                824 net/sched/cls_flower.c 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
tb                827 net/sched/cls_flower.c 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
tb                828 net/sched/cls_flower.c 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
tb                842 net/sched/cls_flower.c static void fl_set_key_ip(struct nlattr **tb, bool encap,
tb                851 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
tb                852 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
tb                859 net/sched/cls_flower.c 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
tb                883 net/sched/cls_flower.c 	err = nla_parse_nested_deprecated(tb,
tb                893 net/sched/cls_flower.c 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
tb                894 net/sched/cls_flower.c 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
tb                895 net/sched/cls_flower.c 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
tb                903 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
tb                906 net/sched/cls_flower.c 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
tb                927 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
tb                928 net/sched/cls_flower.c 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
tb                932 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
tb                933 net/sched/cls_flower.c 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
tb                940 net/sched/cls_flower.c static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
tb                947 net/sched/cls_flower.c 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
tb                953 net/sched/cls_flower.c 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
tb                955 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
tb                956 net/sched/cls_flower.c 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
tb                962 net/sched/cls_flower.c 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
tb                963 net/sched/cls_flower.c 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
tb                967 net/sched/cls_flower.c 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
tb               1007 net/sched/cls_flower.c static int fl_set_key_ct(struct nlattr **tb,
tb               1012 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
tb               1017 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
tb               1021 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
tb               1026 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
tb               1030 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
tb               1035 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
tb               1039 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
tb               1044 net/sched/cls_flower.c 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
tb               1052 net/sched/cls_flower.c static int fl_set_key(struct net *net, struct nlattr **tb,
tb               1059 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_INDEV]) {
tb               1060 net/sched/cls_flower.c 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
tb               1067 net/sched/cls_flower.c 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
tb               1070 net/sched/cls_flower.c 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
tb               1074 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
tb               1075 net/sched/cls_flower.c 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
tb               1078 net/sched/cls_flower.c 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
tb               1082 net/sched/cls_flower.c 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
tb               1083 net/sched/cls_flower.c 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
tb               1085 net/sched/cls_flower.c 					fl_set_key_vlan(tb, ethertype,
tb               1089 net/sched/cls_flower.c 					fl_set_key_val(tb, &key->basic.n_proto,
tb               1107 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
tb               1110 net/sched/cls_flower.c 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
tb               1113 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
tb               1116 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
tb               1119 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
tb               1122 net/sched/cls_flower.c 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
tb               1125 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
tb               1128 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
tb               1134 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
tb               1137 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
tb               1140 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
tb               1144 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
tb               1147 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
tb               1151 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
tb               1154 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
tb               1159 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
tb               1163 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
tb               1169 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
tb               1173 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
tb               1179 net/sched/cls_flower.c 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
tb               1184 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
tb               1187 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
tb               1190 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
tb               1193 net/sched/cls_flower.c 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
tb               1196 net/sched/cls_flower.c 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
tb               1204 net/sched/cls_flower.c 		ret = fl_set_key_port_range(tb, key, mask);
tb               1209 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
tb               1210 net/sched/cls_flower.c 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
tb               1213 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->enc_ipv4.src,
tb               1218 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->enc_ipv4.dst,
tb               1225 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
tb               1226 net/sched/cls_flower.c 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
tb               1229 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->enc_ipv6.src,
tb               1234 net/sched/cls_flower.c 		fl_set_key_val(tb, &key->enc_ipv6.dst,
tb               1241 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
tb               1245 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
tb               1249 net/sched/cls_flower.c 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
tb               1253 net/sched/cls_flower.c 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
tb               1255 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
tb               1256 net/sched/cls_flower.c 		ret = fl_set_enc_opt(tb, key, mask, extack);
tb               1261 net/sched/cls_flower.c 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
tb               1265 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_KEY_FLAGS])
tb               1266 net/sched/cls_flower.c 		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
tb               1470 net/sched/cls_flower.c 			unsigned long base, struct nlattr **tb,
tb               1477 net/sched/cls_flower.c 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
tb               1482 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_CLASSID]) {
tb               1483 net/sched/cls_flower.c 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
tb               1491 net/sched/cls_flower.c 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
tb               1538 net/sched/cls_flower.c 	struct nlattr **tb;
tb               1553 net/sched/cls_flower.c 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
tb               1554 net/sched/cls_flower.c 	if (!tb) {
tb               1559 net/sched/cls_flower.c 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
tb               1581 net/sched/cls_flower.c 	if (tb[TCA_FLOWER_FLAGS]) {
tb               1582 net/sched/cls_flower.c 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
tb               1590 net/sched/cls_flower.c 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
tb               1692 net/sched/cls_flower.c 	kfree(tb);
tb               1711 net/sched/cls_flower.c 	kfree(tb);
tb               1915 net/sched/cls_flower.c 	struct nlattr **tb;
tb               1921 net/sched/cls_flower.c 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
tb               1922 net/sched/cls_flower.c 	if (!tb)
tb               1924 net/sched/cls_flower.c 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
tb               1935 net/sched/cls_flower.c 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
tb               1945 net/sched/cls_flower.c 	kfree(tb);
tb               1951 net/sched/cls_flower.c 	kfree(tb);
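
fl_set_key_val() above copies a key value and, when present, its mask; when the mask attribute is absent, the branch not quoted here (it does not mention tb) fills the mask with 0xff bytes in the kernel, i.e. the value becomes an exact-match key. A hedged stand-alone sketch of that value/mask convention, with illustrative names in place of the flower helper:

/* Toy version of the flower value/mask copy: no mask attribute means
 * all-ones mask, i.e. exact match on the value. set_key_val and struct
 * attr are illustrative; the real helper is fl_set_key_val(). */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct attr { const void *data; size_t len; };

static void set_key_val(const struct attr *val_attr,
			const struct attr *mask_attr,
			void *val, void *mask, size_t len)
{
	if (!val_attr)
		return;
	memcpy(val, val_attr->data, len);
	if (!mask_attr)
		memset(mask, 0xff, len);	/* exact match */
	else
		memcpy(mask, mask_attr->data, len);
}

int main(void)
{
	uint8_t dst[6] = {0}, dst_mask[6] = {0};
	uint8_t mac[6] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
	struct attr val = { mac, sizeof(mac) };

	set_key_val(&val, NULL, dst, dst_mask, sizeof(dst));
	printf("mask[0]=%#x\n", (unsigned)dst_mask[0]);	/* 0xff */
	return 0;
}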
tb                200 net/sched/cls_fw.c 			struct fw_filter *f, struct nlattr **tb,
tb                208 net/sched/cls_fw.c 	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr,
tb                213 net/sched/cls_fw.c 	if (tb[TCA_FW_CLASSID]) {
tb                214 net/sched/cls_fw.c 		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
tb                218 net/sched/cls_fw.c 	if (tb[TCA_FW_INDEV]) {
tb                220 net/sched/cls_fw.c 		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
tb                227 net/sched/cls_fw.c 	if (tb[TCA_FW_MASK]) {
tb                228 net/sched/cls_fw.c 		mask = nla_get_u32(tb[TCA_FW_MASK]);
tb                246 net/sched/cls_fw.c 	struct nlattr *tb[TCA_FW_MAX + 1];
tb                252 net/sched/cls_fw.c 	err = nla_parse_nested_deprecated(tb, TCA_FW_MAX, opt, fw_policy,
tb                280 net/sched/cls_fw.c 		err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr, extack);
tb                308 net/sched/cls_fw.c 		if (tb[TCA_FW_MASK])
tb                309 net/sched/cls_fw.c 			mask = nla_get_u32(tb[TCA_FW_MASK]);
tb                329 net/sched/cls_fw.c 	err = fw_set_parms(net, tp, f, tb, tca, base, ovr, extack);
tb                165 net/sched/cls_matchall.c 			  unsigned long base, struct nlattr **tb,
tb                171 net/sched/cls_matchall.c 	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
tb                176 net/sched/cls_matchall.c 	if (tb[TCA_MATCHALL_CLASSID]) {
tb                177 net/sched/cls_matchall.c 		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
tb                190 net/sched/cls_matchall.c 	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
tb                201 net/sched/cls_matchall.c 	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
tb                206 net/sched/cls_matchall.c 	if (tb[TCA_MATCHALL_FLAGS]) {
tb                207 net/sched/cls_matchall.c 		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
tb                230 net/sched/cls_matchall.c 	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
tb                384 net/sched/cls_route.c 			    struct nlattr **tb, struct nlattr *est, int new,
tb                393 net/sched/cls_route.c 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
tb                397 net/sched/cls_route.c 	if (tb[TCA_ROUTE4_TO]) {
tb                400 net/sched/cls_route.c 		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
tb                406 net/sched/cls_route.c 	if (tb[TCA_ROUTE4_FROM]) {
tb                407 net/sched/cls_route.c 		if (tb[TCA_ROUTE4_IIF])
tb                409 net/sched/cls_route.c 		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
tb                413 net/sched/cls_route.c 	} else if (tb[TCA_ROUTE4_IIF]) {
tb                414 net/sched/cls_route.c 		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
tb                445 net/sched/cls_route.c 	if (tb[TCA_ROUTE4_TO])
tb                448 net/sched/cls_route.c 	if (tb[TCA_ROUTE4_FROM])
tb                450 net/sched/cls_route.c 	else if (tb[TCA_ROUTE4_IIF])
tb                457 net/sched/cls_route.c 	if (tb[TCA_ROUTE4_CLASSID]) {
tb                458 net/sched/cls_route.c 		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
tb                475 net/sched/cls_route.c 	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
tb                483 net/sched/cls_route.c 	err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
tb                512 net/sched/cls_route.c 	err = route4_set_parms(net, tp, base, f, handle, head, tb,
tb                485 net/sched/cls_rsvp.h 	struct nlattr *tb[TCA_RSVP_MAX + 1];
tb                494 net/sched/cls_rsvp.h 	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
tb                502 net/sched/cls_rsvp.h 	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
tb                528 net/sched/cls_rsvp.h 		if (tb[TCA_RSVP_CLASSID]) {
tb                529 net/sched/cls_rsvp.h 			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
tb                542 net/sched/cls_rsvp.h 	if (tb[TCA_RSVP_DST] == NULL)
tb                554 net/sched/cls_rsvp.h 	if (tb[TCA_RSVP_SRC]) {
tb                555 net/sched/cls_rsvp.h 		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
tb                558 net/sched/cls_rsvp.h 	if (tb[TCA_RSVP_PINFO]) {
tb                559 net/sched/cls_rsvp.h 		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
tb                563 net/sched/cls_rsvp.h 	if (tb[TCA_RSVP_CLASSID])
tb                564 net/sched/cls_rsvp.h 		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
tb                566 net/sched/cls_rsvp.h 	dst = nla_data(tb[TCA_RSVP_DST]);
tb                329 net/sched/cls_tcindex.c 		  struct tcindex_filter_result *r, struct nlattr **tb,
tb                342 net/sched/cls_tcindex.c 	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
tb                363 net/sched/cls_tcindex.c 	if (tb[TCA_TCINDEX_HASH])
tb                364 net/sched/cls_tcindex.c 		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
tb                366 net/sched/cls_tcindex.c 	if (tb[TCA_TCINDEX_MASK])
tb                367 net/sched/cls_tcindex.c 		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
tb                369 net/sched/cls_tcindex.c 	if (tb[TCA_TCINDEX_SHIFT])
tb                370 net/sched/cls_tcindex.c 		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
tb                414 net/sched/cls_tcindex.c 	if (tb[TCA_TCINDEX_FALL_THROUGH])
tb                415 net/sched/cls_tcindex.c 		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
tb                469 net/sched/cls_tcindex.c 	if (tb[TCA_TCINDEX_CLASSID]) {
tb                470 net/sched/cls_tcindex.c 		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
tb                529 net/sched/cls_tcindex.c 	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
tb                541 net/sched/cls_tcindex.c 	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
tb                546 net/sched/cls_tcindex.c 	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
tb                711 net/sched/cls_u32.c 			 struct tc_u_knode *n, struct nlattr **tb,
tb                717 net/sched/cls_u32.c 	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
tb                721 net/sched/cls_u32.c 	if (tb[TCA_U32_LINK]) {
tb                722 net/sched/cls_u32.c 		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
tb                750 net/sched/cls_u32.c 	if (tb[TCA_U32_CLASSID]) {
tb                751 net/sched/cls_u32.c 		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
tb                755 net/sched/cls_u32.c 	if (tb[TCA_U32_INDEV]) {
tb                757 net/sched/cls_u32.c 		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
tb                853 net/sched/cls_u32.c 	struct nlattr *tb[TCA_U32_MAX + 1];
tb                870 net/sched/cls_u32.c 	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
tb                875 net/sched/cls_u32.c 	if (tb[TCA_U32_FLAGS]) {
tb                876 net/sched/cls_u32.c 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
tb                902 net/sched/cls_u32.c 		err = u32_set_parms(net, tp, base, new, tb,
tb                926 net/sched/cls_u32.c 	if (tb[TCA_U32_DIVISOR]) {
tb                927 net/sched/cls_u32.c 		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
tb                979 net/sched/cls_u32.c 	if (tb[TCA_U32_HASH]) {
tb                980 net/sched/cls_u32.c 		htid = nla_get_u32(tb[TCA_U32_HASH]);
tb               1014 net/sched/cls_u32.c 	if (tb[TCA_U32_SEL] == NULL) {
tb               1020 net/sched/cls_u32.c 	s = nla_data(tb[TCA_U32_SEL]);
tb               1022 net/sched/cls_u32.c 	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
tb               1059 net/sched/cls_u32.c 	if (tb[TCA_U32_MARK]) {
tb               1062 net/sched/cls_u32.c 		mark = nla_data(tb[TCA_U32_MARK]);
tb               1068 net/sched/cls_u32.c 	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
tb                 30 net/sched/em_ipt.c 	int (*validate_match_data)(struct nlattr **tb, u8 mrev);
tb                 60 net/sched/em_ipt.c static int policy_validate_match_data(struct nlattr **tb, u8 mrev)
tb                 67 net/sched/em_ipt.c 	if (nla_get_u32(tb[TCA_EM_IPT_HOOK]) != NF_INET_PRE_ROUTING) {
tb                 75 net/sched/em_ipt.c static int addrtype_validate_match_data(struct nlattr **tb, u8 mrev)
tb                 97 net/sched/em_ipt.c static struct xt_match *get_xt_match(struct nlattr **tb)
tb                104 net/sched/em_ipt.c 	mname_attr = tb[TCA_EM_IPT_MATCH_NAME];
tb                115 net/sched/em_ipt.c 	if (tb[TCA_EM_IPT_MATCH_REVISION])
tb                116 net/sched/em_ipt.c 		mrev = nla_get_u8(tb[TCA_EM_IPT_MATCH_REVISION]);
tb                118 net/sched/em_ipt.c 	ret = m->validate_match_data(tb, mrev);
tb                122 net/sched/em_ipt.c 	nfproto = nla_get_u8(tb[TCA_EM_IPT_NFPROTO]);
tb                129 net/sched/em_ipt.c 	struct nlattr *tb[TCA_EM_IPT_MAX + 1];
tb                135 net/sched/em_ipt.c 	ret = nla_parse_deprecated(tb, TCA_EM_IPT_MAX, data, data_len,
tb                140 net/sched/em_ipt.c 	if (!tb[TCA_EM_IPT_HOOK] || !tb[TCA_EM_IPT_MATCH_NAME] ||
tb                141 net/sched/em_ipt.c 	    !tb[TCA_EM_IPT_MATCH_DATA] || !tb[TCA_EM_IPT_NFPROTO])
tb                144 net/sched/em_ipt.c 	nfproto = nla_get_u8(tb[TCA_EM_IPT_NFPROTO]);
tb                153 net/sched/em_ipt.c 	match = get_xt_match(tb);
tb                159 net/sched/em_ipt.c 	mdata_len = XT_ALIGN(nla_len(tb[TCA_EM_IPT_MATCH_DATA]));
tb                167 net/sched/em_ipt.c 	im->hook = nla_get_u32(tb[TCA_EM_IPT_HOOK]);
tb                169 net/sched/em_ipt.c 	nla_memcpy(im->match_data, tb[TCA_EM_IPT_MATCH_DATA], mdata_len);
tb                907 net/sched/em_meta.c 	struct nlattr *tb[TCA_EM_META_MAX + 1];
tb                911 net/sched/em_meta.c 	err = nla_parse_deprecated(tb, TCA_EM_META_MAX, data, len,
tb                917 net/sched/em_meta.c 	if (tb[TCA_EM_META_HDR] == NULL)
tb                919 net/sched/em_meta.c 	hdr = nla_data(tb[TCA_EM_META_HDR]);
tb                942 net/sched/em_meta.c 	if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
tb                943 net/sched/em_meta.c 	    meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
tb                307 net/sched/ematch.c 	struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1];
tb                316 net/sched/ematch.c 	err = nla_parse_nested_deprecated(tb, TCA_EMATCH_TREE_MAX, nla,
tb                322 net/sched/ematch.c 	rt_hdr = tb[TCA_EMATCH_TREE_HDR];
tb                323 net/sched/ematch.c 	rt_list = tb[TCA_EMATCH_TREE_LIST];
tb                471 net/sched/sch_api.c 	struct nlattr *tb[TCA_STAB_MAX + 1];
tb                478 net/sched/sch_api.c 	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
tb                482 net/sched/sch_api.c 	if (!tb[TCA_STAB_BASE]) {
tb                487 net/sched/sch_api.c 	s = nla_data(tb[TCA_STAB_BASE]);
tb                490 net/sched/sch_api.c 		if (!tb[TCA_STAB_DATA]) {
tb                494 net/sched/sch_api.c 		tab = nla_data(tb[TCA_STAB_DATA]);
tb                495 net/sched/sch_api.c 		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
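
The sch_api.c entries above show the size-table parse: TCA_STAB_BASE is mandatory, and when the base declares a nonzero table size, TCA_STAB_DATA must also be present, with the entry count derived from the attribute length divided by sizeof(u16). A stand-alone sketch under toy names; the consistency check between the declared and derived sizes is an assumption about the unquoted surrounding code, stated here as a comment:

/* Toy model of the size-table parse in qdisc_get_stab(). struct stab_base
 * stands in for the real tc_sizespec; names are illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct stab_base { uint16_t tsize; };

static int get_stab(const struct stab_base *base,
		    const uint16_t *data, size_t data_len)
{
	size_t tsize;

	if (!base)
		return -1;		/* TCA_STAB_BASE is mandatory */
	if (base->tsize > 0) {
		if (!data)
			return -1;	/* TCA_STAB_DATA then required too */
		tsize = data_len / sizeof(uint16_t);
		if (tsize != base->tsize)
			return -1;	/* declared vs. derived size */
	}
	return 0;
}

int main(void)
{
	struct stab_base base = { .tsize = 2 };
	uint16_t table[2] = { 64, 128 };

	printf("%d\n", get_stab(&base, table, sizeof(table)));
	return 0;
}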
tb                203 net/sched/sch_atm.c 	struct nlattr *tb[TCA_ATM_MAX + 1];
tb                227 net/sched/sch_atm.c 	error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
tb                232 net/sched/sch_atm.c 	if (!tb[TCA_ATM_FD])
tb                234 net/sched/sch_atm.c 	fd = nla_get_u32(tb[TCA_ATM_FD]);
tb                236 net/sched/sch_atm.c 	if (tb[TCA_ATM_HDR]) {
tb                237 net/sched/sch_atm.c 		hdr_len = nla_len(tb[TCA_ATM_HDR]);
tb                238 net/sched/sch_atm.c 		hdr = nla_data(tb[TCA_ATM_HDR]);
tb                243 net/sched/sch_atm.c 	if (!tb[TCA_ATM_EXCESS])
tb                247 net/sched/sch_atm.c 			atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
tb               2529 net/sched/sch_cake.c 	struct nlattr *tb[TCA_CAKE_MAX + 1];
tb               2535 net/sched/sch_cake.c 	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
tb               2540 net/sched/sch_cake.c 	if (tb[TCA_CAKE_NAT]) {
tb               2544 net/sched/sch_cake.c 			!!nla_get_u32(tb[TCA_CAKE_NAT]);
tb               2546 net/sched/sch_cake.c 		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
tb               2552 net/sched/sch_cake.c 	if (tb[TCA_CAKE_BASE_RATE64])
tb               2553 net/sched/sch_cake.c 		q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
tb               2555 net/sched/sch_cake.c 	if (tb[TCA_CAKE_DIFFSERV_MODE])
tb               2556 net/sched/sch_cake.c 		q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
tb               2558 net/sched/sch_cake.c 	if (tb[TCA_CAKE_WASH]) {
tb               2559 net/sched/sch_cake.c 		if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
tb               2565 net/sched/sch_cake.c 	if (tb[TCA_CAKE_FLOW_MODE])
tb               2567 net/sched/sch_cake.c 				(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
tb               2570 net/sched/sch_cake.c 	if (tb[TCA_CAKE_ATM])
tb               2571 net/sched/sch_cake.c 		q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
tb               2573 net/sched/sch_cake.c 	if (tb[TCA_CAKE_OVERHEAD]) {
tb               2574 net/sched/sch_cake.c 		q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
tb               2583 net/sched/sch_cake.c 	if (tb[TCA_CAKE_RAW]) {
tb               2592 net/sched/sch_cake.c 	if (tb[TCA_CAKE_MPU])
tb               2593 net/sched/sch_cake.c 		q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
tb               2595 net/sched/sch_cake.c 	if (tb[TCA_CAKE_RTT]) {
tb               2596 net/sched/sch_cake.c 		q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
tb               2602 net/sched/sch_cake.c 	if (tb[TCA_CAKE_TARGET]) {
tb               2603 net/sched/sch_cake.c 		q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
tb               2609 net/sched/sch_cake.c 	if (tb[TCA_CAKE_AUTORATE]) {
tb               2610 net/sched/sch_cake.c 		if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
tb               2616 net/sched/sch_cake.c 	if (tb[TCA_CAKE_INGRESS]) {
tb               2617 net/sched/sch_cake.c 		if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
tb               2623 net/sched/sch_cake.c 	if (tb[TCA_CAKE_ACK_FILTER])
tb               2624 net/sched/sch_cake.c 		q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
tb               2626 net/sched/sch_cake.c 	if (tb[TCA_CAKE_MEMORY])
tb               2627 net/sched/sch_cake.c 		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
tb               2629 net/sched/sch_cake.c 	if (tb[TCA_CAKE_SPLIT_GSO]) {
tb               2630 net/sched/sch_cake.c 		if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
tb               2636 net/sched/sch_cake.c 	if (tb[TCA_CAKE_FWMARK]) {
tb               2637 net/sched/sch_cake.c 		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
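
Several sch_cake.c entries coerce a u32 attribute to a strict boolean with !! before setting or clearing a flag bit, so any nonzero user value enables the feature. A toy model of that pattern; the flag constant is illustrative, not the kernel's value:

/* Toy model of the cake boolean-attribute pattern: a u32 netlink value is
 * collapsed to 0/1 with !! and used to set or clear one flag bit. */
#include <stdint.h>
#include <stdio.h>

#define FLAG_WASH  0x4	/* illustrative bit, not the kernel's constant */

static void apply_wash(uint32_t attr_val, uint32_t *rate_flags)
{
	if (!!attr_val)
		*rate_flags |= FLAG_WASH;
	else
		*rate_flags &= ~FLAG_WASH;
}

int main(void)
{
	uint32_t flags = 0;

	apply_wash(1234, &flags);	/* any nonzero value enables */
	printf("flags=%#x\n", (unsigned)flags);
	apply_wash(0, &flags);		/* zero disables */
	printf("flags=%#x\n", (unsigned)flags);
	return 0;
}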
tb               1130 net/sched/sch_cbq.c static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
tb               1141 net/sched/sch_cbq.c 	err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
tb               1146 net/sched/sch_cbq.c 	if (tb[TCA_CBQ_WRROPT]) {
tb               1147 net/sched/sch_cbq.c 		const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
tb               1161 net/sched/sch_cbq.c 	struct nlattr *tb[TCA_CBQ_MAX + 1];
tb               1169 net/sched/sch_cbq.c 	err = cbq_opt_parse(tb, opt, extack);
tb               1173 net/sched/sch_cbq.c 	if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
tb               1178 net/sched/sch_cbq.c 	r = nla_data(tb[TCA_CBQ_RATE]);
tb               1180 net/sched/sch_cbq.c 	q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
tb               1218 net/sched/sch_cbq.c 	if (tb[TCA_CBQ_LSSOPT])
tb               1219 net/sched/sch_cbq.c 		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
tb               1484 net/sched/sch_cbq.c 	struct nlattr *tb[TCA_CBQ_MAX + 1];
tb               1488 net/sched/sch_cbq.c 	err = cbq_opt_parse(tb, opt, extack);
tb               1492 net/sched/sch_cbq.c 	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
tb               1511 net/sched/sch_cbq.c 		if (tb[TCA_CBQ_RATE]) {
tb               1512 net/sched/sch_cbq.c 			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
tb               1513 net/sched/sch_cbq.c 					      tb[TCA_CBQ_RTAB], extack);
tb               1542 net/sched/sch_cbq.c 		if (tb[TCA_CBQ_LSSOPT])
tb               1543 net/sched/sch_cbq.c 			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
tb               1545 net/sched/sch_cbq.c 		if (tb[TCA_CBQ_WRROPT]) {
tb               1547 net/sched/sch_cbq.c 			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
tb               1550 net/sched/sch_cbq.c 		if (tb[TCA_CBQ_FOPT])
tb               1551 net/sched/sch_cbq.c 			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
tb               1564 net/sched/sch_cbq.c 	if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
tb               1569 net/sched/sch_cbq.c 	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
tb               1656 net/sched/sch_cbq.c 	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
tb               1657 net/sched/sch_cbq.c 	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
tb               1664 net/sched/sch_cbq.c 	if (tb[TCA_CBQ_FOPT])
tb               1665 net/sched/sch_cbq.c 		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
tb                365 net/sched/sch_cbs.c 	struct nlattr *tb[TCA_CBS_MAX + 1];
tb                369 net/sched/sch_cbs.c 	err = nla_parse_nested_deprecated(tb, TCA_CBS_MAX, opt, cbs_policy,
tb                374 net/sched/sch_cbs.c 	if (!tb[TCA_CBS_PARMS]) {
tb                379 net/sched/sch_cbs.c 	qopt = nla_data(tb[TCA_CBS_PARMS]);
tb                348 net/sched/sch_choke.c 	struct nlattr *tb[TCA_CHOKE_MAX + 1];
tb                358 net/sched/sch_choke.c 	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
tb                363 net/sched/sch_choke.c 	if (tb[TCA_CHOKE_PARMS] == NULL ||
tb                364 net/sched/sch_choke.c 	    tb[TCA_CHOKE_STAB] == NULL)
tb                367 net/sched/sch_choke.c 	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
tb                369 net/sched/sch_choke.c 	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
tb                421 net/sched/sch_choke.c 		      nla_data(tb[TCA_CHOKE_STAB]),
tb                137 net/sched/sch_codel.c 	struct nlattr *tb[TCA_CODEL_MAX + 1];
tb                144 net/sched/sch_codel.c 	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
tb                151 net/sched/sch_codel.c 	if (tb[TCA_CODEL_TARGET]) {
tb                152 net/sched/sch_codel.c 		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
tb                157 net/sched/sch_codel.c 	if (tb[TCA_CODEL_CE_THRESHOLD]) {
tb                158 net/sched/sch_codel.c 		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);
tb                163 net/sched/sch_codel.c 	if (tb[TCA_CODEL_INTERVAL]) {
tb                164 net/sched/sch_codel.c 		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
tb                169 net/sched/sch_codel.c 	if (tb[TCA_CODEL_LIMIT])
tb                170 net/sched/sch_codel.c 		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
tb                172 net/sched/sch_codel.c 	if (tb[TCA_CODEL_ECN])
tb                173 net/sched/sch_codel.c 		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
tb                 61 net/sched/sch_drr.c 	struct nlattr *tb[TCA_DRR_MAX + 1];
tb                 70 net/sched/sch_drr.c 	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
tb                 75 net/sched/sch_drr.c 	if (tb[TCA_DRR_QUANTUM]) {
tb                 76 net/sched/sch_drr.c 		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
tb                 98 net/sched/sch_drr.c 		if (tb[TCA_DRR_QUANTUM])
tb                122 net/sched/sch_dsmark.c 	struct nlattr *tb[TCA_DSMARK_MAX + 1];
tb                136 net/sched/sch_dsmark.c 	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
tb                141 net/sched/sch_dsmark.c 	if (tb[TCA_DSMARK_VALUE])
tb                142 net/sched/sch_dsmark.c 		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
tb                144 net/sched/sch_dsmark.c 	if (tb[TCA_DSMARK_MASK])
tb                145 net/sched/sch_dsmark.c 		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
tb                343 net/sched/sch_dsmark.c 	struct nlattr *tb[TCA_DSMARK_MAX + 1];
tb                358 net/sched/sch_dsmark.c 	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
tb                364 net/sched/sch_dsmark.c 	if (!tb[TCA_DSMARK_INDICES])
tb                366 net/sched/sch_dsmark.c 	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
tb                371 net/sched/sch_dsmark.c 	if (tb[TCA_DSMARK_DEFAULT_INDEX])
tb                372 net/sched/sch_dsmark.c 		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
tb                388 net/sched/sch_dsmark.c 	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
tb                351 net/sched/sch_etf.c 	struct nlattr *tb[TCA_ETF_MAX + 1];
tb                361 net/sched/sch_etf.c 	err = nla_parse_nested_deprecated(tb, TCA_ETF_MAX, opt, etf_policy,
tb                366 net/sched/sch_etf.c 	if (!tb[TCA_ETF_PARMS]) {
tb                371 net/sched/sch_etf.c 	qopt = nla_data(tb[TCA_ETF_PARMS]);
tb                757 net/sched/sch_fq.c 	struct nlattr *tb[TCA_FQ_MAX + 1];
tb                765 net/sched/sch_fq.c 	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
tb                774 net/sched/sch_fq.c 	if (tb[TCA_FQ_BUCKETS_LOG]) {
tb                775 net/sched/sch_fq.c 		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
tb                782 net/sched/sch_fq.c 	if (tb[TCA_FQ_PLIMIT])
tb                783 net/sched/sch_fq.c 		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
tb                785 net/sched/sch_fq.c 	if (tb[TCA_FQ_FLOW_PLIMIT])
tb                786 net/sched/sch_fq.c 		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
tb                788 net/sched/sch_fq.c 	if (tb[TCA_FQ_QUANTUM]) {
tb                789 net/sched/sch_fq.c 		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
tb                799 net/sched/sch_fq.c 	if (tb[TCA_FQ_INITIAL_QUANTUM])
tb                800 net/sched/sch_fq.c 		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
tb                802 net/sched/sch_fq.c 	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
tb                804 net/sched/sch_fq.c 				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
tb                806 net/sched/sch_fq.c 	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
tb                807 net/sched/sch_fq.c 		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
tb                811 net/sched/sch_fq.c 	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
tb                813 net/sched/sch_fq.c 			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
tb                815 net/sched/sch_fq.c 	if (tb[TCA_FQ_RATE_ENABLE]) {
tb                816 net/sched/sch_fq.c 		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
tb                824 net/sched/sch_fq.c 	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
tb                825 net/sched/sch_fq.c 		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;
tb                830 net/sched/sch_fq.c 	if (tb[TCA_FQ_ORPHAN_MASK])
tb                831 net/sched/sch_fq.c 		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
tb                833 net/sched/sch_fq.c 	if (tb[TCA_FQ_CE_THRESHOLD])
tb                835 net/sched/sch_fq.c 				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
tb                372 net/sched/sch_fq_codel.c 	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
tb                378 net/sched/sch_fq_codel.c 	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
tb                382 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_FLOWS]) {
tb                385 net/sched/sch_fq_codel.c 		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
tb                392 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_TARGET]) {
tb                393 net/sched/sch_fq_codel.c 		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
tb                398 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
tb                399 net/sched/sch_fq_codel.c 		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
tb                404 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_INTERVAL]) {
tb                405 net/sched/sch_fq_codel.c 		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
tb                410 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_LIMIT])
tb                411 net/sched/sch_fq_codel.c 		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
tb                413 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_ECN])
tb                414 net/sched/sch_fq_codel.c 		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
tb                416 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_QUANTUM])
tb                417 net/sched/sch_fq_codel.c 		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
tb                419 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
tb                420 net/sched/sch_fq_codel.c 		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
tb                422 net/sched/sch_fq_codel.c 	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
tb                423 net/sched/sch_fq_codel.c 		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
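
sch_fq_codel.c sanitises user input inline as the entries above show: the quantum is clamped to at least 256 and the memory limit is capped at 2^31 before use. A stand-alone sketch of that clamp pattern, with plain helpers in place of the kernel's min()/max() macros:

/* Toy model of the fq_codel sanitisation pattern: user-supplied values
 * are clamped before being stored in the qdisc state. */
#include <stdint.h>
#include <stdio.h>

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }
static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t quantum = max_u32(256u, 64u);             /* too small: raised */
	uint32_t memory  = min_u32(1u << 31, 0xffffffffu); /* too big: capped */

	printf("quantum=%u memory=%u\n", quantum, memory);
	return 0;
}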
tb                533 net/sched/sch_gred.c 	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
tb                536 net/sched/sch_gred.c 	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
tb                539 net/sched/sch_gred.c 	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
tb                541 net/sched/sch_gred.c 	if (tb[TCA_GRED_VQ_FLAGS])
tb                542 net/sched/sch_gred.c 		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
tb                563 net/sched/sch_gred.c 	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
tb                567 net/sched/sch_gred.c 	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
tb                572 net/sched/sch_gred.c 	if (!tb[TCA_GRED_VQ_DP]) {
tb                576 net/sched/sch_gred.c 	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
tb                586 net/sched/sch_gred.c 	if (tb[TCA_GRED_VQ_FLAGS]) {
tb                587 net/sched/sch_gred.c 		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
tb                640 net/sched/sch_gred.c 	struct nlattr *tb[TCA_GRED_MAX + 1];
tb                649 net/sched/sch_gred.c 	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
tb                654 net/sched/sch_gred.c 	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
tb                655 net/sched/sch_gred.c 		if (tb[TCA_GRED_LIMIT] != NULL)
tb                656 net/sched/sch_gred.c 			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
tb                657 net/sched/sch_gred.c 		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
tb                660 net/sched/sch_gred.c 	if (tb[TCA_GRED_PARMS] == NULL ||
tb                661 net/sched/sch_gred.c 	    tb[TCA_GRED_STAB] == NULL ||
tb                662 net/sched/sch_gred.c 	    tb[TCA_GRED_LIMIT] != NULL) {
tb                667 net/sched/sch_gred.c 	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
tb                669 net/sched/sch_gred.c 	ctl = nla_data(tb[TCA_GRED_PARMS]);
tb                670 net/sched/sch_gred.c 	stab = nla_data(tb[TCA_GRED_STAB]);
tb                677 net/sched/sch_gred.c 	if (tb[TCA_GRED_VQ_LIST]) {
tb                678 net/sched/sch_gred.c 		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
tb                707 net/sched/sch_gred.c 	if (tb[TCA_GRED_VQ_LIST])
tb                708 net/sched/sch_gred.c 		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
tb                731 net/sched/sch_gred.c 	struct nlattr *tb[TCA_GRED_MAX + 1];
tb                737 net/sched/sch_gred.c 	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
tb                742 net/sched/sch_gred.c 	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
tb                748 net/sched/sch_gred.c 	if (tb[TCA_GRED_LIMIT])
tb                749 net/sched/sch_gred.c 		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
tb                754 net/sched/sch_gred.c 	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
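
gred_change() above accepts either a table-level update (TCA_GRED_LIMIT and/or TCA_GRED_DPS, with PARMS and STAB both absent) or a per-virtual-queue update (PARMS and STAB both present, LIMIT absent); any other combination is rejected. A toy encoding of that attribute-combination rule, reconstructed from the quoted checks:

/* Toy model of the gred attribute-combination check: PARMS and STAB must
 * come together, and LIMIT is only legal when both are absent. */
#include <stdbool.h>
#include <stdio.h>

static int gred_check(bool parms, bool stab, bool limit)
{
	if (!parms && !stab)
		return 0;	/* table-level change, LIMIT allowed */
	if (!parms || !stab || limit)
		return -1;	/* kernel returns -EINVAL here */
	return 0;		/* per-virtual-queue change */
}

int main(void)
{
	printf("%d\n", gred_check(true, true, false));	/* ok */
	printf("%d\n", gred_check(true, false, false));	/* rejected */
	printf("%d\n", gred_check(false, false, true));	/* ok */
	return 0;
}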
tb                921 net/sched/sch_hfsc.c 	struct nlattr *tb[TCA_HFSC_MAX + 1];
tb                929 net/sched/sch_hfsc.c 	err = nla_parse_nested_deprecated(tb, TCA_HFSC_MAX, opt, hfsc_policy,
tb                934 net/sched/sch_hfsc.c 	if (tb[TCA_HFSC_RSC]) {
tb                935 net/sched/sch_hfsc.c 		rsc = nla_data(tb[TCA_HFSC_RSC]);
tb                940 net/sched/sch_hfsc.c 	if (tb[TCA_HFSC_FSC]) {
tb                941 net/sched/sch_hfsc.c 		fsc = nla_data(tb[TCA_HFSC_FSC]);
tb                946 net/sched/sch_hfsc.c 	if (tb[TCA_HFSC_USC]) {
tb                947 net/sched/sch_hfsc.c 		usc = nla_data(tb[TCA_HFSC_USC]);
tb                512 net/sched/sch_hhf.c 	struct nlattr *tb[TCA_HHF_MAX + 1];
tb                522 net/sched/sch_hhf.c 	err = nla_parse_nested_deprecated(tb, TCA_HHF_MAX, opt, hhf_policy,
tb                527 net/sched/sch_hhf.c 	if (tb[TCA_HHF_QUANTUM])
tb                528 net/sched/sch_hhf.c 		new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);
tb                530 net/sched/sch_hhf.c 	if (tb[TCA_HHF_NON_HH_WEIGHT])
tb                531 net/sched/sch_hhf.c 		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
tb                539 net/sched/sch_hhf.c 	if (tb[TCA_HHF_BACKLOG_LIMIT])
tb                540 net/sched/sch_hhf.c 		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
tb                545 net/sched/sch_hhf.c 	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
tb                546 net/sched/sch_hhf.c 		q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);
tb                548 net/sched/sch_hhf.c 	if (tb[TCA_HHF_RESET_TIMEOUT]) {
tb                549 net/sched/sch_hhf.c 		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);
tb                554 net/sched/sch_hhf.c 	if (tb[TCA_HHF_ADMIT_BYTES])
tb                555 net/sched/sch_hhf.c 		q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);
tb                557 net/sched/sch_hhf.c 	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
tb                558 net/sched/sch_hhf.c 		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);
tb                999 net/sched/sch_htb.c 	struct nlattr *tb[TCA_HTB_MAX + 1];
tb               1013 net/sched/sch_htb.c 	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
tb               1018 net/sched/sch_htb.c 	if (!tb[TCA_HTB_INIT])
tb               1021 net/sched/sch_htb.c 	gopt = nla_data(tb[TCA_HTB_INIT]);
tb               1031 net/sched/sch_htb.c 	if (tb[TCA_HTB_DIRECT_QLEN])
tb               1032 net/sched/sch_htb.c 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
tb               1304 net/sched/sch_htb.c 	struct nlattr *tb[TCA_HTB_MAX + 1];
tb               1314 net/sched/sch_htb.c 	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
tb               1320 net/sched/sch_htb.c 	if (tb[TCA_HTB_PARMS] == NULL)
tb               1325 net/sched/sch_htb.c 	hopt = nla_data(tb[TCA_HTB_PARMS]);
tb               1331 net/sched/sch_htb.c 		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
tb               1335 net/sched/sch_htb.c 		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
tb               1450 net/sched/sch_htb.c 	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
tb               1452 net/sched/sch_htb.c 	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
tb                119 net/sched/sch_mqprio.c static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
tb                125 net/sched/sch_mqprio.c 		return nla_parse_deprecated(tb, maxtype,
tb                129 net/sched/sch_mqprio.c 	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
tb                142 net/sched/sch_mqprio.c 	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
tb                169 net/sched/sch_mqprio.c 		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
tb                177 net/sched/sch_mqprio.c 		if (tb[TCA_MQPRIO_MODE]) {
tb                179 net/sched/sch_mqprio.c 			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
tb                182 net/sched/sch_mqprio.c 		if (tb[TCA_MQPRIO_SHAPER]) {
tb                184 net/sched/sch_mqprio.c 			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
tb                187 net/sched/sch_mqprio.c 		if (tb[TCA_MQPRIO_MIN_RATE64]) {
tb                191 net/sched/sch_mqprio.c 			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
tb                203 net/sched/sch_mqprio.c 		if (tb[TCA_MQPRIO_MAX_RATE64]) {
tb                207 net/sched/sch_mqprio.c 			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
tb                930 net/sched/sch_netem.c static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
tb                941 net/sched/sch_netem.c 		return nla_parse_deprecated(tb, maxtype,
tb                945 net/sched/sch_netem.c 	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
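
sch_mqprio.c and sch_netem.c each carry a local parse_attr() because their TCA_OPTIONS payload begins with a fixed struct (tc_mqprio_qopt / tc_netem_qopt) and only the trailing bytes hold netlink attributes; when nothing follows the struct, the tb[] table is zeroed so later presence checks fail cleanly. A toy sketch of that layout; struct qopt, A_ECN and the parsing stub are illustrative:

/* Toy model of the mqprio/netem option layout: a fixed struct followed by
 * optional attribute bytes. If nothing follows, the table is zeroed. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct qopt { uint32_t limit; };	/* stands in for tc_netem_qopt */

enum { A_UNSPEC, A_ECN, A_MAX = A_ECN };

static void parse_attr(const uint8_t *opt, size_t opt_len,
		       const uint8_t *tb[A_MAX + 1])
{
	if (opt_len > sizeof(struct qopt)) {
		/* the real code hands the remainder to
		 * nla_parse_deprecated(); here we only record where the
		 * attribute bytes would start */
		tb[A_ECN] = opt + sizeof(struct qopt);
	} else {
		memset(tb, 0, sizeof(const uint8_t *) * (A_MAX + 1));
	}
}

int main(void)
{
	uint8_t buf[sizeof(struct qopt)] = {0};	/* struct only, no attrs */
	const uint8_t *tb[A_MAX + 1];

	parse_attr(buf, sizeof(buf), tb);
	printf("ecn attr present: %s\n", tb[A_ECN] ? "yes" : "no");
	return 0;
}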
tb                954 net/sched/sch_netem.c 	struct nlattr *tb[TCA_NETEM_MAX + 1];
tb                964 net/sched/sch_netem.c 	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
tb                972 net/sched/sch_netem.c 	if (tb[TCA_NETEM_LOSS]) {
tb                973 net/sched/sch_netem.c 		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
tb                982 net/sched/sch_netem.c 	if (tb[TCA_NETEM_DELAY_DIST]) {
tb                984 net/sched/sch_netem.c 				     tb[TCA_NETEM_DELAY_DIST]);
tb                989 net/sched/sch_netem.c 	if (tb[TCA_NETEM_SLOT_DIST]) {
tb                991 net/sched/sch_netem.c 				     tb[TCA_NETEM_SLOT_DIST]);
tb               1012 net/sched/sch_netem.c 	if (tb[TCA_NETEM_CORR])
tb               1013 net/sched/sch_netem.c 		get_correlation(q, tb[TCA_NETEM_CORR]);
tb               1015 net/sched/sch_netem.c 	if (tb[TCA_NETEM_REORDER])
tb               1016 net/sched/sch_netem.c 		get_reorder(q, tb[TCA_NETEM_REORDER]);
tb               1018 net/sched/sch_netem.c 	if (tb[TCA_NETEM_CORRUPT])
tb               1019 net/sched/sch_netem.c 		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
tb               1021 net/sched/sch_netem.c 	if (tb[TCA_NETEM_RATE])
tb               1022 net/sched/sch_netem.c 		get_rate(q, tb[TCA_NETEM_RATE]);
tb               1024 net/sched/sch_netem.c 	if (tb[TCA_NETEM_RATE64])
tb               1026 net/sched/sch_netem.c 				nla_get_u64(tb[TCA_NETEM_RATE64]));
tb               1028 net/sched/sch_netem.c 	if (tb[TCA_NETEM_LATENCY64])
tb               1029 net/sched/sch_netem.c 		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
tb               1031 net/sched/sch_netem.c 	if (tb[TCA_NETEM_JITTER64])
tb               1032 net/sched/sch_netem.c 		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
tb               1034 net/sched/sch_netem.c 	if (tb[TCA_NETEM_ECN])
tb               1035 net/sched/sch_netem.c 		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
tb               1037 net/sched/sch_netem.c 	if (tb[TCA_NETEM_SLOT])
tb               1038 net/sched/sch_netem.c 		get_slot(q, tb[TCA_NETEM_SLOT]);
tb                203 net/sched/sch_pie.c 	struct nlattr *tb[TCA_PIE_MAX + 1];
tb                210 net/sched/sch_pie.c 	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
tb                218 net/sched/sch_pie.c 	if (tb[TCA_PIE_TARGET]) {
tb                220 net/sched/sch_pie.c 		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);
tb                227 net/sched/sch_pie.c 	if (tb[TCA_PIE_TUPDATE])
tb                229 net/sched/sch_pie.c 			usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
tb                231 net/sched/sch_pie.c 	if (tb[TCA_PIE_LIMIT]) {
tb                232 net/sched/sch_pie.c 		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
tb                238 net/sched/sch_pie.c 	if (tb[TCA_PIE_ALPHA])
tb                239 net/sched/sch_pie.c 		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);
tb                241 net/sched/sch_pie.c 	if (tb[TCA_PIE_BETA])
tb                242 net/sched/sch_pie.c 		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);
tb                244 net/sched/sch_pie.c 	if (tb[TCA_PIE_ECN])
tb                245 net/sched/sch_pie.c 		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);
tb                247 net/sched/sch_pie.c 	if (tb[TCA_PIE_BYTEMODE])
tb                248 net/sched/sch_pie.c 		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);
tb                399 net/sched/sch_qfq.c 	struct nlattr *tb[TCA_QFQ_MAX + 1];
tb                410 net/sched/sch_qfq.c 	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
tb                415 net/sched/sch_qfq.c 	if (tb[TCA_QFQ_WEIGHT]) {
tb                416 net/sched/sch_qfq.c 		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
tb                424 net/sched/sch_qfq.c 	if (tb[TCA_QFQ_LMAX]) {
tb                425 net/sched/sch_qfq.c 		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
tb                196 net/sched/sch_red.c 	struct nlattr *tb[TCA_RED_MAX + 1];
tb                204 net/sched/sch_red.c 	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
tb                209 net/sched/sch_red.c 	if (tb[TCA_RED_PARMS] == NULL ||
tb                210 net/sched/sch_red.c 	    tb[TCA_RED_STAB] == NULL)
tb                213 net/sched/sch_red.c 	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
tb                215 net/sched/sch_red.c 	ctl = nla_data(tb[TCA_RED_PARMS]);
tb                241 net/sched/sch_red.c 		      nla_data(tb[TCA_RED_STAB]),
tb                493 net/sched/sch_sfb.c 	struct nlattr *tb[TCA_SFB_MAX + 1];
tb                499 net/sched/sch_sfb.c 		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
tb                504 net/sched/sch_sfb.c 		if (tb[TCA_SFB_PARMS] == NULL)
tb                507 net/sched/sch_sfb.c 		ctl = nla_data(tb[TCA_SFB_PARMS]);
tb                780 net/sched/sch_taprio.c static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
tb                785 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
tb                787 net/sched/sch_taprio.c 			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
tb                789 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
tb                791 net/sched/sch_taprio.c 			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
tb                793 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
tb                795 net/sched/sch_taprio.c 			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
tb                810 net/sched/sch_taprio.c 	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
tb                813 net/sched/sch_taprio.c 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
tb                822 net/sched/sch_taprio.c 	return fill_sched_entry(tb, entry, extack);
tb                865 net/sched/sch_taprio.c static int parse_taprio_schedule(struct nlattr **tb,
tb                871 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
tb                876 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
tb                877 net/sched/sch_taprio.c 		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
tb                879 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
tb                880 net/sched/sch_taprio.c 		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
tb                882 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
tb                883 net/sched/sch_taprio.c 		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
tb                885 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
tb                887 net/sched/sch_taprio.c 			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
tb               1284 net/sched/sch_taprio.c static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
tb               1298 net/sched/sch_taprio.c 		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
tb               1313 net/sched/sch_taprio.c 	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
tb               1314 net/sched/sch_taprio.c 		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
tb               1409 net/sched/sch_taprio.c 	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
tb               1418 net/sched/sch_taprio.c 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
tb               1423 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
tb               1424 net/sched/sch_taprio.c 		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
tb               1426 net/sched/sch_taprio.c 	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
tb               1459 net/sched/sch_taprio.c 	err = parse_taprio_schedule(tb, new_admin, extack);
tb               1469 net/sched/sch_taprio.c 	err = taprio_parse_clockid(sch, tb, extack);
tb               1498 net/sched/sch_taprio.c 	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
tb               1505 net/sched/sch_taprio.c 		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
tb                297 net/sched/sch_tbf.c 	struct nlattr *tb[TCA_TBF_MAX + 1];
tb                306 net/sched/sch_tbf.c 	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
tb                312 net/sched/sch_tbf.c 	if (tb[TCA_TBF_PARMS] == NULL)
tb                315 net/sched/sch_tbf.c 	qopt = nla_data(tb[TCA_TBF_PARMS]);
tb                318 net/sched/sch_tbf.c 					      tb[TCA_TBF_RTAB],
tb                323 net/sched/sch_tbf.c 						      tb[TCA_TBF_PTAB],
tb                329 net/sched/sch_tbf.c 	if (tb[TCA_TBF_RATE64])
tb                330 net/sched/sch_tbf.c 		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
tb                333 net/sched/sch_tbf.c 	if (tb[TCA_TBF_BURST]) {
tb                334 net/sched/sch_tbf.c 		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
tb                341 net/sched/sch_tbf.c 		if (tb[TCA_TBF_PRATE64])
tb                342 net/sched/sch_tbf.c 			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
tb                351 net/sched/sch_tbf.c 		if (tb[TCA_TBF_PBURST]) {
tb                352 net/sched/sch_tbf.c 			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
tb                395 net/sched/sch_tbf.c 	if (tb[TCA_TBF_PBURST])
tb                400 net/sched/sch_tbf.c 	if (tb[TCA_TBF_BURST])
tb                331 net/smc/smc_pnet.c 			       struct nlattr *tb[])
tb                340 net/smc/smc_pnet.c 	if (!tb[SMC_PNETID_NAME])
tb                342 net/smc/smc_pnet.c 	string = (char *)nla_data(tb[SMC_PNETID_NAME]);
tb                347 net/smc/smc_pnet.c 	if (tb[SMC_PNETID_ETHNAME]) {
tb                348 net/smc/smc_pnet.c 		string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
tb                359 net/smc/smc_pnet.c 	if (tb[SMC_PNETID_IBNAME]) {
tb                360 net/smc/smc_pnet.c 		ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
tb                367 net/smc/smc_pnet.c 			if (!tb[SMC_PNETID_IBPORT])
tb                369 net/smc/smc_pnet.c 			pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
tb               1038 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_KEY_MAX + 1];
tb               1039 net/wireless/nl80211.c 	int err = nla_parse_nested_deprecated(tb, NL80211_KEY_MAX, key,
tb               1045 net/wireless/nl80211.c 	k->def = !!tb[NL80211_KEY_DEFAULT];
tb               1046 net/wireless/nl80211.c 	k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT];
tb               1055 net/wireless/nl80211.c 	if (tb[NL80211_KEY_IDX])
tb               1056 net/wireless/nl80211.c 		k->idx = nla_get_u8(tb[NL80211_KEY_IDX]);
tb               1058 net/wireless/nl80211.c 	if (tb[NL80211_KEY_DATA]) {
tb               1059 net/wireless/nl80211.c 		k->p.key = nla_data(tb[NL80211_KEY_DATA]);
tb               1060 net/wireless/nl80211.c 		k->p.key_len = nla_len(tb[NL80211_KEY_DATA]);
tb               1063 net/wireless/nl80211.c 	if (tb[NL80211_KEY_SEQ]) {
tb               1064 net/wireless/nl80211.c 		k->p.seq = nla_data(tb[NL80211_KEY_SEQ]);
tb               1065 net/wireless/nl80211.c 		k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]);
tb               1068 net/wireless/nl80211.c 	if (tb[NL80211_KEY_CIPHER])
tb               1069 net/wireless/nl80211.c 		k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]);
tb               1071 net/wireless/nl80211.c 	if (tb[NL80211_KEY_TYPE])
tb               1072 net/wireless/nl80211.c 		k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
tb               1074 net/wireless/nl80211.c 	if (tb[NL80211_KEY_DEFAULT_TYPES]) {
tb               1079 net/wireless/nl80211.c 						  tb[NL80211_KEY_DEFAULT_TYPES],
tb               1089 net/wireless/nl80211.c 	if (tb[NL80211_KEY_MODE])
tb               1090 net/wireless/nl80211.c 		k->p.mode = nla_get_u8(tb[NL80211_KEY_MODE]);
tb               1307 net/wireless/nl80211.c 							struct nlattr *tb)
tb               1311 net/wireless/nl80211.c 	if (tb == NULL)
tb               1313 net/wireless/nl80211.c 	chan = ieee80211_get_channel(wiphy, nla_get_u32(tb));
tb               2466 net/wireless/nl80211.c 	struct nlattr **tb = kcalloc(NUM_NL80211_ATTR, sizeof(*tb), GFP_KERNEL);
tb               2469 net/wireless/nl80211.c 	if (!tb)
tb               2474 net/wireless/nl80211.c 				     tb, nl80211_fam.maxattr,
tb               2482 net/wireless/nl80211.c 	state->split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
tb               2483 net/wireless/nl80211.c 	if (tb[NL80211_ATTR_WIPHY])
tb               2484 net/wireless/nl80211.c 		state->filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]);
tb               2485 net/wireless/nl80211.c 	if (tb[NL80211_ATTR_WDEV])
tb               2486 net/wireless/nl80211.c 		state->filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32;
tb               2487 net/wireless/nl80211.c 	if (tb[NL80211_ATTR_IFINDEX]) {
tb               2490 net/wireless/nl80211.c 		int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]);
tb               2506 net/wireless/nl80211.c 	kfree(tb);
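
The nl80211 dump path above sizes its table as NUM_NL80211_ATTR pointers and takes it from the heap (kcalloc at entry, kfree on every exit path) rather than burning that much stack. A userspace sketch of the same size/lifetime pattern with calloc():

```c
/* Heap-allocated attribute table, mirroring the kcalloc()/kfree()
 * pairing in the nl80211 dump entries above.  struct attr is opaque. */
#include <stdlib.h>

struct attr;                       /* stand-in for struct nlattr */
#define NUM_ATTRS 512              /* large enough to matter on the stack */

static int dump_start(void)
{
	struct attr **tb = calloc(NUM_ATTRS, sizeof(*tb));
	int err = 0;

	if (!tb)
		return -1;         /* -ENOMEM in the kernel version */

	/* ... parse into tb[] and read the dump filters out of it ... */

	free(tb);                  /* every exit path must release it */
	return err;
}

int main(void)
{
	return dump_start() ? 1 : 0;
}
```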
tb               2617 net/wireless/nl80211.c static int parse_txq_params(struct nlattr *tb[],
tb               2622 net/wireless/nl80211.c 	if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
tb               2623 net/wireless/nl80211.c 	    !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
tb               2624 net/wireless/nl80211.c 	    !tb[NL80211_TXQ_ATTR_AIFS])
tb               2627 net/wireless/nl80211.c 	ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
tb               2628 net/wireless/nl80211.c 	txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
tb               2629 net/wireless/nl80211.c 	txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
tb               2630 net/wireless/nl80211.c 	txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
tb               2631 net/wireless/nl80211.c 	txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
tb               2921 net/wireless/nl80211.c 		struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1];
tb               2939 net/wireless/nl80211.c 			result = nla_parse_nested_deprecated(tb,
tb               2946 net/wireless/nl80211.c 			result = parse_txq_params(tb, &txq_params);
tb               4240 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_TXRATE_MAX + 1];
tb               4284 net/wireless/nl80211.c 		err = nla_parse_nested_deprecated(tb, NL80211_TXRATE_MAX,
tb               4290 net/wireless/nl80211.c 		if (tb[NL80211_TXRATE_LEGACY]) {
tb               4293 net/wireless/nl80211.c 				nla_data(tb[NL80211_TXRATE_LEGACY]),
tb               4294 net/wireless/nl80211.c 				nla_len(tb[NL80211_TXRATE_LEGACY]));
tb               4296 net/wireless/nl80211.c 			    nla_len(tb[NL80211_TXRATE_LEGACY]))
tb               4299 net/wireless/nl80211.c 		if (tb[NL80211_TXRATE_HT]) {
tb               4302 net/wireless/nl80211.c 					nla_data(tb[NL80211_TXRATE_HT]),
tb               4303 net/wireless/nl80211.c 					nla_len(tb[NL80211_TXRATE_HT]),
tb               4307 net/wireless/nl80211.c 		if (tb[NL80211_TXRATE_VHT]) {
tb               4310 net/wireless/nl80211.c 					nla_data(tb[NL80211_TXRATE_VHT]),
tb               4314 net/wireless/nl80211.c 		if (tb[NL80211_TXRATE_GI]) {
tb               4316 net/wireless/nl80211.c 				nla_get_u8(tb[NL80211_TXRATE_GI]);
tb               4453 net/wireless/nl80211.c 		struct nlattr *tb[NL80211_FTM_RESP_ATTR_MAX + 1];
tb               4455 net/wireless/nl80211.c 		err = nla_parse_nested_deprecated(tb,
tb               4462 net/wireless/nl80211.c 		if (tb[NL80211_FTM_RESP_ATTR_ENABLED] &&
tb               4469 net/wireless/nl80211.c 		if (tb[NL80211_FTM_RESP_ATTR_LCI]) {
tb               4470 net/wireless/nl80211.c 			bcn->lci = nla_data(tb[NL80211_FTM_RESP_ATTR_LCI]);
tb               4471 net/wireless/nl80211.c 			bcn->lci_len = nla_len(tb[NL80211_FTM_RESP_ATTR_LCI]);
tb               4474 net/wireless/nl80211.c 		if (tb[NL80211_FTM_RESP_ATTR_CIVICLOC]) {
tb               4475 net/wireless/nl80211.c 			bcn->civicloc = nla_data(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]);
tb               4476 net/wireless/nl80211.c 			bcn->civicloc_len = nla_len(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]);
tb               4488 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_HE_OBSS_PD_ATTR_MAX + 1];
tb               4491 net/wireless/nl80211.c 	err = nla_parse_nested(tb, NL80211_HE_OBSS_PD_ATTR_MAX, attrs,
tb               4496 net/wireless/nl80211.c 	if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
tb               4497 net/wireless/nl80211.c 	    !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
tb               4501 net/wireless/nl80211.c 		nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
tb               4503 net/wireless/nl80211.c 		nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
tb               5570 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_STA_WME_MAX + 1];
tb               5579 net/wireless/nl80211.c 	err = nla_parse_nested_deprecated(tb, NL80211_STA_WME_MAX, nla,
tb               5585 net/wireless/nl80211.c 	if (tb[NL80211_STA_WME_UAPSD_QUEUES])
tb               5587 net/wireless/nl80211.c 			tb[NL80211_STA_WME_UAPSD_QUEUES]);
tb               5591 net/wireless/nl80211.c 	if (tb[NL80211_STA_WME_MAX_SP])
tb               5592 net/wireless/nl80211.c 		params->max_sp = nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
tb               6710 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
tb               6714 net/wireless/nl80211.c #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, mask, attr, fn)	\
tb               6716 net/wireless/nl80211.c 	if (tb[attr]) {							\
tb               6717 net/wireless/nl80211.c 		cfg->param = fn(tb[attr]);				\
tb               6724 net/wireless/nl80211.c 	if (nla_parse_nested_deprecated(tb, NL80211_MESHCONF_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_CONFIG], nl80211_meshconf_params_policy, info->extack))
tb               6732 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask,
tb               6734 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, mask,
tb               6737 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, mask,
tb               6740 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, mask,
tb               6743 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, mask,
tb               6745 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, mask,
tb               6747 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, mask,
tb               6749 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask,
tb               6752 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
tb               6756 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask,
tb               6759 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, mask,
tb               6765 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, mask,
tb               6768 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
tb               6776 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, mask,
tb               6779 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, mask,
tb               6782 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
tb               6786 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
tb               6788 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
tb               6791 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol,
tb               6794 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, mask,
tb               6796 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask,
tb               6799 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToMeshGate, mask,
tb               6806 net/wireless/nl80211.c 	if (tb[NL80211_MESHCONF_HT_OPMODE]) {
tb               6807 net/wireless/nl80211.c 		ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
tb               6820 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
tb               6828 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, mask,
tb               6831 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval,
tb               6835 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, mask,
tb               6837 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, mask,
tb               6839 net/wireless/nl80211.c 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask,
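
Each FILL_IN_MESH_PARAM_IF_SET() line above does two things at once: copy the attribute into the mesh config and record that the field was set in a mask, so the driver can tell which parameters actually changed. A sketch of such a macro; the bit assignment here is illustrative (the kernel derives it from the attribute number), and the attribute plumbing is a stand-in:

```c
/* Sketch of the set-field-and-record-mask macro pattern used by the
 * meshconf entries above. */
#include <stdint.h>
#include <stdio.h>

enum { ATTR_RETRY_TIMEOUT, ATTR_TTL, ATTR_MAX };
struct attr { uint32_t u32; };

struct mesh_cfg { uint16_t retry_timeout; uint8_t ttl; };

#define FILL_IF_SET(tb, cfg, field, mask, attr, bit)	\
do {							\
	if ((tb)[attr]) {				\
		(cfg)->field = (tb)[attr]->u32;		\
		*(mask) |= (1u << (bit));		\
	}						\
} while (0)

int main(void)
{
	struct attr ttl = { .u32 = 31 };
	struct attr *tb[ATTR_MAX] = { [ATTR_TTL] = &ttl };
	struct mesh_cfg cfg = { 0 };
	uint32_t mask = 0;

	FILL_IF_SET(tb, &cfg, retry_timeout, &mask, ATTR_RETRY_TIMEOUT, 0);
	FILL_IF_SET(tb, &cfg, ttl, &mask, ATTR_TTL, 1);

	printf("mask=0x%x ttl=%u\n", (unsigned)mask, cfg.ttl); /* 0x2, 31 */
	return 0;
}
```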
tb               6853 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1];
tb               6857 net/wireless/nl80211.c 	if (nla_parse_nested_deprecated(tb, NL80211_MESH_SETUP_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_SETUP], nl80211_mesh_setup_params_policy, info->extack))
tb               6860 net/wireless/nl80211.c 	if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])
tb               6862 net/wireless/nl80211.c 		(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ?
tb               6866 net/wireless/nl80211.c 	if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])
tb               6868 net/wireless/nl80211.c 		(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ?
tb               6872 net/wireless/nl80211.c 	if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])
tb               6874 net/wireless/nl80211.c 		(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])) ?
tb               6878 net/wireless/nl80211.c 	if (tb[NL80211_MESH_SETUP_IE]) {
tb               6880 net/wireless/nl80211.c 			tb[NL80211_MESH_SETUP_IE];
tb               6884 net/wireless/nl80211.c 	if (tb[NL80211_MESH_SETUP_USERSPACE_MPM] &&
tb               6887 net/wireless/nl80211.c 	setup->user_mpm = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_MPM]);
tb               6888 net/wireless/nl80211.c 	setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]);
tb               6889 net/wireless/nl80211.c 	setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]);
tb               6893 net/wireless/nl80211.c 	if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) {
tb               6897 net/wireless/nl80211.c 			nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]);
tb               7153 net/wireless/nl80211.c static int parse_reg_rule(struct nlattr *tb[],
tb               7159 net/wireless/nl80211.c 	if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
tb               7161 net/wireless/nl80211.c 	if (!tb[NL80211_ATTR_FREQ_RANGE_START])
tb               7163 net/wireless/nl80211.c 	if (!tb[NL80211_ATTR_FREQ_RANGE_END])
tb               7165 net/wireless/nl80211.c 	if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
tb               7167 net/wireless/nl80211.c 	if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
tb               7170 net/wireless/nl80211.c 	reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
tb               7173 net/wireless/nl80211.c 		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
tb               7175 net/wireless/nl80211.c 		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
tb               7177 net/wireless/nl80211.c 		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
tb               7180 net/wireless/nl80211.c 		nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
tb               7182 net/wireless/nl80211.c 	if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
tb               7184 net/wireless/nl80211.c 			nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
tb               7186 net/wireless/nl80211.c 	if (tb[NL80211_ATTR_DFS_CAC_TIME])
tb               7188 net/wireless/nl80211.c 			nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
tb               7195 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
tb               7241 net/wireless/nl80211.c 		r = nla_parse_nested_deprecated(tb, NL80211_REG_RULE_ATTR_MAX,
tb               7246 net/wireless/nl80211.c 		r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
tb               7898 net/wireless/nl80211.c 	struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
tb               7933 net/wireless/nl80211.c 			err = nla_parse_nested_deprecated(tb,
tb               7942 net/wireless/nl80211.c 			if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] &&
tb               7943 net/wireless/nl80211.c 			    tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID])
tb               7947 net/wireless/nl80211.c 			if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] ||
tb               7948 net/wireless/nl80211.c 			    tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) {
tb               7952 net/wireless/nl80211.c 			rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
tb               8121 net/wireless/nl80211.c 			err = nla_parse_nested_deprecated(tb,
tb               8128 net/wireless/nl80211.c 			ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
tb               8129 net/wireless/nl80211.c 			bssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID];
tb               8166 net/wireless/nl80211.c 			rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
tb               8174 net/wireless/nl80211.c 				tb[NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI],
tb               11381 net/wireless/nl80211.c 	struct nlattr *tb[NUM_NL80211_WOWLAN_TCP];
tb               11392 net/wireless/nl80211.c 	err = nla_parse_nested_deprecated(tb, MAX_NL80211_WOWLAN_TCP, attr,
tb               11397 net/wireless/nl80211.c 	if (!tb[NL80211_WOWLAN_TCP_SRC_IPV4] ||
tb               11398 net/wireless/nl80211.c 	    !tb[NL80211_WOWLAN_TCP_DST_IPV4] ||
tb               11399 net/wireless/nl80211.c 	    !tb[NL80211_WOWLAN_TCP_DST_MAC] ||
tb               11400 net/wireless/nl80211.c 	    !tb[NL80211_WOWLAN_TCP_DST_PORT] ||
tb               11401 net/wireless/nl80211.c 	    !tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD] ||
tb               11402 net/wireless/nl80211.c 	    !tb[NL80211_WOWLAN_TCP_DATA_INTERVAL] ||
tb               11403 net/wireless/nl80211.c 	    !tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] ||
tb               11404 net/wireless/nl80211.c 	    !tb[NL80211_WOWLAN_TCP_WAKE_MASK])
tb               11407 net/wireless/nl80211.c 	data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]);
tb               11411 net/wireless/nl80211.c 	if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) >
tb               11413 net/wireless/nl80211.c 	    nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0)
tb               11416 net/wireless/nl80211.c 	wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]);
tb               11420 net/wireless/nl80211.c 	wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]);
tb               11424 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]) {
tb               11425 net/wireless/nl80211.c 		u32 tokln = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
tb               11427 net/wireless/nl80211.c 		tok = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
tb               11444 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) {
tb               11445 net/wireless/nl80211.c 		seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]);
tb               11462 net/wireless/nl80211.c 	cfg->src = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
tb               11463 net/wireless/nl80211.c 	cfg->dst = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
tb               11464 net/wireless/nl80211.c 	memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]),
tb               11466 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TCP_SRC_PORT])
tb               11467 net/wireless/nl80211.c 		port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]);
tb               11492 net/wireless/nl80211.c 	cfg->dst_port = nla_get_u16(tb[NL80211_WOWLAN_TCP_DST_PORT]);
tb               11496 net/wireless/nl80211.c 	       nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]),
tb               11500 net/wireless/nl80211.c 	cfg->data_interval = nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]);
tb               11504 net/wireless/nl80211.c 	       nla_data(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]),
tb               11509 net/wireless/nl80211.c 	       nla_data(tb[NL80211_WOWLAN_TCP_WAKE_MASK]),
tb               11526 net/wireless/nl80211.c 	struct nlattr **tb;
tb               11529 net/wireless/nl80211.c 	tb = kcalloc(NUM_NL80211_ATTR, sizeof(*tb), GFP_KERNEL);
tb               11530 net/wireless/nl80211.c 	if (!tb)
tb               11538 net/wireless/nl80211.c 	err = nla_parse_nested_deprecated(tb, NL80211_ATTR_MAX, attr,
tb               11543 net/wireless/nl80211.c 	trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb,
tb               11550 net/wireless/nl80211.c 	kfree(tb);
tb               11557 net/wireless/nl80211.c 	struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
tb               11574 net/wireless/nl80211.c 	err = nla_parse_nested_deprecated(tb, MAX_NL80211_WOWLAN_TRIG,
tb               11580 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_ANY]) {
tb               11586 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_DISCONNECT]) {
tb               11593 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) {
tb               11600 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED])
tb               11603 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE]) {
tb               11610 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST]) {
tb               11617 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE]) {
tb               11624 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_RFKILL_RELEASE]) {
tb               11631 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
tb               11639 net/wireless/nl80211.c 		nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
tb               11654 net/wireless/nl80211.c 		nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
tb               11705 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) {
tb               11708 net/wireless/nl80211.c 			rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION],
tb               11714 net/wireless/nl80211.c 	if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) {
tb               11717 net/wireless/nl80211.c 			rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT],
tb               11871 net/wireless/nl80211.c 	struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat;
tb               11875 net/wireless/nl80211.c 	err = nla_parse_nested_deprecated(tb, NL80211_ATTR_COALESCE_RULE_MAX,
tb               11880 net/wireless/nl80211.c 	if (tb[NL80211_ATTR_COALESCE_RULE_DELAY])
tb               11882 net/wireless/nl80211.c 			nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]);
tb               11886 net/wireless/nl80211.c 	if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION])
tb               11888 net/wireless/nl80211.c 			nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]);
tb               11890 net/wireless/nl80211.c 	if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN])
tb               11893 net/wireless/nl80211.c 	nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
tb               11907 net/wireless/nl80211.c 	nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
tb               12029 net/wireless/nl80211.c 	struct nlattr *tb[NUM_NL80211_REKEY_DATA];
tb               12036 net/wireless/nl80211.c 	err = nla_parse_nested_deprecated(tb, MAX_NL80211_REKEY_DATA,
tb               12042 net/wireless/nl80211.c 	if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
tb               12043 net/wireless/nl80211.c 	    !tb[NL80211_REKEY_DATA_KCK])
tb               12045 net/wireless/nl80211.c 	if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
tb               12047 net/wireless/nl80211.c 	if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
tb               12049 net/wireless/nl80211.c 	if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
tb               12052 net/wireless/nl80211.c 	rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
tb               12053 net/wireless/nl80211.c 	rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]);
tb               12054 net/wireless/nl80211.c 	rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]);
tb               12335 net/wireless/nl80211.c 	struct nlattr *tb[NUM_NL80211_NAN_FUNC_ATTR], *func_attr;
tb               12350 net/wireless/nl80211.c 	err = nla_parse_nested_deprecated(tb, NL80211_NAN_FUNC_ATTR_MAX,
tb               12363 net/wireless/nl80211.c 	if (!tb[NL80211_NAN_FUNC_TYPE] ||
tb               12364 net/wireless/nl80211.c 	    nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) {
tb               12370 net/wireless/nl80211.c 	func->type = nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]);
tb               12372 net/wireless/nl80211.c 	if (!tb[NL80211_NAN_FUNC_SERVICE_ID]) {
tb               12377 net/wireless/nl80211.c 	memcpy(func->service_id, nla_data(tb[NL80211_NAN_FUNC_SERVICE_ID]),
tb               12381 net/wireless/nl80211.c 		nla_get_flag(tb[NL80211_NAN_FUNC_CLOSE_RANGE]);
tb               12383 net/wireless/nl80211.c 	if (tb[NL80211_NAN_FUNC_SERVICE_INFO]) {
tb               12385 net/wireless/nl80211.c 			nla_len(tb[NL80211_NAN_FUNC_SERVICE_INFO]);
tb               12387 net/wireless/nl80211.c 			kmemdup(nla_data(tb[NL80211_NAN_FUNC_SERVICE_INFO]),
tb               12396 net/wireless/nl80211.c 	if (tb[NL80211_NAN_FUNC_TTL])
tb               12397 net/wireless/nl80211.c 		func->ttl = nla_get_u32(tb[NL80211_NAN_FUNC_TTL]);
tb               12401 net/wireless/nl80211.c 		if (!tb[NL80211_NAN_FUNC_PUBLISH_TYPE]) {
tb               12407 net/wireless/nl80211.c 			nla_get_u8(tb[NL80211_NAN_FUNC_PUBLISH_TYPE]);
tb               12409 net/wireless/nl80211.c 			nla_get_flag(tb[NL80211_NAN_FUNC_PUBLISH_BCAST]);
tb               12419 net/wireless/nl80211.c 			nla_get_flag(tb[NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE]);
tb               12422 net/wireless/nl80211.c 		if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
tb               12423 net/wireless/nl80211.c 		    !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
tb               12424 net/wireless/nl80211.c 		    !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
tb               12430 net/wireless/nl80211.c 			nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_ID]);
tb               12432 net/wireless/nl80211.c 			nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]);
tb               12434 net/wireless/nl80211.c 		       nla_data(tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]),
tb               12446 net/wireless/nl80211.c 	if (tb[NL80211_NAN_FUNC_SRF]) {
tb               12451 net/wireless/nl80211.c 						  tb[NL80211_NAN_FUNC_SRF],
tb               12510 net/wireless/nl80211.c 	if (tb[NL80211_NAN_FUNC_TX_MATCH_FILTER]) {
tb               12511 net/wireless/nl80211.c 		err = handle_nan_filter(tb[NL80211_NAN_FUNC_TX_MATCH_FILTER],
tb               12517 net/wireless/nl80211.c 	if (tb[NL80211_NAN_FUNC_RX_MATCH_FILTER]) {
tb               12518 net/wireless/nl80211.c 		err = handle_nan_filter(tb[NL80211_NAN_FUNC_RX_MATCH_FILTER],
tb                 18 net/wireless/pmsr.c 	struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
tb                 28 net/wireless/pmsr.c 	nla_parse_nested_deprecated(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq,
tb                 31 net/wireless/pmsr.c 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
tb                 32 net/wireless/pmsr.c 		preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);
tb                 42 net/wireless/pmsr.c 		if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
tb                 51 net/wireless/pmsr.c 				    tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
tb                 59 net/wireless/pmsr.c 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
tb                 61 net/wireless/pmsr.c 			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
tb                 63 net/wireless/pmsr.c 	out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
tb                 66 net/wireless/pmsr.c 				    tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
tb                 78 net/wireless/pmsr.c 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
tb                 80 net/wireless/pmsr.c 			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
tb                 85 net/wireless/pmsr.c 				    tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
tb                 91 net/wireless/pmsr.c 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
tb                 93 net/wireless/pmsr.c 			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
tb                 96 net/wireless/pmsr.c 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
tb                 98 net/wireless/pmsr.c 			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);
tb                104 net/wireless/pmsr.c 				    tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
tb                110 net/wireless/pmsr.c 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
tb                112 net/wireless/pmsr.c 			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
tb                114 net/wireless/pmsr.c 	out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
tb                117 net/wireless/pmsr.c 				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
tb                122 net/wireless/pmsr.c 		!!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
tb                125 net/wireless/pmsr.c 				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
tb                137 net/wireless/pmsr.c 	struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
tb                143 net/wireless/pmsr.c 	nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer,
tb                146 net/wireless/pmsr.c 	if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
tb                147 net/wireless/pmsr.c 	    !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
tb                148 net/wireless/pmsr.c 	    !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
tb                154 net/wireless/pmsr.c 	memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);
tb                160 net/wireless/pmsr.c 					  tb[NL80211_PMSR_PEER_ATTR_CHAN],
tb                171 net/wireless/pmsr.c 				    tb[NL80211_PMSR_PEER_ATTR_REQ], NULL,
tb                176 net/wireless/pmsr.c 				    tb[NL80211_PMSR_PEER_ATTR_REQ],
tb                633 net/xfrm/xfrm_interface.c static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
tb                655 net/xfrm/xfrm_interface.c 			struct nlattr *tb[], struct nlattr *data[],
tb                682 net/xfrm/xfrm_interface.c static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
tb                398 security/keys/trusted.c static int osap(struct tpm_buf *tb, struct osapsess *s,
tb                409 security/keys/trusted.c 	INIT_BUF(tb);
tb                410 security/keys/trusted.c 	store16(tb, TPM_TAG_RQU_COMMAND);
tb                411 security/keys/trusted.c 	store32(tb, TPM_OSAP_SIZE);
tb                412 security/keys/trusted.c 	store32(tb, TPM_ORD_OSAP);
tb                413 security/keys/trusted.c 	store16(tb, type);
tb                414 security/keys/trusted.c 	store32(tb, handle);
tb                415 security/keys/trusted.c 	storebytes(tb, ononce, TPM_NONCE_SIZE);
tb                417 security/keys/trusted.c 	ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                421 security/keys/trusted.c 	s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);
tb                422 security/keys/trusted.c 	memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]),
tb                424 security/keys/trusted.c 	memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) +
tb                433 security/keys/trusted.c int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
tb                440 security/keys/trusted.c 	INIT_BUF(tb);
tb                441 security/keys/trusted.c 	store16(tb, TPM_TAG_RQU_COMMAND);
tb                442 security/keys/trusted.c 	store32(tb, TPM_OIAP_SIZE);
tb                443 security/keys/trusted.c 	store32(tb, TPM_ORD_OIAP);
tb                444 security/keys/trusted.c 	ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                448 security/keys/trusted.c 	*handle = LOAD32(tb->data, TPM_DATA_OFFSET);
tb                449 security/keys/trusted.c 	memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)],
tb                467 security/keys/trusted.c static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
tb                492 security/keys/trusted.c 	ret = osap(tb, &sess, keyauth, keytype, keyhandle);
tb                538 security/keys/trusted.c 	INIT_BUF(tb);
tb                539 security/keys/trusted.c 	store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
tb                540 security/keys/trusted.c 	store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
tb                541 security/keys/trusted.c 	store32(tb, TPM_ORD_SEAL);
tb                542 security/keys/trusted.c 	store32(tb, keyhandle);
tb                543 security/keys/trusted.c 	storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
tb                544 security/keys/trusted.c 	store32(tb, pcrinfosize);
tb                545 security/keys/trusted.c 	storebytes(tb, pcrinfo, pcrinfosize);
tb                546 security/keys/trusted.c 	store32(tb, datalen);
tb                547 security/keys/trusted.c 	storebytes(tb, data, datalen);
tb                548 security/keys/trusted.c 	store32(tb, sess.handle);
tb                549 security/keys/trusted.c 	storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
tb                550 security/keys/trusted.c 	store8(tb, cont);
tb                551 security/keys/trusted.c 	storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
tb                553 security/keys/trusted.c 	ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                558 security/keys/trusted.c 	sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
tb                559 security/keys/trusted.c 	encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) +
tb                565 security/keys/trusted.c 	ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret,
tb                571 security/keys/trusted.c 		memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
tb                582 security/keys/trusted.c static int tpm_unseal(struct tpm_buf *tb,
tb                601 security/keys/trusted.c 	ret = oiap(tb, &authhandle1, enonce1);
tb                606 security/keys/trusted.c 	ret = oiap(tb, &authhandle2, enonce2);
tb                631 security/keys/trusted.c 	INIT_BUF(tb);
tb                632 security/keys/trusted.c 	store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
tb                633 security/keys/trusted.c 	store32(tb, TPM_UNSEAL_SIZE + bloblen);
tb                634 security/keys/trusted.c 	store32(tb, TPM_ORD_UNSEAL);
tb                635 security/keys/trusted.c 	store32(tb, keyhandle);
tb                636 security/keys/trusted.c 	storebytes(tb, blob, bloblen);
tb                637 security/keys/trusted.c 	store32(tb, authhandle1);
tb                638 security/keys/trusted.c 	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
tb                639 security/keys/trusted.c 	store8(tb, cont);
tb                640 security/keys/trusted.c 	storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
tb                641 security/keys/trusted.c 	store32(tb, authhandle2);
tb                642 security/keys/trusted.c 	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
tb                643 security/keys/trusted.c 	store8(tb, cont);
tb                644 security/keys/trusted.c 	storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
tb                646 security/keys/trusted.c 	ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
tb                652 security/keys/trusted.c 	*datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
tb                653 security/keys/trusted.c 	ret = TSS_checkhmac2(tb->data, ordinal, nonceodd,
tb                663 security/keys/trusted.c 	memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
tb                673 security/keys/trusted.c 	struct tpm_buf *tb;
tb                676 security/keys/trusted.c 	tb = kzalloc(sizeof *tb, GFP_KERNEL);
tb                677 security/keys/trusted.c 	if (!tb)
tb                683 security/keys/trusted.c 	ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
tb                689 security/keys/trusted.c 	kzfree(tb);
tb                699 security/keys/trusted.c 	struct tpm_buf *tb;
tb                702 security/keys/trusted.c 	tb = kzalloc(sizeof *tb, GFP_KERNEL);
tb                703 security/keys/trusted.c 	if (!tb)
tb                706 security/keys/trusted.c 	ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
tb                714 security/keys/trusted.c 	kzfree(tb);
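
The trusted.c entries build TPM 1.2 requests by appending big-endian fields to a flat buffer through store8/store16/store32 and storebytes. A self-contained sketch of that serialization style; the buffer type is hypothetical, and the tag/ordinal constants follow the TPM 1.2 convention but are illustrative here:

```c
/* Big-endian append-only request builder, in the store16/store32/
 * storebytes style of the trusted.c entries above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct buf { unsigned char data[256]; size_t len; };

static void store8(struct buf *b, uint8_t v)  { b->data[b->len++] = v; }
static void store16(struct buf *b, uint16_t v)
{
	store8(b, v >> 8); store8(b, v & 0xff);   /* network byte order */
}
static void store32(struct buf *b, uint32_t v)
{
	store16(b, v >> 16); store16(b, v & 0xffff);
}
static void storebytes(struct buf *b, const void *p, size_t n)
{
	memcpy(b->data + b->len, p, n); b->len += n;
}

int main(void)
{
	struct buf b = { .len = 0 };
	unsigned char nonce[20] = { 0 };

	store16(&b, 0x00c1);          /* tag, e.g. TPM_TAG_RQU_COMMAND */
	store32(&b, 30);              /* total request size: 2+4+4+20 */
	store32(&b, 0x0000000a);      /* command ordinal */
	storebytes(&b, nonce, sizeof(nonce));

	printf("built %zu bytes\n", b.len);
	return 0;
}
```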
tb                236 tools/arch/powerpc/include/uapi/asm/kvm.h 			__u64 tb;
tb                225 tools/bpf/bpftool/main.h int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
tb                226 tools/bpf/bpftool/main.h int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
tb                 87 tools/bpf/bpftool/net.c static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
tb                108 tools/bpf/bpftool/net.c 		 tb[IFLA_IFNAME]
tb                109 tools/bpf/bpftool/net.c 			 ? libbpf_nla_getattr_str(tb[IFLA_IFNAME])
tb                113 tools/bpf/bpftool/net.c 	return do_xdp_dump(ifinfo, tb);
tb                116 tools/bpf/bpftool/net.c static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
tb                123 tools/bpf/bpftool/net.c 		if (tb[TCA_KIND] &&
tb                124 tools/bpf/bpftool/net.c 		    strcmp(libbpf_nla_data(tb[TCA_KIND]), "clsact") == 0)
tb                142 tools/bpf/bpftool/net.c 		 tb[TCA_KIND]
tb                143 tools/bpf/bpftool/net.c 			 ? libbpf_nla_getattr_str(tb[TCA_KIND])
tb                150 tools/bpf/bpftool/net.c static int dump_filter_nlmsg(void *cookie, void *msg, struct nlattr **tb)
tb                154 tools/bpf/bpftool/net.c 	return do_filter_dump((struct tcmsg *)msg, tb, filter_info->kind,
tb                 14 tools/bpf/bpftool/netlink_dumper.c static void xdp_dump_prog_id(struct nlattr **tb, int attr,
tb                 18 tools/bpf/bpftool/netlink_dumper.c 	if (!tb[attr])
tb                 24 tools/bpf/bpftool/netlink_dumper.c 	NET_DUMP_UINT("id", " id %u", libbpf_nla_getattr_u32(tb[attr]))
tb                 32 tools/bpf/bpftool/netlink_dumper.c 	struct nlattr *tb[IFLA_XDP_MAX + 1];
tb                 35 tools/bpf/bpftool/netlink_dumper.c 	if (libbpf_nla_parse_nested(tb, IFLA_XDP_MAX, attr, NULL) < 0)
tb                 38 tools/bpf/bpftool/netlink_dumper.c 	if (!tb[IFLA_XDP_ATTACHED])
tb                 41 tools/bpf/bpftool/netlink_dumper.c 	mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]);
tb                 55 tools/bpf/bpftool/netlink_dumper.c 		xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true);
tb                 56 tools/bpf/bpftool/netlink_dumper.c 		xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true);
tb                 57 tools/bpf/bpftool/netlink_dumper.c 		xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload", true);
tb                 61 tools/bpf/bpftool/netlink_dumper.c 		xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "driver", false);
tb                 63 tools/bpf/bpftool/netlink_dumper.c 		xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "generic", false);
tb                 65 tools/bpf/bpftool/netlink_dumper.c 		xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "offload", false);
tb                 72 tools/bpf/bpftool/netlink_dumper.c int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb)
tb                 74 tools/bpf/bpftool/netlink_dumper.c 	if (!tb[IFLA_XDP])
tb                 77 tools/bpf/bpftool/netlink_dumper.c 	return do_xdp_dump_one(tb[IFLA_XDP], ifinfo->ifi_index,
tb                 78 tools/bpf/bpftool/netlink_dumper.c 			       libbpf_nla_getattr_str(tb[IFLA_IFNAME]));
tb                 83 tools/bpf/bpftool/netlink_dumper.c 	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
tb                 85 tools/bpf/bpftool/netlink_dumper.c 	if (libbpf_nla_parse_nested(tb, TCA_ACT_BPF_MAX, attr, NULL) < 0)
tb                 88 tools/bpf/bpftool/netlink_dumper.c 	if (!tb[TCA_ACT_BPF_PARMS])
tb                 92 tools/bpf/bpftool/netlink_dumper.c 	if (tb[TCA_ACT_BPF_NAME])
tb                 94 tools/bpf/bpftool/netlink_dumper.c 			     libbpf_nla_getattr_str(tb[TCA_ACT_BPF_NAME]));
tb                 95 tools/bpf/bpftool/netlink_dumper.c 	if (tb[TCA_ACT_BPF_ID])
tb                 97 tools/bpf/bpftool/netlink_dumper.c 			      libbpf_nla_getattr_u32(tb[TCA_ACT_BPF_ID]));
tb                104 tools/bpf/bpftool/netlink_dumper.c 	struct nlattr *tb[TCA_ACT_MAX + 1];
tb                109 tools/bpf/bpftool/netlink_dumper.c 	if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX, attr, NULL) < 0)
tb                112 tools/bpf/bpftool/netlink_dumper.c 	if (tb[TCA_ACT_KIND] &&
tb                113 tools/bpf/bpftool/netlink_dumper.c 	    strcmp(libbpf_nla_data(tb[TCA_ACT_KIND]), "bpf") == 0)
tb                114 tools/bpf/bpftool/netlink_dumper.c 		return do_bpf_dump_one_act(tb[TCA_ACT_OPTIONS]);
tb                121 tools/bpf/bpftool/netlink_dumper.c 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
tb                124 tools/bpf/bpftool/netlink_dumper.c 	if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX_PRIO, attr, NULL) < 0)
tb                129 tools/bpf/bpftool/netlink_dumper.c 		ret = do_dump_one_act(tb[act]);
tb                140 tools/bpf/bpftool/netlink_dumper.c 	struct nlattr *tb[TCA_BPF_MAX + 1];
tb                143 tools/bpf/bpftool/netlink_dumper.c 	if (libbpf_nla_parse_nested(tb, TCA_BPF_MAX, attr, NULL) < 0)
tb                146 tools/bpf/bpftool/netlink_dumper.c 	if (tb[TCA_BPF_NAME])
tb                148 tools/bpf/bpftool/netlink_dumper.c 			     libbpf_nla_getattr_str(tb[TCA_BPF_NAME]));
tb                149 tools/bpf/bpftool/netlink_dumper.c 	if (tb[TCA_BPF_ID])
tb                151 tools/bpf/bpftool/netlink_dumper.c 			      libbpf_nla_getattr_u32(tb[TCA_BPF_ID]));
tb                152 tools/bpf/bpftool/netlink_dumper.c 	if (tb[TCA_BPF_ACT]) {
tb                153 tools/bpf/bpftool/netlink_dumper.c 		ret = do_bpf_act_dump(tb[TCA_BPF_ACT]);
tb                161 tools/bpf/bpftool/netlink_dumper.c int do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind,
tb                166 tools/bpf/bpftool/netlink_dumper.c 	if (tb[TCA_OPTIONS] &&
tb                167 tools/bpf/bpftool/netlink_dumper.c 	    strcmp(libbpf_nla_data(tb[TCA_KIND]), "bpf") == 0) {
tb                173 tools/bpf/bpftool/netlink_dumper.c 		ret = do_bpf_filter_dump(tb[TCA_OPTIONS]);
tb                193 tools/lib/bpf/netlink.c 	struct nlattr *tb[IFLA_MAX + 1], *attr;
tb                199 tools/lib/bpf/netlink.c 	if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
tb                202 tools/lib/bpf/netlink.c 	return dump_link_nlmsg(cookie, ifi, tb);
tb                219 tools/lib/bpf/netlink.c static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
tb                230 tools/lib/bpf/netlink.c 	if (!tb[IFLA_XDP])
tb                233 tools/lib/bpf/netlink.c 	ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL);
tb                310 tools/lib/bpf/netlink.c 	struct nlattr *tb[TCA_MAX + 1], *attr;
tb                316 tools/lib/bpf/netlink.c 	if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
tb                319 tools/lib/bpf/netlink.c 	return dump_class_nlmsg(cookie, t, tb);
tb                349 tools/lib/bpf/netlink.c 	struct nlattr *tb[TCA_MAX + 1], *attr;
tb                355 tools/lib/bpf/netlink.c 	if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
tb                358 tools/lib/bpf/netlink.c 	return dump_qdisc_nlmsg(cookie, t, tb);
tb                388 tools/lib/bpf/netlink.c 	struct nlattr *tb[TCA_MAX + 1], *attr;
tb                394 tools/lib/bpf/netlink.c 	if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
tb                397 tools/lib/bpf/netlink.c 	return dump_filter_nlmsg(cookie, t, tb);
tb                103 tools/lib/bpf/nlattr.c int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
tb                109 tools/lib/bpf/nlattr.c 	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
tb                123 tools/lib/bpf/nlattr.c 		if (tb[type])
tb                127 tools/lib/bpf/nlattr.c 		tb[type] = nla;
tb                148 tools/lib/bpf/nlattr.c int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
tb                152 tools/lib/bpf/nlattr.c 	return libbpf_nla_parse(tb, maxtype, libbpf_nla_data(nla),
tb                163 tools/lib/bpf/nlattr.c 	struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr;
tb                182 tools/lib/bpf/nlattr.c 	if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
tb                189 tools/lib/bpf/nlattr.c 	if (tb[NLMSGERR_ATTR_MSG])
tb                190 tools/lib/bpf/nlattr.c 		errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]);
tb                 98 tools/lib/bpf/nlattr.h int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
tb                100 tools/lib/bpf/nlattr.h int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
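
libbpf_nla_parse() (nlattr.c:103 above) first zeroes tb[] and then walks the attribute stream, keeping a pointer to the last attribute seen of each type. A self-contained sketch of that loop over a flat {len, type, payload} stream, using a simplified header in place of `struct nlattr`:

```c
/* Parse a flat TLV stream into a per-type pointer table, in the style
 * of libbpf_nla_parse() above.  struct attr is a simplified stand-in. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attr { uint16_t len; uint16_t type; /* payload follows */ };

#define ATTR_ALIGN(n) (((n) + 3u) & ~3u)     /* NLA_ALIGN-style padding */

static void parse(struct attr *tb[], int maxtype, unsigned char *p, size_t rem)
{
	memset(tb, 0, sizeof(*tb) * (maxtype + 1));

	while (rem >= sizeof(struct attr)) {
		struct attr *a = (struct attr *)p;
		size_t step;

		if (a->len < sizeof(*a) || a->len > rem)
			break;                /* malformed stream: stop */
		if (a->type <= maxtype)
			tb[a->type] = a;      /* duplicates: last one wins */
		step = ATTR_ALIGN(a->len);
		if (step > rem)
			step = rem;
		p += step;
		rem -= step;
	}
}

int main(void)
{
	uint32_t backing[4] = { 0 };          /* keeps the stream aligned */
	unsigned char *stream = (unsigned char *)backing;
	struct attr a = { .len = 8, .type = 2 };
	uint32_t val = 42;
	struct attr *tb[3 + 1];

	memcpy(stream, &a, sizeof(a));
	memcpy(stream + sizeof(a), &val, sizeof(val));

	parse(tb, 3, stream, 8);
	if (tb[2])
		printf("type 2 present, len %u\n", tb[2]->len);
	return 0;
}
```

Zeroing the table up front is what makes every `if (tb[TYPE])` check throughout the entries above safe: absence is always a NULL pointer, never stale data.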
tb                 33 tools/testing/selftests/gpio/gpio-mockup-chardev.c 	struct libmnt_table *tb;
tb                 46 tools/testing/selftests/gpio/gpio-mockup-chardev.c 	if (mnt_context_get_mtab(cxt, &tb))
tb                 49 tools/testing/selftests/gpio/gpio-mockup-chardev.c 	while (mnt_table_next_fs(tb, itr, &fs) == 0) {
tb                 17 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_buffer *tb;
tb                 19 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (size < sizeof(*tb)) {
tb                 24 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	tb = mmap(NULL, size, PROT_READ | PROT_WRITE,
tb                 26 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (tb == MAP_FAILED) {
tb                 31 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	tb->size = size;
tb                 32 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	tb->tail = tb->data;
tb                 33 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	tb->overflow = false;
tb                 35 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	return tb;
tb                 38 tools/testing/selftests/powerpc/pmu/ebb/trace.c static bool trace_check_bounds(struct trace_buffer *tb, void *p)
tb                 40 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	return p < ((void *)tb + tb->size);
tb                 43 tools/testing/selftests/powerpc/pmu/ebb/trace.c static bool trace_check_alloc(struct trace_buffer *tb, void *p)
tb                 51 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (tb->overflow)
tb                 54 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!trace_check_bounds(tb, p)) {
tb                 55 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		tb->overflow = true;
tb                 62 tools/testing/selftests/powerpc/pmu/ebb/trace.c static void *trace_alloc(struct trace_buffer *tb, int bytes)
tb                 66 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = tb->tail;
tb                 67 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	newtail = tb->tail + bytes;
tb                 68 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!trace_check_alloc(tb, newtail))
tb                 71 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	tb->tail = newtail;
tb                 76 tools/testing/selftests/powerpc/pmu/ebb/trace.c static struct trace_entry *trace_alloc_entry(struct trace_buffer *tb, int payload_size)
tb                 80 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc(tb, sizeof(*e) + payload_size);
tb                 87 tools/testing/selftests/powerpc/pmu/ebb/trace.c int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value)
tb                 92 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, sizeof(reg) + sizeof(value));
tb                104 tools/testing/selftests/powerpc/pmu/ebb/trace.c int trace_log_counter(struct trace_buffer *tb, u64 value)
tb                109 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, sizeof(value));
tb                120 tools/testing/selftests/powerpc/pmu/ebb/trace.c int trace_log_string(struct trace_buffer *tb, char *str)
tb                129 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, len + 1);
tb                142 tools/testing/selftests/powerpc/pmu/ebb/trace.c int trace_log_indent(struct trace_buffer *tb)
tb                146 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, 0);
tb                155 tools/testing/selftests/powerpc/pmu/ebb/trace.c int trace_log_outdent(struct trace_buffer *tb)
tb                159 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, 0);
tb                269 tools/testing/selftests/powerpc/pmu/ebb/trace.c void trace_buffer_print(struct trace_buffer *tb)
tb                276 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	printf("  address  %p \n", tb);
tb                277 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	printf("  tail     %p\n", tb->tail);
tb                278 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	printf("  size     %llu\n", tb->size);
tb                279 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	printf("  overflow %s\n", tb->overflow ? "TRUE" : "false");
tb                282 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = tb->data;
tb                287 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	while (trace_check_bounds(tb, p) && p < tb->tail) {
tb                297 tools/testing/selftests/powerpc/pmu/ebb/trace.c void trace_print_location(struct trace_buffer *tb)
tb                299 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	printf("Trace buffer 0x%llx bytes @ %p\n", tb->size, tb);
tb                 33 tools/testing/selftests/powerpc/pmu/ebb/trace.h int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value);
tb                 34 tools/testing/selftests/powerpc/pmu/ebb/trace.h int trace_log_counter(struct trace_buffer *tb, u64 value);
tb                 35 tools/testing/selftests/powerpc/pmu/ebb/trace.h int trace_log_string(struct trace_buffer *tb, char *str);
tb                 36 tools/testing/selftests/powerpc/pmu/ebb/trace.h int trace_log_indent(struct trace_buffer *tb);
tb                 37 tools/testing/selftests/powerpc/pmu/ebb/trace.h int trace_log_outdent(struct trace_buffer *tb);
tb                 38 tools/testing/selftests/powerpc/pmu/ebb/trace.h void trace_buffer_print(struct trace_buffer *tb);
tb                 39 tools/testing/selftests/powerpc/pmu/ebb/trace.h void trace_print_location(struct trace_buffer *tb);
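
The ebb trace buffer in the selftest entries above is a bump allocator over a single mapped region: trace_alloc() advances a tail pointer, and the bounds check latches an overflow flag once an allocation would run past the end, so later writes are cleanly refused. A malloc()-backed sketch of the same structure (the selftest maps the region with mmap() instead):

```c
/* Bump-allocated trace buffer with a latching overflow flag, modeled
 * on the ebb trace.c entries above. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct trace_buffer {
	size_t size;              /* total bytes, including this header */
	bool   overflow;
	unsigned char *tail;
	unsigned char  data[];
};

static struct trace_buffer *trace_buffer_allocate(size_t size)
{
	struct trace_buffer *tb;

	if (size < sizeof(*tb))
		return NULL;

	tb = calloc(1, size);     /* mmap() in the original selftest */
	if (!tb)
		return NULL;
	tb->size = size;
	tb->tail = tb->data;
	return tb;
}

static void *trace_alloc(struct trace_buffer *tb, size_t bytes)
{
	unsigned char *p = tb->tail, *newtail = tb->tail + bytes;

	if (tb->overflow || newtail > (unsigned char *)tb + tb->size) {
		tb->overflow = true;   /* latch, like the original */
		return NULL;
	}
	tb->tail = newtail;
	return p;
}

int main(void)
{
	struct trace_buffer *tb = trace_buffer_allocate(64);

	if (!tb)
		return 1;
	while (trace_alloc(tb, 16))
		;                      /* fill until the latch trips */
	printf("overflow=%s\n", tb->overflow ? "true" : "false");
	free(tb);
	return 0;
}
```

Latching the flag rather than failing one allocation means a buffer that overflowed mid-run reports it once at print time instead of silently dropping an arbitrary subset of entries.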