tbl 116 arch/arc/kernel/setup.c const struct id_to_str *tbl;
tbl 126 arch/arc/kernel/setup.c for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
tbl 127 arch/arc/kernel/setup.c if (cpu->core.family == tbl->id) {
tbl 128 arch/arc/kernel/setup.c cpu->release = tbl->str;
tbl 135 arch/arc/kernel/setup.c else if (tbl->str)
tbl 159 arch/arc/kernel/setup.c for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
tbl 160 arch/arc/kernel/setup.c if (uarch.maj == tbl->id) {
tbl 161 arch/arc/kernel/setup.c cpu->release = tbl->str;
tbl 112 arch/mips/include/asm/txx9/tx3927.h volatile unsigned long tbl; /* +d0 */
tbl 63 arch/parisc/include/asm/pdc.h int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl);
tbl 75 arch/parisc/include/asm/pdc.h struct pdc_memory_table *tbl, unsigned long entries);
tbl 935 arch/parisc/kernel/firmware.c int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)
tbl 940 arch/parisc/kernel/firmware.c BUG_ON((unsigned long)tbl & 0x7);
tbl 945 arch/parisc/kernel/firmware.c __pa(pdc_result), hpa, __pa(tbl));
tbl 1085 arch/parisc/kernel/firmware.c struct pdc_memory_table *tbl, unsigned long entries)
tbl 1094 arch/parisc/kernel/firmware.c memcpy(tbl, pdc_result2, entries * sizeof(*tbl));
tbl 40 arch/powerpc/include/asm/iommu.h int (*set)(struct iommu_table *tbl,
tbl 51 arch/powerpc/include/asm/iommu.h int (*xchg_no_kill)(struct iommu_table *tbl,
tbl 57 arch/powerpc/include/asm/iommu.h void (*tce_kill)(struct iommu_table *tbl,
tbl 62 arch/powerpc/include/asm/iommu.h __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
tbl 64 arch/powerpc/include/asm/iommu.h void (*clear)(struct iommu_table *tbl,
tbl 67 arch/powerpc/include/asm/iommu.h unsigned long (*get)(struct iommu_table *tbl, long index);
tbl 68 arch/powerpc/include/asm/iommu.h void (*flush)(struct iommu_table *tbl);
tbl 69 arch/powerpc/include/asm/iommu.h void (*free)(struct iommu_table *tbl);
tbl 119 arch/powerpc/include/asm/iommu.h #define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
tbl 120 arch/powerpc/include/asm/iommu.h ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
tbl 121 arch/powerpc/include/asm/iommu.h #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
tbl 122 arch/powerpc/include/asm/iommu.h ((tbl)->it_ops->useraddrptr((tbl), (entry), true))
tbl 126 arch/powerpc/include/asm/iommu.h int get_iommu_order(unsigned long size, struct iommu_table *tbl)
tbl 128 arch/powerpc/include/asm/iommu.h return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
tbl 149 arch/powerpc/include/asm/iommu.h extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
tbl 150 arch/powerpc/include/asm/iommu.h extern int iommu_tce_table_put(struct iommu_table *tbl);
tbl 155 arch/powerpc/include/asm/iommu.h extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
tbl 210 arch/powerpc/include/asm/iommu.h extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
tbl 214 arch/powerpc/include/asm/iommu.h struct iommu_table *tbl,
tbl 217 arch/powerpc/include/asm/iommu.h extern void iommu_tce_kill(struct iommu_table *tbl,
tbl 252 arch/powerpc/include/asm/iommu.h extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
tbl 257 arch/powerpc/include/asm/iommu.h extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
tbl 263 arch/powerpc/include/asm/iommu.h extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
tbl 266 arch/powerpc/include/asm/iommu.h extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
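Note on the get_iommu_order() hits above: the helper in arch/powerpc/include/asm/iommu.h turns an allocation size into a power-of-two allocation order for the table's IOMMU page size. A minimal standalone rendering of that arithmetic follows; ilog2_ul() is a stand-in for the kernel's __ilog2(), and the example values are invented.

/* Standalone sketch of the get_iommu_order() arithmetic. */
#include <stdio.h>

static int ilog2_ul(unsigned long v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

static int get_order_sketch(unsigned long size, unsigned int it_page_shift)
{
	/* Round up to IOMMU pages, then take the base-2 log of the count. */
	return ilog2_ul((size - 1) >> it_page_shift) + 1;
}

int main(void)
{
	/* With 4K IOMMU pages (shift 12), 16K needs order 2, i.e. 4 pages. */
	printf("order=%d\n", get_order_sketch(16384, 12));
	return 0;
}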
tbl 268 arch/powerpc/include/asm/iommu.h extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
tbl 273 arch/powerpc/include/asm/iommu.h extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
tbl 302 arch/powerpc/include/asm/iommu.h #define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
tbl 303 arch/powerpc/include/asm/iommu.h (iommu_tce_check_ioba((tbl)->it_page_shift, \
tbl 304 arch/powerpc/include/asm/iommu.h (tbl)->it_offset, (tbl)->it_size, \
tbl 306 arch/powerpc/include/asm/iommu.h #define iommu_tce_put_param_check(tbl, ioba, gpa) \
tbl 307 arch/powerpc/include/asm/iommu.h (iommu_tce_check_ioba((tbl)->it_page_shift, \
tbl 308 arch/powerpc/include/asm/iommu.h (tbl)->it_offset, (tbl)->it_size, \
tbl 310 arch/powerpc/include/asm/iommu.h iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
tbl 312 arch/powerpc/include/asm/iommu.h extern void iommu_flush_tce(struct iommu_table *tbl);
tbl 313 arch/powerpc/include/asm/iommu.h extern int iommu_take_ownership(struct iommu_table *tbl);
tbl 314 arch/powerpc/include/asm/iommu.h extern void iommu_release_ownership(struct iommu_table *tbl);
tbl 176 arch/powerpc/include/asm/kvm_host.h u32 tbu, tbl;
tbl 189 arch/powerpc/include/asm/kvm_host.h struct iommu_table *tbl;
tbl 56 arch/powerpc/include/asm/time.h unsigned long tbl;
tbl 57 arch/powerpc/include/asm/time.h asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
tbl 58 arch/powerpc/include/asm/time.h return tbl;
tbl 769 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
tbl 771 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl);
tbl 123 arch/powerpc/kernel/dma-iommu.c struct iommu_table *tbl = get_iommu_table_base(dev);
tbl 131 arch/powerpc/kernel/dma-iommu.c if (!tbl) {
tbl 136 arch/powerpc/kernel/dma-iommu.c if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
tbl 139 arch/powerpc/kernel/dma-iommu.c mask, tbl->it_offset << tbl->it_page_shift);
tbl 150 arch/powerpc/kernel/dma-iommu.c struct iommu_table *tbl = get_iommu_table_base(dev);
tbl 153 arch/powerpc/kernel/dma-iommu.c if (!tbl)
tbl 163 arch/powerpc/kernel/dma-iommu.c mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
tbl 164 arch/powerpc/kernel/iommu.c struct iommu_table *tbl,
tbl 198 arch/powerpc/kernel/iommu.c pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
tbl 201 arch/powerpc/kernel/iommu.c pool = &(tbl->large_pool);
tbl 203 arch/powerpc/kernel/iommu.c pool = &(tbl->pools[pool_nr]);
tbl 223 arch/powerpc/kernel/iommu.c if (limit + tbl->it_offset > mask) {
tbl 224 arch/powerpc/kernel/iommu.c limit = mask - tbl->it_offset + 1;
tbl 231 arch/powerpc/kernel/iommu.c pool = &(tbl->pools[0]);
tbl 241 arch/powerpc/kernel/iommu.c 1 << tbl->it_page_shift);
tbl 243 arch/powerpc/kernel/iommu.c boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
tbl 246 arch/powerpc/kernel/iommu.c n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
tbl 247 arch/powerpc/kernel/iommu.c boundary_size >> tbl->it_page_shift, align_mask);
tbl 255 arch/powerpc/kernel/iommu.c } else if (pass <= tbl->nr_pools) {
tbl 258 arch/powerpc/kernel/iommu.c pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
tbl 259 arch/powerpc/kernel/iommu.c pool = &tbl->pools[pool_nr];
tbl 280 arch/powerpc/kernel/iommu.c pool->hint = (end + tbl->it_blocksize - 1) &
tbl 281 arch/powerpc/kernel/iommu.c ~(tbl->it_blocksize - 1);
tbl 293 arch/powerpc/kernel/iommu.c static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
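The iommu_range_alloc() hits above show the powerpc allocator hashing to one of nr_pools sub-bitmaps and falling back to the next pool when a search fails. The sketch below demonstrates that multi-pool idea only; it is simplified (no locks, hints or boundary checks) and every name in it is illustrative rather than the kernel's.

/* Simplified sketch of multi-pool bitmap allocation. */
#include <stdbool.h>
#include <stdio.h>

#define TBL_ENTRIES 64
#define NR_POOLS    4
#define POOL_SIZE   (TBL_ENTRIES / NR_POOLS)

static bool bitmap_test_set(unsigned char *map, unsigned start, unsigned n)
{
	for (unsigned i = start; i < start + n; i++)
		if (map[i])
			return false;
	for (unsigned i = start; i < start + n; i++)
		map[i] = 1;
	return true;
}

static long range_alloc(unsigned char *map, unsigned start_pool, unsigned npages)
{
	/* Try each pool in turn, starting from the (hash-chosen) pool. */
	for (unsigned pass = 0; pass < NR_POOLS; pass++) {
		unsigned pool = (start_pool + pass) % NR_POOLS;
		unsigned base = pool * POOL_SIZE;

		for (unsigned off = 0; off + npages <= POOL_SIZE; off++)
			if (bitmap_test_set(map, base + off, npages))
				return base + off;
	}
	return -1; /* table full */
}

int main(void)
{
	unsigned char map[TBL_ENTRIES] = { 0 };

	printf("entry %ld\n", range_alloc(map, 2, 5));
	printf("entry %ld\n", range_alloc(map, 2, 5));
	return 0;
}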
tbl 303 arch/powerpc/kernel/iommu.c entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
tbl 308 arch/powerpc/kernel/iommu.c entry += tbl->it_offset; /* Offset into real TCE table */
tbl 309 arch/powerpc/kernel/iommu.c ret = entry << tbl->it_page_shift; /* Set the return dma address */
tbl 312 arch/powerpc/kernel/iommu.c build_fail = tbl->it_ops->set(tbl, entry, npages,
tbl 314 arch/powerpc/kernel/iommu.c IOMMU_PAGE_MASK(tbl), direction, attrs);
tbl 322 arch/powerpc/kernel/iommu.c __iommu_free(tbl, ret, npages);
tbl 327 arch/powerpc/kernel/iommu.c if (tbl->it_ops->flush)
tbl 328 arch/powerpc/kernel/iommu.c tbl->it_ops->flush(tbl);
tbl 336 arch/powerpc/kernel/iommu.c static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
tbl 341 arch/powerpc/kernel/iommu.c entry = dma_addr >> tbl->it_page_shift;
tbl 342 arch/powerpc/kernel/iommu.c free_entry = entry - tbl->it_offset;
tbl 344 arch/powerpc/kernel/iommu.c if (((free_entry + npages) > tbl->it_size) ||
tbl 345 arch/powerpc/kernel/iommu.c (entry < tbl->it_offset)) {
tbl 350 arch/powerpc/kernel/iommu.c printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
tbl 351 arch/powerpc/kernel/iommu.c printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
tbl 352 arch/powerpc/kernel/iommu.c printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
tbl 353 arch/powerpc/kernel/iommu.c printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
tbl 354 arch/powerpc/kernel/iommu.c printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
tbl 364 arch/powerpc/kernel/iommu.c static struct iommu_pool *get_pool(struct iommu_table *tbl,
tbl 368 arch/powerpc/kernel/iommu.c unsigned long largepool_start = tbl->large_pool.start;
tbl 372 arch/powerpc/kernel/iommu.c p = &tbl->large_pool;
tbl 374 arch/powerpc/kernel/iommu.c unsigned int pool_nr = entry / tbl->poolsize;
tbl 376 arch/powerpc/kernel/iommu.c BUG_ON(pool_nr > tbl->nr_pools);
tbl 377 arch/powerpc/kernel/iommu.c p = &tbl->pools[pool_nr];
tbl 383 arch/powerpc/kernel/iommu.c static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
tbl 390 arch/powerpc/kernel/iommu.c entry = dma_addr >> tbl->it_page_shift;
tbl 391 arch/powerpc/kernel/iommu.c free_entry = entry - tbl->it_offset;
tbl 393 arch/powerpc/kernel/iommu.c pool = get_pool(tbl, free_entry);
tbl 395 arch/powerpc/kernel/iommu.c if (!iommu_free_check(tbl, dma_addr, npages))
tbl 398 arch/powerpc/kernel/iommu.c tbl->it_ops->clear(tbl, entry, npages);
tbl 401 arch/powerpc/kernel/iommu.c bitmap_clear(tbl->it_map, free_entry, npages);
tbl 405 arch/powerpc/kernel/iommu.c static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
tbl 408 arch/powerpc/kernel/iommu.c __iommu_free(tbl, dma_addr, npages);
tbl 414 arch/powerpc/kernel/iommu.c if (tbl->it_ops->flush)
tbl 415 arch/powerpc/kernel/iommu.c tbl->it_ops->flush(tbl);
tbl 418 arch/powerpc/kernel/iommu.c int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
tbl 432 arch/powerpc/kernel/iommu.c if ((nelems == 0) || !tbl)
tbl 457 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
tbl 459 arch/powerpc/kernel/iommu.c if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
tbl 461 arch/powerpc/kernel/iommu.c align = PAGE_SHIFT - tbl->it_page_shift;
tbl 462 arch/powerpc/kernel/iommu.c entry = iommu_range_alloc(dev, tbl, npages, &handle,
tbl 463 arch/powerpc/kernel/iommu.c mask >> tbl->it_page_shift, align);
tbl 472 arch/powerpc/kernel/iommu.c "vaddr %lx npages %lu\n", tbl, vaddr,
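iommu_alloc() above converts an allocated entry into a DMA address (entry += it_offset; ret = entry << it_page_shift) and iommu_free_check() reverses and validates it. A self-contained sketch of just that arithmetic, with invented geometry values:

/* Sketch of the entry <-> DMA address math; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct tbl_geom {
	unsigned long it_offset;     /* first entry number of the window */
	unsigned long it_size;       /* number of entries */
	unsigned int  it_page_shift; /* log2 of the IOMMU page size */
};

static unsigned long entry_to_dma(const struct tbl_geom *t, unsigned long entry)
{
	return (entry + t->it_offset) << t->it_page_shift;
}

static bool free_check(const struct tbl_geom *t, unsigned long dma, unsigned long npages)
{
	unsigned long entry = dma >> t->it_page_shift;
	unsigned long free_entry = entry - t->it_offset;

	return entry >= t->it_offset && free_entry + npages <= t->it_size;
}

int main(void)
{
	struct tbl_geom t = { .it_offset = 16, .it_size = 256, .it_page_shift = 12 };
	unsigned long dma = entry_to_dma(&t, 0);

	printf("dma=0x%lx ok=%d\n", dma, free_check(&t, dma, 4));
	return 0;
}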
tbl 478 arch/powerpc/kernel/iommu.c entry += tbl->it_offset;
tbl 479 arch/powerpc/kernel/iommu.c dma_addr = entry << tbl->it_page_shift;
tbl 480 arch/powerpc/kernel/iommu.c dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
tbl 486 arch/powerpc/kernel/iommu.c build_fail = tbl->it_ops->set(tbl, entry, npages,
tbl 487 arch/powerpc/kernel/iommu.c vaddr & IOMMU_PAGE_MASK(tbl),
tbl 525 arch/powerpc/kernel/iommu.c if (tbl->it_ops->flush)
tbl 526 arch/powerpc/kernel/iommu.c tbl->it_ops->flush(tbl);
tbl 549 arch/powerpc/kernel/iommu.c vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
tbl 551 arch/powerpc/kernel/iommu.c IOMMU_PAGE_SIZE(tbl));
tbl 552 arch/powerpc/kernel/iommu.c __iommu_free(tbl, vaddr, npages);
tbl 563 arch/powerpc/kernel/iommu.c void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
tbl 571 arch/powerpc/kernel/iommu.c if (!tbl)
tbl 582 arch/powerpc/kernel/iommu.c IOMMU_PAGE_SIZE(tbl));
tbl 583 arch/powerpc/kernel/iommu.c __iommu_free(tbl, dma_handle, npages);
tbl 591 arch/powerpc/kernel/iommu.c if (tbl->it_ops->flush)
tbl 592 arch/powerpc/kernel/iommu.c tbl->it_ops->flush(tbl);
tbl 595 arch/powerpc/kernel/iommu.c static void iommu_table_clear(struct iommu_table *tbl)
tbl 604 arch/powerpc/kernel/iommu.c tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
tbl 609 arch/powerpc/kernel/iommu.c if (tbl->it_ops->get) {
tbl 613 arch/powerpc/kernel/iommu.c for (index = 0; index < tbl->it_size; index++) {
tbl 614 arch/powerpc/kernel/iommu.c tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
tbl 619 arch/powerpc/kernel/iommu.c __set_bit(index, tbl->it_map);
tbl 624 arch/powerpc/kernel/iommu.c if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
tbl 628 arch/powerpc/kernel/iommu.c for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
tbl 629 arch/powerpc/kernel/iommu.c index < tbl->it_size; index++)
tbl 630 arch/powerpc/kernel/iommu.c __clear_bit(index, tbl->it_map);
tbl 636 arch/powerpc/kernel/iommu.c static void iommu_table_reserve_pages(struct iommu_table *tbl,
tbl 647 arch/powerpc/kernel/iommu.c if (tbl->it_offset == 0)
tbl 648 arch/powerpc/kernel/iommu.c set_bit(0, tbl->it_map);
tbl 650 arch/powerpc/kernel/iommu.c tbl->it_reserved_start = res_start;
tbl 651 arch/powerpc/kernel/iommu.c tbl->it_reserved_end = res_end;
tbl 655 arch/powerpc/kernel/iommu.c (tbl->it_offset + tbl->it_size < res_start ||
tbl 656 arch/powerpc/kernel/iommu.c res_end < tbl->it_offset))
tbl 659 arch/powerpc/kernel/iommu.c for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
tbl 660 arch/powerpc/kernel/iommu.c set_bit(i - tbl->it_offset, tbl->it_map);
tbl 663 arch/powerpc/kernel/iommu.c static void iommu_table_release_pages(struct iommu_table *tbl)
tbl 671 arch/powerpc/kernel/iommu.c if (tbl->it_offset == 0)
tbl 672 arch/powerpc/kernel/iommu.c clear_bit(0, tbl->it_map);
tbl 674 arch/powerpc/kernel/iommu.c for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
tbl 675 arch/powerpc/kernel/iommu.c clear_bit(i - tbl->it_offset, tbl->it_map);
tbl 682 arch/powerpc/kernel/iommu.c struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
tbl 691 arch/powerpc/kernel/iommu.c BUG_ON(!tbl->it_ops);
tbl 694 arch/powerpc/kernel/iommu.c sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
tbl 699 arch/powerpc/kernel/iommu.c tbl->it_map = page_address(page);
tbl 700 arch/powerpc/kernel/iommu.c memset(tbl->it_map, 0, sz);
tbl 702 arch/powerpc/kernel/iommu.c iommu_table_reserve_pages(tbl, res_start, res_end);
tbl 705 arch/powerpc/kernel/iommu.c if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
tbl 706 arch/powerpc/kernel/iommu.c tbl->nr_pools = IOMMU_NR_POOLS;
tbl 708 arch/powerpc/kernel/iommu.c tbl->nr_pools = 1;
tbl 711 arch/powerpc/kernel/iommu.c tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
tbl 713 arch/powerpc/kernel/iommu.c for (i = 0; i < tbl->nr_pools; i++) {
tbl 714 arch/powerpc/kernel/iommu.c p = &tbl->pools[i];
tbl 716 arch/powerpc/kernel/iommu.c p->start = tbl->poolsize * i;
tbl 718 arch/powerpc/kernel/iommu.c p->end = p->start + tbl->poolsize;
tbl 721 arch/powerpc/kernel/iommu.c p = &tbl->large_pool;
tbl 723 arch/powerpc/kernel/iommu.c p->start = tbl->poolsize * i;
tbl 725 arch/powerpc/kernel/iommu.c p->end = tbl->it_size;
tbl 727 arch/powerpc/kernel/iommu.c iommu_table_clear(tbl);
tbl 735 arch/powerpc/kernel/iommu.c return tbl;
tbl 742 arch/powerpc/kernel/iommu.c struct iommu_table *tbl;
tbl 744 arch/powerpc/kernel/iommu.c tbl = container_of(kref, struct iommu_table, it_kref);
tbl 746 arch/powerpc/kernel/iommu.c if (tbl->it_ops->free)
tbl 747 arch/powerpc/kernel/iommu.c tbl->it_ops->free(tbl);
tbl 749 arch/powerpc/kernel/iommu.c if (!tbl->it_map) {
tbl 750 arch/powerpc/kernel/iommu.c kfree(tbl);
tbl 754 arch/powerpc/kernel/iommu.c iommu_table_release_pages(tbl);
tbl 757 arch/powerpc/kernel/iommu.c if (!bitmap_empty(tbl->it_map, tbl->it_size))
tbl 761 arch/powerpc/kernel/iommu.c bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
tbl 765 arch/powerpc/kernel/iommu.c free_pages((unsigned long) tbl->it_map, order);
tbl 768 arch/powerpc/kernel/iommu.c kfree(tbl);
tbl 771 arch/powerpc/kernel/iommu.c struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
tbl 773 arch/powerpc/kernel/iommu.c if (kref_get_unless_zero(&tbl->it_kref))
tbl 774 arch/powerpc/kernel/iommu.c return tbl;
tbl 780 arch/powerpc/kernel/iommu.c int iommu_tce_table_put(struct iommu_table *tbl)
tbl 782 arch/powerpc/kernel/iommu.c if (WARN_ON(!tbl))
tbl 785 arch/powerpc/kernel/iommu.c return kref_put(&tbl->it_kref, iommu_table_free);
tbl 794 arch/powerpc/kernel/iommu.c dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
tbl 809 arch/powerpc/kernel/iommu.c if (tbl) {
tbl 810 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
tbl 812 arch/powerpc/kernel/iommu.c if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
tbl 814 arch/powerpc/kernel/iommu.c align = PAGE_SHIFT - tbl->it_page_shift;
tbl 816 arch/powerpc/kernel/iommu.c dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
tbl 817 arch/powerpc/kernel/iommu.c mask >> tbl->it_page_shift, align,
tbl 823 arch/powerpc/kernel/iommu.c "vaddr %p npages %d\n", tbl, vaddr,
tbl 827 arch/powerpc/kernel/iommu.c dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
tbl 833 arch/powerpc/kernel/iommu.c void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
tbl 841 arch/powerpc/kernel/iommu.c if (tbl) {
tbl 843 arch/powerpc/kernel/iommu.c IOMMU_PAGE_SIZE(tbl));
tbl 844 arch/powerpc/kernel/iommu.c iommu_free(tbl, dma_handle, npages);
tbl 852 arch/powerpc/kernel/iommu.c void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
tbl 876 arch/powerpc/kernel/iommu.c if (!tbl)
tbl 887 arch/powerpc/kernel/iommu.c nio_pages = size >> tbl->it_page_shift;
tbl 888 arch/powerpc/kernel/iommu.c io_order = get_iommu_order(size, tbl);
tbl 889 arch/powerpc/kernel/iommu.c mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
tbl 890 arch/powerpc/kernel/iommu.c mask >> tbl->it_page_shift, io_order, 0);
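The iommu_init_table() hits above carve the table into allocation pools: roughly the first three quarters split evenly across the small pools, the remainder forming the large pool. A sketch of that layout computation with invented sizes:

/* Pool layout arithmetic as in iommu_init_table(); values invented. */
#include <stdio.h>

int main(void)
{
	unsigned long it_size = 1024;   /* entries */
	unsigned int nr_pools = 4;      /* IOMMU_NR_POOLS in the kernel */
	unsigned long poolsize = (it_size * 3 / 4) / nr_pools;
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		printf("pool %u: [%lu, %lu)\n", i, poolsize * i, poolsize * i + poolsize);

	/* The large pool takes the rest of the table. */
	printf("large: [%lu, %lu)\n", poolsize * i, it_size);
	return 0;
}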
tbl 899 arch/powerpc/kernel/iommu.c void iommu_free_coherent(struct iommu_table *tbl, size_t size,
tbl 902 arch/powerpc/kernel/iommu.c if (tbl) {
tbl 906 arch/powerpc/kernel/iommu.c nio_pages = size >> tbl->it_page_shift;
tbl 907 arch/powerpc/kernel/iommu.c iommu_free(tbl, dma_handle, nio_pages);
tbl 974 arch/powerpc/kernel/iommu.c void iommu_flush_tce(struct iommu_table *tbl)
tbl 977 arch/powerpc/kernel/iommu.c if (tbl->it_ops->flush)
tbl 978 arch/powerpc/kernel/iommu.c tbl->it_ops->flush(tbl);
tbl 1017 arch/powerpc/kernel/iommu.c struct iommu_table *tbl,
tbl 1024 arch/powerpc/kernel/iommu.c ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
tbl 1027 arch/powerpc/kernel/iommu.c !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
tbl 1035 arch/powerpc/kernel/iommu.c void iommu_tce_kill(struct iommu_table *tbl,
tbl 1038 arch/powerpc/kernel/iommu.c if (tbl->it_ops->tce_kill)
tbl 1039 arch/powerpc/kernel/iommu.c tbl->it_ops->tce_kill(tbl, entry, pages, false);
tbl 1043 arch/powerpc/kernel/iommu.c int iommu_take_ownership(struct iommu_table *tbl)
tbl 1045 arch/powerpc/kernel/iommu.c unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
tbl 1055 arch/powerpc/kernel/iommu.c if (!tbl->it_ops->xchg_no_kill)
tbl 1058 arch/powerpc/kernel/iommu.c spin_lock_irqsave(&tbl->large_pool.lock, flags);
tbl 1059 arch/powerpc/kernel/iommu.c for (i = 0; i < tbl->nr_pools; i++)
tbl 1060 arch/powerpc/kernel/iommu.c spin_lock(&tbl->pools[i].lock);
tbl 1062 arch/powerpc/kernel/iommu.c iommu_table_release_pages(tbl);
tbl 1064 arch/powerpc/kernel/iommu.c if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
tbl 1068 arch/powerpc/kernel/iommu.c iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
tbl 1069 arch/powerpc/kernel/iommu.c tbl->it_reserved_end);
tbl 1071 arch/powerpc/kernel/iommu.c memset(tbl->it_map, 0xff, sz);
tbl 1074 arch/powerpc/kernel/iommu.c for (i = 0; i < tbl->nr_pools; i++)
tbl 1075 arch/powerpc/kernel/iommu.c spin_unlock(&tbl->pools[i].lock);
tbl 1076 arch/powerpc/kernel/iommu.c spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
tbl 1082 arch/powerpc/kernel/iommu.c void iommu_release_ownership(struct iommu_table *tbl)
tbl 1084 arch/powerpc/kernel/iommu.c unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
tbl 1086 arch/powerpc/kernel/iommu.c spin_lock_irqsave(&tbl->large_pool.lock, flags);
tbl 1087 arch/powerpc/kernel/iommu.c for (i = 0; i < tbl->nr_pools; i++)
tbl 1088 arch/powerpc/kernel/iommu.c spin_lock(&tbl->pools[i].lock);
tbl 1090 arch/powerpc/kernel/iommu.c memset(tbl->it_map, 0, sz);
tbl 1092 arch/powerpc/kernel/iommu.c iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
tbl 1093 arch/powerpc/kernel/iommu.c tbl->it_reserved_end);
tbl 1095 arch/powerpc/kernel/iommu.c for (i = 0; i < tbl->nr_pools; i++)
tbl 1096 arch/powerpc/kernel/iommu.c spin_unlock(&tbl->pools[i].lock);
tbl 1097 arch/powerpc/kernel/iommu.c spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
tbl 54 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_table_put(stit->tbl);
tbl 85 arch/powerpc/kvm/book3s_64_vio.c if (table_group->tables[i] != stit->tbl)
tbl 99 arch/powerpc/kvm/book3s_64_vio.c struct iommu_table *tbl = NULL;
tbl 140 arch/powerpc/kvm/book3s_64_vio.c tbl = iommu_tce_table_get(tbltmp);
tbl 144 arch/powerpc/kvm/book3s_64_vio.c if (!tbl)
tbl 148 arch/powerpc/kvm/book3s_64_vio.c if (tbl != stit->tbl)
tbl 153 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_table_put(tbl);
tbl 165 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_table_put(tbl);
tbl 169 arch/powerpc/kvm/book3s_64_vio.c stit->tbl = tbl;
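iommu_take_ownership()/iommu_release_ownership() above lock the large pool, then every small pool, before flipping the whole allocation bitmap. The sketch below shows only that lock-all-then-mutate pattern, using pthreads purely for illustration; it is not the kernel's locking code.

/* Lock-ordering sketch: large pool first, then each small pool. */
#include <pthread.h>
#include <string.h>

#define NR_POOLS 4

static pthread_mutex_t large_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pool_lock[NR_POOLS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static unsigned char it_map[128];

static void take_ownership(void)
{
	pthread_mutex_lock(&large_lock);
	for (int i = 0; i < NR_POOLS; i++)
		pthread_mutex_lock(&pool_lock[i]);

	/* Mark every entry in use so the host allocator stays out. */
	memset(it_map, 0xff, sizeof(it_map));

	for (int i = 0; i < NR_POOLS; i++)
		pthread_mutex_unlock(&pool_lock[i]);
	pthread_mutex_unlock(&large_lock);
}

int main(void)
{
	take_ownership();
	return 0;
}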
tbl 370 arch/powerpc/kvm/book3s_64_vio.c long shift = stit->tbl->it_page_shift;
tbl 392 arch/powerpc/kvm/book3s_64_vio.c u64 *tbl;
tbl 408 arch/powerpc/kvm/book3s_64_vio.c tbl = page_to_virt(page);
tbl 410 arch/powerpc/kvm/book3s_64_vio.c tbl[idx % TCES_PER_PAGE] = tce;
tbl 413 arch/powerpc/kvm/book3s_64_vio.c static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
tbl 419 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
tbl 423 arch/powerpc/kvm/book3s_64_vio.c struct iommu_table *tbl, unsigned long entry)
tbl 426 arch/powerpc/kvm/book3s_64_vio.c const unsigned long pgsize = 1ULL << tbl->it_page_shift;
tbl 427 arch/powerpc/kvm/book3s_64_vio.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
tbl 444 arch/powerpc/kvm/book3s_64_vio.c struct iommu_table *tbl, unsigned long entry)
tbl 450 arch/powerpc/kvm/book3s_64_vio.c if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
tbl 457 arch/powerpc/kvm/book3s_64_vio.c ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
tbl 459 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
tbl 465 arch/powerpc/kvm/book3s_64_vio.c struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
tbl 469 arch/powerpc/kvm/book3s_64_vio.c unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
tbl 473 arch/powerpc/kvm/book3s_64_vio.c ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
tbl 481 arch/powerpc/kvm/book3s_64_vio.c long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
tbl 487 arch/powerpc/kvm/book3s_64_vio.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
tbl 494 arch/powerpc/kvm/book3s_64_vio.c mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
tbl 499 arch/powerpc/kvm/book3s_64_vio.c if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
tbl 505 arch/powerpc/kvm/book3s_64_vio.c ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
tbl 512 arch/powerpc/kvm/book3s_64_vio.c kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
tbl 520 arch/powerpc/kvm/book3s_64_vio.c struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
tbl 525 arch/powerpc/kvm/book3s_64_vio.c unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
tbl 529 arch/powerpc/kvm/book3s_64_vio.c ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
tbl 531 arch/powerpc/kvm/book3s_64_vio.c ret = kvmppc_tce_iommu_do_map(kvm, tbl,
tbl 578 arch/powerpc/kvm/book3s_64_vio.c stit->tbl, entry);
tbl 580 arch/powerpc/kvm/book3s_64_vio.c ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
tbl 583 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_kill(stit->tbl, entry, 1);
tbl 586 arch/powerpc/kvm/book3s_64_vio.c kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
tbl 673 arch/powerpc/kvm/book3s_64_vio.c stit->tbl, entry + i, ua,
tbl 677 arch/powerpc/kvm/book3s_64_vio.c kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
tbl 688 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_kill(stit->tbl, entry, npages);
tbl 722 arch/powerpc/kvm/book3s_64_vio.c stit->tbl, entry + i);
tbl 731 arch/powerpc/kvm/book3s_64_vio.c kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
tbl 740 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
tbl 126 arch/powerpc/kvm/book3s_64_vio_hv.c long shift = stit->tbl->it_page_shift;
tbl 175 arch/powerpc/kvm/book3s_64_vio_hv.c u64 *tbl;
tbl 184 arch/powerpc/kvm/book3s_64_vio_hv.c tbl = kvmppc_page_address(page);
tbl 186 arch/powerpc/kvm/book3s_64_vio_hv.c tbl[idx % TCES_PER_PAGE] = tce;
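Several KVM hits above compute subpages = 1ULL << (stt->page_shift - tbl->it_page_shift): when the guest's TCE page is larger than the host IOMMU page, one guest entry fans out to several host entries. A worked example:

/* Subpage fan-out arithmetic from the KVM TCE paths. */
#include <stdio.h>

int main(void)
{
	unsigned int stt_page_shift = 16; /* 64K guest IOMMU page */
	unsigned int it_page_shift = 12;  /* 4K host IOMMU page   */
	unsigned long subpages = 1UL << (stt_page_shift - it_page_shift);

	printf("one guest TCE -> %lu host entries\n", subpages); /* 16 */
	return 0;
}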
tbl 222 arch/powerpc/kvm/book3s_64_vio_hv.c struct iommu_table *tbl,
tbl 228 arch/powerpc/kvm/book3s_64_vio_hv.c ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);
tbl 232 arch/powerpc/kvm/book3s_64_vio_hv.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
tbl 244 arch/powerpc/kvm/book3s_64_vio_hv.c extern void iommu_tce_kill_rm(struct iommu_table *tbl,
tbl 247 arch/powerpc/kvm/book3s_64_vio_hv.c if (tbl->it_ops->tce_kill)
tbl 248 arch/powerpc/kvm/book3s_64_vio_hv.c tbl->it_ops->tce_kill(tbl, entry, pages, true);
tbl 251 arch/powerpc/kvm/book3s_64_vio_hv.c static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
tbl 257 arch/powerpc/kvm/book3s_64_vio_hv.c iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
tbl 261 arch/powerpc/kvm/book3s_64_vio_hv.c struct iommu_table *tbl, unsigned long entry)
tbl 264 arch/powerpc/kvm/book3s_64_vio_hv.c const unsigned long pgsize = 1ULL << tbl->it_page_shift;
tbl 265 arch/powerpc/kvm/book3s_64_vio_hv.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
tbl 283 arch/powerpc/kvm/book3s_64_vio_hv.c struct iommu_table *tbl, unsigned long entry)
tbl 289 arch/powerpc/kvm/book3s_64_vio_hv.c if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
tbl 299 arch/powerpc/kvm/book3s_64_vio_hv.c ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
tbl 301 arch/powerpc/kvm/book3s_64_vio_hv.c iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
tbl 307 arch/powerpc/kvm/book3s_64_vio_hv.c struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
tbl 311 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
tbl 315 arch/powerpc/kvm/book3s_64_vio_hv.c ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
tbl 323 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
tbl 329 arch/powerpc/kvm/book3s_64_vio_hv.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
tbl 336 arch/powerpc/kvm/book3s_64_vio_hv.c mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
tbl 340 arch/powerpc/kvm/book3s_64_vio_hv.c if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
tbl 347 arch/powerpc/kvm/book3s_64_vio_hv.c ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
tbl 358 arch/powerpc/kvm/book3s_64_vio_hv.c kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
tbl 366 arch/powerpc/kvm/book3s_64_vio_hv.c struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
tbl 371 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
tbl 375 arch/powerpc/kvm/book3s_64_vio_hv.c ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
tbl 377 arch/powerpc/kvm/book3s_64_vio_hv.c ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
tbl 423 arch/powerpc/kvm/book3s_64_vio_hv.c stit->tbl, entry);
tbl 426 arch/powerpc/kvm/book3s_64_vio_hv.c stit->tbl, entry, ua, dir);
tbl 428 arch/powerpc/kvm/book3s_64_vio_hv.c iommu_tce_kill_rm(stit->tbl, entry, 1);
tbl 431 arch/powerpc/kvm/book3s_64_vio_hv.c kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
tbl 576 arch/powerpc/kvm/book3s_64_vio_hv.c stit->tbl, entry + i, ua,
tbl 580 arch/powerpc/kvm/book3s_64_vio_hv.c kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
tbl 591 arch/powerpc/kvm/book3s_64_vio_hv.c iommu_tce_kill_rm(stit->tbl, entry, npages);
tbl 629 arch/powerpc/kvm/book3s_64_vio_hv.c stit->tbl, entry + i);
tbl 638 arch/powerpc/kvm/book3s_64_vio_hv.c kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
tbl 647 arch/powerpc/kvm/book3s_64_vio_hv.c iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
tbl 660 arch/powerpc/kvm/book3s_64_vio_hv.c u64 *tbl;
tbl 676 arch/powerpc/kvm/book3s_64_vio_hv.c tbl = (u64 *)page_address(page);
tbl 678 arch/powerpc/kvm/book3s_64_vio_hv.c vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
tbl 154 arch/powerpc/platforms/cell/iommu.c static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
tbl 161 arch/powerpc/platforms/cell/iommu.c container_of(tbl, struct iommu_window, table);
tbl 186 arch/powerpc/platforms/cell/iommu.c io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
tbl 188 arch/powerpc/platforms/cell/iommu.c for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
tbl 200 arch/powerpc/platforms/cell/iommu.c static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
tbl 206 arch/powerpc/platforms/cell/iommu.c container_of(tbl, struct iommu_window, table);
tbl 220 arch/powerpc/platforms/cell/iommu.c io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
tbl 76 arch/powerpc/platforms/pasemi/iommu.c static int iobmap_build(struct iommu_table *tbl, long index,
tbl 87 arch/powerpc/platforms/pasemi/iommu.c bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;
tbl 89 arch/powerpc/platforms/pasemi/iommu.c ip = ((u32 *)tbl->it_base) + index;
tbl 105 arch/powerpc/platforms/pasemi/iommu.c static void iobmap_free(struct iommu_table *tbl, long index,
tbl 113 arch/powerpc/platforms/pasemi/iommu.c bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;
tbl 115 arch/powerpc/platforms/pasemi/iommu.c ip = ((u32 *)tbl->it_base) + index;
tbl 128 arch/powerpc/platforms/powernv/npu-dma.c struct iommu_table *tbl)
tbl 134 arch/powerpc/platforms/powernv/npu-dma.c const unsigned long size = tbl->it_indirect_levels ?
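tce_build_cell() and iobmap_build() above follow the same linear pattern: index into the table memory at it_base and fill one entry per page while advancing uaddr. A simplified sketch of that loop (the entry layout here is invented; the real backends also OR in protection and hint bits):

/* Linear TCE-build loop sketch; the stored value is illustrative. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t table[16] = { 0 };  /* stands in for tbl->it_base */
	long it_offset = 0, index = 4, npages = 3;
	unsigned int page_shift = 12;
	unsigned long uaddr = 0x10000;

	uint64_t *io_pte = table + (index - it_offset);

	for (long i = 0; i < npages; i++, uaddr += (1UL << page_shift))
		io_pte[i] = uaddr >> page_shift; /* store the page frame */

	printf("%llu %llu %llu\n", (unsigned long long)table[4],
	       (unsigned long long)table[5], (unsigned long long)table[6]);
	return 0;
}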
tbl 135 arch/powerpc/platforms/powernv/npu-dma.c tbl->it_level_size : tbl->it_size;
tbl 136 arch/powerpc/platforms/powernv/npu-dma.c const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
tbl 137 arch/powerpc/platforms/powernv/npu-dma.c const __u64 win_size = tbl->it_size << tbl->it_page_shift;
tbl 146 arch/powerpc/platforms/powernv/npu-dma.c IOMMU_PAGE_SIZE(tbl));
tbl 151 arch/powerpc/platforms/powernv/npu-dma.c tbl->it_indirect_levels + 1,
tbl 152 arch/powerpc/platforms/powernv/npu-dma.c __pa(tbl->it_base),
tbl 154 arch/powerpc/platforms/powernv/npu-dma.c IOMMU_PAGE_SIZE(tbl));
tbl 163 arch/powerpc/platforms/powernv/npu-dma.c tbl, &npe->table_group);
tbl 293 arch/powerpc/platforms/powernv/npu-dma.c int num, struct iommu_table *tbl)
tbl 307 arch/powerpc/platforms/powernv/npu-dma.c num, tbl);
tbl 325 arch/powerpc/platforms/powernv/npu-dma.c table_group->tables[num] = iommu_tce_table_get(tbl);
tbl 20 arch/powerpc/platforms/powernv/pci-ioda-tce.c void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
tbl 24 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_blocksize = 16;
tbl 25 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_base = (unsigned long)tce_mem;
tbl 26 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_page_shift = page_shift;
tbl 27 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_offset = dma_offset >> tbl->it_page_shift;
tbl 28 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_index = 0;
tbl 29 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_size = tce_size >> 3;
tbl 30 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_busno = 0;
tbl 31 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_type = TCE_PCI;
tbl 55 arch/powerpc/platforms/powernv/pci-ioda-tce.c static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
tbl 57 arch/powerpc/platforms/powernv/pci-ioda-tce.c __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
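pnv_pci_setup_iommu_table() above derives the table geometry from the window: it_offset is the bus DMA offset in IOMMU pages and it_size is the TCE area divided by 8 bytes per entry. A worked example with invented numbers:

/* Geometry arithmetic as in pnv_pci_setup_iommu_table(); values invented. */
#include <stdio.h>

int main(void)
{
	unsigned long dma_offset = 0x80000000UL; /* window base on the bus */
	unsigned long tce_size = 0x200000UL;     /* bytes of TCE memory */
	unsigned int page_shift = 12;

	unsigned long it_offset = dma_offset >> page_shift;
	unsigned long it_size = tce_size >> 3;   /* 8 bytes per TCE */

	printf("it_offset=%lu entries, it_size=%lu TCEs\n", it_offset, it_size);
	printf("window covers %lu MiB\n", (it_size << page_shift) >> 20);
	return 0;
}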
tbl 58 arch/powerpc/platforms/powernv/pci-ioda-tce.c int level = tbl->it_indirect_levels;
tbl 59 arch/powerpc/platforms/powernv/pci-ioda-tce.c const long shift = ilog2(tbl->it_level_size);
tbl 60 arch/powerpc/platforms/powernv/pci-ioda-tce.c unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
tbl 72 arch/powerpc/platforms/powernv/pci-ioda-tce.c tmp2 = pnv_alloc_tce_level(tbl->it_nid,
tbl 73 arch/powerpc/platforms/powernv/pci-ioda-tce.c ilog2(tbl->it_level_size) + 3);
tbl 82 arch/powerpc/platforms/powernv/pci-ioda-tce.c ilog2(tbl->it_level_size) + 3, 1);
tbl 96 arch/powerpc/platforms/powernv/pci-ioda-tce.c int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
tbl 101 arch/powerpc/platforms/powernv/pci-ioda-tce.c u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
tbl 109 arch/powerpc/platforms/powernv/pci-ioda-tce.c ((rpn + i) << tbl->it_page_shift);
tbl 110 arch/powerpc/platforms/powernv/pci-ioda-tce.c unsigned long idx = index - tbl->it_offset + i;
tbl 112 arch/powerpc/platforms/powernv/pci-ioda-tce.c *(pnv_tce(tbl, false, idx, true)) = cpu_to_be64(newtce);
tbl 119 arch/powerpc/platforms/powernv/pci-ioda-tce.c int pnv_tce_xchg(struct iommu_table *tbl, long index,
tbl 125 arch/powerpc/platforms/powernv/pci-ioda-tce.c unsigned long idx = index - tbl->it_offset;
tbl 128 arch/powerpc/platforms/powernv/pci-ioda-tce.c BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
tbl 131 arch/powerpc/platforms/powernv/pci-ioda-tce.c ptce = pnv_tce(tbl, false, idx, false);
tbl 139 arch/powerpc/platforms/powernv/pci-ioda-tce.c ptce = pnv_tce(tbl, false, idx, alloc);
tbl 154 arch/powerpc/platforms/powernv/pci-ioda-tce.c __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc)
tbl 156 arch/powerpc/platforms/powernv/pci-ioda-tce.c if (WARN_ON_ONCE(!tbl->it_userspace))
tbl 159 arch/powerpc/platforms/powernv/pci-ioda-tce.c return pnv_tce(tbl, true, index - tbl->it_offset, alloc);
tbl 163 arch/powerpc/platforms/powernv/pci-ioda-tce.c void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
tbl 168 arch/powerpc/platforms/powernv/pci-ioda-tce.c unsigned long idx = index - tbl->it_offset + i;
tbl 169 arch/powerpc/platforms/powernv/pci-ioda-tce.c __be64 *ptce = pnv_tce(tbl, false, idx, false);
tbl 175 arch/powerpc/platforms/powernv/pci-ioda-tce.c i |= tbl->it_level_size - 1;
tbl 179 arch/powerpc/platforms/powernv/pci-ioda-tce.c unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
tbl 181 arch/powerpc/platforms/powernv/pci-ioda-tce.c __be64 *ptce = pnv_tce(tbl, false, index - tbl->it_offset, false);
tbl 213 arch/powerpc/platforms/powernv/pci-ioda-tce.c void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
tbl 215 arch/powerpc/platforms/powernv/pci-ioda-tce.c const unsigned long size = tbl->it_indirect_levels ?
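pnv_tce() above walks it_indirect_levels of indirection, slicing the entry index into fields of ilog2(it_level_size) bits. The sketch below reproduces only the index-slicing arithmetic, with invented parameters and no TCE memory:

/* Index-slicing sketch for a multi-level TCE walk. */
#include <stdio.h>

int main(void)
{
	unsigned long it_level_size = 512; /* entries per level */
	int level = 2;                     /* it_indirect_levels */
	const long shift = 9;              /* ilog2(it_level_size) */
	unsigned long idx = 0x12345;

	unsigned long mask = (it_level_size - 1) << (level * shift);

	while (level >= 0) {
		unsigned long n = (idx & mask) >> (level * shift);

		printf("level %d index %lu\n", level, n);
		mask >>= shift;
		level--;
	}
	return 0;
}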
tbl 216 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_level_size : tbl->it_size;
tbl 218 arch/powerpc/platforms/powernv/pci-ioda-tce.c if (!tbl->it_size)
tbl 221 arch/powerpc/platforms/powernv/pci-ioda-tce.c pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
tbl 222 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_indirect_levels);
tbl 223 arch/powerpc/platforms/powernv/pci-ioda-tce.c if (tbl->it_userspace) {
tbl 224 arch/powerpc/platforms/powernv/pci-ioda-tce.c pnv_pci_ioda2_table_do_free_pages(tbl->it_userspace, size,
tbl 225 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_indirect_levels);
tbl 265 arch/powerpc/platforms/powernv/pci-ioda-tce.c bool alloc_userspace_copy, struct iommu_table *tbl)
tbl 320 arch/powerpc/platforms/powernv/pci-ioda-tce.c pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
tbl 322 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_level_size = 1ULL << (level_shift - 3);
tbl 323 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_indirect_levels = levels - 1;
tbl 324 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_userspace = uas;
tbl 325 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_nid = nid;
tbl 328 arch/powerpc/platforms/powernv/pci-ioda-tce.c window_size, tce_table_size, bus_offset, tbl->it_base,
tbl 329 arch/powerpc/platforms/powernv/pci-ioda-tce.c tbl->it_userspace, 1, levels);
tbl 351 arch/powerpc/platforms/powernv/pci-ioda-tce.c void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
tbl 358 arch/powerpc/platforms/powernv/pci-ioda-tce.c if (!tbl || !table_group)
tbl 363 arch/powerpc/platforms/powernv/pci-ioda-tce.c list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
tbl 377 arch/powerpc/platforms/powernv/pci-ioda-tce.c if (table_group->tables[i] == tbl) {
tbl 378 arch/powerpc/platforms/powernv/pci-ioda-tce.c iommu_tce_table_put(tbl);
tbl 388 arch/powerpc/platforms/powernv/pci-ioda-tce.c struct iommu_table *tbl,
tbl 393 arch/powerpc/platforms/powernv/pci-ioda-tce.c if (WARN_ON(!tbl || !table_group))
tbl 402 arch/powerpc/platforms/powernv/pci-ioda-tce.c list_add_rcu(&tgl->next, &tbl->it_group_list);
tbl 404 arch/powerpc/platforms/powernv/pci-ioda-tce.c table_group->tables[num] = iommu_tce_table_get(tbl);
tbl 1444 arch/powerpc/platforms/powernv/pci-ioda.c struct iommu_table *tbl;
tbl 1447 arch/powerpc/platforms/powernv/pci-ioda.c tbl = pe->table_group.tables[0];
tbl 1457 arch/powerpc/platforms/powernv/pci-ioda.c iommu_tce_table_put(tbl);
tbl 1901 arch/powerpc/platforms/powernv/pci-ioda.c static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
tbl 1905 arch/powerpc/platforms/powernv/pci-ioda.c &tbl->it_group_list, struct iommu_table_group_link,
tbl 1912 arch/powerpc/platforms/powernv/pci-ioda.c start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
tbl 1913 arch/powerpc/platforms/powernv/pci-ioda.c end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
tbl 1938 arch/powerpc/platforms/powernv/pci-ioda.c static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
tbl 1943 arch/powerpc/platforms/powernv/pci-ioda.c int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
tbl 1947 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
tbl 1954 arch/powerpc/platforms/powernv/pci-ioda.c static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
tbl 1958 arch/powerpc/platforms/powernv/pci-ioda.c return pnv_tce_xchg(tbl, index, hpa, direction, !realmode);
tbl 1962 arch/powerpc/platforms/powernv/pci-ioda.c static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
tbl 1965 arch/powerpc/platforms/powernv/pci-ioda.c pnv_tce_free(tbl, index, npages);
tbl 1967 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
tbl 2045 arch/powerpc/platforms/powernv/pci-ioda.c static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
tbl 2050 arch/powerpc/platforms/powernv/pci-ioda.c list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
tbl 2054 arch/powerpc/platforms/powernv/pci-ioda.c unsigned int shift = tbl->it_page_shift;
tbl 2089 arch/powerpc/platforms/powernv/pci-ioda.c static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
tbl 2094 arch/powerpc/platforms/powernv/pci-ioda.c int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
tbl 2098 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
tbl 2103 arch/powerpc/platforms/powernv/pci-ioda.c static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
tbl 2106 arch/powerpc/platforms/powernv/pci-ioda.c pnv_tce_free(tbl, index, npages);
tbl 2108 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
tbl 2176 arch/powerpc/platforms/powernv/pci-ioda.c struct iommu_table *tbl;
tbl 2220 arch/powerpc/platforms/powernv/pci-ioda.c tbl = pnv_pci_table_alloc(phb->hose->node);
tbl 2221 arch/powerpc/platforms/powernv/pci-ioda.c if (WARN_ON(!tbl))
tbl 2226 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
tbl 2272 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
tbl 2276 arch/powerpc/platforms/powernv/pci-ioda.c tbl->it_ops = &pnv_ioda1_iommu_ops;
tbl 2277 arch/powerpc/platforms/powernv/pci-ioda.c pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
tbl 2278 arch/powerpc/platforms/powernv/pci-ioda.c pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
tbl 2279 arch/powerpc/platforms/powernv/pci-ioda.c iommu_init_table(tbl, phb->hose->node, 0, 0);
tbl 2289 arch/powerpc/platforms/powernv/pci-ioda.c if (tbl) {
tbl 2290 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
tbl 2291 arch/powerpc/platforms/powernv/pci-ioda.c iommu_tce_table_put(tbl);
tbl 2296 arch/powerpc/platforms/powernv/pci-ioda.c int num, struct iommu_table *tbl)
tbl 2302 arch/powerpc/platforms/powernv/pci-ioda.c const unsigned long size = tbl->it_indirect_levels ?
tbl 2303 arch/powerpc/platforms/powernv/pci-ioda.c tbl->it_level_size : tbl->it_size;
tbl 2304 arch/powerpc/platforms/powernv/pci-ioda.c const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
tbl 2305 arch/powerpc/platforms/powernv/pci-ioda.c const __u64 win_size = tbl->it_size << tbl->it_page_shift;
tbl 2309 arch/powerpc/platforms/powernv/pci-ioda.c IOMMU_PAGE_SIZE(tbl));
tbl 2318 arch/powerpc/platforms/powernv/pci-ioda.c tbl->it_indirect_levels + 1,
tbl 2319 arch/powerpc/platforms/powernv/pci-ioda.c __pa(tbl->it_base),
tbl 2321 arch/powerpc/platforms/powernv/pci-ioda.c IOMMU_PAGE_SIZE(tbl));
tbl 2328 arch/powerpc/platforms/powernv/pci-ioda.c tbl, &pe->table_group);
tbl 2371 arch/powerpc/platforms/powernv/pci-ioda.c struct iommu_table *tbl;
tbl 2373 arch/powerpc/platforms/powernv/pci-ioda.c tbl = pnv_pci_table_alloc(nid);
tbl 2374 arch/powerpc/platforms/powernv/pci-ioda.c if (!tbl)
tbl 2377 arch/powerpc/platforms/powernv/pci-ioda.c tbl->it_ops = &pnv_ioda2_iommu_ops;
tbl 2381 arch/powerpc/platforms/powernv/pci-ioda.c levels, alloc_userspace_copy, tbl);
tbl 2383 arch/powerpc/platforms/powernv/pci-ioda.c iommu_tce_table_put(tbl);
tbl 2387 arch/powerpc/platforms/powernv/pci-ioda.c *ptbl = tbl;
tbl 2394 arch/powerpc/platforms/powernv/pci-ioda.c struct iommu_table *tbl = NULL;
tbl 2435 arch/powerpc/platforms/powernv/pci-ioda.c window_size, levels, false, &tbl);
tbl 2446 arch/powerpc/platforms/powernv/pci-ioda.c res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
tbl 2447 arch/powerpc/platforms/powernv/pci-ioda.c res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
tbl 2449 arch/powerpc/platforms/powernv/pci-ioda.c iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end);
tbl 2451 arch/powerpc/platforms/powernv/pci-ioda.c rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
tbl 2455 arch/powerpc/platforms/powernv/pci-ioda.c iommu_tce_table_put(tbl);
tbl 2468 arch/powerpc/platforms/powernv/pci-ioda.c set_iommu_table_base(&pe->pdev->dev, tbl);
tbl 2551 arch/powerpc/platforms/powernv/pci-ioda.c struct iommu_table *tbl = pe->table_group.tables[0];
tbl 2559 arch/powerpc/platforms/powernv/pci-ioda.c iommu_tce_table_put(tbl);
tbl 3450 arch/powerpc/platforms/powernv/pci-ioda.c struct iommu_table *tbl = pe->table_group.tables[0];
tbl 3460 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
tbl 3466 arch/powerpc/platforms/powernv/pci-ioda.c free_pages(tbl->it_base, get_order(tbl->it_size << 3));
tbl 3467 arch/powerpc/platforms/powernv/pci-ioda.c iommu_tce_table_put(tbl);
tbl 3472 arch/powerpc/platforms/powernv/pci-ioda.c struct iommu_table *tbl = pe->table_group.tables[0];
tbl 3493 arch/powerpc/platforms/powernv/pci-ioda.c iommu_tce_table_put(tbl);
tbl 801 arch/powerpc/platforms/powernv/pci.c struct iommu_table *tbl;
tbl 803 arch/powerpc/platforms/powernv/pci.c tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
tbl 804 arch/powerpc/platforms/powernv/pci.c if (!tbl)
tbl 807 arch/powerpc/platforms/powernv/pci.c INIT_LIST_HEAD_RCU(&tbl->it_group_list);
tbl 808 arch/powerpc/platforms/powernv/pci.c kref_init(&tbl->it_kref);
tbl 810 arch/powerpc/platforms/powernv/pci.c return tbl;
tbl 225 arch/powerpc/platforms/powernv/pci.h extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
tbl 228 arch/powerpc/platforms/powernv/pci.h extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
tbl 229 arch/powerpc/platforms/powernv/pci.h extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
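Ahead of iommu_init_table(), the ioda2 hits above reserve the 32-bit MMIO hole out of the DMA window: res_start/res_end are the hole's bounds converted to IOMMU-page entries. A worked example with invented values:

/* Reserved-range computation sketch for the 32-bit MMIO hole. */
#include <stdio.h>

#define SZ_4G 0x100000000UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long m32_pci_base = 0x80000000UL; /* start of MMIO hole */
	unsigned long window_size = SZ_4G * 2;
	unsigned int it_page_shift = 16;           /* 64K IOMMU pages */

	unsigned long res_start = m32_pci_base >> it_page_shift;
	unsigned long res_end = min_ul(window_size, SZ_4G) >> it_page_shift;

	printf("reserve entries [%lu, %lu)\n", res_start, res_end);
	return 0;
}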
tbl 232 arch/powerpc/platforms/powernv/pci.h extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
tbl 234 arch/powerpc/platforms/powernv/pci.h extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
tbl 238 arch/powerpc/platforms/powernv/pci.h bool alloc_userspace_copy, struct iommu_table *tbl);
tbl 239 arch/powerpc/platforms/powernv/pci.h extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
tbl 242 arch/powerpc/platforms/powernv/pci.h struct iommu_table *tbl,
tbl 244 arch/powerpc/platforms/powernv/pci.h extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
tbl 246 arch/powerpc/platforms/powernv/pci.h extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
tbl 45 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl;
tbl 52 arch/powerpc/platforms/pseries/iommu.c tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
tbl 53 arch/powerpc/platforms/pseries/iommu.c if (!tbl)
tbl 56 arch/powerpc/platforms/pseries/iommu.c INIT_LIST_HEAD_RCU(&tbl->it_group_list);
tbl 57 arch/powerpc/platforms/pseries/iommu.c kref_init(&tbl->it_kref);
tbl 59 arch/powerpc/platforms/pseries/iommu.c table_group->tables[0] = tbl;
tbl 71 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl;
tbl 76 arch/powerpc/platforms/pseries/iommu.c tbl = table_group->tables[0];
tbl 83 arch/powerpc/platforms/pseries/iommu.c iommu_tce_table_put(tbl);
tbl 88 arch/powerpc/platforms/pseries/iommu.c static int tce_build_pSeries(struct iommu_table *tbl, long index,
tbl 102 arch/powerpc/platforms/pseries/iommu.c tcep = ((__be64 *)tbl->it_base) + index;
tbl 116 arch/powerpc/platforms/pseries/iommu.c static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
tbl 120 arch/powerpc/platforms/pseries/iommu.c tcep = ((__be64 *)tbl->it_base) + index;
tbl 126 arch/powerpc/platforms/pseries/iommu.c static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
tbl 130 arch/powerpc/platforms/pseries/iommu.c tcep = ((__be64 *)tbl->it_base) + index;
tbl 181 arch/powerpc/platforms/pseries/iommu.c static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tbl 196 arch/powerpc/platforms/pseries/iommu.c return tce_build_pSeriesLP(tbl->it_index, tcenum,
tbl 197 arch/powerpc/platforms/pseries/iommu.c tbl->it_page_shift, npages, uaddr,
tbl 213 arch/powerpc/platforms/pseries/iommu.c return tce_build_pSeriesLP(tbl->it_index, tcenum,
tbl 214 arch/powerpc/platforms/pseries/iommu.c tbl->it_page_shift,
tbl 238 arch/powerpc/platforms/pseries/iommu.c rc = plpar_tce_put_indirect((u64)tbl->it_index,
tbl 251 arch/powerpc/platforms/pseries/iommu.c tce_freemulti_pSeriesLP(tbl, tcenum_start,
tbl 258 arch/powerpc/platforms/pseries/iommu.c printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
tbl 285 arch/powerpc/platforms/pseries/iommu.c static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
tbl 290 arch/powerpc/platforms/pseries/iommu.c return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
tbl 292 arch/powerpc/platforms/pseries/iommu.c rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
tbl 297 arch/powerpc/platforms/pseries/iommu.c printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
tbl 303 arch/powerpc/platforms/pseries/iommu.c static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
tbl 308 arch/powerpc/platforms/pseries/iommu.c rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
tbl 312 arch/powerpc/platforms/pseries/iommu.c printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
tbl 482 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl)
tbl 498 arch/powerpc/platforms/pseries/iommu.c tbl->it_base = (unsigned long)__va(*basep);
tbl 501 arch/powerpc/platforms/pseries/iommu.c memset((void *)tbl->it_base, 0, *sizep);
tbl 503 arch/powerpc/platforms/pseries/iommu.c tbl->it_busno = phb->bus->number;
tbl 504 arch/powerpc/platforms/pseries/iommu.c tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl 507 arch/powerpc/platforms/pseries/iommu.c tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
tbl 518 arch/powerpc/platforms/pseries/iommu.c tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
tbl 520 arch/powerpc/platforms/pseries/iommu.c tbl->it_index = 0;
tbl 521 arch/powerpc/platforms/pseries/iommu.c tbl->it_blocksize = 16;
tbl 522 arch/powerpc/platforms/pseries/iommu.c tbl->it_type = TCE_PCI;
tbl 532 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl,
tbl 538 arch/powerpc/platforms/pseries/iommu.c of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
tbl 540 arch/powerpc/platforms/pseries/iommu.c tbl->it_busno = phb->bus->number;
tbl 541 arch/powerpc/platforms/pseries/iommu.c tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl 542 arch/powerpc/platforms/pseries/iommu.c tbl->it_base = 0;
tbl 543 arch/powerpc/platforms/pseries/iommu.c tbl->it_blocksize = 16;
tbl 544 arch/powerpc/platforms/pseries/iommu.c tbl->it_type = TCE_PCI;
tbl 545 arch/powerpc/platforms/pseries/iommu.c tbl->it_offset = offset >> tbl->it_page_shift;
tbl 546 arch/powerpc/platforms/pseries/iommu.c tbl->it_size = size >> tbl->it_page_shift;
tbl 561 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl;
tbl 623 arch/powerpc/platforms/pseries/iommu.c tbl = pci->table_group->tables[0];
tbl 625 arch/powerpc/platforms/pseries/iommu.c iommu_table_setparms(pci->phb, dn, tbl);
tbl 626 arch/powerpc/platforms/pseries/iommu.c tbl->it_ops = &iommu_table_pseries_ops;
tbl 627 arch/powerpc/platforms/pseries/iommu.c iommu_init_table(tbl, pci->phb->node, 0, 0);
tbl 638 arch/powerpc/platforms/pseries/iommu.c static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
tbl 643 arch/powerpc/platforms/pseries/iommu.c unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
tbl 648 arch/powerpc/platforms/pseries/iommu.c spin_lock_irqsave(&tbl->large_pool.lock, flags);
tbl 650 arch/powerpc/platforms/pseries/iommu.c rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
tbl 652 arch/powerpc/platforms/pseries/iommu.c rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);
tbl 659 arch/powerpc/platforms/pseries/iommu.c spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
tbl 676 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl;
tbl 705 arch/powerpc/platforms/pseries/iommu.c tbl = ppci->table_group->tables[0];
tbl 706 arch/powerpc/platforms/pseries/iommu.c iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
tbl 708 arch/powerpc/platforms/pseries/iommu.c tbl->it_ops = &iommu_table_lpar_multi_ops;
tbl 709 arch/powerpc/platforms/pseries/iommu.c iommu_init_table(tbl, ppci->phb->node, 0, 0);
tbl 720 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl;
tbl 735 arch/powerpc/platforms/pseries/iommu.c tbl = PCI_DN(dn)->table_group->tables[0];
tbl 736 arch/powerpc/platforms/pseries/iommu.c iommu_table_setparms(phb, dn, tbl);
tbl 737 arch/powerpc/platforms/pseries/iommu.c tbl->it_ops = &iommu_table_pseries_ops;
tbl 738 arch/powerpc/platforms/pseries/iommu.c iommu_init_table(tbl, phb->node, 0, 0);
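tce_build_pSeries() above writes 64-bit TCEs straight into it_base, while the LPAR paths hand them to the hypervisor via plpar_tce_put() and friends. The sketch below shows one plausible way such a TCE word is composed; the permission bit values are illustrative assumptions, not taken from the pseries headers.

/* Sketch of composing a direct-mapped TCE word; bit values invented. */
#include <stdint.h>
#include <stdio.h>

#define TCE_PAGE_SHIFT 12
#define TCE_READ  0x1UL  /* illustrative permission bits */
#define TCE_WRITE 0x2UL

static uint64_t make_tce(unsigned long paddr, int writable)
{
	/* Keep the page-aligned part of the address, then OR in permissions. */
	uint64_t tce = (uint64_t)(paddr >> TCE_PAGE_SHIFT) << TCE_PAGE_SHIFT;

	tce |= TCE_READ;
	if (writable)
		tce |= TCE_WRITE;
	return tce;
}

int main(void)
{
	printf("tce=0x%llx\n", (unsigned long long)make_tce(0x1234000, 1));
	return 0;
}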
tbl 739 arch/powerpc/platforms/pseries/iommu.c set_iommu_table_base(&dev->dev, tbl);
tbl 1151 arch/powerpc/platforms/pseries/iommu.c struct iommu_table *tbl;
tbl 1184 arch/powerpc/platforms/pseries/iommu.c tbl = pci->table_group->tables[0];
tbl 1185 arch/powerpc/platforms/pseries/iommu.c iommu_table_setparms_lpar(pci->phb, pdn, tbl,
tbl 1187 arch/powerpc/platforms/pseries/iommu.c tbl->it_ops = &iommu_table_lpar_multi_ops;
tbl 1188 arch/powerpc/platforms/pseries/iommu.c iommu_init_table(tbl, pci->phb->node, 0, 0);
tbl 518 arch/powerpc/platforms/pseries/vio.c struct iommu_table *tbl = get_iommu_table_base(dev);
tbl 521 arch/powerpc/platforms/pseries/vio.c if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
tbl 523 arch/powerpc/platforms/pseries/vio.c ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
tbl 530 arch/powerpc/platforms/pseries/vio.c vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
tbl 542 arch/powerpc/platforms/pseries/vio.c struct iommu_table *tbl = get_iommu_table_base(dev);
tbl 544 arch/powerpc/platforms/pseries/vio.c iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
tbl 545 arch/powerpc/platforms/pseries/vio.c vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
tbl 553 arch/powerpc/platforms/pseries/vio.c struct iommu_table *tbl = get_iommu_table_base(dev);
tbl 559 arch/powerpc/platforms/pseries/vio.c alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
tbl 563 arch/powerpc/platforms/pseries/vio.c ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
tbl 569 arch/powerpc/platforms/pseries/vio.c alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
tbl 587 arch/powerpc/platforms/pseries/vio.c struct iommu_table *tbl = get_iommu_table_base(dev);
tbl 593 arch/powerpc/platforms/pseries/vio.c alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
tbl 595 arch/powerpc/platforms/pseries/vio.c ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
tbl 699 arch/powerpc/platforms/pseries/vio.c struct iommu_table *tbl;
tbl 705 arch/powerpc/platforms/pseries/vio.c tbl = get_iommu_table_base(dev);
tbl 733 arch/powerpc/platforms/pseries/vio.c IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
tbl 1167 arch/powerpc/platforms/pseries/vio.c struct iommu_table *tbl;
tbl 1175 arch/powerpc/platforms/pseries/vio.c tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
tbl 1176 arch/powerpc/platforms/pseries/vio.c if (tbl == NULL)
tbl 1179 arch/powerpc/platforms/pseries/vio.c kref_init(&tbl->it_kref);
tbl 1182 arch/powerpc/platforms/pseries/vio.c &tbl->it_index, &offset, &size);
tbl 1185 arch/powerpc/platforms/pseries/vio.c tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl 1186 arch/powerpc/platforms/pseries/vio.c tbl->it_size = size >> tbl->it_page_shift;
tbl 1188 arch/powerpc/platforms/pseries/vio.c tbl->it_offset = offset >> tbl->it_page_shift;
tbl 1189 arch/powerpc/platforms/pseries/vio.c tbl->it_busno = 0;
tbl 1190 arch/powerpc/platforms/pseries/vio.c tbl->it_type = TCE_VB;
tbl 1191 arch/powerpc/platforms/pseries/vio.c tbl->it_blocksize = 16;
tbl 1194 arch/powerpc/platforms/pseries/vio.c tbl->it_ops = &iommu_table_lpar_multi_ops;
tbl 1196 arch/powerpc/platforms/pseries/vio.c tbl->it_ops = &iommu_table_pseries_ops;
tbl 1198 arch/powerpc/platforms/pseries/vio.c return iommu_init_table(tbl, -1, 0, 0);
tbl 1312 arch/powerpc/platforms/pseries/vio.c struct iommu_table *tbl = get_iommu_table_base(dev);
tbl 1314 arch/powerpc/platforms/pseries/vio.c if (tbl)
tbl 1315 arch/powerpc/platforms/pseries/vio.c iommu_tce_table_put(tbl);
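The vio DMA ops above charge the device's CMO entitlement per mapping, rounded up to whole IOMMU pages with roundup(). The arithmetic, stand-alone:

/* CMO accounting rounding sketch. */
#include <stdio.h>

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	unsigned long iommu_page = 4096;

	/* A 5000-byte buffer is charged as two 4K IOMMU pages. */
	printf("%lu\n", roundup_ul(5000, iommu_page));
	return 0;
}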
tbl 163 arch/powerpc/sysdev/dart_iommu.c static void dart_flush(struct iommu_table *tbl)
tbl 172 arch/powerpc/sysdev/dart_iommu.c static int dart_build(struct iommu_table *tbl, long index,
tbl 183 arch/powerpc/sysdev/dart_iommu.c orig_dp = dp = ((unsigned int*)tbl->it_base) + index;
tbl 209 arch/powerpc/sysdev/dart_iommu.c static void dart_free(struct iommu_table *tbl, long index, long npages)
tbl 221 arch/powerpc/sysdev/dart_iommu.c orig_dp = dp = ((unsigned int *)tbl->it_base) + index;
tbl 49 arch/sparc/include/asm/iommu_64.h struct iommu_map_table tbl;
tbl 56 arch/sparc/include/asm/iommu_64.h struct iommu_map_table tbl;
tbl 227 arch/sparc/kernel/iommu-common.c static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
tbl 231 arch/sparc/kernel/iommu-common.c unsigned long largepool_start = tbl->large_pool.start;
tbl 232 arch/sparc/kernel/iommu-common.c bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
tbl 236 arch/sparc/kernel/iommu-common.c p = &tbl->large_pool;
tbl 238 arch/sparc/kernel/iommu-common.c unsigned int pool_nr = entry / tbl->poolsize;
tbl 240 arch/sparc/kernel/iommu-common.c BUG_ON(pool_nr >= tbl->nr_pools);
tbl 241 arch/sparc/kernel/iommu-common.c p = &tbl->pools[pool_nr];
tbl 52 arch/sparc/kernel/iommu.c struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
tbl 105 arch/sparc/kernel/iommu.c iommu->tbl.table_map_base = dma_offset;
tbl 111 arch/sparc/kernel/iommu.c iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
tbl 112 arch/sparc/kernel/iommu.c if (!iommu->tbl.map)
tbl 115 arch/sparc/kernel/iommu.c iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
tbl 150 arch/sparc/kernel/iommu.c kfree(iommu->tbl.map);
tbl 151 arch/sparc/kernel/iommu.c iommu->tbl.map = NULL;
tbl 162 arch/sparc/kernel/iommu.c entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
tbl 230 arch/sparc/kernel/iommu.c *dma_addrp = (iommu->tbl.table_map_base +
tbl 256 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
tbl 296 arch/sparc/kernel/iommu.c bus_addr = (iommu->tbl.table_map_base +
tbl 407 arch/sparc/kernel/iommu.c ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
tbl 429 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
tbl 477 arch/sparc/kernel/iommu.c base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
tbl 491 arch/sparc/kernel/iommu.c entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
tbl 505 arch/sparc/kernel/iommu.c dma_addr = iommu->tbl.table_map_base +
tbl 566 arch/sparc/kernel/iommu.c entry = (vaddr - iommu->tbl.table_map_base)
tbl 573 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
tbl 597 arch/sparc/kernel/iommu.c struct iommu_map_table *tbl = &iommu->tbl;
tbl 601 arch/sparc/kernel/iommu.c ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
tbl 638 arch/sparc/kernel/iommu.c entry = ((dma_handle - iommu->tbl.table_map_base)
tbl 650 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
tbl 685 arch/sparc/kernel/iommu.c struct iommu_map_table *tbl = &iommu->tbl;
tbl 688 arch/sparc/kernel/iommu.c ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
tbl 721 arch/sparc/kernel/iommu.c struct iommu_map_table *tbl = &iommu->tbl;
tbl 724 arch/sparc/kernel/iommu.c tbl->table_map_base) >> IO_PAGE_SHIFT);
tbl 187 arch/sparc/kernel/pci_sun4v.c struct iommu_map_table *tbl;
tbl 214 arch/sparc/kernel/pci_sun4v.c tbl = &iommu->tbl;
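The sparc hits above convert between DMA virtual addresses and map-table entries through table_map_base and IO_PAGE_SHIFT (13 on sparc64, i.e. 8K IO pages). A round-trip sketch with an invented base:

/* DMA address <-> entry round trip for the sparc iommu_map_table. */
#include <stdio.h>

#define IO_PAGE_SHIFT 13

int main(void)
{
	unsigned long table_map_base = 0xfff0000000UL; /* invented */
	unsigned long entry = 42;

	unsigned long dvma = table_map_base + (entry << IO_PAGE_SHIFT);
	unsigned long back = (dvma - table_map_base) >> IO_PAGE_SHIFT;

	printf("dvma=0x%lx entry=%lu\n", dvma, back);
	return 0;
}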
tbl 216 arch/sparc/kernel/pci_sun4v.c tbl = &iommu->atu->tbl;
tbl 218 arch/sparc/kernel/pci_sun4v.c entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
tbl 224 arch/sparc/kernel/pci_sun4v.c *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
tbl 250 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
tbl 327 arch/sparc/kernel/pci_sun4v.c struct iommu_map_table *tbl;
tbl 339 arch/sparc/kernel/pci_sun4v.c tbl = &iommu->tbl;
tbl 342 arch/sparc/kernel/pci_sun4v.c tbl = &atu->tbl;
tbl 345 arch/sparc/kernel/pci_sun4v.c entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
tbl 347 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
tbl 360 arch/sparc/kernel/pci_sun4v.c struct iommu_map_table *tbl;
tbl 380 arch/sparc/kernel/pci_sun4v.c tbl = &iommu->tbl;
tbl 382 arch/sparc/kernel/pci_sun4v.c tbl = &atu->tbl;
tbl 384 arch/sparc/kernel/pci_sun4v.c entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
tbl 390 arch/sparc/kernel/pci_sun4v.c bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
tbl 423 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
tbl 434 arch/sparc/kernel/pci_sun4v.c struct iommu_map_table *tbl;
tbl 457 arch/sparc/kernel/pci_sun4v.c tbl = &iommu->tbl;
tbl 460 arch/sparc/kernel/pci_sun4v.c tbl = &atu->tbl;
tbl 462 arch/sparc/kernel/pci_sun4v.c entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
tbl 464 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
tbl 479 arch/sparc/kernel/pci_sun4v.c struct iommu_map_table *tbl;
tbl 516 arch/sparc/kernel/pci_sun4v.c tbl = &iommu->tbl;
tbl 518 arch/sparc/kernel/pci_sun4v.c tbl = &atu->tbl;
tbl 520 arch/sparc/kernel/pci_sun4v.c base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
tbl 534 arch/sparc/kernel/pci_sun4v.c entry = iommu_tbl_range_alloc(dev, tbl, npages,
tbl 540 arch/sparc/kernel/pci_sun4v.c tbl, paddr, npages);
tbl 547 arch/sparc/kernel/pci_sun4v.c dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
tbl 611 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, vaddr, npages,
tbl 651 arch/sparc/kernel/pci_sun4v.c struct iommu_map_table *tbl;
tbl 660 arch/sparc/kernel/pci_sun4v.c tbl = &iommu->tbl;
tbl 663 arch/sparc/kernel/pci_sun4v.c tbl = &atu->tbl;
tbl 665 arch/sparc/kernel/pci_sun4v.c entry = ((dma_handle - tbl->table_map_base) >> shift);
tbl 668 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, dma_handle, npages,
tbl 853 arch/sparc/kernel/pci_sun4v.c atu->tbl.table_map_base = atu->base;
tbl 855 arch/sparc/kernel/pci_sun4v.c atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
tbl 856 arch/sparc/kernel/pci_sun4v.c if (!atu->tbl.map)
tbl 859 arch/sparc/kernel/pci_sun4v.c iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
tbl 893 arch/sparc/kernel/pci_sun4v.c iommu->tbl.table_map_base = dma_offset;
tbl 899 arch/sparc/kernel/pci_sun4v.c iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
tbl 900 arch/sparc/kernel/pci_sun4v.c if (!iommu->tbl.map) {
tbl 904 arch/sparc/kernel/pci_sun4v.c iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
tbl 908 arch/sparc/kernel/pci_sun4v.c sz = probe_existing_entries(pbm, &iommu->tbl);
tbl 42 arch/x86/boot/compressed/acpi.c efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables + i;
tbl 44 arch/x86/boot/compressed/acpi.c guid = tbl->guid;
tbl 45 arch/x86/boot/compressed/acpi.c table = tbl->table;
tbl 52 arch/x86/boot/compressed/acpi.c efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables + i;
tbl 54 arch/x86/boot/compressed/acpi.c guid = tbl->guid;
tbl 55 arch/x86/boot/compressed/acpi.c table = tbl->table;
tbl 34 arch/x86/include/asm/calgary.h void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
tbl 35 arch/x86/include/asm/calgary.h void (*tce_cache_blast)(struct iommu_table *tbl);
tbl 36 arch/x86/include/asm/calgary.h void (*dump_error_regs)(struct iommu_table *tbl);
tbl 28 arch/x86/include/asm/tce.h extern void tce_build(struct iommu_table *tbl, unsigned long index,
tbl 30 arch/x86/include/asm/tce.h extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
tbl 32 arch/x86/include/asm/tce.h extern void __init free_tce_table(void *tbl);
tbl 161 arch/x86/kernel/pci-calgary_64.c static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
tbl 162 arch/x86/kernel/pci-calgary_64.c static void calgary_tce_cache_blast(struct iommu_table *tbl);
tbl 163 arch/x86/kernel/pci-calgary_64.c static void calgary_dump_error_regs(struct iommu_table *tbl);
tbl 164 arch/x86/kernel/pci-calgary_64.c static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
tbl 165 arch/x86/kernel/pci-calgary_64.c static void calioc2_tce_cache_blast(struct iommu_table *tbl);
tbl 166 arch/x86/kernel/pci-calgary_64.c static void calioc2_dump_error_regs(struct iommu_table *tbl);
tbl 167 arch/x86/kernel/pci-calgary_64.c static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
tbl 184 arch/x86/kernel/pci-calgary_64.c static inline int translation_enabled(struct iommu_table *tbl)
tbl 187 arch/x86/kernel/pci-calgary_64.c return (tbl != NULL);
tbl 190 arch/x86/kernel/pci-calgary_64.c static void iommu_range_reserve(struct iommu_table *tbl,
tbl 200 arch/x86/kernel/pci-calgary_64.c if (index >= tbl->it_size)
tbl 204 arch/x86/kernel/pci-calgary_64.c if (end > tbl->it_size) /* don't go off the table */
tbl 205 arch/x86/kernel/pci-calgary_64.c end = tbl->it_size;
tbl 207 arch/x86/kernel/pci-calgary_64.c spin_lock_irqsave(&tbl->it_lock, flags);
tbl 209 arch/x86/kernel/pci-calgary_64.c bitmap_set(tbl->it_map, index, npages);
tbl 211 arch/x86/kernel/pci-calgary_64.c spin_unlock_irqrestore(&tbl->it_lock, flags);
tbl 215 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl,
tbl 227 arch/x86/kernel/pci-calgary_64.c spin_lock_irqsave(&tbl->it_lock, flags);
tbl 229 arch/x86/kernel/pci-calgary_64.c offset = iommu_area_alloc(tbl->it_map, tbl->it_size, tbl->it_hint,
tbl 232 arch/x86/kernel/pci-calgary_64.c tbl->chip_ops->tce_cache_blast(tbl);
tbl 234 arch/x86/kernel/pci-calgary_64.c offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
tbl 238 arch/x86/kernel/pci-calgary_64.c spin_unlock_irqrestore(&tbl->it_lock, flags);
tbl 246 arch/x86/kernel/pci-calgary_64.c tbl->it_hint = offset + npages;
tbl 247 arch/x86/kernel/pci-calgary_64.c BUG_ON(tbl->it_hint > tbl->it_size);
tbl 249 arch/x86/kernel/pci-calgary_64.c spin_unlock_irqrestore(&tbl->it_lock, flags);
tbl 254 arch/x86/kernel/pci-calgary_64.c static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
tbl 260 arch/x86/kernel/pci-calgary_64.c entry = iommu_range_alloc(dev, tbl, npages);
tbl 263 arch/x86/kernel/pci-calgary_64.c npages, tbl);
tbl 271 arch/x86/kernel/pci-calgary_64.c tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
tbl 276 arch/x86/kernel/pci-calgary_64.c static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
tbl 291 arch/x86/kernel/pci-calgary_64.c BUG_ON(entry + npages > tbl->it_size);
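The Calgary iommu_range_alloc hits above encode a common bitmap-allocator idiom: search from a moving hint, and on failure flush hardware state and retry once from the start of the table. A minimal userspace sketch of that hint-then-wrap search; area_alloc is a simplified linear stand-in for iommu_area_alloc, and the TCE cache blast is only noted in a comment.

#include <stdio.h>

#define TBL_SIZE 64                      /* stand-in for tbl->it_size */
static unsigned char it_map[TBL_SIZE];   /* one byte per entry for clarity */
static unsigned long it_hint;

/* linear scan standing in for iommu_area_alloc() */
static long area_alloc(unsigned long start, unsigned long npages)
{
	for (unsigned long i = start; i + npages <= TBL_SIZE; i++) {
		unsigned long j;

		for (j = 0; j < npages && !it_map[i + j]; j++)
			;
		if (j == npages) {
			for (j = 0; j < npages; j++)
				it_map[i + j] = 1;
			return (long)i;
		}
	}
	return -1;
}

/* try from the hint first, then wrap to 0, as iommu_range_alloc() does */
static long range_alloc(unsigned long npages)
{
	long off = area_alloc(it_hint, npages);

	if (off < 0)
		off = area_alloc(0, npages);  /* kernel also blasts the TCE cache here */
	if (off >= 0)
		it_hint = (unsigned long)off + npages;
	return off;
}

int main(void)
{
	printf("%ld %ld\n", range_alloc(8), range_alloc(8));  /* prints: 0 8 */
	return 0;
}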
tbl 293 arch/x86/kernel/pci-calgary_64.c tce_free(tbl, entry, npages);
tbl 295 arch/x86/kernel/pci-calgary_64.c spin_lock_irqsave(&tbl->it_lock, flags);
tbl 297 arch/x86/kernel/pci-calgary_64.c bitmap_clear(tbl->it_map, entry, npages);
tbl 299 arch/x86/kernel/pci-calgary_64.c spin_unlock_irqrestore(&tbl->it_lock, flags);
tbl 306 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl;
tbl 313 arch/x86/kernel/pci-calgary_64.c tbl = pci_iommu(pbus);
tbl 314 arch/x86/kernel/pci-calgary_64.c if (tbl && tbl->it_busno == pbus->number)
tbl 316 arch/x86/kernel/pci-calgary_64.c tbl = NULL;
tbl 320 arch/x86/kernel/pci-calgary_64.c BUG_ON(tbl && (tbl->it_busno != pbus->number));
tbl 322 arch/x86/kernel/pci-calgary_64.c return tbl;
tbl 329 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = find_iommu_table(dev);
tbl 333 arch/x86/kernel/pci-calgary_64.c if (!translation_enabled(tbl))
tbl 345 arch/x86/kernel/pci-calgary_64.c iommu_free(tbl, dma, npages);
tbl 353 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = find_iommu_table(dev);
tbl 366 arch/x86/kernel/pci-calgary_64.c entry = iommu_range_alloc(dev, tbl, npages);
tbl 376 arch/x86/kernel/pci-calgary_64.c tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
tbl 399 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = find_iommu_table(dev);
tbl 404 arch/x86/kernel/pci-calgary_64.c return iommu_alloc(dev, tbl, vaddr, npages, dir);
tbl 411 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = find_iommu_table(dev);
tbl 415 arch/x86/kernel/pci-calgary_64.c iommu_free(tbl, dma_addr, npages);
tbl 424 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = find_iommu_table(dev);
tbl 437 arch/x86/kernel/pci-calgary_64.c mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
tbl 454 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = find_iommu_table(dev);
tbl 459 arch/x86/kernel/pci-calgary_64.c iommu_free(tbl, dma_handle, npages);
tbl 527 arch/x86/kernel/pci-calgary_64.c static void calgary_tce_cache_blast(struct iommu_table *tbl)
tbl 532 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 536 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
tbl 541 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
tbl 545 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
tbl 554 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, tar_offset(tbl->it_busno));
tbl 555 arch/x86/kernel/pci-calgary_64.c writeq(tbl->tar_val, target);
tbl 558 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
tbl 563 arch/x86/kernel/pci-calgary_64.c static void calioc2_tce_cache_blast(struct iommu_table *tbl)
tbl 565 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 571 arch/x86/kernel/pci-calgary_64.c unsigned char bus = tbl->it_busno;
tbl 622 arch/x86/kernel/pci-calgary_64.c writeq(tbl->tar_val, target);
tbl 657 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = pci_iommu(dev->bus);
tbl 659 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 681 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = pci_iommu(dev->bus);
tbl 683 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 717 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = pci_iommu(dev->bus);
tbl 728 arch/x86/kernel/pci-calgary_64.c iommu_range_reserve(tbl, start, npages);
tbl 741 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl;
tbl 748 arch/x86/kernel/pci-calgary_64.c tbl = pci_iommu(dev->bus);
tbl 749 arch/x86/kernel/pci-calgary_64.c tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
tbl 752 arch/x86/kernel/pci-calgary_64.c calgary_init_bitmap_from_tce_table(tbl);
tbl 754 arch/x86/kernel/pci-calgary_64.c tce_free(tbl, 0, tbl->it_size);
tbl 757 arch/x86/kernel/pci-calgary_64.c tbl->chip_ops = &calgary_chip_ops;
tbl 759 arch/x86/kernel/pci-calgary_64.c tbl->chip_ops = &calioc2_chip_ops;
tbl 771 arch/x86/kernel/pci-calgary_64.c table_phys = (u64)__pa(tbl->it_base);
tbl 778 arch/x86/kernel/pci-calgary_64.c tbl->tar_val = cpu_to_be64(val64);
tbl 780 arch/x86/kernel/pci-calgary_64.c writeq(tbl->tar_val, target);
tbl 789 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = pci_iommu(dev->bus);
tbl 793 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
tbl 799 arch/x86/kernel/pci-calgary_64.c bitmapsz = tbl->it_size / BITS_PER_BYTE;
tbl 800 arch/x86/kernel/pci-calgary_64.c free_pages((unsigned long)tbl->it_map, get_order(bitmapsz));
tbl 801 arch/x86/kernel/pci-calgary_64.c tbl->it_map = NULL;
tbl 803 arch/x86/kernel/pci-calgary_64.c kfree(tbl);
tbl 811 arch/x86/kernel/pci-calgary_64.c static void calgary_dump_error_regs(struct iommu_table *tbl)
tbl 813 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 817 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
tbl 820 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
tbl 825 arch/x86/kernel/pci-calgary_64.c tbl->it_busno, csr, plssr);
tbl 828 arch/x86/kernel/pci-calgary_64.c static void calioc2_dump_error_regs(struct iommu_table *tbl)
tbl 830 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 833 arch/x86/kernel/pci-calgary_64.c unsigned long phboff = phb_offset(tbl->it_busno);
tbl 851 arch/x86/kernel/pci-calgary_64.c pr_emerg("DMA error on CalIOC2 PHB 0x%x\n", tbl->it_busno);
tbl 876 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl = from_timer(tbl, t, watchdog_timer);
tbl 877 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 881 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
tbl 886 arch/x86/kernel/pci-calgary_64.c tbl->chip_ops->dump_error_regs(tbl);
tbl 892 arch/x86/kernel/pci-calgary_64.c target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
tbl 900 arch/x86/kernel/pci-calgary_64.c mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
tbl 936 arch/x86/kernel/pci-calgary_64.c static void __init calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
tbl 939 arch/x86/kernel/pci-calgary_64.c void __iomem *bbar = tbl->bbar;
tbl 952 arch/x86/kernel/pci-calgary_64.c static void __init calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
tbl 961 arch/x86/kernel/pci-calgary_64.c calgary_set_split_completion_timeout(tbl->bbar, busnum,
tbl 971 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl;
tbl 974 arch/x86/kernel/pci-calgary_64.c tbl = pci_iommu(dev->bus);
tbl 975 arch/x86/kernel/pci-calgary_64.c bbar = tbl->bbar;
tbl 991 arch/x86/kernel/pci-calgary_64.c timer_setup(&tbl->watchdog_timer, calgary_watchdog, 0);
tbl 992 arch/x86/kernel/pci-calgary_64.c mod_timer(&tbl->watchdog_timer, jiffies);
tbl 1001 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl;
tbl 1004 arch/x86/kernel/pci-calgary_64.c tbl = pci_iommu(dev->bus);
tbl 1005 arch/x86/kernel/pci-calgary_64.c bbar = tbl->bbar;
tbl 1016 arch/x86/kernel/pci-calgary_64.c del_timer_sync(&tbl->watchdog_timer);
tbl 1034 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl;
tbl 1052 arch/x86/kernel/pci-calgary_64.c tbl = pci_iommu(dev->bus);
tbl 1053 arch/x86/kernel/pci-calgary_64.c tbl->chip_ops->handle_quirks(tbl, dev);
tbl 1154 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl;
tbl 1156 arch/x86/kernel/pci-calgary_64.c tbl = find_iommu_table(&dev->dev);
tbl 1158 arch/x86/kernel/pci-calgary_64.c if (translation_enabled(tbl))
tbl 1286 arch/x86/kernel/pci-calgary_64.c static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
tbl 1290 arch/x86/kernel/pci-calgary_64.c tp = ((u64 *)tbl->it_base);
tbl 1291 arch/x86/kernel/pci-calgary_64.c for (index = 0 ; index < tbl->it_size; index++) {
tbl 1293 arch/x86/kernel/pci-calgary_64.c set_bit(index, tbl->it_map);
tbl 1356 arch/x86/kernel/pci-calgary_64.c void *tbl;
tbl 1431 arch/x86/kernel/pci-calgary_64.c tbl = alloc_tce_table();
tbl 1432 arch/x86/kernel/pci-calgary_64.c if (!tbl)
tbl 1434 arch/x86/kernel/pci-calgary_64.c info->tce_space = tbl;
tbl 1524 arch/x86/kernel/pci-calgary_64.c struct iommu_table *tbl;
tbl 1528 arch/x86/kernel/pci-calgary_64.c tbl = pci_iommu(dev->bus);
tbl 1545 arch/x86/kernel/pci-calgary_64.c iommu_range_reserve(tbl, r->start, npages);
tbl 36 arch/x86/kernel/tce_64.c void tce_build(struct iommu_table *tbl, unsigned long index,
tbl 47 arch/x86/kernel/tce_64.c tp = ((u64*)tbl->it_base) + index;
tbl 62 arch/x86/kernel/tce_64.c void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
tbl 66 arch/x86/kernel/tce_64.c tp = ((u64*)tbl->it_base) + index;
tbl 85 arch/x86/kernel/tce_64.c static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
tbl 91 arch/x86/kernel/tce_64.c tbl->it_busno = dev->bus->number;
tbl 94 arch/x86/kernel/tce_64.c tbl->it_size = table_size_to_number_of_entries(specified_table_size);
tbl 100 arch/x86/kernel/tce_64.c bitmapsz = tbl->it_size / BITS_PER_BYTE;
tbl 108 arch/x86/kernel/tce_64.c tbl->it_map = (unsigned long*)bmppages;
tbl 110 arch/x86/kernel/tce_64.c memset(tbl->it_map, 0, bitmapsz);
tbl 112 arch/x86/kernel/tce_64.c tbl->it_hint = 0;
tbl 114 arch/x86/kernel/tce_64.c spin_lock_init(&tbl->it_lock);
tbl 124 arch/x86/kernel/tce_64.c struct iommu_table *tbl;
tbl 133 arch/x86/kernel/tce_64.c tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
tbl 134 arch/x86/kernel/tce_64.c if (!tbl) {
tbl 140 arch/x86/kernel/tce_64.c ret = tce_table_setparms(dev, tbl);
tbl 144 arch/x86/kernel/tce_64.c tbl->bbar = bbar;
tbl 146 arch/x86/kernel/tce_64.c set_pci_iommu(dev->bus, tbl);
tbl 151 arch/x86/kernel/tce_64.c kfree(tbl);
tbl 166 arch/x86/kernel/tce_64.c void __init free_tce_table(void *tbl)
tbl 170 arch/x86/kernel/tce_64.c if (!tbl)
tbl 176 arch/x86/kernel/tce_64.c memblock_free(__pa(tbl), size);
tbl 701 drivers/acpi/hmat/hmat.c struct acpi_table_header *tbl;
tbl 708 drivers/acpi/hmat/hmat.c status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
tbl 717 drivers/acpi/hmat/hmat.c acpi_put_table(tbl);
tbl 719 drivers/acpi/hmat/hmat.c status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
tbl 723 drivers/acpi/hmat/hmat.c hmat_revision = tbl->revision;
tbl 748 drivers/acpi/hmat/hmat.c acpi_put_table(tbl);
tbl 3619 drivers/acpi/nfit/core.c struct acpi_table_header *tbl;
tbl 3624 drivers/acpi/nfit/core.c status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
tbl 3637 drivers/acpi/nfit/core.c rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
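The hmat.c and nfit/core.c hits above follow the standard ACPI lookup discipline: acpi_get_table takes a reference to a mapped table, the caller checks the status and reads header fields, and acpi_put_table releases the reference on every exit path (nfit even automates this with devm_add_action_or_reset). A minimal userspace sketch of that get/check/use/put shape; acpi_get_table_sim and acpi_put_table_sim are simplified stand-ins, not the real ACPICA API.

#include <stdio.h>
#include <string.h>

/* stand-in header shape; the real struct acpi_table_header has more fields */
struct acpi_table_header {
	char signature[4];
	unsigned int length;
	unsigned char revision;
};

static struct acpi_table_header fake_hmat = { "HMAT", 40, 2 };

static int acpi_get_table_sim(const char *sig, struct acpi_table_header **out)
{
	if (memcmp(sig, fake_hmat.signature, 4))
		return -1;          /* lookup failure, like AE_NOT_FOUND */
	*out = &fake_hmat;      /* real code hands back a mapped reference */
	return 0;
}

static void acpi_put_table_sim(struct acpi_table_header *tbl)
{
	(void)tbl;              /* real code drops the mapping/reference here */
}

int main(void)
{
	struct acpi_table_header *tbl;

	if (acpi_get_table_sim("HMAT", &tbl))
		return 1;
	printf("revision %u\n", tbl->revision);  /* hmat.c caches tbl->revision */
	acpi_put_table_sim(tbl);                 /* always paired with the get */
	return 0;
}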
tbl 3640 drivers/acpi/nfit/core.c sz = tbl->length;
tbl 3648 drivers/acpi/nfit/core.c acpi_desc->acpi_header = *tbl;
tbl 3664 drivers/acpi/nfit/core.c rc = acpi_nfit_init(acpi_desc, (void *) tbl
tbl 279 drivers/acpi/pmic/tps68470_pmic.c const struct tps68470_pmic_table *tbl,
tbl 289 drivers/acpi/pmic/tps68470_pmic.c ret = pmic_get_reg_bit(address, tbl, tbl_size, &reg, &bitmask);
tbl 103 drivers/char/tpm/tpm_tis.c struct acpi_table_tpm2 *tbl;
tbl 113 drivers/char/tpm/tpm_tis.c acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
tbl 114 drivers/char/tpm/tpm_tis.c if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
tbl 120 drivers/char/tpm/tpm_tis.c if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
tbl 238 drivers/clk/tegra/clk.c void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
tbl 243 drivers/clk/tegra/clk.c for (; tbl->clk_id < clk_max; tbl++) {
tbl 244 drivers/clk/tegra/clk.c clk = clks[tbl->clk_id];
tbl 247 drivers/clk/tegra/clk.c __func__, PTR_ERR(clk), tbl->clk_id);
tbl 253 drivers/clk/tegra/clk.c if (tbl->parent_id < clk_max) {
tbl 254 drivers/clk/tegra/clk.c struct clk *parent = clks[tbl->parent_id];
tbl 263 drivers/clk/tegra/clk.c if (tbl->rate)
tbl 264 drivers/clk/tegra/clk.c if (clk_set_rate(clk, tbl->rate)) {
tbl 266 drivers/clk/tegra/clk.c __func__, tbl->rate,
tbl 271 drivers/clk/tegra/clk.c if (tbl->state)
tbl 771 drivers/clk/tegra/clk.h void tegra_init_from_table(struct tegra_clk_init_table *tbl,
tbl 85 drivers/cpufreq/cppc_cpufreq.c struct acpi_table_header *tbl;
tbl 89 drivers/cpufreq/cppc_cpufreq.c status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
tbl 90 drivers/cpufreq/cppc_cpufreq.c if (ACPI_FAILURE(status) || !tbl)
tbl 94 drivers/cpufreq/cppc_cpufreq.c if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
tbl 95 drivers/cpufreq/cppc_cpufreq.c !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
tbl 96 drivers/cpufreq/cppc_cpufreq.c wa_info[i].oem_revision == tbl->oem_revision)
tbl 85 drivers/cpufreq/tegra186-cpufreq.c struct cpufreq_frequency_table *tbl = policy->freq_table + index;
tbl 87 drivers/cpufreq/tegra186-cpufreq.c u32 edvd_val = tbl->driver_data;
tbl 572 drivers/firmware/efi/efi.c efi_properties_table_t *tbl;
tbl 574 drivers/firmware/efi/efi.c tbl = early_memremap(efi.properties_table, sizeof(*tbl));
tbl 575 drivers/firmware/efi/efi.c if (tbl == NULL) {
tbl 580 drivers/firmware/efi/efi.c if (tbl->memory_protection_attribute &
tbl 584 drivers/firmware/efi/efi.c early_memunmap(tbl, sizeof(*tbl));
tbl 23 drivers/firmware/efi/memattr.c efi_memory_attributes_table_t *tbl;
tbl 28 drivers/firmware/efi/memattr.c tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
tbl 29 drivers/firmware/efi/memattr.c if (!tbl) {
tbl 35 drivers/firmware/efi/memattr.c if (tbl->version > 1) {
tbl 37 drivers/firmware/efi/memattr.c tbl->version);
tbl 41 drivers/firmware/efi/memattr.c tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
tbl 46 drivers/firmware/efi/memattr.c early_memunmap(tbl, sizeof(*tbl));
tbl 135 drivers/firmware/efi/memattr.c efi_memory_attributes_table_t *tbl;
tbl 138 drivers/firmware/efi/memattr.c if (tbl_size <= sizeof(*tbl))
tbl 150 drivers/firmware/efi/memattr.c tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
tbl 151 drivers/firmware/efi/memattr.c if (!tbl) {
tbl 160 drivers/firmware/efi/memattr.c for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
tbl 166 drivers/firmware/efi/memattr.c valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
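The memattr.c hits above show how a firmware table with a variable descriptor size is handled: the total size is sizeof(header) plus num_entries * desc_size, and iteration strides by desc_size rather than by a compiled-in struct size, so newer firmware can grow the descriptor without breaking older kernels. A small self-contained sketch of that sizing and iteration; the memattr_tbl struct is a simplified stand-in for efi_memory_attributes_table_t.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* stand-in header: version, count, and the per-entry stride */
struct memattr_tbl {
	uint32_t version;
	uint32_t num_entries;
	uint32_t desc_size;   /* may be larger than the struct the code knows */
	unsigned char entry[];
};

int main(void)
{
	uint32_t desc_size = 48, num = 3;
	size_t tbl_size = sizeof(struct memattr_tbl) + (size_t)num * desc_size;
	struct memattr_tbl *tbl = calloc(1, tbl_size);

	if (!tbl)
		return 1;
	tbl->version = 1;
	tbl->num_entries = num;
	tbl->desc_size = desc_size;

	/* walk by desc_size, never by sizeof(entry), exactly as memattr.c does */
	for (uint32_t i = 0; i < tbl->num_entries; i++) {
		unsigned char *e = tbl->entry + (size_t)i * tbl->desc_size;

		printf("entry %u at offset %zu\n", i, (size_t)(e - (unsigned char *)tbl));
	}
	free(tbl);
	return 0;
}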
tbl 181 drivers/firmware/efi/memattr.c memunmap(tbl);
tbl 452 drivers/gpio/gpio-thunderx.c void __iomem * const *tbl;
tbl 480 drivers/gpio/gpio-thunderx.c tbl = pcim_iomap_table(pdev);
tbl 481 drivers/gpio/gpio-thunderx.c txgpio->register_base = tbl[0];
tbl 169 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_OBJECT_TABLE *tbl =
tbl 172 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (!tbl) {
tbl 177 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (tbl->ucNumberOfObjects <= i) {
tbl 179 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c i, tbl->ucNumberOfObjects);
tbl 183 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c id = le16_to_cpu(tbl->asObjects[i].usObjectID);
tbl 654 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
tbl 672 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
tbl 678 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (tbl[i].ucClockIndication != (uint8_t) id)
tbl 697 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
tbl 700 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
tbl 709 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c & tbl[i].ucSpreadSpectrumMode)
tbl 715 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
tbl 717 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
tbl 719 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
tbl 1025 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
tbl 1040 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
tbl 1045 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (tbl[i].ucClockIndication != (uint8_t)id)
tbl 1049 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c & tbl[i].ucSpreadSpectrumMode) {
tbl 1053 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c & tbl[i].ucSpreadSpectrumMode) {
tbl 1059 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
tbl 1061 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
tbl 1063 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
tbl 1089 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_SPREAD_SPECTRUM_INFO *tbl;
tbl 1104 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
tbl 1130 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
tbl 1135 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id)
tbl 1141 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl->asSS_Info[i].ucSpreadSpectrumType)
tbl 1145 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl->asSS_Info[i].ucSpreadSpectrumType)
tbl 1150 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c (uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage);
tbl 1151 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step;
tbl 1153 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl->asSS_Info[i].ucSS_Delay;
tbl 1155 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl->asSS_Info[i].ucRecommendedRef_Div;
tbl 1157 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c (uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000;
tbl 1619 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_SPREAD_SPECTRUM_INFO *tbl;
tbl 1635 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
tbl 1661 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
tbl 1666 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
tbl 1706 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
tbl 1720 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
tbl 1723 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (tbl[i].ucClockIndication == (uint8_t)id)
tbl 1742 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
tbl 1755 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
tbl 1759 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (tbl[i].ucClockIndication == (uint8_t)id)
tbl 1952 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_OBJECT_TABLE *tbl;
tbl 1980 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
tbl 1981 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (!tbl)
tbl 1984 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c for (i = 0; i < tbl->ucNumberOfObjects; i++)
tbl 1987 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c le16_to_cpu(tbl->asObjects[i].usObjectID))))
tbl 1988 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c return &tbl->asObjects[i];
tbl 178 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c struct object_info_table *tbl = &bp->object_info_tbl;
tbl 179 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c struct display_object_info_table_v1_4 *v1_4 = tbl->v1_4;
tbl 202 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c struct object_info_table *tbl = &bp->object_info_tbl;
tbl 216 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c for (i = 0; i < tbl->v1_4->number_of_path; i++) {
tbl 218 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c tbl->v1_4->display_path[i].encoderobjid);
tbl 231 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c for (i = 0; i < tbl->v1_4->number_of_path; i++) {
tbl 233 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c tbl->v1_4->display_path[i].display_objid);
tbl 240 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c tbl->v1_4->display_path[i].encoderobjid);
tbl 1691 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c struct object_info_table *tbl;
tbl 1699 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c tbl = &bp->object_info_tbl;
tbl 1700 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c v1_4 = tbl->v1_4;
tbl 1804 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c struct object_info_table *tbl;
tbl 1811 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c tbl = &bp->object_info_tbl;
tbl 1812 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c v1_4 = tbl->v1_4;
tbl 875 drivers/gpu/drm/i915/gt/selftest_workarounds.c const struct regmask *tbl,
tbl 881 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
tbl 882 drivers/gpu/drm/i915/gt/selftest_workarounds.c i915_mmio_reg_offset(tbl->reg) == offset)
tbl 884 drivers/gpu/drm/i915/gt/selftest_workarounds.c tbl++;
tbl 17 drivers/gpu/drm/i915/i915_user_extensions.c const i915_user_extension_fn *tbl,
tbl 47 drivers/gpu/drm/i915/i915_user_extensions.c if (tbl[name])
tbl 48 drivers/gpu/drm/i915/i915_user_extensions.c err = tbl[name](ext, data);
tbl 16 drivers/gpu/drm/i915/i915_user_extensions.h const i915_user_extension_fn *tbl,
tbl 198 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
tbl 203 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (!tbl || !tbl->nentry || !tbl->entries)
tbl 206 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c for (i = 0; i < tbl->nentry; i++)
tbl 207 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (total_fl <= tbl->entries[i].fl)
tbl 208 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c return tbl->entries[i].lut;
tbl 211 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (!tbl->entries[i-1].fl)
tbl 212 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c return tbl->entries[i-1].lut;
tbl 66 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c const struct dpu_vbif_dynamic_ot_tbl *tbl;
tbl 80 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
tbl 83 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c for (i = 0; i < tbl->count; i++) {
tbl 84 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c if (pps <= tbl->cfg[i].pps) {
tbl 85 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c *ot_lim = tbl->cfg[i].ot_limit;
tbl 60 drivers/hwmon/ab8500.c const struct abx500_res_to_temp *tbl = cfg->temp_tbl;
tbl 66 drivers/hwmon/ab8500.c if (r_ntc > tbl[0].resist || r_ntc < tbl[tbl_sz - 1].resist)
tbl 69 drivers/hwmon/ab8500.c while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) &&
tbl 74 drivers/hwmon/ab8500.c *temp = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 *
tbl 75 drivers/hwmon/ab8500.c (r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
tbl 210 drivers/iio/imu/bmi160/bmi160_core.c const struct bmi160_scale *tbl;
tbl 216 drivers/iio/imu/bmi160/bmi160_core.c .tbl = bmi160_accel_scale,
tbl 220 drivers/iio/imu/bmi160/bmi160_core.c .tbl = bmi160_gyro_scale,
tbl 252 drivers/iio/imu/bmi160/bmi160_core.c const struct bmi160_odr *tbl;
tbl 258 drivers/iio/imu/bmi160/bmi160_core.c .tbl = bmi160_accel_odr,
tbl 262 drivers/iio/imu/bmi160/bmi160_core.c .tbl = bmi160_gyro_odr,
tbl 317 drivers/iio/imu/bmi160/bmi160_core.c if (bmi160_scale_table[t].tbl[i].uscale == uscale)
tbl 324 drivers/iio/imu/bmi160/bmi160_core.c bmi160_scale_table[t].tbl[i].bits);
tbl 338 drivers/iio/imu/bmi160/bmi160_core.c if (bmi160_scale_table[t].tbl[i].bits == val) {
tbl 339 drivers/iio/imu/bmi160/bmi160_core.c *uscale = bmi160_scale_table[t].tbl[i].uscale;
tbl 372 drivers/iio/imu/bmi160/bmi160_core.c if (bmi160_odr_table[t].tbl[i].odr == odr &&
tbl 373 drivers/iio/imu/bmi160/bmi160_core.c bmi160_odr_table[t].tbl[i].uodr == uodr)
tbl 382 drivers/iio/imu/bmi160/bmi160_core.c bmi160_odr_table[t].tbl[i].bits);
tbl 397 drivers/iio/imu/bmi160/bmi160_core.c if (val == bmi160_odr_table[t].tbl[i].bits)
tbl 403 drivers/iio/imu/bmi160/bmi160_core.c *odr = bmi160_odr_table[t].tbl[i].odr;
tbl 404 drivers/iio/imu/bmi160/bmi160_core.c *uodr = bmi160_odr_table[t].tbl[i].uodr;
tbl 321 drivers/infiniband/hw/bnxt_re/ib_verbs.c gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
tbl 322 drivers/infiniband/hw/bnxt_re/ib_verbs.c vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
tbl 1154 drivers/infiniband/hw/bnxt_re/main.c if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
tbl 1163 drivers/infiniband/hw/bnxt_re/main.c memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
tbl 475 drivers/infiniband/hw/bnxt_re/qplib_res.c kfree(sgid_tbl->tbl);
tbl 479 drivers/infiniband/hw/bnxt_re/qplib_res.c sgid_tbl->tbl = NULL;
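The ab8500.c hits above sketch a classic threshold-table lookup with linear interpolation: bracket the measured resistance between two rows of a falling-resistance table, then interpolate the temperature in millidegrees. A self-contained version of the same arithmetic, with an illustrative stand-in table (the resistances below are made-up values, not the driver's calibration data):

#include <stdio.h>

/* stand-in for struct abx500_res_to_temp, ordered by falling resistance */
struct res_to_temp { int temp; int resist; };

static const struct res_to_temp tbl[] = {
	{ -20, 67000 }, { 0, 27000 }, { 20, 12500 }, { 40, 6000 },
};
#define N (sizeof(tbl) / sizeof(tbl[0]))

/* bracket r_ntc between two rows, then interpolate linearly (millidegrees) */
static int res_to_mdeg(int r_ntc, int *out)
{
	unsigned int i = 0;

	if (r_ntc > tbl[0].resist || r_ntc < tbl[N - 1].resist)
		return -1;  /* out of table range, as the driver reports */
	while (i < N - 2 && !(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist))
		i++;
	*out = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 *
	       (r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
	return 0;
}

int main(void)
{
	int mdeg;

	if (!res_to_mdeg(19750, &mdeg))
		printf("%d mC\n", mdeg);  /* prints 10000, i.e. 10 degrees C */
	return 0;
}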
tbl 491 drivers/infiniband/hw/bnxt_re/qplib_res.c sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
tbl 492 drivers/infiniband/hw/bnxt_re/qplib_res.c if (!sgid_tbl->tbl)
tbl 516 drivers/infiniband/hw/bnxt_re/qplib_res.c kfree(sgid_tbl->tbl);
tbl 517 drivers/infiniband/hw/bnxt_re/qplib_res.c sgid_tbl->tbl = NULL;
tbl 527 drivers/infiniband/hw/bnxt_re/qplib_res.c if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
tbl 529 drivers/infiniband/hw/bnxt_re/qplib_res.c bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
tbl 530 drivers/infiniband/hw/bnxt_re/qplib_res.c sgid_tbl->tbl[i].vlan_id, true);
tbl 532 drivers/infiniband/hw/bnxt_re/qplib_res.c memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
tbl 544 drivers/infiniband/hw/bnxt_re/qplib_res.c sgid_tbl->tbl[i].vlan_id = 0xffff;
tbl 552 drivers/infiniband/hw/bnxt_re/qplib_res.c if (!pkey_tbl->tbl)
tbl 555 drivers/infiniband/hw/bnxt_re/qplib_res.c kfree(pkey_tbl->tbl);
tbl 557 drivers/infiniband/hw/bnxt_re/qplib_res.c pkey_tbl->tbl = NULL;
tbl 566 drivers/infiniband/hw/bnxt_re/qplib_res.c pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
tbl 567 drivers/infiniband/hw/bnxt_re/qplib_res.c if (!pkey_tbl->tbl)
tbl 579 drivers/infiniband/hw/bnxt_re/qplib_res.c bit_num = find_first_bit(pdt->tbl, pdt->max);
tbl 584 drivers/infiniband/hw/bnxt_re/qplib_res.c clear_bit(bit_num, pdt->tbl);
tbl 593 drivers/infiniband/hw/bnxt_re/qplib_res.c if (test_and_set_bit(pd->id, pdt->tbl)) {
tbl 604 drivers/infiniband/hw/bnxt_re/qplib_res.c kfree(pdt->tbl);
tbl 605 drivers/infiniband/hw/bnxt_re/qplib_res.c pdt->tbl = NULL;
tbl 618 drivers/infiniband/hw/bnxt_re/qplib_res.c pdt->tbl = kmalloc(bytes, GFP_KERNEL);
tbl 619 drivers/infiniband/hw/bnxt_re/qplib_res.c if (!pdt->tbl)
tbl 623 drivers/infiniband/hw/bnxt_re/qplib_res.c memset((u8 *)pdt->tbl, 0xFF, bytes);
tbl 635 drivers/infiniband/hw/bnxt_re/qplib_res.c bit_num = find_first_bit(dpit->tbl, dpit->max);
tbl 640 drivers/infiniband/hw/bnxt_re/qplib_res.c clear_bit(bit_num, dpit->tbl);
tbl 658 drivers/infiniband/hw/bnxt_re/qplib_res.c if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
tbl 673 drivers/infiniband/hw/bnxt_re/qplib_res.c kfree(dpit->tbl);
tbl 726 drivers/infiniband/hw/bnxt_re/qplib_res.c dpit->tbl = kmalloc(bytes, GFP_KERNEL);
tbl 727 drivers/infiniband/hw/bnxt_re/qplib_res.c if (!dpit->tbl) {
tbl 733 drivers/infiniband/hw/bnxt_re/qplib_res.c memset((u8 *)dpit->tbl, 0xFF, bytes);
tbl 745 drivers/infiniband/hw/bnxt_re/qplib_res.c memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
tbl 754 drivers/infiniband/hw/bnxt_re/qplib_res.c memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
tbl 109 drivers/infiniband/hw/bnxt_re/qplib_res.h unsigned long *tbl;
tbl 114 drivers/infiniband/hw/bnxt_re/qplib_res.h struct bnxt_qplib_gid_info *tbl;
tbl 123 drivers/infiniband/hw/bnxt_re/qplib_res.h u16 *tbl;
tbl 136 drivers/infiniband/hw/bnxt_re/qplib_res.h unsigned long *tbl;
tbl 216 drivers/infiniband/hw/bnxt_re/qplib_sp.c memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
tbl 239 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
tbl 240 drivers/infiniband/hw/bnxt_re/qplib_sp.c vlan_id == sgid_tbl->tbl[index].vlan_id)
tbl 266 drivers/infiniband/hw/bnxt_re/qplib_sp.c memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
tbl 268 drivers/infiniband/hw/bnxt_re/qplib_sp.c sgid_tbl->tbl[index].vlan_id = 0xFFFF;
tbl 301 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
tbl 302 drivers/infiniband/hw/bnxt_re/qplib_sp.c sgid_tbl->tbl[i].vlan_id == vlan_id) {
tbl 307 drivers/infiniband/hw/bnxt_re/qplib_sp.c } else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
tbl 356 drivers/infiniband/hw/bnxt_re/qplib_sp.c memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
tbl 357 drivers/infiniband/hw/bnxt_re/qplib_sp.c sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
tbl 423 drivers/infiniband/hw/bnxt_re/qplib_sp.c memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
tbl 444 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
tbl 452 drivers/infiniband/hw/bnxt_re/qplib_sp.c memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
tbl 477 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
tbl 479 drivers/infiniband/hw/bnxt_re/qplib_sp.c else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
tbl 488 drivers/infiniband/hw/bnxt_re/qplib_sp.c memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
tbl 116 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h struct pvrdma_id_table tbl;
tbl 56 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
tbl 61 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->last = 0;
tbl 62 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->top = 0;
tbl 63 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->max = num;
tbl 64 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->mask = mask;
tbl 65 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c spin_lock_init(&tbl->lock);
tbl 66 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
tbl 67 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c if (!tbl->table)
tbl 71 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c set_bit(0, tbl->table);
tbl 78 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
tbl 80 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c kfree(tbl->table);
tbl 85 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c struct pvrdma_id_table *tbl;
tbl 89 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl = &dev->uar_table.tbl;
tbl 91 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c spin_lock_irqsave(&tbl->lock, flags);
tbl 92 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
tbl 93 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c if (obj >= tbl->max) {
tbl 94 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->top = (tbl->top + tbl->max) & tbl->mask;
tbl 95 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c obj = find_first_zero_bit(tbl->table, tbl->max);
tbl 98 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c if (obj >= tbl->max) {
tbl 99 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c spin_unlock_irqrestore(&tbl->lock, flags);
tbl 103 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c set_bit(obj, tbl->table);
tbl 104 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c obj |= tbl->top;
tbl 106 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c spin_unlock_irqrestore(&tbl->lock, flags);
tbl 117 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
tbl 121 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c obj = uar->index & (tbl->max - 1);
tbl 122 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c spin_lock_irqsave(&tbl->lock, flags);
tbl 123 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c clear_bit(obj, tbl->table);
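The pvrdma_doorbell.c hits above implement a round-robin bitmap ID allocator: 'last' is a search hint, and 'top' tags each allocation with a generation so returned IDs differ across wrap-arounds even when the same bit is reused. A minimal lock-free userspace sketch of that scheme (the kernel version runs under tbl->lock; the byte array stands in for the bitmap helpers):

#include <stdio.h>

#define MAX_IDS 8
#define ID_MASK ((MAX_IDS * 4) - 1)  /* a few generations before ids repeat */

static unsigned char used[MAX_IDS];
static unsigned int last, top;

static int alloc_id(void)
{
	unsigned int obj = last;             /* find_next_zero_bit from hint */

	while (obj < MAX_IDS && used[obj])
		obj++;
	if (obj >= MAX_IDS) {                /* wrap: new generation, rescan */
		top = (top + MAX_IDS) & ID_MASK;
		for (obj = 0; obj < MAX_IDS && used[obj]; obj++)
			;
		if (obj >= MAX_IDS)
			return -1;           /* table full */
	}
	used[obj] = 1;
	last = obj + 1;
	return (int)(obj | top);             /* generation folded into the id */
}

static void free_id(int id)
{
	unsigned int obj = (unsigned int)id & (MAX_IDS - 1);

	used[obj] = 0;
	if (obj < last)
		last = obj;                  /* tbl->last = min(tbl->last, obj) */
	top = (top + MAX_IDS) & ID_MASK;
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	free_id(a);
	printf("%d %d %d\n", a, b, alloc_id());  /* prints: 0 1 8 */
	return 0;
}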
tbl 124 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->last = min(tbl->last, obj);
tbl 125 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->top = (tbl->top + tbl->max) & tbl->mask;
tbl 126 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c spin_unlock_irqrestore(&tbl->lock, flags);
tbl 153 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c struct opa_veswport_mactable *tbl)
tbl 165 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c loffset = be16_to_cpu(tbl->offset);
tbl 166 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c lnum_entries = be16_to_cpu(tbl->num_entries);
tbl 177 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c entry = &tbl->tbl_entries[node->index - loffset];
tbl 184 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c tbl->mac_tbl_digest = cpu_to_be32(adapter->info.vport.mac_tbl_digest);
tbl 205 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c struct opa_veswport_mactable *tbl)
tbl 221 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c loffset = be16_to_cpu(tbl->offset);
tbl 222 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c lnum_entries = be16_to_cpu(tbl->num_entries);
tbl 228 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c &tbl->tbl_entries[i];
tbl 285 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c adapter->info.vport.mac_tbl_digest = be32_to_cpu(tbl->mac_tbl_digest);
tbl 306 drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h struct opa_veswport_mactable *tbl);
tbl 308 drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h struct opa_veswport_mactable *tbl);
tbl 1825 drivers/iommu/amd_iommu.c static void free_gcr3_tbl_level1(u64 *tbl)
tbl 1831 drivers/iommu/amd_iommu.c if (!(tbl[i] & GCR3_VALID))
tbl 1834 drivers/iommu/amd_iommu.c ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
tbl 1840 drivers/iommu/amd_iommu.c static void free_gcr3_tbl_level2(u64 *tbl)
tbl 1846 drivers/iommu/amd_iommu.c if (!(tbl[i] & GCR3_VALID))
tbl 1849 drivers/iommu/amd_iommu.c ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
tbl 295 drivers/iommu/amd_iommu_v2.c static void free_pasid_states_level1(struct pasid_state **tbl)
tbl 300 drivers/iommu/amd_iommu_v2.c if (tbl[i] == NULL)
tbl 303 drivers/iommu/amd_iommu_v2.c free_page((unsigned long)tbl[i]);
tbl 307 drivers/iommu/amd_iommu_v2.c static void free_pasid_states_level2(struct pasid_state **tbl)
tbl 313 drivers/iommu/amd_iommu_v2.c if (tbl[i] == NULL)
tbl 316 drivers/iommu/amd_iommu_v2.c ptr = (struct pasid_state **)tbl[i];
tbl 2992 drivers/iommu/intel-iommu.c struct context_entry **tbl,
tbl 3011 drivers/iommu/intel-iommu.c tbl[tbl_idx] = new_ce;
tbl 3081 drivers/iommu/intel-iommu.c tbl[tbl_idx + pos] = new_ce;
tbl 361 drivers/media/platform/qcom/venus/hfi_venus.c const struct reg_val *tbl = res->reg_tbl;
tbl 366 drivers/media/platform/qcom/venus/hfi_venus.c venus_writel(hdev, tbl[i].reg, tbl[i].value);
tbl 495 drivers/media/platform/rcar_jpu.c static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
tbl 500 drivers/media/platform/rcar_jpu.c jpu_write(jpu, tbl[i], reg + (i << 2));
tbl 679 drivers/media/platform/s5p-jpeg/jpeg-core.c const unsigned char *tbl,
tbl 686 drivers/media/platform/s5p-jpeg/jpeg-core.c dword = tbl[i] |
tbl 687 drivers/media/platform/s5p-jpeg/jpeg-core.c (tbl[i + 1] << 8) |
tbl 688 drivers/media/platform/s5p-jpeg/jpeg-core.c (tbl[i + 2] << 16) |
tbl 689 drivers/media/platform/s5p-jpeg/jpeg-core.c (tbl[i + 3] << 24);
tbl 204 drivers/media/usb/gspca/gl860/gl860-ov9655.c u8 **tbl;
tbl 208 drivers/media/usb/gspca/gl860/gl860-ov9655.c tbl = (reso == IMAGE_640) ? tbl_640 : tbl_1280;
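The amd_iommu.c free_gcr3_tbl_level1/level2 hits above show a two-level table teardown: the level-2 table holds tagged pointers to level-1 tables, each entry is skipped unless its valid bit is set, and children are freed before the parent. A compact userspace sketch of that shape, with malloc'd arrays standing in for IOMMU pages and a low pointer bit standing in for GCR3_VALID:

#include <stdlib.h>
#include <stdint.h>

#define ENTRIES 4
#define VALID   1ull

static void free_level1(uint64_t *tbl)
{
	free(tbl);
}

static void free_level2(uint64_t *tbl)
{
	for (int i = 0; i < ENTRIES; i++) {
		if (!(tbl[i] & VALID))
			continue;  /* skip empty slots, like the GCR3_VALID checks */
		free_level1((uint64_t *)(uintptr_t)(tbl[i] & ~VALID));
	}
	free(tbl);           /* parent goes last, after all children */
}

int main(void)
{
	uint64_t *l2 = calloc(ENTRIES, sizeof(*l2));
	uint64_t *l1 = calloc(ENTRIES, sizeof(*l1));

	if (!l2 || !l1)
		return 1;
	l2[0] = (uint64_t)(uintptr_t)l1 | VALID;  /* kernel stores phys | GCR3_VALID */
	free_level2(l2);
	return 0;
}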
tbl 211 drivers/media/usb/gspca/gl860/gl860-ov9655.c tbl_length[0], tbl[0]);
tbl 214 drivers/media/usb/gspca/gl860/gl860-ov9655.c tbl_length[i], tbl[i]);
tbl 216 drivers/media/usb/gspca/gl860/gl860-ov9655.c tbl_length[7], tbl[7]);
tbl 580 drivers/media/usb/gspca/gl860/gl860.c int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len)
tbl 585 drivers/media/usb/gspca/gl860/gl860.c if (tbl[n].idx != 0xffff)
tbl 586 drivers/media/usb/gspca/gl860/gl860.c ctrl_out(gspca_dev, 0x40, 1, tbl[n].val,
tbl 587 drivers/media/usb/gspca/gl860/gl860.c tbl[n].idx, 0, NULL);
tbl 588 drivers/media/usb/gspca/gl860/gl860.c else if (tbl[n].val == 0xffff)
tbl 591 drivers/media/usb/gspca/gl860/gl860.c msleep(tbl[n].val);
tbl 596 drivers/media/usb/gspca/gl860/gl860.c int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl,
tbl 600 drivers/media/usb/gspca/gl860/gl860.c if (tbl[n].idx != 0xffff)
tbl 601 drivers/media/usb/gspca/gl860/gl860.c ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx,
tbl 603 drivers/media/usb/gspca/gl860/gl860.c else if (tbl[n].val == 0xffff)
tbl 606 drivers/media/usb/gspca/gl860/gl860.c msleep(tbl[n].val);
tbl 611 drivers/media/usb/gspca/gl860/gl860.c void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len)
tbl 616 drivers/media/usb/gspca/gl860/gl860.c if (memcmp(tbl[n].data, "\xff\xff\xff", 3) != 0)
tbl 617 drivers/media/usb/gspca/gl860/gl860.c ctrl_out(gspca_dev, 0x40, 3, 0x7a00, tbl[n].idx,
tbl 618 drivers/media/usb/gspca/gl860/gl860.c 3, tbl[n].data);
tbl 620 drivers/media/usb/gspca/gl860/gl860.c msleep(tbl[n].idx);
tbl 79 drivers/media/usb/gspca/gl860/gl860.h int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len);
tbl 80 drivers/media/usb/gspca/gl860/gl860.h int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl,
tbl 82 drivers/media/usb/gspca/gl860/gl860.h void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len);
tbl 480 drivers/mfd/menelaus.c static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl,
tbl 485 drivers/mfd/menelaus.c for (i = 0; i < n; i++, tbl++)
tbl 486 drivers/mfd/menelaus.c if (tbl->vtg == vtg)
tbl 487 drivers/mfd/menelaus.c return tbl->val;
tbl 651 drivers/misc/cardreader/rtsx_pcr.c static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
tbl 655 drivers/misc/cardreader/rtsx_pcr.c while (*tbl & 0xFFFF0000) {
tbl 657 drivers/misc/cardreader/rtsx_pcr.c (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
tbl 658 drivers/misc/cardreader/rtsx_pcr.c tbl++;
tbl 666 drivers/misc/cardreader/rtsx_pcr.c const u32 *tbl;
tbl 669 drivers/misc/cardreader/rtsx_pcr.c tbl = pcr->sd_pull_ctl_enable_tbl;
tbl 671 drivers/misc/cardreader/rtsx_pcr.c tbl = pcr->ms_pull_ctl_enable_tbl;
tbl 675 drivers/misc/cardreader/rtsx_pcr.c return rtsx_pci_set_pull_ctl(pcr, tbl);
tbl 681 drivers/misc/cardreader/rtsx_pcr.c const u32 *tbl;
tbl 684 drivers/misc/cardreader/rtsx_pcr.c tbl = pcr->sd_pull_ctl_disable_tbl;
tbl 686 drivers/misc/cardreader/rtsx_pcr.c tbl = pcr->ms_pull_ctl_disable_tbl;
tbl 691 drivers/misc/cardreader/rtsx_pcr.c return rtsx_pci_set_pull_ctl(pcr, tbl);
tbl 126 drivers/mtd/ubi/eba.c struct ubi_eba_table *tbl;
tbl 130 drivers/mtd/ubi/eba.c tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
tbl 131 drivers/mtd/ubi/eba.c if (!tbl)
tbl 134 drivers/mtd/ubi/eba.c tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
tbl 136 drivers/mtd/ubi/eba.c if (!tbl->entries)
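The gl860.c fetch_validx hits above walk a sentinel-terminated register script: each (val, idx) row is a register write, idx == 0xffff turns the row into a delay of val milliseconds, and val == 0xffff ends the script early. A self-contained sketch of that interpreter, with printf standing in for the USB control transfer and the sleep:

#include <stdio.h>

/* stand-in for struct validx from gl860.h */
struct validx { unsigned short val, idx; };

static void reg_write(unsigned short val, unsigned short idx)
{
	printf("write 0x%04x -> reg 0x%04x\n", val, idx);  /* ctrl_out() in the driver */
}

static void run_script(const struct validx *tbl, int len)
{
	for (int n = 0; n < len; n++) {
		if (tbl[n].idx != 0xffff)
			reg_write(tbl[n].val, tbl[n].idx);
		else if (tbl[n].val == 0xffff)
			break;                             /* end-of-script sentinel */
		else
			printf("sleep %u ms\n", tbl[n].val);  /* msleep() in the driver */
	}
}

int main(void)
{
	static const struct validx script[] = {
		{ 0x0001, 0x0041 }, { 50, 0xffff }, { 0xffff, 0xffff },
	};

	run_script(script, 3);
	return 0;
}

The same sentinel idea appears in rtsx_pcr.c above, where the loop runs while the high half of each packed u32 is non-zero.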
tbl 140 drivers/mtd/ubi/eba.c tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
tbl 142 drivers/mtd/ubi/eba.c return tbl;
tbl 145 drivers/mtd/ubi/eba.c kfree(tbl->entries);
tbl 146 drivers/mtd/ubi/eba.c kfree(tbl);
tbl 157 drivers/mtd/ubi/eba.c void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
tbl 159 drivers/mtd/ubi/eba.c if (!tbl)
tbl 162 drivers/mtd/ubi/eba.c kfree(tbl->entries);
tbl 163 drivers/mtd/ubi/eba.c kfree(tbl);
tbl 195 drivers/mtd/ubi/eba.c void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
tbl 198 drivers/mtd/ubi/eba.c vol->eba_tbl = tbl;
tbl 1628 drivers/mtd/ubi/eba.c struct ubi_eba_table *tbl;
tbl 1636 drivers/mtd/ubi/eba.c tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
tbl 1637 drivers/mtd/ubi/eba.c if (IS_ERR(tbl)) {
tbl 1638 drivers/mtd/ubi/eba.c err = PTR_ERR(tbl);
tbl 1642 drivers/mtd/ubi/eba.c ubi_eba_replace_table(vol, tbl);
tbl 877 drivers/mtd/ubi/ubi.h void ubi_eba_destroy_table(struct ubi_eba_table *tbl);
tbl 880 drivers/mtd/ubi/ubi.h void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl);
tbl 512 drivers/net/bonding/bond_options.c const struct bond_opt_value *tbl;
tbl 521 drivers/net/bonding/bond_options.c tbl = opt->values;
tbl 522 drivers/net/bonding/bond_options.c if (!tbl)
tbl 549 drivers/net/bonding/bond_options.c for (i = 0; tbl[i].string; i++) {
tbl 552 drivers/net/bonding/bond_options.c if (val->value == tbl[i].value)
tbl 553 drivers/net/bonding/bond_options.c ret = &tbl[i];
tbl 556 drivers/net/bonding/bond_options.c (tbl[i].flags & BOND_VALFLAG_DEFAULT))
tbl 557 drivers/net/bonding/bond_options.c ret = &tbl[i];
tbl 559 drivers/net/bonding/bond_options.c if (!strcmp(valstr, tbl[i].string))
tbl 560 drivers/net/bonding/bond_options.c ret = &tbl[i];
tbl 504 drivers/net/dsa/lantiq_gswip.c struct gswip_pce_table_entry *tbl)
tbl 509 drivers/net/dsa/lantiq_gswip.c u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
tbl 517 drivers/net/dsa/lantiq_gswip.c gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
tbl 520 drivers/net/dsa/lantiq_gswip.c tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
tbl 528 drivers/net/dsa/lantiq_gswip.c for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
tbl 529 drivers/net/dsa/lantiq_gswip.c tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
tbl 531 drivers/net/dsa/lantiq_gswip.c for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
tbl 532 drivers/net/dsa/lantiq_gswip.c tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
tbl 534 drivers/net/dsa/lantiq_gswip.c tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
tbl 538 drivers/net/dsa/lantiq_gswip.c tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
tbl 539 drivers/net/dsa/lantiq_gswip.c tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
tbl 540 drivers/net/dsa/lantiq_gswip.c tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
tbl 546 drivers/net/dsa/lantiq_gswip.c struct gswip_pce_table_entry *tbl)
tbl 551 drivers/net/dsa/lantiq_gswip.c u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
tbl 559 drivers/net/dsa/lantiq_gswip.c gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
tbl 562 drivers/net/dsa/lantiq_gswip.c tbl->table | addr_mode,
tbl 565 drivers/net/dsa/lantiq_gswip.c for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
tbl 566 drivers/net/dsa/lantiq_gswip.c gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
tbl 568 drivers/net/dsa/lantiq_gswip.c for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
tbl 569 drivers/net/dsa/lantiq_gswip.c gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
tbl 573 drivers/net/dsa/lantiq_gswip.c tbl->table | addr_mode,
tbl 576 drivers/net/dsa/lantiq_gswip.c gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
tbl 581 drivers/net/dsa/lantiq_gswip.c if (tbl->type)
tbl 583 drivers/net/dsa/lantiq_gswip.c if (tbl->valid)
tbl 585 drivers/net/dsa/lantiq_gswip.c crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
tbl 810 drivers/net/ethernet/atheros/alx/main.c u32 tbl[2] = {0, 0};
tbl 818 drivers/net/ethernet/atheros/alx/main.c tbl[idx] |= vector << shift;
tbl 822 drivers/net/ethernet/atheros/alx/main.c tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
tbl 825 drivers/net/ethernet/atheros/alx/main.c alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
tbl 826 drivers/net/ethernet/atheros/alx/main.c alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
tbl 4712 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c struct msix_entry *tbl;
tbl 4768 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
tbl 4769 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (!tbl)
tbl 4771 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c bp->msix_table = tbl;
tbl 14961 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct bdn_fc_npiv_tbl *tbl = NULL;
tbl 14971 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
tbl 14972 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (!tbl) {
tbl 14985 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
tbl 14993 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c entries = tbl->fc_npiv_cfg.num_of_npiv;
tbl 14995 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c tbl->fc_npiv_cfg.num_of_npiv = entries;
tbl 14997 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c if (!tbl->fc_npiv_cfg.num_of_npiv) {
tbl 15001 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
tbl 15003 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c tbl->fc_npiv_cfg.num_of_npiv);
tbl 15007 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c tbl->fc_npiv_cfg.num_of_npiv);
tbl 15011 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
tbl 15013 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
tbl 15014 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
tbl 15019 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c kfree(tbl);
tbl 426 drivers/net/ethernet/chelsio/cxgb4/l2t.c unsigned int addr_len = neigh->tbl->key_len;
tbl 539 drivers/net/ethernet/chelsio/cxgb4/l2t.c unsigned int addr_len = neigh->tbl->key_len;
tbl 1532 drivers/net/ethernet/ibm/ibmveth.c struct iommu_table *tbl;
tbl 1537 drivers/net/ethernet/ibm/ibmveth.c tbl = get_iommu_table_base(&vdev->dev);
tbl 1541 drivers/net/ethernet/ibm/ibmveth.c return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
tbl 1546 drivers/net/ethernet/ibm/ibmveth.c ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
tbl 1554 drivers/net/ethernet/ibm/ibmveth.c buff_size, tbl);
tbl 1559 drivers/net/ethernet/ibm/ibmveth.c rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
tbl 5093 drivers/net/ethernet/ibm/ibmvnic.c struct iommu_table *tbl;
tbl 5097 drivers/net/ethernet/ibm/ibmvnic.c tbl = get_iommu_table_base(&vdev->dev);
tbl 5101 drivers/net/ethernet/ibm/ibmvnic.c return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
tbl 5106 drivers/net/ethernet/ibm/ibmvnic.c ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
tbl 5114 drivers/net/ethernet/ibm/ibmvnic.c IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
tbl 50 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c enum ixgbe_ipsec_tbl_sel tbl)
tbl 56 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
tbl 257 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
tbl 376 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
tbl 923 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
tbl 925 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (n->tbl != &arp_tbl)
tbl 931 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
tbl 950 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
tbl 952 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (!p->dev || p->tbl != &arp_tbl)
tbl 309 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
tbl 313 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
tbl 328 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
tbl 330 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
tbl 333 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&tbl->lock);
tbl 353 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c struct mod_hdr_tbl *tbl;
tbl 366 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c tbl = get_mod_hdr_table(priv, namespace);
tbl 368 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_lock(&tbl->lock);
tbl 369 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
tbl 371 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&tbl->lock);
tbl 383 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&tbl->lock);
tbl 395 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
tbl 396 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&tbl->lock);
tbl 1485 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c struct neigh_table *tbl;
tbl 1491 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c tbl = &arp_tbl;
tbl 1494 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c tbl = ipv6_stub->nd_tbl;
tbl 1544 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
tbl 644 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 674 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c if (action->dest_tbl.tbl->dmn != dmn) {
tbl 679 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
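The en_tc.c mod_hdr hits above form a get-or-create cache: look the key up in a hash table under the table lock, bump a refcount on a hit, and insert a fresh entry on a miss. A simplified userspace sketch of that pattern using separate chaining and a string key; the locking is only noted in comments, and the hash/bucket layout is a stand-in, not the kernel's hashtable.h machinery.

#include <stdio.h>
#include <string.h>

#define BUCKETS 16

struct mod_hdr {
	char key[16];
	int refcnt;
	struct mod_hdr *next;
};

static struct mod_hdr *buckets[BUCKETS];
static struct mod_hdr pool[32];
static int pool_used;

static unsigned int hash(const char *key)
{
	unsigned int h = 5381;

	while (*key)
		h = h * 33 + (unsigned char)*key++;
	return h % BUCKETS;
}

static struct mod_hdr *mod_hdr_get(const char *key)
{
	unsigned int b = hash(key);
	struct mod_hdr *mh;

	/* in the kernel this whole function runs with tbl->lock held */
	for (mh = buckets[b]; mh; mh = mh->next)
		if (!strcmp(mh->key, key)) {
			mh->refcnt++;              /* hit: share the entry */
			return mh;
		}

	mh = &pool[pool_used++];                   /* miss: build and insert */
	snprintf(mh->key, sizeof(mh->key), "%s", key);
	mh->refcnt = 1;
	mh->next = buckets[b];
	buckets[b] = mh;
	return mh;
}

int main(void)
{
	mod_hdr_get("set-ttl");
	printf("refcnt=%d\n", mod_hdr_get("set-ttl")->refcnt);  /* prints 2 */
	return 0;
}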
tbl 685 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
tbl 686 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr;
tbl 964 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
tbl 968 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c refcount_inc(&tbl->refcount);
tbl 974 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c action->dest_tbl.tbl = tbl;
tbl 979 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c refcount_dec(&tbl->refcount);
tbl 1562 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c refcount_dec(&action->dest_tbl.tbl->refcount);
tbl 31 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j)
tbl 33 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
tbl 160 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mlx5dr_dbg(matcher->tbl->dmn,
tbl 173 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 443 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_table *tbl = matcher->tbl;
tbl 444 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_domain *dmn = tbl->dmn;
tbl 449 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c if (!list_empty(&tbl->matcher_list))
tbl 450 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
tbl 462 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c prev_matcher = list_last_entry(&tbl->matcher_list,
tbl 490 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c list_add(&matcher->matcher_list, &tbl->matcher_list);
tbl 509 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 530 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 595 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_table *tbl = matcher->tbl;
tbl 596 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_domain *dmn = tbl->dmn;
tbl 615 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c matcher->rx.nic_tbl = &tbl->rx;
tbl 619 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c matcher->tx.nic_tbl = &tbl->tx;
tbl 623 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c matcher->rx.nic_tbl = &tbl->rx;
tbl 624 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c matcher->tx.nic_tbl = &tbl->tx;
tbl 636 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mlx5dr_matcher_create(struct mlx5dr_table *tbl,
tbl 644 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c refcount_inc(&tbl->refcount);
tbl 650 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c matcher->tbl = tbl;
tbl 656 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mutex_lock(&tbl->dmn->mutex);
tbl 666 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mutex_unlock(&tbl->dmn->mutex);
tbl 673 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mutex_unlock(&tbl->dmn->mutex);
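The dr_crc32.c hit above is the table-construction step of a sliced CRC32: row 0 is the classic one-byte lookup table, and each higher row i is derived from row i-1 by the recurrence tbl[i][j] = (tbl[i-1][j] >> 8) ^ tbl[0][tbl[i-1][j] & 0xff], letting the hash consume several input bytes per lookup. A self-contained sketch that builds the rows with exactly that recurrence (the reflected CRC-32 polynomial below is a common choice, assumed here for illustration):

#include <stdio.h>
#include <stdint.h>

#define ROWS 4
static uint32_t tbl[ROWS][256];

static void build_tables(uint32_t poly)
{
	/* row 0: the classic bit-at-a-time table */
	for (uint32_t j = 0; j < 256; j++) {
		uint32_t crc = j;

		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
		tbl[0][j] = crc;
	}
	/* higher rows: same recurrence as dr_crc32_calc_lookup_entry() */
	for (int i = 1; i < ROWS; i++)
		for (uint32_t j = 0; j < 256; j++)
			tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
}

int main(void)
{
	build_tables(0xedb88320u);
	printf("0x%08x\n", tbl[0][1]);  /* 0x77073096 for this polynomial */
	return 0;
}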
tbl 673 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mutex_unlock(&tbl->dmn->mutex);
tbl 676 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c refcount_dec(&tbl->refcount);
tbl 713 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_table *tbl = matcher->tbl;
tbl 714 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_domain *dmn = tbl->dmn;
tbl 717 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c if (list_is_last(&matcher->matcher_list, &tbl->matcher_list))
tbl 722 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c if (matcher->matcher_list.prev == &tbl->matcher_list)
tbl 729 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c ret = dr_matcher_disconnect(dmn, &tbl->rx,
tbl 738 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c ret = dr_matcher_disconnect(dmn, &tbl->tx,
tbl 752 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c struct mlx5dr_table *tbl = matcher->tbl;
tbl 757 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mutex_lock(&tbl->dmn->mutex);
tbl 761 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c refcount_dec(&matcher->tbl->refcount);
tbl 763 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c mutex_unlock(&tbl->dmn->mutex);
tbl 44 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 76 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
tbl 88 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
tbl 187 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
tbl 256 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
tbl 313 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
tbl 332 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
tbl 364 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 478 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
tbl 511 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
tbl 699 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
tbl 747 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
tbl 777 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 880 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
tbl 891 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
tbl 901 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
tbl 911 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
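The dr_matcher.c entries above link each matcher into its table's matcher_list and use list_is_last()/list_last_entry() to find neighbours when connecting or disconnecting. A toy version of priority-ordered insertion with a plain pointer list instead of the kernel's list.h (all names illustrative; the lower-value-first ordering is an assumption):

#include <stdio.h>

/* Toy matcher kept on a priority-ordered list standing in for
 * tbl->matcher_list. */
struct matcher {
	int prio;
	struct matcher *next;
};

static void matcher_add(struct matcher **head, struct matcher *m)
{
	struct matcher **pos = head;

	/* Walk to the first entry with a higher priority value and splice
	 * in before it; falling off the end appends, which corresponds to
	 * the list_last_entry() case in the driver. */
	while (*pos && (*pos)->prio <= m->prio)
		pos = &(*pos)->next;
	m->next = *pos;
	*pos = m;
}

int main(void)
{
	struct matcher a = { .prio = 10 }, b = { .prio = 5 }, *head = NULL;

	matcher_add(&head, &a);
	matcher_add(&head, &b);
	printf("%d %d\n", head->prio, head->next->prio);	/* 5 10 */
	return 0;
}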
tbl 921 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
tbl 931 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
tbl 954 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
tbl 1023 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 1173 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 1234 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mutex_lock(&matcher->tbl->dmn->mutex);
tbl 1241 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mutex_unlock(&matcher->tbl->dmn->mutex);
tbl 1249 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c struct mlx5dr_table *tbl = rule->matcher->tbl;
tbl 1252 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mutex_lock(&tbl->dmn->mutex);
tbl 1256 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c mutex_unlock(&tbl->dmn->mutex);
tbl 454 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 606 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 737 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c struct mlx5dr_domain *dmn = matcher->tbl->dmn;
tbl 6 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
tbl 17 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mutex_lock(&tbl->dmn->mutex);
tbl 19 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (!list_empty(&tbl->matcher_list))
tbl 20 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c last_matcher = list_last_entry(&tbl->matcher_list,
tbl 24 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
tbl 25 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
tbl 29 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c last_htbl = tbl->rx.s_anchor;
tbl 31 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->rx.default_icm_addr = action ?
tbl 32 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
tbl 33 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->rx.nic_dmn->default_icm_addr;
tbl 36 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c info.miss_icm_addr = tbl->rx.default_icm_addr;
tbl 38 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
tbl 39 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->rx.nic_dmn,
tbl 43 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
tbl 48 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
tbl 49 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
tbl 53 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c last_htbl = tbl->tx.s_anchor;
tbl 55 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->tx.default_icm_addr = action ?
tbl 56 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
tbl 57 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->tx.nic_dmn->default_icm_addr;
tbl 60 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c info.miss_icm_addr = tbl->tx.default_icm_addr;
tbl 62 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
tbl 63 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->tx.nic_dmn,
tbl 66 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);
tbl 72 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (tbl->miss_action)
tbl 73 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c refcount_dec(&tbl->miss_action->refcount);
tbl 76 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->miss_action = action;
tbl 77 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (tbl->miss_action)
tbl 81 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mutex_unlock(&tbl->dmn->mutex);
tbl 90 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
tbl 92 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit_nic(&tbl->rx);
tbl 93 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit_nic(&tbl->tx);
tbl 96 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c static void dr_table_uninit(struct mlx5dr_table *tbl)
tbl 98 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mutex_lock(&tbl->dmn->mutex);
tbl 100 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c switch (tbl->dmn->type) {
tbl 102 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit_nic(&tbl->rx);
tbl 105 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit_nic(&tbl->tx);
tbl 108 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit_fdb(tbl);
tbl 115 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mutex_unlock(&tbl->dmn->mutex);
tbl 151 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c static int dr_table_init_fdb(struct mlx5dr_table *tbl)
tbl 155 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
tbl 159 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
tbl 166 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit_nic(&tbl->rx);
tbl 170 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c static int dr_table_init(struct mlx5dr_table *tbl)
tbl 174 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c INIT_LIST_HEAD(&tbl->matcher_list);
tbl 176 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mutex_lock(&tbl->dmn->mutex);
tbl 178 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c switch (tbl->dmn->type) {
tbl 180 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_RX;
tbl 181 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->rx.nic_dmn = &tbl->dmn->info.rx;
tbl 182 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
tbl 185 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_TX;
tbl 186 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->tx.nic_dmn = &tbl->dmn->info.tx;
tbl 187 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
tbl 190 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->table_type = MLX5_FLOW_TABLE_TYPE_FDB;
tbl 191 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->rx.nic_dmn = &tbl->dmn->info.rx;
tbl 192 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->tx.nic_dmn = &tbl->dmn->info.tx;
tbl 193 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_init_fdb(tbl);
tbl 200 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c mutex_unlock(&tbl->dmn->mutex);
tbl 205 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
tbl 207 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c return mlx5dr_cmd_destroy_flow_table(tbl->dmn->mdev,
tbl 208 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->table_id,
tbl 209 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->table_type);
tbl 212 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
tbl 218 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (tbl->rx.s_anchor)
tbl 219 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
tbl 221 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (tbl->tx.s_anchor)
tbl 222 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
tbl 224 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
tbl 225 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->table_type,
tbl 228 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->dmn->info.caps.max_ft_level - 1,
tbl 230 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c &tbl->table_id);
tbl 237 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c struct mlx5dr_table *tbl;
tbl 242 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
tbl 243 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (!tbl)
tbl 246 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->dmn = dmn;
tbl 247 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c tbl->level = level;
tbl 248 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c refcount_set(&tbl->refcount, 1);
tbl 250 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_init(tbl);
tbl 254 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_create_sw_owned_tbl(tbl);
tbl 258 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c return tbl;
tbl 261 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit(tbl);
tbl 263 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c kfree(tbl);
tbl 269 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
tbl 273 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (refcount_read(&tbl->refcount) > 1)
tbl 276 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ret = dr_table_destroy_sw_owned_tbl(tbl);
tbl 280 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c dr_table_uninit(tbl);
tbl 282 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c if (tbl->miss_action)
tbl 283 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c refcount_dec(&tbl->miss_action->refcount);
tbl 285 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c refcount_dec(&tbl->dmn->refcount);
tbl 286 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c kfree(tbl);
tbl 291 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
tbl 293 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c return tbl->table_id;
tbl 694 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5dr_table *tbl;
tbl 736 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5dr_table *tbl;
tbl 68 drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c struct mlx5dr_table *tbl;
tbl 76 drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
tbl 78 drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c if (!tbl) {
tbl 83 drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c ft->fs_dr_table.dr_table = tbl;
tbl 84 drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c ft->id = mlx5dr_table_get_id(tbl);
tbl 89 drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c mlx5dr_table_destroy(tbl);
tbl 71 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
tbl 161 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
tbl 1911 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return neigh_entry->key.n->tbl->family;
tbl 2417 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (neigh_entry->key.n->tbl->family == AF_INET) {
tbl 2422 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
tbl 2566 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!p->dev || (p->tbl->family != AF_INET &&
tbl 2567 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c p->tbl->family != AF_INET6))
tbl 2586 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
tbl 108 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
tbl 113 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
tbl 117 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c neigh = neigh_create(tbl, pkey, dev);
tbl 241 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct neigh_table *tbl,
tbl 250 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
tbl 578 drivers/net/ethernet/qlogic/qed/qed_main.c struct msix_entry *tbl;
tbl 585 drivers/net/ethernet/qlogic/qed/qed_main.c int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
tbl 3186 drivers/net/ethernet/rocker/rocker_main.c if (n->tbl != &arp_tbl)
tbl 4715 drivers/net/ethernet/sun/niu.c struct rdc_table *tbl = &tp->tables[i];
tbl 4721 drivers/net/ethernet/sun/niu.c tbl->rxdma_channel[slot]);
tbl 183 drivers/net/phy/adin.c static int adin_lookup_reg_value(const struct adin_cfg_reg_map *tbl, int cfg)
tbl 187 drivers/net/phy/adin.c for (i = 0; tbl[i].cfg; i++) {
tbl 188 drivers/net/phy/adin.c if (tbl[i].cfg == cfg)
tbl 189 drivers/net/phy/adin.c return tbl[i].reg;
tbl 197 drivers/net/phy/adin.c const struct adin_cfg_reg_map *tbl,
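mlx5dr_table_destroy() above refuses to tear a table down while refcount_read() still sees references taken by matchers (mlx5dr_matcher_create) and dest-table actions (mlx5dr_action_create_dest_table). A stripped-down sketch of that guard with C11 atomics; the driver reports -EBUSY where this toy returns false:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct table {
	atomic_int refcount;	/* starts at 1, as refcount_set(&tbl->refcount, 1) */
};

static bool table_destroy(struct table *t)
{
	if (atomic_load(&t->refcount) > 1)
		return false;	/* still pinned by a matcher or dest action */
	free(t);
	return true;
}

int main(void)
{
	struct table *t = malloc(sizeof(*t));

	atomic_init(&t->refcount, 1);
	return table_destroy(t) ? 0 : 1;
}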
tbl 207 drivers/net/phy/adin.c rc = adin_lookup_reg_value(tbl, val);
tbl 1127 drivers/net/wireless/ath/ath6kl/debug.c const struct wmi_target_roam_tbl *tbl;
tbl 1130 drivers/net/wireless/ath/ath6kl/debug.c if (len < sizeof(*tbl))
tbl 1133 drivers/net/wireless/ath/ath6kl/debug.c tbl = (const struct wmi_target_roam_tbl *) buf;
tbl 1134 drivers/net/wireless/ath/ath6kl/debug.c num_entries = le16_to_cpu(tbl->num_entries);
tbl 1135 drivers/net/wireless/ath/ath6kl/debug.c if (struct_size(tbl, info, num_entries) > len)
tbl 1163 drivers/net/wireless/ath/ath6kl/debug.c struct wmi_target_roam_tbl *tbl;
tbl 1190 drivers/net/wireless/ath/ath6kl/debug.c tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
tbl 1191 drivers/net/wireless/ath/ath6kl/debug.c num_entries = le16_to_cpu(tbl->num_entries);
tbl 1201 drivers/net/wireless/ath/ath6kl/debug.c le16_to_cpu(tbl->roam_mode));
tbl 1204 drivers/net/wireless/ath/ath6kl/debug.c struct wmi_bss_roam_info *info = &tbl->info[i];
tbl 466 drivers/net/wireless/ath/wil6210/debugfs.c const struct dbg_off * const tbl)
tbl 470 drivers/net/wireless/ath/wil6210/debugfs.c for (i = 0; tbl[i].name; i++) {
tbl 471 drivers/net/wireless/ath/wil6210/debugfs.c switch (tbl[i].type) {
tbl 473 drivers/net/wireless/ath/wil6210/debugfs.c debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg,
tbl 474 drivers/net/wireless/ath/wil6210/debugfs.c base + tbl[i].off);
tbl 477 drivers/net/wireless/ath/wil6210/debugfs.c debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg,
tbl 478 drivers/net/wireless/ath/wil6210/debugfs.c base + tbl[i].off);
tbl 481 drivers/net/wireless/ath/wil6210/debugfs.c debugfs_create_file_unsafe(tbl[i].name, tbl[i].mode,
tbl 482 drivers/net/wireless/ath/wil6210/debugfs.c dbg, base + tbl[i].off,
tbl 486 drivers/net/wireless/ath/wil6210/debugfs.c wil_debugfs_create_iomem_x32(tbl[i].name, tbl[i].mode,
tbl 487 drivers/net/wireless/ath/wil6210/debugfs.c dbg, base + tbl[i].off,
tbl 491 drivers/net/wireless/ath/wil6210/debugfs.c debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
tbl 492 drivers/net/wireless/ath/wil6210/debugfs.c base + tbl[i].off);
tbl 14153 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c struct phytbl_info tbl;
tbl 14155 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_id = id;
tbl 14156 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_len = len;
tbl 14157 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_offset = offset;
tbl 14158 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_width = width;
tbl 14159 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_ptr = data;
tbl 14160 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlc_phy_write_table_nphy(pi, &tbl);
tbl 14167 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c struct phytbl_info tbl;
tbl 14169 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_id = id;
tbl 14170 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_len = len;
tbl 14171 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_offset = offset;
tbl 14172 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_width = width;
tbl 14173 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tbl.tbl_ptr = data;
tbl 14174 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlc_phy_read_table_nphy(pi, &tbl);
tbl 357 drivers/net/wireless/intel/iwlegacy/4965-calib.c __le16 *tbl)
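adin_lookup_reg_value() and the wil6210 dbg_off loop above share one idiom: scan a sentinel-terminated descriptor table and dispatch on each entry. A compilable sketch of that pattern (struct layout, names, and the printf stand-ins are illustrative, not the drivers' APIs):

#include <stdio.h>

enum dbg_type { DBG_END, DBG_U32, DBG_X32 };

struct dbg_off {
	const char *name;	/* NULL name terminates, like tbl[i].name */
	enum dbg_type type;
	unsigned long off;
};

static void register_dbg_files(const struct dbg_off *tbl, const char *base)
{
	for (int i = 0; tbl[i].name; i++) {
		switch (tbl[i].type) {
		case DBG_U32:
			printf("u32 file %s/%s @ +%lu\n", base, tbl[i].name, tbl[i].off);
			break;
		case DBG_X32:
			printf("x32 file %s/%s @ +%lu\n", base, tbl[i].name, tbl[i].off);
			break;
		default:
			break;
		}
	}
}

int main(void)
{
	static const struct dbg_off demo[] = {
		{ "status", DBG_U32, 0x00 },
		{ "flags",  DBG_X32, 0x04 },
		{ NULL, DBG_END, 0 },	/* sentinel entry */
	};

	register_dbg_files(demo, "wil6210");
	return 0;
}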
tbl 359 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
tbl 361 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
tbl 363 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
tbl 365 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
tbl 368 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
tbl 370 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
tbl 373 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
tbl 374 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
tbl 376 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
tbl 378 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
tbl 380 drivers/net/wireless/intel/iwlegacy/4965-calib.c tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
tbl 389 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
tbl 391 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->expected_tpt)
tbl 392 drivers/net/wireless/intel/iwlegacy/4965-rs.c return tbl->expected_tpt[rs_idx];
tbl 404 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
tbl 415 drivers/net/wireless/intel/iwlegacy/4965-rs.c win = &(tbl->win[scale_idx]);
tbl 418 drivers/net/wireless/intel/iwlegacy/4965-rs.c tpt = il4965_get_expected_tpt(tbl, scale_idx);
tbl 482 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
tbl 487 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_legacy(tbl->lq_type)) {
tbl 492 drivers/net/wireless/intel/iwlegacy/4965-rs.c } else if (is_Ht(tbl->lq_type)) {
tbl 499 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_siso(tbl->lq_type))
tbl 504 drivers/net/wireless/intel/iwlegacy/4965-rs.c IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
tbl 508 drivers/net/wireless/intel/iwlegacy/4965-rs.c ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
tbl 510 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_Ht(tbl->lq_type)) {
tbl 511 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->is_ht40) {
tbl 512 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->is_dup)
tbl 517 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->is_SGI)
tbl 522 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_siso(tbl->lq_type) && tbl->is_SGI) {
tbl 538 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl, int *rate_idx)
tbl 545 drivers/net/wireless/intel/iwlegacy/4965-rs.c memset(tbl, 0, sizeof(struct il_scale_tbl_info));
tbl 552 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_SGI = 0; /* default legacy setup */
tbl 553 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_ht40 = 0;
tbl 554 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_dup = 0;
tbl 555 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
tbl 556 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_NONE;
tbl 557 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->max_search = IL_MAX_SEARCH;
tbl 563 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_A;
tbl 565 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_G;
tbl 570 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_SGI = 1;
tbl 574 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_ht40 = 1;
tbl 577 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_dup = 1;
tbl 584 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_SISO; /*else NONE */
tbl 588 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_MIMO2;
tbl 598 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl)
tbl 602 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
tbl 605 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
tbl 608 drivers/net/wireless/intel/iwlegacy/4965-rs.c new_ant_type = ant_toggle_lookup[tbl->ant_type];
tbl 610 drivers/net/wireless/intel/iwlegacy/4965-rs.c while (new_ant_type != tbl->ant_type &&
tbl 614 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (new_ant_type == tbl->ant_type)
tbl 617 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->ant_type = new_ant_type;
tbl 715 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl, u8 scale_idx,
tbl 728 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
tbl 732 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_A;
tbl 734 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_G;
tbl 736 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (il4965_num_of_ant(tbl->ant_type) > 1)
tbl 737 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->ant_type =
tbl 740 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_ht40 = 0;
tbl 741 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_SGI = 0;
tbl 742 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->max_search = IL_MAX_SEARCH;
tbl 745 drivers/net/wireless/intel/iwlegacy/4965-rs.c rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
tbl 748 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_legacy(tbl->lq_type)) {
tbl 766 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type);
tbl 773 drivers/net/wireless/intel/iwlegacy/4965-rs.c return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
tbl 1012 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl)
tbl 1018 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
tbl 1019 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->expected_tpt = expected_tpt_legacy;
tbl 1024 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_legacy(tbl->lq_type)) {
tbl 1025 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->expected_tpt = expected_tpt_legacy;
tbl 1032 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
tbl 1034 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (is_siso(tbl->lq_type))
tbl 1036 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
tbl 1041 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
tbl 1042 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->expected_tpt = ht_tbl_pointer[0];
tbl 1043 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
tbl 1044 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->expected_tpt = ht_tbl_pointer[1];
tbl 1045 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
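il4965_rs_toggle_antenna() above advances through ant_toggle_lookup[] until it reaches an antenna allowed by valid_ant, or arrives back where it started. A self-contained sketch of that ring walk, restricted to the single-antenna entries (the combined A/B/C lookup entries are omitted here):

#include <stdbool.h>
#include <stdio.h>

enum { ANT_A = 1, ANT_B = 2, ANT_C = 4, ANT_ABC = 7 };

/* Successor ring A -> B -> C -> A. */
static const int ant_toggle_lookup[ANT_ABC + 1] = {
	[ANT_A] = ANT_B, [ANT_B] = ANT_C, [ANT_C] = ANT_A,
};

static bool is_valid_ant(int valid, int ant)
{
	return (ant & valid) == ant;	/* the mask test used by the driver */
}

static bool toggle_antenna(int valid, int *ant)
{
	int next = ant_toggle_lookup[*ant];

	while (next != *ant && next && !is_valid_ant(valid, next))
		next = ant_toggle_lookup[next];
	if (next == *ant || !next)
		return false;	/* no other valid antenna */
	*ant = next;
	return true;
}

int main(void)
{
	int ant = ANT_A;

	if (toggle_antenna(ANT_B | ANT_C, &ant))
		printf("switched to %d\n", ant);	/* 2 (ANT_B) */
	return 0;
}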
tbl 1046 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->expected_tpt = ht_tbl_pointer[2];
tbl 1048 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->expected_tpt = ht_tbl_pointer[3];
tbl 1065 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl, /* "search" */
tbl 1075 drivers/net/wireless/intel/iwlegacy/4965-rs.c s32 *tpt_tbl = tbl->expected_tpt;
tbl 1086 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type);
tbl 1161 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl, int idx)
tbl 1179 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_MIMO2;
tbl 1180 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_dup = lq_sta->is_dup;
tbl 1181 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = 0;
tbl 1182 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->max_search = IL_MAX_SEARCH;
tbl 1186 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_ht40 = 1;
tbl 1188 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_ht40 = 0;
tbl 1190 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_set_expected_tpt_table(lq_sta, tbl);
tbl 1192 drivers/net/wireless/intel/iwlegacy/4965-rs.c rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
tbl 1200 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->current_rate =
tbl 1201 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
tbl 1203 drivers/net/wireless/intel/iwlegacy/4965-rs.c D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
tbl 1214 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl, int idx)
tbl 1225 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_dup = lq_sta->is_dup;
tbl 1226 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_SISO;
tbl 1227 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = 0;
tbl 1228 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->max_search = IL_MAX_SEARCH;
tbl 1232 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_ht40 = 1;
tbl 1234 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_ht40 = 0;
tbl 1237 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
tbl 1239 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_set_expected_tpt_table(lq_sta, tbl);
tbl 1240 drivers/net/wireless/intel/iwlegacy/4965-rs.c rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
tbl 1248 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->current_rate =
tbl 1249 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
tbl 1250 drivers/net/wireless/intel/iwlegacy/4965-rs.c D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
tbl 1263 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1266 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_rate_scale_data *win = &(tbl->win[idx]);
tbl 1276 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = IL_LEGACY_SWITCH_SISO;
tbl 1278 drivers/net/wireless/intel/iwlegacy/4965-rs.c start_action = tbl->action;
tbl 1281 drivers/net/wireless/intel/iwlegacy/4965-rs.c switch (tbl->action) {
tbl 1286 drivers/net/wireless/intel/iwlegacy/4965-rs.c if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
tbl 1288 drivers/net/wireless/intel/iwlegacy/4965-rs.c (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
tbl 1297 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1312 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1329 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1332 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
tbl 1334 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
tbl 1352 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action++;
tbl 1353 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
tbl 1354 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
tbl 1356 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action == start_action)
tbl 1365 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action++;
tbl 1366 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
tbl 1367 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
tbl 1369 drivers/net/wireless/intel/iwlegacy/4965-rs.c search_tbl->action = tbl->action;
tbl 1383 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1386 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_rate_scale_data *win = &(tbl->win[idx]);
tbl 1397 drivers/net/wireless/intel/iwlegacy/4965-rs.c start_action = tbl->action;
tbl 1401 drivers/net/wireless/intel/iwlegacy/4965-rs.c switch (tbl->action) {
tbl 1405 drivers/net/wireless/intel/iwlegacy/4965-rs.c if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
tbl 1407 drivers/net/wireless/intel/iwlegacy/4965-rs.c (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
tbl 1414 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1426 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1429 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
tbl 1431 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
tbl 1447 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!tbl->is_ht40 &&
tbl 1450 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->is_ht40 &&
tbl 1456 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1458 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!tbl->is_SGI)
tbl 1463 drivers/net/wireless/intel/iwlegacy/4965-rs.c search_tbl->is_SGI = !tbl->is_SGI;
tbl 1465 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->is_SGI) {
tbl 1476 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action++;
tbl 1477 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action > IL_SISO_SWITCH_GI)
tbl 1478 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = IL_SISO_SWITCH_ANTENNA1;
tbl 1480 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action == start_action)
tbl 1488 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action++;
tbl 1489 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action > IL_SISO_SWITCH_GI)
tbl 1490 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = IL_SISO_SWITCH_ANTENNA1;
tbl 1492 drivers/net/wireless/intel/iwlegacy/4965-rs.c search_tbl->action = tbl->action;
tbl 1506 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1509 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_rate_scale_data *win = &(tbl->win[idx]);
tbl 1520 drivers/net/wireless/intel/iwlegacy/4965-rs.c start_action = tbl->action;
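Every IL_*_SWITCH_* case above prepares its candidate the same way: memcpy() the active scale table into search_tbl, then flip exactly one parameter before measuring it. A minimal sketch of the guard-interval flavor of that pattern, with the struct trimmed to what the copy-and-flip needs (fields and names illustrative):

#include <stdio.h>
#include <string.h>

struct scale_tbl {
	int is_sgi;	/* stands in for tbl->is_SGI */
	int action;	/* which switch the state machine is trying */
	/* rate windows, expected_tpt, ... elided */
};

static void prepare_gi_search(const struct scale_tbl *active,
			      struct scale_tbl *search)
{
	memcpy(search, active, sizeof(*search));	/* like memcpy(search_tbl, tbl, sz) */
	search->is_sgi = !active->is_sgi;		/* flip one knob only */
}

int main(void)
{
	struct scale_tbl cur = { .is_sgi = 0, .action = 0 }, cand;

	prepare_gi_search(&cur, &cand);
	printf("candidate SGI: %d\n", cand.is_sgi);	/* 1 */
	return 0;
}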
tbl 1523 drivers/net/wireless/intel/iwlegacy/4965-rs.c switch (tbl->action) {
tbl 1534 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1548 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1550 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
tbl 1552 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
tbl 1570 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!tbl->is_ht40 &&
tbl 1573 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->is_ht40 &&
tbl 1580 drivers/net/wireless/intel/iwlegacy/4965-rs.c memcpy(search_tbl, tbl, sz);
tbl 1581 drivers/net/wireless/intel/iwlegacy/4965-rs.c search_tbl->is_SGI = !tbl->is_SGI;
tbl 1589 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->is_SGI) {
tbl 1601 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action++;
tbl 1602 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action > IL_MIMO2_SWITCH_GI)
tbl 1603 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
tbl 1605 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action == start_action)
tbl 1612 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action++;
tbl 1613 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tbl->action > IL_MIMO2_SWITCH_GI)
tbl 1614 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
tbl 1616 drivers/net/wireless/intel/iwlegacy/4965-rs.c search_tbl->action = tbl->action;
tbl 1632 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl;
tbl 1641 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 1690 drivers/net/wireless/intel/iwlegacy/4965-rs.c (tbl->
tbl 1701 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
tbl 1711 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl, int idx, u8 is_green)
tbl 1716 drivers/net/wireless/intel/iwlegacy/4965-rs.c rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
tbl 1745 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl, *tbl1;
tbl 1785 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 1786 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_legacy(tbl->lq_type))
tbl 1795 drivers/net/wireless/intel/iwlegacy/4965-rs.c D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
tbl 1798 drivers/net/wireless/intel/iwlegacy/4965-rs.c rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
tbl 1803 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_legacy(tbl->lq_type)) {
tbl 1823 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_NONE;
tbl 1825 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1827 drivers/net/wireless/intel/iwlegacy/4965-rs.c idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
tbl 1828 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx,
tbl 1835 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!tbl->expected_tpt) {
tbl 1844 drivers/net/wireless/intel/iwlegacy/4965-rs.c win = &(tbl->win[idx]);
tbl 1848 drivers/net/wireless/intel/iwlegacy/4965-rs.c win = &(tbl->win[idx]);
tbl 1875 drivers/net/wireless/intel/iwlegacy/4965-rs.c ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
tbl 1878 drivers/net/wireless/intel/iwlegacy/4965-rs.c ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
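The expression closing the entries above, (win->success_ratio * tbl->expected_tpt[idx] + 64) / 128, is fixed-point arithmetic: success_ratio is kept pre-multiplied by 128, and adding 64 (half the divisor) before dividing rounds to nearest instead of truncating. In isolation:

#include <stdio.h>

/* success_ratio arrives pre-scaled by 128; "+ 64" rounds the divide. */
static int scaled_tpt(int success_ratio, int expected_tpt)
{
	return (success_ratio * expected_tpt + 64) / 128;
}

int main(void)
{
	/* 90% success (90 * 128) at expected throughput 100 -> 9000,
	 * i.e. the percent-scaled value the window comparisons use. */
	printf("%d\n", scaled_tpt(90 * 128, 100));
	return 0;
}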
tbl 1893 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!is_legacy(tbl->lq_type))
tbl 1909 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type = LQ_NONE;
tbl 1913 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 1916 drivers/net/wireless/intel/iwlegacy/4965-rs.c idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
tbl 1934 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->lq_type);
tbl 1947 drivers/net/wireless/intel/iwlegacy/4965-rs.c low_tpt = tbl->win[low].average_tpt;
tbl 1949 drivers/net/wireless/intel/iwlegacy/4965-rs.c high_tpt = tbl->win[high].average_tpt;
tbl 1999 drivers/net/wireless/intel/iwlegacy/4965-rs.c (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
tbl 2026 drivers/net/wireless/intel/iwlegacy/4965-rs.c idx, scale_action, low, high, tbl->lq_type);
tbl 2031 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx, is_green);
tbl 2049 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_legacy(tbl->lq_type))
tbl 2051 drivers/net/wireless/intel/iwlegacy/4965-rs.c else if (is_siso(tbl->lq_type))
tbl 2061 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
tbl 2063 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
tbl 2066 drivers/net/wireless/intel/iwlegacy/4965-rs.c idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
tbl 2069 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->current_rate, idx);
tbl 2070 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
tbl 2111 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->current_rate =
tbl 2112 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
tbl 2135 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl;
tbl 2157 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2163 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->ant_type = il4965_first_antenna(valid_tx_ant);
tbl 2164 drivers/net/wireless/intel/iwlegacy/4965-rs.c rate |= tbl->ant_type << RATE_MCS_ANT_POS;
tbl 2169 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
tbl 2170 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
tbl 2171 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
tbl 2173 drivers/net/wireless/intel/iwlegacy/4965-rs.c rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
tbl 2174 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->current_rate = rate;
tbl 2175 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_set_expected_tpt_table(lq_sta, tbl);
tbl 2578 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 2599 drivers/net/wireless/intel/iwlegacy/4965-rs.c (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
tbl 2600 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_Ht(tbl->lq_type)) {
tbl 2603 drivers/net/wireless/intel/iwlegacy/4965-rs.c (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
tbl 2606 drivers/net/wireless/intel/iwlegacy/4965-rs.c (tbl->is_ht40) ? "40MHz" : "20MHz");
tbl 2609 drivers/net/wireless/intel/iwlegacy/4965-rs.c (tbl->is_SGI) ? "SGI" : "",
tbl 2645 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_legacy(tbl->lq_type)) {
tbl 2727 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
tbl 2729 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (is_Ht(tbl->lq_type))
tbl 2732 drivers/net/wireless/intel/iwlegacy/4965-rs.c tbl->expected_tpt[lq_sta->last_txrate_idx]);
tbl 2705 drivers/net/wireless/intel/iwlegacy/common.h #define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
tbl 2706 drivers/net/wireless/intel/iwlegacy/common.h #define is_siso(tbl) ((tbl) == LQ_SISO)
tbl 2707 drivers/net/wireless/intel/iwlegacy/common.h #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
tbl 2708 drivers/net/wireless/intel/iwlegacy/common.h #define is_mimo(tbl) (is_mimo2(tbl))
tbl 2709 drivers/net/wireless/intel/iwlegacy/common.h #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
tbl 2710 drivers/net/wireless/intel/iwlegacy/common.h #define is_a_band(tbl) ((tbl) == LQ_A)
tbl 2711 drivers/net/wireless/intel/iwlegacy/common.h #define is_g_and(tbl) ((tbl) == LQ_G)
tbl 427 drivers/net/wireless/intel/iwlwifi/dvm/calib.c __le16 *tbl)
tbl 429 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
tbl 431 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
tbl 433 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
tbl 435 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
tbl 438 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
tbl 440 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
tbl 443 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
tbl 445 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
tbl 448 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
tbl 450 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
tbl 452 drivers/net/wireless/intel/iwlwifi/dvm/calib.c tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
tbl 435 drivers/net/wireless/intel/iwlwifi/dvm/rs.c static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
tbl 437 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->expected_tpt)
tbl 438 drivers/net/wireless/intel/iwlwifi/dvm/rs.c return tbl->expected_tpt[rs_index];
tbl 449 drivers/net/wireless/intel/iwlwifi/dvm/rs.c static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
tbl 460 drivers/net/wireless/intel/iwlwifi/dvm/rs.c window = &(tbl->win[scale_index]);
tbl 463 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tpt = get_expected_tpt(tbl, scale_index);
tbl 528 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 533 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_legacy(tbl->lq_type)) {
tbl 538 drivers/net/wireless/intel/iwlwifi/dvm/rs.c } else if (is_Ht(tbl->lq_type)) {
tbl 545 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_siso(tbl->lq_type))
tbl 547 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (is_mimo2(tbl->lq_type))
tbl 552 drivers/net/wireless/intel/iwlwifi/dvm/rs.c IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
tbl 555 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
tbl 558 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_Ht(tbl->lq_type)) {
tbl 559 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_ht40) {
tbl 560 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_dup)
tbl 565 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_SGI)
tbl 570 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_siso(tbl->lq_type) && tbl->is_SGI) {
tbl 585 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 592 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
tbl 599 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_SGI = 0; /* default legacy setup */
tbl 600 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 0;
tbl 601 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_dup = 0;
tbl 602 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
tbl 603 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_NONE;
tbl 604 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->max_search = IWL_MAX_SEARCH;
tbl 610 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_A;
tbl 612 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_G;
tbl 617 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_SGI = 1;
tbl 621 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 1;
tbl 624 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_dup = 1;
tbl 631 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_SISO; /*else NONE*/
tbl 635 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_MIMO2;
tbl 639 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
tbl 640 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_MIMO3;
tbl 650 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl)
tbl 654 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
tbl 657 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
tbl 660 drivers/net/wireless/intel/iwlwifi/dvm/rs.c new_ant_type = ant_toggle_lookup[tbl->ant_type];
tbl 662 drivers/net/wireless/intel/iwlwifi/dvm/rs.c while ((new_ant_type != tbl->ant_type) &&
tbl 666 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (new_ant_type == tbl->ant_type)
tbl 669 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->ant_type = new_ant_type;
tbl 774 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 787 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
tbl 791 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_A;
tbl 793 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_G;
tbl 795 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (num_of_ant(tbl->ant_type) > 1)
tbl 796 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->ant_type =
tbl 799 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 0;
tbl 800 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_SGI = 0;
tbl 801 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->max_search = IWL_MAX_SEARCH;
tbl 804 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
tbl 807 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_legacy(tbl->lq_type)) {
tbl 823 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type);
tbl 830 drivers/net/wireless/intel/iwlwifi/dvm/rs.c return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
tbl 846 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl;
tbl 865 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 866 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
tbl 1096 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl)
tbl 1102 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
tbl 1103 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt = expected_tpt_legacy;
tbl 1108 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_legacy(tbl->lq_type)) {
tbl 1109 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt = expected_tpt_legacy;
tbl 1116 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
tbl 1118 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (is_siso(tbl->lq_type))
tbl 1120 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
tbl 1122 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (is_mimo2(tbl->lq_type))
tbl 1124 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
tbl 1129 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
tbl 1130 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt = ht_tbl_pointer[0];
tbl 1131 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
tbl 1132 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt = ht_tbl_pointer[1];
tbl 1133 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
tbl 1134 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt = ht_tbl_pointer[2];
tbl 1136 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt = ht_tbl_pointer[3];
tbl 1153 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl, /* "search" */
tbl 1162 drivers/net/wireless/intel/iwlwifi/dvm/rs.c const u16 *tpt_tbl = tbl->expected_tpt;
tbl 1172 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type);
tbl 1248 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl, int index)
tbl 1268 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_MIMO2;
tbl 1269 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_dup = lq_sta->is_dup;
tbl 1270 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = 0;
tbl 1271 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->max_search = IWL_MAX_SEARCH;
tbl 1275 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 1;
tbl 1277 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 0;
tbl 1279 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_set_expected_tpt_table(lq_sta, tbl);
tbl 1281 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
tbl 1289 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
tbl 1292 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate, is_green);
tbl 1303 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl, int index)
tbl 1323 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_MIMO3;
tbl 1324 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_dup = lq_sta->is_dup;
tbl 1325 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = 0;
tbl 1326 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
tbl 1330 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 1;
tbl 1332 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 0;
tbl 1334 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_set_expected_tpt_table(lq_sta, tbl);
tbl 1336 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
tbl 1345 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
tbl 1348 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate, is_green);
tbl 1359 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl, int index)
tbl 1372 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_dup = lq_sta->is_dup;
tbl 1373 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_SISO;
tbl 1374 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = 0;
tbl 1375 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->max_search = IWL_MAX_SEARCH;
tbl 1379 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 1;
tbl 1381 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_ht40 = 0;
tbl 1384 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
tbl 1386 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_set_expected_tpt_table(lq_sta, tbl);
tbl 1387 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
tbl 1395 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
tbl 1397 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate, is_green);
tbl 1410 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1413 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_rate_scale_data *window = &(tbl->win[index]);
tbl 1428 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
tbl 1429 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_SISO;
tbl 1436 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl 1437 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl 1438 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_SISO;
tbl 1447 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
tbl 1449 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action > IWL_LEGACY_SWITCH_SISO)
tbl 1450 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_SISO;
tbl 1455 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
tbl 1456 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl 1457 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_SISO;
tbl 1462 drivers/net/wireless/intel/iwlwifi/dvm/rs.c start_action = tbl->action;
tbl 1465 drivers/net/wireless/intel/iwlwifi/dvm/rs.c switch (tbl->action) {
tbl 1470 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
tbl 1472 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl 1484 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1497 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1513 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1516 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
tbl 1518 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
tbl 1538 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1554 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 1555 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl 1556 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
tbl 1558 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == start_action)
tbl 1567 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 1568 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl 1569 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
tbl 1571 drivers/net/wireless/intel/iwlwifi/dvm/rs.c search_tbl->action = tbl->action;
tbl 1583 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1586 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_rate_scale_data *window = &(tbl->win[index]);
tbl 1602 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl 1603 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
tbl 1610 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl 1611 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_SISO_SWITCH_ANTENNA1;
tbl 1619 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
tbl 1621 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_SISO_SWITCH_ANTENNA1;
tbl 1628 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl 1629 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_SISO_SWITCH_ANTENNA1;
tbl 1632 drivers/net/wireless/intel/iwlwifi/dvm/rs.c start_action = tbl->action;
tbl 1635 drivers/net/wireless/intel/iwlwifi/dvm/rs.c switch (tbl->action) {
tbl 1639 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
tbl 1641 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
tbl 1651 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1662 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1665 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
tbl 1667 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
tbl 1681 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!tbl->is_ht40 && !(ht_cap->cap &
tbl 1684 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_ht40 && !(ht_cap->cap &
tbl 1690 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1692 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!tbl->is_SGI)
tbl 1698 drivers/net/wireless/intel/iwlwifi/dvm/rs.c search_tbl->is_SGI = !tbl->is_SGI;
tbl 1700 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_SGI) {
tbl 1712 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1725 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 1726 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl 1727 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_SISO_SWITCH_ANTENNA1;
tbl 1737 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 1738 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
tbl 1739 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_SISO_SWITCH_ANTENNA1;
tbl 1741 drivers/net/wireless/intel/iwlwifi/dvm/rs.c search_tbl->action = tbl->action;
tbl 1753 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1756 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_rate_scale_data *window = &(tbl->win[index]);
tbl 1773 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
tbl 1774 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO2_SWITCH_SISO_A;
tbl 1778 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
tbl 1779 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action == IWL_MIMO2_SWITCH_SISO_C)
tbl 1780 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO2_SWITCH_SISO_A;
tbl 1788 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
tbl 1789 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
tbl 1791 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO2_SWITCH_SISO_A;
tbl 1796 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
tbl 1797 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action > IWL_MIMO2_SWITCH_SISO_C))
tbl 1798 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO2_SWITCH_SISO_A;
tbl 1800 drivers/net/wireless/intel/iwlwifi/dvm/rs.c start_action = tbl->action;
tbl 1803 drivers/net/wireless/intel/iwlwifi/dvm/rs.c switch (tbl->action) {
tbl 1814 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1827 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1829 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
tbl 1831 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
tbl 1847 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!tbl->is_ht40 && !(ht_cap->cap &
tbl 1850 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_ht40 && !(ht_cap->cap &
tbl 1857 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1858 drivers/net/wireless/intel/iwlwifi/dvm/rs.c search_tbl->is_SGI = !tbl->is_SGI;
tbl 1866 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_SGI) {
tbl 1879 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1893 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 1894 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
tbl 1895 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
tbl 1897 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == start_action)
tbl 1904 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 1905 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
tbl 1906 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
tbl 1908 drivers/net/wireless/intel/iwlwifi/dvm/rs.c search_tbl->action = tbl->action;
tbl 1921 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 1924 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_rate_scale_data *window = &(tbl->win[index]);
tbl 1941 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
tbl 1942 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO3_SWITCH_SISO_A;
tbl 1946 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
tbl 1947 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action == IWL_MIMO3_SWITCH_SISO_C)
tbl 1948 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO3_SWITCH_SISO_A;
tbl 1956 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
tbl 1957 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
tbl 1959 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO3_SWITCH_SISO_A;
tbl 1964 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
tbl 1965 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action > IWL_MIMO3_SWITCH_SISO_C))
tbl 1966 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO3_SWITCH_SISO_A;
tbl 1968 drivers/net/wireless/intel/iwlwifi/dvm/rs.c start_action = tbl->action;
tbl 1971 drivers/net/wireless/intel/iwlwifi/dvm/rs.c switch (tbl->action) {
tbl 1982 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1993 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 1995 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
tbl 1997 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
tbl 2017 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 2019 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
tbl 2021 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
tbl 2037 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!tbl->is_ht40 && !(ht_cap->cap &
tbl 2040 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_ht40 && !(ht_cap->cap &
tbl 2047 drivers/net/wireless/intel/iwlwifi/dvm/rs.c memcpy(search_tbl, tbl, sz);
tbl 2048 drivers/net/wireless/intel/iwlwifi/dvm/rs.c search_tbl->is_SGI = !tbl->is_SGI;
tbl 2056 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->is_SGI) {
tbl 2067 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 2068 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl 2069 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
tbl 2071 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action == start_action)
tbl 2078 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action++;
tbl 2079 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl 2080 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
tbl 2082 drivers/net/wireless/intel/iwlwifi/dvm/rs.c search_tbl->action = tbl->action;
tbl 2094 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl;
tbl 2103 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2154 drivers/net/wireless/intel/iwlwifi/dvm/rs.c &(tbl->win[i]));
tbl 2163 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_rate_scale_clear_window(&(tbl->win[i]));
tbl 2174 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 2180 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
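The rs.c entries above repeatedly show the same round-robin mode search: advance tbl->action, wrap past the last mode, and stop once the cycle returns to start_action. A minimal sketch of that pattern, with hypothetical names and mode constants (not the driver's):

    /* Illustrative only: demo_* names are hypothetical. */
    enum demo_action { DEMO_SWITCH_ANTENNA1, DEMO_SWITCH_SISO, DEMO_SWITCH_MIMO2_ABC };

    struct demo_tbl { int action; };

    /* Stub: report whether the current mode is usable. */
    static int demo_try_switch(struct demo_tbl *tbl) { return 0; }

    static void demo_move_to_other(struct demo_tbl *tbl)
    {
        int start_action = tbl->action;

        for (;;) {
            if (demo_try_switch(tbl))      /* found a candidate mode */
                break;
            tbl->action++;                 /* next mode */
            if (tbl->action > DEMO_SWITCH_MIMO2_ABC)
                tbl->action = DEMO_SWITCH_ANTENNA1;   /* wrap around */
            if (tbl->action == start_action)
                break;                     /* full cycle, nothing viable */
        }
    }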
tbl 2209 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl, *tbl1;
tbl 2252 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2253 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_legacy(tbl->lq_type))
tbl 2263 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type);
tbl 2266 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
tbl 2271 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_legacy(tbl->lq_type)) {
tbl 2290 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_NONE;
tbl 2292 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 2294 drivers/net/wireless/intel/iwlwifi/dvm/rs.c index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
tbl 2295 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
tbl 2302 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!tbl->expected_tpt) {
tbl 2312 drivers/net/wireless/intel/iwlwifi/dvm/rs.c window = &(tbl->win[index]);
tbl 2316 drivers/net/wireless/intel/iwlwifi/dvm/rs.c window = &(tbl->win[index]);
tbl 2344 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt[index] + 64) / 128)) {
tbl 2347 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt[index] + 64) / 128);
tbl 2364 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!is_legacy(tbl->lq_type))
tbl 2381 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type = LQ_NONE;
tbl 2385 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2388 drivers/net/wireless/intel/iwlwifi/dvm/rs.c index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
tbl 2405 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->lq_type);
tbl 2419 drivers/net/wireless/intel/iwlwifi/dvm/rs.c low_tpt = tbl->win[low].average_tpt;
tbl 2421 drivers/net/wireless/intel/iwlwifi/dvm/rs.c high_tpt = tbl->win[high].average_tpt;
tbl 2478 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (current_tpt > (100 * tbl->expected_tpt[low]))))
tbl 2480 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type))
tbl 2483 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
tbl 2487 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
tbl 2501 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
tbl 2532 drivers/net/wireless/intel/iwlwifi/dvm/rs.c index, scale_action, low, high, tbl->lq_type);
tbl 2537 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
tbl 2556 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_legacy(tbl->lq_type))
tbl 2558 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (is_siso(tbl->lq_type))
tbl 2560 drivers/net/wireless/intel/iwlwifi/dvm/rs.c else if (is_mimo2(tbl->lq_type))
tbl 2568 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
tbl 2570 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_rate_scale_clear_window(&(tbl->win[i]));
tbl 2573 drivers/net/wireless/intel/iwlwifi/dvm/rs.c index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
tbl 2576 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate, index);
tbl 2577 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
tbl 2620 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
tbl 2642 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl;
tbl 2667 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2673 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->ant_type = first_antenna(valid_tx_ant);
tbl 2674 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate |= tbl->ant_type << RATE_MCS_ANT_POS;
tbl 2679 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
tbl 2680 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
tbl 2681 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_toggle_antenna(valid_tx_ant, &rate, tbl);
tbl 2683 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
tbl 2684 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->current_rate = rate;
tbl 2685 drivers/net/wireless/intel/iwlwifi/dvm/rs.c rs_set_expected_tpt_table(lq_sta, tbl);
tbl 3108 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 3126 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
tbl 3127 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_Ht(tbl->lq_type)) {
tbl 3129 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (is_siso(tbl->lq_type)) ? "SISO" :
tbl 3130 drivers/net/wireless/intel/iwlwifi/dvm/rs.c ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
tbl 3132 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->is_ht40) ? "40MHz" : "20MHz");
tbl 3134 drivers/net/wireless/intel/iwlwifi/dvm/rs.c (tbl->is_SGI) ? "SGI" : "",
tbl 3163 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_legacy(tbl->lq_type)) {
tbl 3233 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
tbl 3237 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (is_Ht(tbl->lq_type))
tbl 3240 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tbl->expected_tpt[lq_sta->last_txrate_idx]);
tbl 261 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
tbl 262 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_siso(tbl) ((tbl) == LQ_SISO)
tbl 263 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
tbl 264 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
tbl 265 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
tbl 266 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
tbl 267 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_a_band(tbl) ((tbl) == LQ_A)
tbl 268 drivers/net/wireless/intel/iwlwifi/dvm/rs.h #define is_g_and(tbl) ((tbl) == LQ_G)
tbl 586 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl)
tbl 592 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_rate_scale_clear_window(&tbl->win[i]);
tbl 594 drivers/net/wireless/intel/iwlwifi/mvm/rs.c for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
tbl 595 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_rate_scale_clear_window(&tbl->tpc_win[i]);
tbl 667 drivers/net/wireless/intel/iwlwifi/mvm/rs.c static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
tbl 669 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if (tbl->expected_tpt)
tbl 670 drivers/net/wireless/intel/iwlwifi/mvm/rs.c return tbl->expected_tpt[rs_index];
tbl 682 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 690 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tpt = get_expected_tpt(tbl, scale_index);
tbl 748 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 757 drivers/net/wireless/intel/iwlwifi/mvm/rs.c window = &tbl->tpc_win[reduced_txp];
tbl 758 drivers/net/wireless/intel/iwlwifi/mvm/rs.c return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
tbl 794 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 802 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if (tbl->column != RS_COLUMN_INVALID) {
tbl 805 drivers/net/wireless/intel/iwlwifi/mvm/rs.c pers->tx_stats[tbl->column][scale_index].total += attempts;
tbl 806 drivers/net/wireless/intel/iwlwifi/mvm/rs.c pers->tx_stats[tbl->column][scale_index].success += successes;
tbl 812 drivers/net/wireless/intel/iwlwifi/mvm/rs.c window = &(tbl->win[scale_index]);
tbl 813 drivers/net/wireless/intel/iwlwifi/mvm/rs.c return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
tbl 1347 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl)
tbl 1349 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct rs_rate *rate = &tbl->rate;
tbl 1350 drivers/net/wireless/intel/iwlwifi/mvm/rs.c const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
tbl 1352 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
tbl 1369 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl, /* "search" */
tbl 1376 drivers/net/wireless/intel/iwlwifi/mvm/rs.c const u16 *tpt_tbl = tbl->expected_tpt;
tbl 1400 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->rate.type);
tbl 1455 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl;
tbl 1463 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 1501 drivers/net/wireless/intel/iwlwifi/mvm/rs.c lq_sta->visited_columns = BIT(tbl->column);
tbl 1516 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_rate_scale_clear_tbl_windows(mvm, tbl);
tbl 1524 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_rate_scale_clear_tbl_windows(mvm, tbl);
tbl 1530 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 1540 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
tbl 1541 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
tbl 1574 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl)
tbl 1576 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
tbl 1583 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 1589 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if (!is_vht_siso(&tbl->rate))
tbl 1592 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
tbl 1593 drivers/net/wireless/intel/iwlwifi/mvm/rs.c (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
tbl 1595 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
tbl 1596 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->rate.index = IWL_RATE_MCS_4_INDEX;
tbl 1606 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
tbl 1607 drivers/net/wireless/intel/iwlwifi/mvm/rs.c (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
tbl 1609 drivers/net/wireless/intel/iwlwifi/mvm/rs.c ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
tbl 1611 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
tbl 1612 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->rate.index = IWL_RATE_MCS_1_INDEX;
tbl 1620 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_set_expected_tpt_table(lq_sta, tbl);
tbl 1621 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_rate_scale_clear_tbl_windows(mvm, tbl);
tbl 1628 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl)
tbl 1632 drivers/net/wireless/intel/iwlwifi/mvm/rs.c const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
tbl 1662 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if (allow_func && !allow_func(mvm, sta, &tbl->rate,
tbl 1714 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
tbl 1719 drivers/net/wireless/intel/iwlwifi/mvm/rs.c const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
tbl 1723 drivers/net/wireless/intel/iwlwifi/mvm/rs.c memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
tbl 1788 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl,
tbl 1859 drivers/net/wireless/intel/iwlwifi/mvm/rs.c } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
tbl 2000 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl)
tbl 2007 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct rs_rate *rate = &tbl->rate;
tbl 2042 drivers/net/wireless/intel/iwlwifi/mvm/rs.c window = tbl->tpc_win;
tbl 2105 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl, *tbl1;
tbl 2126 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2127 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rate = &tbl->rate;
tbl 2133 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_set_expected_tpt_table(lq_sta, tbl);
tbl 2134 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_rate_scale_clear_tbl_windows(mvm, tbl);
tbl 2149 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 2150 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
tbl 2156 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if (!tbl->expected_tpt) {
tbl 2162 drivers/net/wireless/intel/iwlwifi/mvm/rs.c window = &(tbl->win[index]);
tbl 2219 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2222 drivers/net/wireless/intel/iwlwifi/mvm/rs.c index = tbl->rate.index;
tbl 2249 drivers/net/wireless/intel/iwlwifi/mvm/rs.c low_tpt = tbl->win[low].average_tpt;
tbl 2251 drivers/net/wireless/intel/iwlwifi/mvm/rs.c high_tpt = tbl->win[high].average_tpt;
tbl 2258 drivers/net/wireless/intel/iwlwifi/mvm/rs.c scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
tbl 2296 drivers/net/wireless/intel/iwlwifi/mvm/rs.c update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
tbl 2305 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->rate.index = index;
tbl 2307 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
tbl 2308 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_set_amsdu_len(mvm, sta, tbl, scale_action);
tbl 2309 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
tbl 2333 drivers/net/wireless/intel/iwlwifi/mvm/rs.c next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
tbl 2348 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
tbl 2349 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_rate_scale_clear_tbl_windows(mvm, tbl);
tbl 2352 drivers/net/wireless/intel/iwlwifi/mvm/rs.c index = tbl->rate.index;
tbl 2354 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_dump_rate(mvm, &tbl->rate,
tbl 2356 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
tbl 2681 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl;
tbl 2693 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl = &(lq_sta->lq_info[active_tbl]);
tbl 2694 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rate = &tbl->rate;
tbl 2704 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->column = rs_get_column_from_rate(rate);
tbl 2706 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rs_set_expected_tpt_table(lq_sta, tbl);
tbl 3810 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
tbl 3811 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct rs_rate *rate = &tbl->rate;
tbl 3909 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_scale_tbl_info *tbl;
tbl 3918 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl = &(lq_sta->lq_info[i]);
tbl 3919 drivers/net/wireless/intel/iwlwifi/mvm/rs.c rate = &tbl->rate;
tbl 3934 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->win[j].counter,
tbl 3935 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->win[j].success_counter,
tbl 3936 drivers/net/wireless/intel/iwlwifi/mvm/rs.c tbl->win[j].success_ratio);
tbl 782 drivers/net/wireless/marvell/mwifiex/11n.c struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
tbl 788 drivers/net/wireless/marvell/mwifiex/11n.c list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list)
tbl 789 drivers/net/wireless/marvell/mwifiex/11n.c if (!memcmp(tbl->ra, ra, ETH_ALEN))
tbl 790 drivers/net/wireless/marvell/mwifiex/11n.c mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
tbl 110 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl,
tbl 120 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c pkt_to_send = (start_win > tbl->start_win) ?
tbl 121 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c min((start_win - tbl->start_win), tbl->win_size) :
tbl 122 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->win_size;
tbl 125 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl->rx_reorder_ptr[i]) {
tbl 126 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c skb = tbl->rx_reorder_ptr[i];
tbl 128 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->rx_reorder_ptr[i] = NULL;
tbl 136 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
tbl 137 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
tbl 138 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
tbl 141 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->start_win = start_win;
tbl 158 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl)
tbl 167 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c for (i = 0; i < tbl->win_size; ++i) {
tbl 168 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (!tbl->rx_reorder_ptr[i])
tbl 170 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c skb = tbl->rx_reorder_ptr[i];
tbl 172 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->rx_reorder_ptr[i] = NULL;
tbl 180 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c xchg = tbl->win_size - i;
tbl 182 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j];
tbl 183 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->rx_reorder_ptr[i + j] = NULL;
tbl 186 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
tbl 202 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl)
tbl 206 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (!tbl)
tbl 218 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
tbl 219 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
tbl 221 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c del_timer_sync(&tbl->timer_context.timer);
tbl 222 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->timer_context.timer_is_set = false;
tbl 225 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c list_del(&tbl->list);
tbl 228 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c kfree(tbl->rx_reorder_ptr);
tbl 229 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c kfree(tbl);
tbl 244 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl;
tbl 247 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
tbl 248 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
tbl 250 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c return tbl;
tbl 263 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl, *tmp;
tbl 269 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
tbl 270 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
tbl 272 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_del_rx_reorder_entry(priv, tbl);
tbl 345 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl, *new_node;
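The mwifiex entries above use the classic delete-while-iterating idiom: list_for_each_entry_safe() caches the next node so the current one can be unlinked and freed mid-walk. A minimal kernel-style sketch of that pattern with hypothetical demo_* types:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/if_ether.h>

    struct demo_stream {
        struct list_head list;
        u8 ra[ETH_ALEN];        /* receiver address this stream belongs to */
    };

    /* Remove and free every stream whose receiver address matches ra. */
    static void demo_delete_streams_by_ra(struct list_head *head, const u8 *ra)
    {
        struct demo_stream *s, *tmp;

        list_for_each_entry_safe(s, tmp, head, list) {
            if (!memcmp(s->ra, ra, ETH_ALEN)) {
                list_del(&s->list);   /* safe: tmp already points past s */
                kfree(s);
            }
        }
    }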
tbl 353 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
tbl 354 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl) {
tbl 355 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
tbl 422 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl)
tbl 426 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl->win_size >= MWIFIEX_BA_WIN_SIZE_32)
tbl 431 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mod_timer(&tbl->timer_context.timer,
tbl 432 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c jiffies + msecs_to_jiffies(min_flush_time * tbl->win_size));
tbl 434 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->timer_context.timer_is_set = true;
tbl 563 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl;
tbl 569 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
tbl 570 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (!tbl) {
tbl 576 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
tbl 581 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c start_win = tbl->start_win;
tbl 583 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c win_size = tbl->win_size;
tbl 585 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
tbl 587 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
tbl 590 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl->flags & RXREOR_FORCE_NO_DROP) {
tbl 593 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->flags &= ~RXREOR_FORCE_NO_DROP;
tbl 595 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c seq_num >= tbl->init_win) {
tbl 598 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c start_win, seq_num, tbl->init_win);
tbl 599 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->start_win = start_win = seq_num;
tbl 636 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
tbl 645 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl->rx_reorder_ptr[pkt_index]) {
tbl 650 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->rx_reorder_ptr[pkt_index] = payload;
tbl 657 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_11n_scan_and_dispatch(priv, tbl);
tbl 660 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (!tbl->timer_context.timer_is_set ||
tbl 661 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c prev_start_win != tbl->start_win)
tbl 662 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_11n_rxreorder_timer_restart(tbl);
tbl 675 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl;
tbl 690 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
tbl 692 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (!tbl) {
tbl 697 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_del_rx_reorder_entry(priv, tbl);
tbl 729 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl;
tbl 744 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
tbl 746 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl)
tbl 747 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c mwifiex_del_rx_reorder_entry(priv, tbl);
tbl 755 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
tbl 757 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c if (tbl) {
tbl 761 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->amsdu = true;
tbl 763 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->amsdu = false;
tbl 820 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c struct mwifiex_rx_reorder_tbl *tbl;
tbl 829 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
tbl 830 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c tbl->flags = flags;
tbl 38 drivers/net/wireless/marvell/mwifiex/init.c struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
tbl 47 drivers/net/wireless/marvell/mwifiex/init.c spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
tbl 48 drivers/net/wireless/marvell/mwifiex/init.c list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head);
tbl 49 drivers/net/wireless/marvell/mwifiex/init.c spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
tbl 1176 drivers/net/wireless/marvell/mwifiex/wmm.c struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
tbl 1179 drivers/net/wireless/marvell/mwifiex/wmm.c spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
tbl 1184 drivers/net/wireless/marvell/mwifiex/wmm.c list_move(&tbl[priv->bss_priority].bss_prio_head,
tbl 1185 drivers/net/wireless/marvell/mwifiex/wmm.c &tbl[priv->bss_priority].bss_prio_cur->list);
tbl 1186 drivers/net/wireless/marvell/mwifiex/wmm.c spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
tbl 792 drivers/net/wireless/realtek/rtw88/main.h void (*parse)(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
tbl 793 drivers/net/wireless/realtek/rtw88/main.h void (*do_cfg)(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 799 drivers/net/wireless/realtek/rtw88/main.h const struct rtw_table *tbl)
tbl 801 drivers/net/wireless/realtek/rtw88/main.h (*tbl->parse)(rtwdev, tbl);
tbl 821 drivers/net/wireless/realtek/rtw88/phy.c void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
tbl 823 drivers/net/wireless/realtek/rtw88/phy.c const union phy_table_tile *p = tbl->data;
tbl 824 drivers/net/wireless/realtek/rtw88/phy.c const union phy_table_tile *end = p + tbl->size / 2;
tbl 859 drivers/net/wireless/realtek/rtw88/phy.c (*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
tbl 1211 drivers/net/wireless/realtek/rtw88/phy.c void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
tbl 1213 drivers/net/wireless/realtek/rtw88/phy.c const struct phy_pg_cfg_pair *p = tbl->data;
tbl 1214 drivers/net/wireless/realtek/rtw88/phy.c const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
tbl 1360 drivers/net/wireless/realtek/rtw88/phy.c const struct rtw_table *tbl)
tbl 1362 drivers/net/wireless/realtek/rtw88/phy.c const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
tbl 1363 drivers/net/wireless/realtek/rtw88/phy.c const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
tbl 1373 drivers/net/wireless/realtek/rtw88/phy.c void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 1379 drivers/net/wireless/realtek/rtw88/phy.c void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 1385 drivers/net/wireless/realtek/rtw88/phy.c void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
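The 11n_rxreorder.c entries above sketch a sliding receive-reorder window: flush every buffered frame below the new window start, shift the surviving slots down, then record the new start. A simplified kernel-style version with hypothetical demo_* types (the real code works on sk_buffs and holds a spinlock around the window):

    #include <linux/minmax.h>

    struct demo_reorder {
        void **ptr;       /* win_size slots of buffered frames, or NULL */
        int win_size;
        int start_win;    /* sequence number of slot 0 */
    };

    static void demo_dispatch(void *pkt) { /* hand the frame up the stack */ }

    static void demo_dispatch_until(struct demo_reorder *tbl, int start_win)
    {
        int pkt_to_send, i;

        /* how many leading slots fall below the new window start */
        pkt_to_send = (start_win > tbl->start_win) ?
                      min(start_win - tbl->start_win, tbl->win_size) :
                      tbl->win_size;

        for (i = 0; i < pkt_to_send; i++) {
            if (tbl->ptr[i]) {
                demo_dispatch(tbl->ptr[i]);
                tbl->ptr[i] = NULL;
            }
        }
        /* slide the remaining window down to slot 0 */
        for (i = 0; i < tbl->win_size - pkt_to_send; i++) {
            tbl->ptr[i] = tbl->ptr[pkt_to_send + i];
            tbl->ptr[pkt_to_send + i] = NULL;
        }
        tbl->start_win = start_win;
    }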
tbl 1404 drivers/net/wireless/realtek/rtw88/phy.c void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 1412 drivers/net/wireless/realtek/rtw88/phy.c rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
tbl 1447 drivers/net/wireless/realtek/rtw88/phy.c const struct rtw_table *tbl;
tbl 1449 drivers/net/wireless/realtek/rtw88/phy.c tbl = chip->rf_tbl[rf_path];
tbl 1450 drivers/net/wireless/realtek/rtw88/phy.c rtw_load_table(rtwdev, tbl);
tbl 31 drivers/net/wireless/realtek/rtw88/phy.h void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
tbl 32 drivers/net/wireless/realtek/rtw88/phy.h void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
tbl 33 drivers/net/wireless/realtek/rtw88/phy.h void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
tbl 34 drivers/net/wireless/realtek/rtw88/phy.h void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 36 drivers/net/wireless/realtek/rtw88/phy.h void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 38 drivers/net/wireless/realtek/rtw88/phy.h void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 40 drivers/net/wireless/realtek/rtw88/phy.h void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
tbl 2063 drivers/net/wireless/realtek/rtw88/rtw8822c.c const struct rtw_table *tbl)
tbl 2065 drivers/net/wireless/realtek/rtw88/rtw8822c.c const struct dpk_cfg_pair *p = tbl->data;
tbl 2066 drivers/net/wireless/realtek/rtw88/rtw8822c.c const struct dpk_cfg_pair *end = p + tbl->size / 3;
tbl 119 drivers/net/wireless/realtek/rtw88/rtw8822c.h const struct rtw_table *tbl);
tbl 184 drivers/net/wireless/st/cw1200/txrx.c policy->tbl[off] |= __cpu_to_le32(retries << shift);
tbl 366 drivers/net/wireless/st/cw1200/txrx.c &arg.tbl[arg.num];
tbl 373 drivers/net/wireless/st/cw1200/txrx.c memcpy(dst->rate_count_indices, src->tbl,
tbl 23 drivers/net/wireless/st/cw1200/txrx.h __le32 tbl[3];
tbl 1556 drivers/net/wireless/st/cw1200/wsm.h struct wsm_tx_rate_retry_policy tbl[8];
tbl 1281 drivers/phy/qualcomm/phy-qcom-qmp.c const struct qmp_phy_init_tbl tbl[],
tbl 1285 drivers/phy/qualcomm/phy-qcom-qmp.c const struct qmp_phy_init_tbl *t = tbl;
tbl 216 drivers/phy/qualcomm/phy-qcom-qusb2.c const struct qusb2_phy_init_tbl *tbl;
tbl 239 drivers/phy/qualcomm/phy-qcom-qusb2.c .tbl = msm8996_init_tbl,
tbl 250 drivers/phy/qualcomm/phy-qcom-qusb2.c .tbl = msm8998_init_tbl,
tbl 262 drivers/phy/qualcomm/phy-qcom-qusb2.c .tbl = sdm845_init_tbl,
tbl 378 drivers/phy/qualcomm/phy-qcom-qusb2.c const struct qusb2_phy_init_tbl tbl[], int num)
tbl 383 drivers/phy/qualcomm/phy-qcom-qusb2.c if (tbl[i].in_layout)
tbl 384 drivers/phy/qualcomm/phy-qcom-qusb2.c writel(tbl[i].val, base + regs[tbl[i].offset]);
tbl 386 drivers/phy/qualcomm/phy-qcom-qusb2.c writel(tbl[i].val, base + tbl[i].offset);
tbl 646 drivers/phy/qualcomm/phy-qcom-qusb2.c qcom_qusb2_phy_configure(qphy->base, cfg->regs, cfg->tbl,
tbl 426 drivers/power/supply/ab8500_btemp.c const struct abx500_res_to_temp *tbl, int tbl_size, int res)
tbl 435 drivers/power/supply/ab8500_btemp.c if (res > tbl[0].resist)
tbl 437 drivers/power/supply/ab8500_btemp.c else if (res <= tbl[tbl_size - 1].resist)
tbl 441 drivers/power/supply/ab8500_btemp.c while (!(res <= tbl[i].resist &&
tbl 442 drivers/power/supply/ab8500_btemp.c res > tbl[i + 1].resist))
tbl 446 drivers/power/supply/ab8500_btemp.c return tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
tbl 447 drivers/power/supply/ab8500_btemp.c (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
tbl 857 drivers/power/supply/ab8500_fg.c const struct abx500_v_to_cap *tbl;
tbl 860 drivers/power/supply/ab8500_fg.c tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl,
tbl 864 drivers/power/supply/ab8500_fg.c if (voltage > tbl[i].voltage)
tbl 870 drivers/power/supply/ab8500_fg.c tbl[i].voltage,
tbl 871 drivers/power/supply/ab8500_fg.c tbl[i].capacity * 10,
tbl 872 drivers/power/supply/ab8500_fg.c tbl[i-1].voltage,
tbl 873 drivers/power/supply/ab8500_fg.c tbl[i-1].capacity * 10);
tbl 909 drivers/power/supply/ab8500_fg.c const struct batres_vs_temp *tbl;
tbl 912 drivers/power/supply/ab8500_fg.c tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl;
tbl 916 drivers/power/supply/ab8500_fg.c if (di->bat_temp / 10 > tbl[i].temp)
tbl 922 drivers/power/supply/ab8500_fg.c tbl[i].temp,
tbl 923 drivers/power/supply/ab8500_fg.c tbl[i].resist,
tbl 924 drivers/power/supply/ab8500_fg.c tbl[i-1].temp,
tbl 925 drivers/power/supply/ab8500_fg.c tbl[i-1].resist);
tbl 927 drivers/power/supply/ab8500_fg.c resist = tbl[0].resist;
tbl 929 drivers/power/supply/ab8500_fg.c resist = tbl[tbl_size - 1].resist;
tbl 229 drivers/power/supply/bq24190_charger.c static u8 bq24190_find_idx(const int tbl[], int tbl_size, int v)
tbl 234 drivers/power/supply/bq24190_charger.c if (v < tbl[i])
tbl 294 drivers/power/supply/bq24190_charger.c const int tbl[], int tbl_size,
tbl 305 drivers/power/supply/bq24190_charger.c *val = tbl[v];
tbl 312 drivers/power/supply/bq24190_charger.c const int tbl[], int tbl_size,
tbl 317 drivers/power/supply/bq24190_charger.c idx = bq24190_find_idx(tbl, tbl_size, val);
tbl 269 drivers/power/supply/bq25890_charger.c const u32 *tbl;
tbl 313 drivers/power/supply/bq25890_charger.c const u32 *tbl = bq25890_tables[id].lt.tbl;
tbl 316 drivers/power/supply/bq25890_charger.c for (idx = 1; idx < tbl_size && tbl[idx] <= value; idx++)
tbl 339 drivers/power/supply/bq25890_charger.c return bq25890_tables[id].lt.tbl[idx];
tbl 254 drivers/power/supply/rt9455_charger.c static unsigned int rt9455_find_idx(const int tbl[], int tbl_size, int v)
tbl 265 drivers/power/supply/rt9455_charger.c if (v <= tbl[i])
tbl 273 drivers/power/supply/rt9455_charger.c const int tbl[], int tbl_size, int *val)
tbl 283 drivers/power/supply/rt9455_charger.c *val = tbl[v];
tbl 290 drivers/power/supply/rt9455_charger.c const int tbl[], int tbl_size, int val)
tbl 292 drivers/power/supply/rt9455_charger.c unsigned int idx = rt9455_find_idx(tbl, tbl_size, val);
tbl 203 drivers/power/supply/smb347-charger.c static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
tbl 207 drivers/power/supply/smb347-charger.c return tbl[val];
tbl 211 drivers/power/supply/smb347-charger.c static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
tbl 216 drivers/power/supply/smb347-charger.c if (val < tbl[i])
tbl 1198 drivers/rpmsg/qcom_smd.c int tbl;
tbl 1202 drivers/rpmsg/qcom_smd.c for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
tbl 1204 drivers/rpmsg/qcom_smd.c smem_items[tbl].alloc_tbl_id, NULL);
tbl 1211 drivers/rpmsg/qcom_smd.c if (test_bit(i, edge->allocated[tbl]))
tbl 1227 drivers/rpmsg/qcom_smd.c info_id = smem_items[tbl].info_base_id + cid;
tbl 1228 drivers/rpmsg/qcom_smd.c fifo_id = smem_items[tbl].fifo_base_id + cid;
tbl 1239 drivers/rpmsg/qcom_smd.c set_bit(i, edge->allocated[tbl]);
tbl 2107 drivers/s390/net/qeth_l3_main.c if (np->tbl->family == AF_INET)
tbl 329 drivers/sbus/char/envctrl.c int scale, char *tbl, char *bufdata)
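The power-supply entries above show two recurring lookup-table idioms: the bq24190/rt9455-style "find the register index for a requested value" scan over a monotonic table, and the ab8500-style linear interpolation between the two rows that bracket a measurement. A minimal sketch of both, with hypothetical names and no bounds checking beyond what is shown:

    /* Largest table slot whose value does not exceed v (tbl ascending). */
    static int demo_find_idx(const int tbl[], int tbl_size, int v)
    {
        int i;

        for (i = 1; i < tbl_size; i++)
            if (v < tbl[i])
                break;
        return i - 1;
    }

    struct demo_res_temp { int resist; int temp; };

    /*
     * Interpolate temperature from resistance, assuming tbl[] is sorted
     * by falling resistance and tbl[i]/tbl[i+1] bracket res.
     */
    static int demo_res_to_temp(const struct demo_res_temp *tbl, int i, int res)
    {
        return tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
               (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
    }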
tbl 343 drivers/sbus/char/envctrl.c bufdata[0] = tbl[data];
tbl 348 drivers/sbus/char/envctrl.c sprintf(bufdata,"%d ", (tbl[data] * 10) / (scale));
tbl 369 drivers/sbus/char/envctrl.c char *tbl, j = -1;
tbl 388 drivers/sbus/char/envctrl.c tbl = pchild->tables + pchild->tblprop_array[i].offset;
tbl 392 drivers/sbus/char/envctrl.c tbl, bufdata);
tbl 404 drivers/sbus/char/envctrl.c char *tbl = NULL;
tbl 419 drivers/sbus/char/envctrl.c tbl = pchild->tables + pchild->tblprop_array[i].offset;
tbl 423 drivers/sbus/char/envctrl.c tbl, bufdata);
tbl 172 drivers/sbus/char/oradax.c void *tbl; /* Table Address or bitmap */
tbl 484 drivers/sbus/char/oradax.c if (dax_lock_page(ccbp->tbl,
tbl 486 drivers/sbus/char/oradax.c *err_va = (u64)ccbp->tbl;
tbl 271 drivers/scsi/be2iscsi/be_main.c struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
tbl 335 drivers/scsi/be2iscsi/be_main.c inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
tbl 336 drivers/scsi/be2iscsi/be_main.c inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
tbl 354 drivers/scsi/be2iscsi/be_main.c if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
tbl 1340 drivers/scsi/myrs.c struct myrs_cpu_type_tbl *tbl;
tbl 1348 drivers/scsi/myrs.c tbl = myrs_cpu_type_names;
tbl 1350 drivers/scsi/myrs.c if (tbl[i].type == info->cpu[0].cpu_type) {
tbl 1351 drivers/scsi/myrs.c first_processor = tbl[i].name;
tbl 1357 drivers/scsi/myrs.c tbl = myrs_cpu_type_names;
tbl 1359 drivers/scsi/myrs.c if (tbl[i].type == info->cpu[1].cpu_type) {
tbl 1360 drivers/scsi/myrs.c second_processor = tbl[i].name;
tbl 522 drivers/soc/qcom/qcom-geni-se.c int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
tbl 528 drivers/soc/qcom/qcom-geni-se.c *tbl = se->clk_perf_tbl;
tbl 545 drivers/soc/qcom/qcom-geni-se.c *tbl = se->clk_perf_tbl;
tbl 573 drivers/soc/qcom/qcom-geni-se.c unsigned long *tbl;
tbl 580 drivers/soc/qcom/qcom-geni-se.c num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
tbl 589 drivers/soc/qcom/qcom-geni-se.c divider = DIV_ROUND_UP(tbl[i], req_freq);
tbl 590 drivers/soc/qcom/qcom-geni-se.c new_delta = req_freq - tbl[i] / divider;
tbl 594 drivers/soc/qcom/qcom-geni-se.c *res_freq = tbl[i];
tbl 205 drivers/staging/media/hantro/hantro_h264.c struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
tbl 206 drivers/staging/media/hantro/hantro_h264.c u32 *dst = (u32 *)tbl->scaling_list;
tbl 228 drivers/staging/media/hantro/hantro_h264.c struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
tbl 233 drivers/staging/media/hantro/hantro_h264.c tbl->poc[i * 2] = dpb[i].top_field_order_cnt;
tbl 234 drivers/staging/media/hantro/hantro_h264.c tbl->poc[i * 2 + 1] = dpb[i].bottom_field_order_cnt;
tbl 237 drivers/staging/media/hantro/hantro_h264.c tbl->poc[32] = dec_param->top_field_order_cnt;
tbl 238 drivers/staging/media/hantro/hantro_h264.c tbl->poc[33] = dec_param->bottom_field_order_cnt;
tbl 622 drivers/staging/media/hantro/hantro_h264.c struct hantro_h264_dec_priv_tbl *tbl;
tbl 625 drivers/staging/media/hantro/hantro_h264.c priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
tbl 630 drivers/staging/media/hantro/hantro_h264.c priv->size = sizeof(*tbl);
tbl 631 drivers/staging/media/hantro/hantro_h264.c tbl = priv->cpu;
tbl 632 drivers/staging/media/hantro/hantro_h264.c memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
tbl 207 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = container->tables[i];
tbl 209 drivers/vfio/vfio_iommu_spapr_tce.c if (tbl) {
tbl 210 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long entry = ioba >> tbl->it_page_shift;
tbl 211 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long start = tbl->it_offset;
tbl 212 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long end = start + tbl->it_size;
tbl 215 drivers/vfio/vfio_iommu_spapr_tce.c *ptbl = tbl;
tbl 337 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl,
tbl 340 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl);
tbl 360 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = container->tables[i];
tbl 362 drivers/vfio/vfio_iommu_spapr_tce.c if (!tbl)
tbl 365 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
tbl 366 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_free_table(container, tbl);
tbl 410 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl, unsigned long entry)
tbl 415 drivers/vfio/vfio_iommu_spapr_tce.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
tbl 421 drivers/vfio/vfio_iommu_spapr_tce.c tbl->it_page_shift, &hpa, &mem);
tbl 432 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl,
tbl 441 drivers/vfio/vfio_iommu_spapr_tce.c if (tbl->it_indirect_levels && tbl->it_userspace) {
tbl 450 drivers/vfio/vfio_iommu_spapr_tce.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
tbl 454 drivers/vfio/vfio_iommu_spapr_tce.c entry |= tbl->it_level_size - 1;
tbl 463 drivers/vfio/vfio_iommu_spapr_tce.c ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
tbl 472 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_unuse_page_v2(container, tbl, entry);
tbl 479 drivers/vfio/vfio_iommu_spapr_tce.c iommu_tce_kill(tbl, firstentry, pages);
tbl 500 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl,
tbl 509 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
tbl 516 drivers/vfio/vfio_iommu_spapr_tce.c tbl->it_page_shift)) {
tbl 523 drivers/vfio/vfio_iommu_spapr_tce.c ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
tbl 528 drivers/vfio/vfio_iommu_spapr_tce.c __func__, entry << tbl->it_page_shift,
tbl 536 drivers/vfio/vfio_iommu_spapr_tce.c tce += IOMMU_PAGE_SIZE(tbl);
tbl 540 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_clear(container, tbl, entry, i);
tbl 542 drivers/vfio/vfio_iommu_spapr_tce.c iommu_tce_kill(tbl, entry, pages);
tbl 548 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl,
tbl 558 drivers/vfio/vfio_iommu_spapr_tce.c __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
tbl 561 drivers/vfio/vfio_iommu_spapr_tce.c tce, tbl->it_page_shift, &hpa, &mem);
tbl 566 drivers/vfio/vfio_iommu_spapr_tce.c tbl->it_page_shift)) {
tbl 572 drivers/vfio/vfio_iommu_spapr_tce.c hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
tbl 579 drivers/vfio/vfio_iommu_spapr_tce.c ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
tbl 583 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_unuse_page_v2(container, tbl, entry + i);
tbl 585 drivers/vfio/vfio_iommu_spapr_tce.c __func__, entry << tbl->it_page_shift,
tbl 591 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_unuse_page_v2(container, tbl, entry + i);
tbl 595 drivers/vfio/vfio_iommu_spapr_tce.c tce += IOMMU_PAGE_SIZE(tbl);
tbl 599 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_clear(container, tbl, entry, i);
tbl 601 drivers/vfio/vfio_iommu_spapr_tce.c iommu_tce_kill(tbl, entry, pages);
tbl 635 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl)
tbl 637 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
tbl 639 drivers/vfio/vfio_iommu_spapr_tce.c iommu_tce_table_put(tbl);
tbl 649 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = NULL;
tbl 673 drivers/vfio/vfio_iommu_spapr_tce.c page_shift, window_size, levels, &tbl);
tbl 677 drivers/vfio/vfio_iommu_spapr_tce.c BUG_ON(!tbl->it_ops->free);
tbl 686 drivers/vfio/vfio_iommu_spapr_tce.c ret = table_group->ops->set_window(table_group, num, tbl);
tbl 691 drivers/vfio/vfio_iommu_spapr_tce.c container->tables[num] = tbl;
tbl 694 drivers/vfio/vfio_iommu_spapr_tce.c *start_addr = tbl->it_offset << tbl->it_page_shift;
tbl 703 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_free_table(container, tbl);
tbl 712 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl;
tbl 716 drivers/vfio/vfio_iommu_spapr_tce.c num = tce_iommu_find_table(container, start_addr, &tbl);
tbl 720 drivers/vfio/vfio_iommu_spapr_tce.c BUG_ON(!tbl->it_size);
tbl 740 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
tbl 741 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_free_table(container, tbl);
tbl 857 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = NULL;
tbl 880 drivers/vfio/vfio_iommu_spapr_tce.c num = tce_iommu_find_table(container, param.iova, &tbl);
tbl 884 drivers/vfio/vfio_iommu_spapr_tce.c if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
tbl 885 drivers/vfio/vfio_iommu_spapr_tce.c (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
tbl 901 drivers/vfio/vfio_iommu_spapr_tce.c ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
tbl 906 drivers/vfio/vfio_iommu_spapr_tce.c ret = tce_iommu_build_v2(container, tbl,
tbl 907 drivers/vfio/vfio_iommu_spapr_tce.c param.iova >> tbl->it_page_shift,
tbl 909 drivers/vfio/vfio_iommu_spapr_tce.c param.size >> tbl->it_page_shift,
tbl 912 drivers/vfio/vfio_iommu_spapr_tce.c ret = tce_iommu_build(container, tbl,
tbl 913 drivers/vfio/vfio_iommu_spapr_tce.c param.iova >> tbl->it_page_shift,
tbl 915 drivers/vfio/vfio_iommu_spapr_tce.c param.size >> tbl->it_page_shift,
tbl 918 drivers/vfio/vfio_iommu_spapr_tce.c iommu_flush_tce(tbl);
tbl 924 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = NULL;
tbl 947 drivers/vfio/vfio_iommu_spapr_tce.c num = tce_iommu_find_table(container, param.iova, &tbl);
tbl 951 drivers/vfio/vfio_iommu_spapr_tce.c if (param.size & ~IOMMU_PAGE_MASK(tbl))
tbl 954 drivers/vfio/vfio_iommu_spapr_tce.c ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
tbl 955 drivers/vfio/vfio_iommu_spapr_tce.c param.size >> tbl->it_page_shift);
tbl 959 drivers/vfio/vfio_iommu_spapr_tce.c ret = tce_iommu_clear(container, tbl,
tbl 960 drivers/vfio/vfio_iommu_spapr_tce.c param.iova >> tbl->it_page_shift,
tbl 961 drivers/vfio/vfio_iommu_spapr_tce.c param.size >> tbl->it_page_shift);
tbl 962 drivers/vfio/vfio_iommu_spapr_tce.c iommu_flush_tce(tbl);
tbl 1147 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = container->tables[i];
tbl 1149 drivers/vfio/vfio_iommu_spapr_tce.c if (!tbl)
tbl 1152 drivers/vfio/vfio_iommu_spapr_tce.c tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
tbl 1153 drivers/vfio/vfio_iommu_spapr_tce.c if (tbl->it_map)
tbl 1154 drivers/vfio/vfio_iommu_spapr_tce.c iommu_release_ownership(tbl);
tbl 1166 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = table_group->tables[i];
tbl 1168 drivers/vfio/vfio_iommu_spapr_tce.c if (!tbl || !tbl->it_map)
tbl 1171 drivers/vfio/vfio_iommu_spapr_tce.c rc = iommu_take_ownership(tbl);
tbl 1219 drivers/vfio/vfio_iommu_spapr_tce.c struct iommu_table *tbl = container->tables[i];
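The spapr TCE entries above (lines 207-215 and the tce_iommu_find_table() callers) test whether an I/O address falls inside a table's DMA window by converting it to an IO-page index and comparing against [it_offset, it_offset + it_size). A minimal sketch of that window test, with hypothetical demo_* names:

    #include <linux/types.h>

    struct demo_iommu_table {
        unsigned long it_offset;      /* first IO page index of the window */
        unsigned long it_size;        /* window length in IO pages */
        unsigned int it_page_shift;   /* log2 of the IO page size */
    };

    static bool demo_table_covers(const struct demo_iommu_table *tbl,
                                  unsigned long ioba)
    {
        unsigned long entry = ioba >> tbl->it_page_shift;
        unsigned long start = tbl->it_offset;
        unsigned long end = start + tbl->it_size;

        return entry >= start && entry < end;
    }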
tbl 1221 drivers/vfio/vfio_iommu_spapr_tce.c if (!tbl)
tbl 1224 drivers/vfio/vfio_iommu_spapr_tce.c ret = table_group->ops->set_window(table_group, i, tbl);
tbl 1347 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE(tbl,dev) \
tbl 1348 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \
tbl 1349 drivers/video/fbdev/riva/riva_hw.c chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]
tbl 1350 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_8BPP(tbl,dev) \
tbl 1351 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \
tbl 1352 drivers/video/fbdev/riva/riva_hw.c chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1]
tbl 1353 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_15BPP(tbl,dev) \
tbl 1354 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \
tbl 1355 drivers/video/fbdev/riva/riva_hw.c chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1]
tbl 1356 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_16BPP(tbl,dev) \
tbl 1357 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++) \
tbl 1358 drivers/video/fbdev/riva/riva_hw.c chip->dev[tbl##Table##dev##_16BPP[i][0]] = tbl##Table##dev##_16BPP[i][1]
tbl 1359 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_32BPP(tbl,dev) \
tbl 1360 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++) \
tbl 1361 drivers/video/fbdev/riva/riva_hw.c chip->dev[tbl##Table##dev##_32BPP[i][0]] = tbl##Table##dev##_32BPP[i][1]
tbl 1364 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE(tbl,dev) \
tbl 1365 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \
tbl 1366 drivers/video/fbdev/riva/riva_hw.c NV_WR32(&chip->dev[tbl##Table##dev[i][0]], 0, tbl##Table##dev[i][1])
tbl 1367 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_8BPP(tbl,dev) \
tbl 1368 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \
tbl 1369 drivers/video/fbdev/riva/riva_hw.c NV_WR32(&chip->dev[tbl##Table##dev##_8BPP[i][0]], 0, tbl##Table##dev##_8BPP[i][1])
tbl 1370 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_15BPP(tbl,dev) \
tbl 1371 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \
tbl 1372 drivers/video/fbdev/riva/riva_hw.c NV_WR32(&chip->dev[tbl##Table##dev##_15BPP[i][0]], 0, tbl##Table##dev##_15BPP[i][1])
tbl 1373 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_16BPP(tbl,dev) \
tbl 1374 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++) \
tbl 1375 drivers/video/fbdev/riva/riva_hw.c NV_WR32(&chip->dev[tbl##Table##dev##_16BPP[i][0]], 0, tbl##Table##dev##_16BPP[i][1])
tbl 1376 drivers/video/fbdev/riva/riva_hw.c #define LOAD_FIXED_STATE_32BPP(tbl,dev) \
tbl 1377 drivers/video/fbdev/riva/riva_hw.c for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++) \
tbl 1378 drivers/video/fbdev/riva/riva_hw.c NV_WR32(&chip->dev[tbl##Table##dev##_32BPP[i][0]], 0, tbl##Table##dev##_32BPP[i][1])
tbl 317 drivers/watchdog/wdat_wdt.c const struct acpi_table_wdat *tbl;
tbl 325 drivers/watchdog/wdat_wdt.c (struct acpi_table_header **)&tbl);
tbl 339 drivers/watchdog/wdat_wdt.c if (tbl->timer_period < 1)
tbl 341 drivers/watchdog/wdat_wdt.c if (tbl->min_count > tbl->max_count)
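The riva_hw.c entries above rely on token pasting: the macro arguments are glued into the name of a [][2] register/value table, so one macro body can walk many fixed tables. A small sketch of the trick with a hypothetical table and register file (the real macros write through chip->dev or NV_WR32, and likewise reuse a caller-scope index variable):

    /* Hypothetical register file and table; 2 x unsigned int = 8 bytes per row. */
    static unsigned int demo_regs[256];

    static const unsigned int nvTablePMC[][2] = {
        { 0x10, 0x1 },
        { 0x14, 0x0 },
    };

    #define DEMO_LOAD_FIXED_STATE(tbl, dev)                         \
        for (i = 0; i < sizeof(tbl##Table##dev)/8; i++)             \
            demo_regs[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]

    static void demo_load(void)
    {
        unsigned int i;

        DEMO_LOAD_FIXED_STATE(nv, PMC);   /* expands to nvTablePMC */
    }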
tbl 344 drivers/watchdog/wdat_wdt.c wdat->period = tbl->timer_period;
tbl 345 drivers/watchdog/wdat_wdt.c wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count;
tbl 346 drivers/watchdog/wdat_wdt.c wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count;
tbl 347 drivers/watchdog/wdat_wdt.c wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED;
tbl 373 drivers/watchdog/wdat_wdt.c entries = (struct acpi_wdat_entry *)(tbl + 1);
tbl 374 drivers/watchdog/wdat_wdt.c for (i = 0; i < tbl->entries; i++) {
tbl 630 fs/cifs/winucase.c const wchar_t *tbl;
tbl 637 fs/cifs/winucase.c tbl = toplevel[idx];
tbl 638 fs/cifs/winucase.c if (!tbl)
tbl 645 fs/cifs/winucase.c out = tbl[idx];
tbl 32 fs/fs_parser.c int __lookup_constant(const struct constant_table *tbl, size_t tbl_size,
tbl 38 fs/fs_parser.c if (strcmp(name, tbl[i].name) == 0)
tbl 39 fs/fs_parser.c return tbl[i].value;
tbl 315 fs/fs_parser.c bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
tbl 327 fs/fs_parser.c if (!tbl[i].name) {
tbl 330 fs/fs_parser.c } else if (i > 0 && tbl[i - 1].name) {
tbl 331 fs/fs_parser.c int c = strcmp(tbl[i-1].name, tbl[i].name);
tbl 335 fs/fs_parser.c i, tbl[i].name);
tbl 340 fs/fs_parser.c i, tbl[i-1].name, tbl[i].name);
tbl 345 fs/fs_parser.c if (tbl[i].value != special &&
tbl 346 fs/fs_parser.c (tbl[i].value < low || tbl[i].value > high)) {
tbl 348 fs/fs_parser.c i, tbl[i].name, tbl[i].value, low, high);
tbl 416 fs/nfs/callback_proc.c validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
tbl 422 fs/nfs/callback_proc.c if (args->csa_slotid > tbl->server_highest_slotid)
tbl 428 fs/nfs/callback_proc.c if (nfs4_test_locked_slot(tbl, slot->slot_nr))
tbl 469 fs/nfs/callback_proc.c struct nfs4_slot_table *tbl;
tbl 478 fs/nfs/callback_proc.c tbl = &session->fc_slot_table;
tbl 490 fs/nfs/callback_proc.c status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
tbl 507 fs/nfs/callback_proc.c struct nfs4_slot_table *tbl;
tbl 521 fs/nfs/callback_proc.c tbl = &clp->cl_session->bc_slot_table;
tbl 529 fs/nfs/callback_proc.c spin_lock(&tbl->slot_tbl_lock);
tbl 531 fs/nfs/callback_proc.c if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
tbl 542 fs/nfs/callback_proc.c slot = nfs4_lookup_slot(tbl, args->csa_slotid);
tbl 546 fs/nfs/callback_proc.c res->csr_highestslotid = tbl->server_highest_slotid;
tbl 547 fs/nfs/callback_proc.c res->csr_target_highestslotid = tbl->target_highest_slotid;
tbl 549 fs/nfs/callback_proc.c status = validate_seqid(tbl, slot, args);
tbl 552 fs/nfs/callback_proc.c if (!nfs4_try_to_lock_slot(tbl, slot)) {
tbl 570 fs/nfs/callback_proc.c &tbl->slot_tbl_lock) < 0) {
tbl 582 fs/nfs/callback_proc.c spin_unlock(&tbl->slot_tbl_lock);
tbl 795 fs/nfs/callback_xdr.c struct nfs4_slot_table *tbl = &session->bc_slot_table;
tbl 797 fs/nfs/callback_xdr.c spin_lock(&tbl->slot_tbl_lock);
tbl 802 fs/nfs/callback_xdr.c nfs4_free_slot(tbl, slot);
tbl 803 fs/nfs/callback_xdr.c spin_unlock(&tbl->slot_tbl_lock);
tbl 130 fs/nfs/filelayout/filelayout.c struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
tbl 173 fs/nfs/filelayout/filelayout.c rpc_wake_up(&tbl->slot_tbl_waitq);
tbl 188 fs/nfs/filelayout/filelayout.c rpc_wake_up(&tbl->slot_tbl_waitq);
tbl 1137 fs/nfs/flexfilelayout/flexfilelayout.c struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
tbl 1175 fs/nfs/flexfilelayout/flexfilelayout.c rpc_wake_up(&tbl->slot_tbl_waitq);
tbl 1189 fs/nfs/flexfilelayout/flexfilelayout.c rpc_wake_up(&tbl->slot_tbl_waitq);
tbl 298 fs/nfs/nfs4client.c struct nfs4_slot_table *tbl;
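The fs_parser.c entries above show the name-to-value scan behind __lookup_constant(); the table is also checked for sorted, in-range entries by validate_constant_table(). A minimal kernel-style sketch of the lookup with hypothetical demo_* names (the real helper takes a "not found" fallback value, mirrored here):

    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_constant_table {
        const char *name;
        int value;
    };

    /* Linear scan; returns not_found when no entry matches name. */
    static int demo_lookup_constant(const struct demo_constant_table *tbl,
                                    size_t tbl_size, const char *name,
                                    int not_found)
    {
        size_t i;

        for (i = 0; i < tbl_size; i++)
            if (strcmp(name, tbl[i].name) == 0)
                return tbl[i].value;
        return not_found;
    }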
fs/nfs/nfs4client.c struct nfs4_slot_table *tbl; tbl 301 fs/nfs/nfs4client.c tbl = kzalloc(sizeof(*tbl), GFP_NOFS); tbl 302 fs/nfs/nfs4client.c if (tbl == NULL) tbl 305 fs/nfs/nfs4client.c ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE, tbl 308 fs/nfs/nfs4client.c kfree(tbl); tbl 312 fs/nfs/nfs4client.c clp->cl_slot_tbl = tbl; tbl 698 fs/nfs/nfs4proc.c struct nfs4_slot_table *tbl; tbl 700 fs/nfs/nfs4proc.c tbl = slot->table; tbl 701 fs/nfs/nfs4proc.c spin_lock(&tbl->slot_tbl_lock); tbl 702 fs/nfs/nfs4proc.c if (!nfs41_wake_and_assign_slot(tbl, slot)) tbl 703 fs/nfs/nfs4proc.c nfs4_free_slot(tbl, slot); tbl 704 fs/nfs/nfs4proc.c spin_unlock(&tbl->slot_tbl_lock); tbl 722 fs/nfs/nfs4proc.c struct nfs4_slot_table *tbl; tbl 727 fs/nfs/nfs4proc.c tbl = slot->table; tbl 728 fs/nfs/nfs4proc.c session = tbl->session; tbl 735 fs/nfs/nfs4proc.c spin_lock(&tbl->slot_tbl_lock); tbl 739 fs/nfs/nfs4proc.c if (tbl->highest_used_slotid > tbl->target_highest_slotid) tbl 742 fs/nfs/nfs4proc.c if (nfs41_wake_and_assign_slot(tbl, slot)) { tbl 746 fs/nfs/nfs4proc.c nfs4_free_slot(tbl, slot); tbl 748 fs/nfs/nfs4proc.c if (tbl->highest_used_slotid != NFS4_NO_SLOT) tbl 751 fs/nfs/nfs4proc.c spin_unlock(&tbl->slot_tbl_lock); tbl 754 fs/nfs/nfs4proc.c if (waitqueue_active(&tbl->slot_waitq)) tbl 755 fs/nfs/nfs4proc.c wake_up_all(&tbl->slot_waitq); tbl 1017 fs/nfs/nfs4proc.c struct nfs4_slot_table *tbl = client->cl_slot_tbl; tbl 1025 fs/nfs/nfs4proc.c tbl = &session->fc_slot_table; tbl 1027 fs/nfs/nfs4proc.c spin_lock(&tbl->slot_tbl_lock); tbl 1029 fs/nfs/nfs4proc.c if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) tbl 1032 fs/nfs/nfs4proc.c slot = nfs4_alloc_slot(tbl); tbl 1038 fs/nfs/nfs4proc.c spin_unlock(&tbl->slot_tbl_lock); tbl 1050 fs/nfs/nfs4proc.c rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task, tbl 1053 fs/nfs/nfs4proc.c rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task, tbl 1055 fs/nfs/nfs4proc.c spin_unlock(&tbl->slot_tbl_lock); tbl 1059 fs/nfs/nfs4proc.c rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, tbl 1062 fs/nfs/nfs4proc.c rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); tbl 1063 fs/nfs/nfs4proc.c spin_unlock(&tbl->slot_tbl_lock); tbl 27 fs/nfs/nfs4session.c static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue) tbl 29 fs/nfs/nfs4session.c tbl->highest_used_slotid = NFS4_NO_SLOT; tbl 30 fs/nfs/nfs4session.c spin_lock_init(&tbl->slot_tbl_lock); tbl 31 fs/nfs/nfs4session.c rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue); tbl 32 fs/nfs/nfs4session.c init_waitqueue_head(&tbl->slot_waitq); tbl 33 fs/nfs/nfs4session.c init_completion(&tbl->complete); tbl 39 fs/nfs/nfs4session.c static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize) tbl 42 fs/nfs/nfs4session.c if (newsize >= tbl->max_slots) tbl 45 fs/nfs/nfs4session.c p = &tbl->slots; tbl 53 fs/nfs/nfs4session.c tbl->max_slots--; tbl 62 fs/nfs/nfs4session.c void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl) tbl 64 fs/nfs/nfs4session.c if (nfs4_slot_tbl_draining(tbl)) tbl 65 fs/nfs/nfs4session.c complete(&tbl->complete); tbl 83 fs/nfs/nfs4session.c void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) tbl 88 fs/nfs/nfs4session.c __clear_bit(slotid, tbl->used_slots); tbl 91 fs/nfs/nfs4session.c if (slotid == tbl->highest_used_slotid) { tbl 92 fs/nfs/nfs4session.c u32 new_max = find_last_bit(tbl->used_slots, slotid); tbl 94 fs/nfs/nfs4session.c tbl->highest_used_slotid = new_max; tbl 96 fs/nfs/nfs4session.c tbl->highest_used_slotid = NFS4_NO_SLOT; tbl 
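The nfs4_free_slot() hits above show the slot-table bookkeeping in full: slot ownership lives in a bitmap, and highest_used_slotid only needs recomputing when the freed slot was the current maximum, in which case find_last_bit() scans down for the next busy slot. A simplified restatement with illustrative names (the real code additionally completes table draining at this point and logs the result):

        #include <linux/bitops.h>

        #define EX_NO_SLOT      ((u32)~0U)      /* stand-in for NFS4_NO_SLOT */

        static void ex_free_slot(unsigned long *used, u32 *highest, u32 slotid)
        {
                __clear_bit(slotid, used);
                if (slotid == *highest) {
                        /* find_last_bit() returns its size argument when
                         * no bit below @slotid is set */
                        u32 new_max = find_last_bit(used, slotid);

                        if (new_max < slotid)
                                *highest = new_max;     /* next-busiest slot */
                        else
                                *highest = EX_NO_SLOT;  /* table is now idle */
                }
        }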
97 fs/nfs/nfs4session.c nfs4_slot_tbl_drain_complete(tbl); tbl 101 fs/nfs/nfs4session.c slotid, tbl->highest_used_slotid); tbl 104 fs/nfs/nfs4session.c static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl, tbl 111 fs/nfs/nfs4session.c slot->table = tbl; tbl 120 fs/nfs/nfs4session.c static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl, tbl 125 fs/nfs/nfs4session.c p = &tbl->slots; tbl 128 fs/nfs/nfs4session.c *p = nfs4_new_slot(tbl, tbl->max_slots, tbl 132 fs/nfs/nfs4session.c tbl->max_slots++; tbl 142 fs/nfs/nfs4session.c static void nfs4_lock_slot(struct nfs4_slot_table *tbl, tbl 147 fs/nfs/nfs4session.c __set_bit(slotid, tbl->used_slots); tbl 148 fs/nfs/nfs4session.c if (slotid > tbl->highest_used_slotid || tbl 149 fs/nfs/nfs4session.c tbl->highest_used_slotid == NFS4_NO_SLOT) tbl 150 fs/nfs/nfs4session.c tbl->highest_used_slotid = slotid; tbl 151 fs/nfs/nfs4session.c slot->generation = tbl->generation; tbl 159 fs/nfs/nfs4session.c bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) tbl 161 fs/nfs/nfs4session.c if (nfs4_test_locked_slot(tbl, slot->slot_nr)) tbl 163 fs/nfs/nfs4session.c nfs4_lock_slot(tbl, slot); tbl 172 fs/nfs/nfs4session.c struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid) tbl 174 fs/nfs/nfs4session.c if (slotid <= tbl->max_slotid) tbl 175 fs/nfs/nfs4session.c return nfs4_find_or_create_slot(tbl, slotid, 0, GFP_NOWAIT); tbl 179 fs/nfs/nfs4session.c static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid, tbl 181 fs/nfs/nfs4session.c __must_hold(&tbl->slot_tbl_lock) tbl 186 fs/nfs/nfs4session.c slot = nfs4_lookup_slot(tbl, slotid); tbl 201 fs/nfs/nfs4session.c static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, tbl 207 fs/nfs/nfs4session.c spin_lock(&tbl->slot_tbl_lock); tbl 208 fs/nfs/nfs4session.c if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 && tbl 209 fs/nfs/nfs4session.c cur_seq == seq_nr && test_bit(slotid, tbl->used_slots)) tbl 211 fs/nfs/nfs4session.c spin_unlock(&tbl->slot_tbl_lock); tbl 222 fs/nfs/nfs4session.c int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl, tbl 226 fs/nfs/nfs4session.c if (wait_event_timeout(tbl->slot_waitq, tbl 227 fs/nfs/nfs4session.c !nfs4_slot_seqid_in_use(tbl, slotid, seq_nr), tbl 242 fs/nfs/nfs4session.c struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl) tbl 248 fs/nfs/nfs4session.c __func__, tbl->used_slots[0], tbl->highest_used_slotid, tbl 249 fs/nfs/nfs4session.c tbl->max_slotid + 1); tbl 250 fs/nfs/nfs4session.c slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1); tbl 251 fs/nfs/nfs4session.c if (slotid <= tbl->max_slotid) { tbl 252 fs/nfs/nfs4session.c ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT); tbl 254 fs/nfs/nfs4session.c nfs4_lock_slot(tbl, ret); tbl 257 fs/nfs/nfs4session.c __func__, tbl->used_slots[0], tbl->highest_used_slotid, tbl 262 fs/nfs/nfs4session.c static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl, tbl 265 fs/nfs/nfs4session.c if (max_reqs <= tbl->max_slots) tbl 267 fs/nfs/nfs4session.c if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS))) tbl 272 fs/nfs/nfs4session.c static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl, tbl 278 fs/nfs/nfs4session.c nfs4_shrink_slot_table(tbl, server_highest_slotid + 1); tbl 279 fs/nfs/nfs4session.c p = &tbl->slots; tbl 286 fs/nfs/nfs4session.c tbl->highest_used_slotid = NFS4_NO_SLOT; tbl 287 fs/nfs/nfs4session.c tbl->target_highest_slotid = server_highest_slotid; tbl 288 
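Its counterpart nfs4_alloc_slot(), also visible above, is the same bitmap walked the other way: find_first_zero_bit() bounded by max_slotid + 1 picks the lowest free slot, which is then marked used and may raise highest_used_slotid. A matching sketch, continuing the illustrative names from the previous one:

        #include <linux/errno.h>

        static int ex_alloc_slot(unsigned long *used, u32 max_slotid, u32 *highest)
        {
                u32 slotid = find_first_zero_bit(used, max_slotid + 1);

                if (slotid > max_slotid)
                        return -EBUSY;          /* every slot is in use */
                __set_bit(slotid, used);
                if (*highest == EX_NO_SLOT || slotid > *highest)
                        *highest = slotid;
                return slotid;
        }

The real nfs4_alloc_slot() goes through nfs4_find_or_create_slot() instead of a plain bitmap test, since slots past the end of the list may have to be allocated on demand with GFP_NOWAIT.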
fs/nfs/nfs4session.c tbl->server_highest_slotid = server_highest_slotid; tbl 289 fs/nfs/nfs4session.c tbl->d_target_highest_slotid = 0; tbl 290 fs/nfs/nfs4session.c tbl->d2_target_highest_slotid = 0; tbl 291 fs/nfs/nfs4session.c tbl->max_slotid = server_highest_slotid; tbl 297 fs/nfs/nfs4session.c static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, tbl 303 fs/nfs/nfs4session.c max_reqs, tbl->max_slots); tbl 308 fs/nfs/nfs4session.c ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue); tbl 312 fs/nfs/nfs4session.c spin_lock(&tbl->slot_tbl_lock); tbl 313 fs/nfs/nfs4session.c nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue); tbl 314 fs/nfs/nfs4session.c spin_unlock(&tbl->slot_tbl_lock); tbl 317 fs/nfs/nfs4session.c tbl, tbl->slots, tbl->max_slots); tbl 326 fs/nfs/nfs4session.c static void nfs4_release_slot_table(struct nfs4_slot_table *tbl) tbl 328 fs/nfs/nfs4session.c nfs4_shrink_slot_table(tbl, 0); tbl 336 fs/nfs/nfs4session.c void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl) tbl 338 fs/nfs/nfs4session.c nfs4_release_slot_table(tbl); tbl 339 fs/nfs/nfs4session.c rpc_destroy_wait_queue(&tbl->slot_tbl_waitq); tbl 350 fs/nfs/nfs4session.c int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs, tbl 353 fs/nfs/nfs4session.c nfs4_init_slot_table(tbl, queue); tbl 354 fs/nfs/nfs4session.c return nfs4_realloc_slot_table(tbl, max_reqs, 0); tbl 362 fs/nfs/nfs4session.c struct nfs4_slot_table *tbl = slot->table; tbl 364 fs/nfs/nfs4session.c if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) tbl 366 fs/nfs/nfs4session.c slot->generation = tbl->generation; tbl 375 fs/nfs/nfs4session.c static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, tbl 378 fs/nfs/nfs4session.c if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot)) tbl 383 fs/nfs/nfs4session.c bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, tbl 386 fs/nfs/nfs4session.c if (slot->slot_nr > tbl->max_slotid) tbl 388 fs/nfs/nfs4session.c return __nfs41_wake_and_assign_slot(tbl, slot); tbl 391 fs/nfs/nfs4session.c static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl) tbl 393 fs/nfs/nfs4session.c struct nfs4_slot *slot = nfs4_alloc_slot(tbl); tbl 395 fs/nfs/nfs4session.c bool ret = __nfs41_wake_and_assign_slot(tbl, slot); tbl 398 fs/nfs/nfs4session.c nfs4_free_slot(tbl, slot); tbl 403 fs/nfs/nfs4session.c void nfs41_wake_slot_table(struct nfs4_slot_table *tbl) tbl 406 fs/nfs/nfs4session.c if (!nfs41_try_wake_next_slot_table_entry(tbl)) tbl 413 fs/nfs/nfs4session.c static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl, tbl 419 fs/nfs/nfs4session.c if (max_slotid > tbl->server_highest_slotid) tbl 420 fs/nfs/nfs4session.c max_slotid = tbl->server_highest_slotid; tbl 421 fs/nfs/nfs4session.c if (max_slotid > tbl->target_highest_slotid) tbl 422 fs/nfs/nfs4session.c max_slotid = tbl->target_highest_slotid; tbl 423 fs/nfs/nfs4session.c tbl->max_slotid = max_slotid; tbl 424 fs/nfs/nfs4session.c nfs41_wake_slot_table(tbl); tbl 428 fs/nfs/nfs4session.c static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl, tbl 431 fs/nfs/nfs4session.c if (tbl->target_highest_slotid == target_highest_slotid) tbl 433 fs/nfs/nfs4session.c tbl->target_highest_slotid = target_highest_slotid; tbl 434 fs/nfs/nfs4session.c tbl->generation++; tbl 437 fs/nfs/nfs4session.c void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, tbl 440 fs/nfs/nfs4session.c spin_lock(&tbl->slot_tbl_lock); tbl 441 fs/nfs/nfs4session.c 
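The fs/nfs/nfs4client.c hits earlier in this listing show the expected life cycle around nfs4_setup_slot_table() and nfs4_shutdown_slot_table(); condensed here, with the queue name and error handling illustrative:

        struct nfs4_slot_table *tbl;
        int ret;

        tbl = kzalloc(sizeof(*tbl), GFP_NOFS);
        if (!tbl)
                return -ENOMEM;
        ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
                                    "example slot table");
        if (ret) {
                kfree(tbl);
                return ret;
        }
        /* ... issue sequenced operations against the table ... */
        nfs4_shutdown_slot_table(tbl);
        kfree(tbl);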
nfs41_set_target_slotid_locked(tbl, target_highest_slotid); tbl 442 fs/nfs/nfs4session.c tbl->d_target_highest_slotid = 0; tbl 443 fs/nfs/nfs4session.c tbl->d2_target_highest_slotid = 0; tbl 444 fs/nfs/nfs4session.c nfs41_set_max_slotid_locked(tbl, target_highest_slotid); tbl 445 fs/nfs/nfs4session.c spin_unlock(&tbl->slot_tbl_lock); tbl 448 fs/nfs/nfs4session.c static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl, tbl 451 fs/nfs/nfs4session.c if (tbl->server_highest_slotid == highest_slotid) tbl 453 fs/nfs/nfs4session.c if (tbl->highest_used_slotid > highest_slotid) tbl 456 fs/nfs/nfs4session.c nfs4_shrink_slot_table(tbl, highest_slotid + 1); tbl 457 fs/nfs/nfs4session.c tbl->server_highest_slotid = highest_slotid; tbl 489 fs/nfs/nfs4session.c static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl, tbl 496 fs/nfs/nfs4session.c tbl->target_highest_slotid); tbl 498 fs/nfs/nfs4session.c tbl->d_target_highest_slotid); tbl 500 fs/nfs/nfs4session.c if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid)) tbl 503 fs/nfs/nfs4session.c if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid)) tbl 505 fs/nfs/nfs4session.c tbl->d_target_highest_slotid = d_target; tbl 506 fs/nfs/nfs4session.c tbl->d2_target_highest_slotid = d2_target; tbl 510 fs/nfs/nfs4session.c void nfs41_update_target_slotid(struct nfs4_slot_table *tbl, tbl 514 fs/nfs/nfs4session.c spin_lock(&tbl->slot_tbl_lock); tbl 515 fs/nfs/nfs4session.c if (!nfs41_is_outlier_target_slotid(tbl, res->sr_target_highest_slotid)) tbl 516 fs/nfs/nfs4session.c nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid); tbl 517 fs/nfs/nfs4session.c if (tbl->generation == slot->generation) tbl 518 fs/nfs/nfs4session.c nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid); tbl 519 fs/nfs/nfs4session.c nfs41_set_max_slotid_locked(tbl, res->sr_target_highest_slotid); tbl 520 fs/nfs/nfs4session.c spin_unlock(&tbl->slot_tbl_lock); tbl 534 fs/nfs/nfs4session.c struct nfs4_slot_table *tbl; tbl 539 fs/nfs/nfs4session.c tbl = &ses->fc_slot_table; tbl 540 fs/nfs/nfs4session.c tbl->session = ses; tbl 541 fs/nfs/nfs4session.c status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); tbl 545 fs/nfs/nfs4session.c tbl = &ses->bc_slot_table; tbl 546 fs/nfs/nfs4session.c tbl->session = ses; tbl 547 fs/nfs/nfs4session.c status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); tbl 548 fs/nfs/nfs4session.c if (status && tbl->slots == NULL) tbl 82 fs/nfs/nfs4session.h extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, tbl 84 fs/nfs/nfs4session.h extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl); tbl 85 fs/nfs/nfs4session.h extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl); tbl 86 fs/nfs/nfs4session.h extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid); tbl 87 fs/nfs/nfs4session.h extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl, tbl 90 fs/nfs/nfs4session.h extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); tbl 91 fs/nfs/nfs4session.h extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); tbl 92 fs/nfs/nfs4session.h extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl); tbl 93 fs/nfs/nfs4session.h bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, tbl 95 fs/nfs/nfs4session.h void nfs41_wake_slot_table(struct nfs4_slot_table *tbl); tbl 97 fs/nfs/nfs4session.h static inline bool 
nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl) tbl 99 fs/nfs/nfs4session.h return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); tbl 102 fs/nfs/nfs4session.h static inline bool nfs4_test_locked_slot(const struct nfs4_slot_table *tbl, tbl 105 fs/nfs/nfs4session.h return !!test_bit(slotid, tbl->used_slots); tbl 114 fs/nfs/nfs4session.h extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, tbl 116 fs/nfs/nfs4session.h extern void nfs41_update_target_slotid(struct nfs4_slot_table *tbl, tbl 253 fs/nfs/nfs4state.c static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl) tbl 255 fs/nfs/nfs4state.c if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { tbl 256 fs/nfs/nfs4state.c spin_lock(&tbl->slot_tbl_lock); tbl 257 fs/nfs/nfs4state.c nfs41_wake_slot_table(tbl); tbl 258 fs/nfs/nfs4state.c spin_unlock(&tbl->slot_tbl_lock); tbl 277 fs/nfs/nfs4state.c static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl) tbl 279 fs/nfs/nfs4state.c set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); tbl 280 fs/nfs/nfs4state.c spin_lock(&tbl->slot_tbl_lock); tbl 281 fs/nfs/nfs4state.c if (tbl->highest_used_slotid != NFS4_NO_SLOT) { tbl 282 fs/nfs/nfs4state.c reinit_completion(&tbl->complete); tbl 283 fs/nfs/nfs4state.c spin_unlock(&tbl->slot_tbl_lock); tbl 284 fs/nfs/nfs4state.c return wait_for_completion_interruptible(&tbl->complete); tbl 286 fs/nfs/nfs4state.c spin_unlock(&tbl->slot_tbl_lock); tbl 2705 fs/nfsd/nfs4state.c find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) tbl 2710 fs/nfsd/nfs4state.c list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { tbl 2724 fs/nfsd/nfs4state.c struct list_head *tbl = nn->conf_id_hashtbl; tbl 2727 fs/nfsd/nfs4state.c return find_client_in_id_table(tbl, clid, sessions); tbl 2733 fs/nfsd/nfs4state.c struct list_head *tbl = nn->unconf_id_hashtbl; tbl 2736 fs/nfsd/nfs4state.c return find_client_in_id_table(tbl, clid, sessions); tbl 93 include/linux/fs_parser.h extern int __lookup_constant(const struct constant_table tbl[], size_t tbl_size, tbl 98 include/linux/fs_parser.h extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, tbl 102 include/linux/fs_parser.h static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, tbl 404 include/linux/qcom-geni-se.h int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl); tbl 82 include/linux/rhashtable-types.h struct bucket_table __rcu *tbl; tbl 108 include/linux/rhashtable-types.h struct bucket_table *tbl; tbl 119 include/linux/rhashtable.h static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, tbl 122 include/linux/rhashtable.h return hash & (tbl->size - 1); tbl 156 include/linux/rhashtable.h struct rhashtable *ht, const struct bucket_table *tbl, tbl 159 include/linux/rhashtable.h unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); tbl 161 include/linux/rhashtable.h return rht_bucket_index(tbl, hash); tbl 165 include/linux/rhashtable.h struct rhashtable *ht, const struct bucket_table *tbl, tbl 171 include/linux/rhashtable.h rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: tbl 173 include/linux/rhashtable.h tbl->hash_rnd)) : tbl 174 include/linux/rhashtable.h rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); tbl 183 include/linux/rhashtable.h const struct bucket_table *tbl) tbl 186 include/linux/rhashtable.h return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && tbl 187 include/linux/rhashtable.h (!ht->p.max_size 
|| tbl->size < ht->p.max_size); tbl 196 include/linux/rhashtable.h const struct bucket_table *tbl) tbl 199 include/linux/rhashtable.h return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && tbl 200 include/linux/rhashtable.h tbl->size > ht->p.min_size; tbl 209 include/linux/rhashtable.h const struct bucket_table *tbl) tbl 211 include/linux/rhashtable.h return atomic_read(&ht->nelems) > tbl->size && tbl 212 include/linux/rhashtable.h (!ht->p.max_size || tbl->size < ht->p.max_size); tbl 221 include/linux/rhashtable.h const struct bucket_table *tbl) tbl 228 include/linux/rhashtable.h int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); tbl 235 include/linux/rhashtable.h static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, tbl 264 include/linux/rhashtable.h struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, tbl 266 include/linux/rhashtable.h struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, tbl 269 include/linux/rhashtable.h struct bucket_table *tbl, tbl 278 include/linux/rhashtable.h #define rht_dereference_bucket(p, tbl, hash) \ tbl 279 include/linux/rhashtable.h rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash)) tbl 281 include/linux/rhashtable.h #define rht_dereference_bucket_rcu(p, tbl, hash) \ tbl 282 include/linux/rhashtable.h rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash)) tbl 288 include/linux/rhashtable.h const struct bucket_table *tbl, unsigned int hash) tbl 290 include/linux/rhashtable.h return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : tbl 291 include/linux/rhashtable.h &tbl->buckets[hash]; tbl 295 include/linux/rhashtable.h struct bucket_table *tbl, unsigned int hash) tbl 297 include/linux/rhashtable.h return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : tbl 298 include/linux/rhashtable.h &tbl->buckets[hash]; tbl 302 include/linux/rhashtable.h struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) tbl 304 include/linux/rhashtable.h return unlikely(tbl->nest) ? 
rht_bucket_nested_insert(ht, tbl, hash) : tbl 305 include/linux/rhashtable.h &tbl->buckets[hash]; tbl 327 include/linux/rhashtable.h static inline void rht_lock(struct bucket_table *tbl, tbl 332 include/linux/rhashtable.h lock_map_acquire(&tbl->dep_map); tbl 335 include/linux/rhashtable.h static inline void rht_lock_nested(struct bucket_table *tbl, tbl 341 include/linux/rhashtable.h lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_); tbl 344 include/linux/rhashtable.h static inline void rht_unlock(struct bucket_table *tbl, tbl 347 include/linux/rhashtable.h lock_map_release(&tbl->dep_map); tbl 377 include/linux/rhashtable.h struct bucket_table *tbl, tbl 380 include/linux/rhashtable.h return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); tbl 399 include/linux/rhashtable.h static inline void rht_assign_unlock(struct bucket_table *tbl, tbl 407 include/linux/rhashtable.h lock_map_release(&tbl->dep_map); tbl 421 include/linux/rhashtable.h #define rht_for_each_from(pos, head, tbl, hash) \ tbl 424 include/linux/rhashtable.h pos = rht_dereference_bucket((pos)->next, tbl, hash)) tbl 432 include/linux/rhashtable.h #define rht_for_each(pos, tbl, hash) \ tbl 433 include/linux/rhashtable.h rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ tbl 434 include/linux/rhashtable.h tbl, hash) tbl 445 include/linux/rhashtable.h #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \ tbl 448 include/linux/rhashtable.h pos = rht_dereference_bucket((pos)->next, tbl, hash)) tbl 458 include/linux/rhashtable.h #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ tbl 460 include/linux/rhashtable.h rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ tbl 461 include/linux/rhashtable.h tbl, hash, member) tbl 475 include/linux/rhashtable.h #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ tbl 476 include/linux/rhashtable.h for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ tbl 478 include/linux/rhashtable.h rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ tbl 482 include/linux/rhashtable.h rht_dereference_bucket(pos->next, tbl, hash) : NULL) tbl 495 include/linux/rhashtable.h #define rht_for_each_rcu_from(pos, head, tbl, hash) \ tbl 511 include/linux/rhashtable.h #define rht_for_each_rcu(pos, tbl, hash) \ tbl 513 include/linux/rhashtable.h pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \ tbl 530 include/linux/rhashtable.h #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \ tbl 534 include/linux/rhashtable.h pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) tbl 548 include/linux/rhashtable.h #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ tbl 550 include/linux/rhashtable.h rht_ptr_rcu(rht_bucket(tbl, hash)), \ tbl 551 include/linux/rhashtable.h tbl, hash, member) tbl 597 include/linux/rhashtable.h struct bucket_table *tbl; tbl 601 include/linux/rhashtable.h tbl = rht_dereference_rcu(ht->tbl, ht); tbl 603 include/linux/rhashtable.h hash = rht_key_hashfn(ht, tbl, key, params); tbl 604 include/linux/rhashtable.h bkt = rht_bucket(tbl, hash); tbl 606 include/linux/rhashtable.h rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { tbl 621 include/linux/rhashtable.h tbl = rht_dereference_rcu(tbl->future_tbl, ht); tbl 622 include/linux/rhashtable.h if (unlikely(tbl)) tbl 714 include/linux/rhashtable.h struct bucket_table *tbl; tbl 722 include/linux/rhashtable.h tbl = rht_dereference_rcu(ht->tbl, ht); tbl 723 include/linux/rhashtable.h hash = rht_head_hashfn(ht, tbl, obj, params); tbl 725 
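Most of the include/linux/rhashtable.h hits above are internal building blocks behind a small public surface: the usual caller only touches init/insert/lookup/remove, passing a constant rhashtable_params that names the key and the embedded rhash_head. A minimal sketch with illustrative names (lookups run under RCU, mirroring the br_fdb_rht_params usage further down this listing):

        #include <linux/rhashtable.h>
        #include <linux/slab.h>

        struct ex_obj {
                u32 key;
                struct rhash_head node;
        };

        static const struct rhashtable_params ex_params = {
                .key_len        = sizeof(u32),
                .key_offset     = offsetof(struct ex_obj, key),
                .head_offset    = offsetof(struct ex_obj, node),
                .automatic_shrinking = true,
        };

        static int ex_demo(void)
        {
                struct rhashtable ht;
                struct ex_obj *obj, *found;
                u32 key = 42;
                int err;

                err = rhashtable_init(&ht, &ex_params);
                if (err)
                        return err;

                obj = kzalloc(sizeof(*obj), GFP_KERNEL);
                if (!obj) {
                        err = -ENOMEM;
                        goto out;
                }
                obj->key = key;
                err = rhashtable_insert_fast(&ht, &obj->node, ex_params);
                if (err) {
                        kfree(obj);
                        goto out;
                }

                rcu_read_lock();
                found = rhashtable_lookup(&ht, &key, ex_params);
                WARN_ON(found != obj);  /* caller must hold RCU here */
                rcu_read_unlock();

                err = rhashtable_remove_fast(&ht, &obj->node, ex_params);
                kfree(obj);
        out:
                rhashtable_destroy(&ht);
                return err;
        }

Passing ex_params by value lets the compiler constant-fold the hashing and offset arithmetic per call site, which is why every *_fast() helper in the hits above threads the params structure through rather than reading it from the table.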
include/linux/rhashtable.h bkt = rht_bucket_insert(ht, tbl, hash); tbl 730 include/linux/rhashtable.h rht_lock(tbl, bkt); tbl 732 include/linux/rhashtable.h if (unlikely(rcu_access_pointer(tbl->future_tbl))) { tbl 734 include/linux/rhashtable.h rht_unlock(tbl, bkt); tbl 739 include/linux/rhashtable.h rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { tbl 762 include/linux/rhashtable.h head = rht_dereference_bucket(head->next, tbl, hash); tbl 766 include/linux/rhashtable.h rht_unlock(tbl, bkt); tbl 768 include/linux/rhashtable.h rht_assign_unlock(tbl, bkt, obj); tbl 777 include/linux/rhashtable.h if (unlikely(rht_grow_above_max(ht, tbl))) tbl 780 include/linux/rhashtable.h if (unlikely(rht_grow_above_100(ht, tbl))) tbl 784 include/linux/rhashtable.h head = rht_ptr(bkt, tbl, hash); tbl 795 include/linux/rhashtable.h rht_assign_unlock(tbl, bkt, obj); tbl 797 include/linux/rhashtable.h if (rht_grow_above_75(ht, tbl)) tbl 807 include/linux/rhashtable.h rht_unlock(tbl, bkt); tbl 994 include/linux/rhashtable.h struct rhashtable *ht, struct bucket_table *tbl, tbl 1004 include/linux/rhashtable.h hash = rht_head_hashfn(ht, tbl, obj, params); tbl 1005 include/linux/rhashtable.h bkt = rht_bucket_var(tbl, hash); tbl 1009 include/linux/rhashtable.h rht_lock(tbl, bkt); tbl 1011 include/linux/rhashtable.h rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { tbl 1027 include/linux/rhashtable.h tbl, hash); tbl 1033 include/linux/rhashtable.h list = rht_dereference_bucket(list->next, tbl, hash); tbl 1039 include/linux/rhashtable.h obj = rht_dereference_bucket(obj->next, tbl, hash); tbl 1043 include/linux/rhashtable.h list = rht_dereference_bucket(list->next, tbl, hash); tbl 1053 include/linux/rhashtable.h rht_unlock(tbl, bkt); tbl 1055 include/linux/rhashtable.h rht_assign_unlock(tbl, bkt, obj); tbl 1060 include/linux/rhashtable.h rht_unlock(tbl, bkt); tbl 1065 include/linux/rhashtable.h rht_shrink_below_30(ht, tbl))) tbl 1078 include/linux/rhashtable.h struct bucket_table *tbl; tbl 1083 include/linux/rhashtable.h tbl = rht_dereference_rcu(ht->tbl, ht); tbl 1090 include/linux/rhashtable.h while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params, tbl 1092 include/linux/rhashtable.h (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) tbl 1146 include/linux/rhashtable.h struct rhashtable *ht, struct bucket_table *tbl, tbl 1159 include/linux/rhashtable.h hash = rht_head_hashfn(ht, tbl, obj_old, params); tbl 1160 include/linux/rhashtable.h if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) tbl 1163 include/linux/rhashtable.h bkt = rht_bucket_var(tbl, hash); tbl 1168 include/linux/rhashtable.h rht_lock(tbl, bkt); tbl 1170 include/linux/rhashtable.h rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { tbl 1179 include/linux/rhashtable.h rht_unlock(tbl, bkt); tbl 1181 include/linux/rhashtable.h rht_assign_unlock(tbl, bkt, obj_new); tbl 1187 include/linux/rhashtable.h rht_unlock(tbl, bkt); tbl 1212 include/linux/rhashtable.h struct bucket_table *tbl; tbl 1217 include/linux/rhashtable.h tbl = rht_dereference_rcu(ht->tbl, ht); tbl 1224 include/linux/rhashtable.h while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, tbl 1226 include/linux/rhashtable.h (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) tbl 479 include/net/ip6_fib.h struct fib6_table *tbl; tbl 70 include/net/ipv6_stubs.h int dif, int sdif, struct udp_table *tbl, tbl 75 include/net/neighbour.h struct neigh_table *tbl; tbl 132 include/net/neighbour.h #define NEIGH_CACHE_STAT_INC(tbl, field) 
this_cpu_inc((tbl)->stats->field) tbl 136 include/net/neighbour.h struct neigh_table *tbl; tbl 240 include/net/neighbour.h return p->tbl->family; tbl 248 include/net/neighbour.h return (char *)n + n->tbl->entry_size; tbl 281 include/net/neighbour.h struct neigh_table *tbl, tbl 289 include/net/neighbour.h struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht); tbl 304 include/net/neighbour.h static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl, tbl 308 include/net/neighbour.h return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev); tbl 311 include/net/neighbour.h void neigh_table_init(int index, struct neigh_table *tbl); tbl 312 include/net/neighbour.h int neigh_table_clear(int index, struct neigh_table *tbl); tbl 313 include/net/neighbour.h struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, tbl 315 include/net/neighbour.h struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, tbl 317 include/net/neighbour.h struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, tbl 319 include/net/neighbour.h static inline struct neighbour *neigh_create(struct neigh_table *tbl, tbl 323 include/net/neighbour.h return __neigh_create(tbl, pkey, dev, true); tbl 330 include/net/neighbour.h bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl); tbl 331 include/net/neighbour.h void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); tbl 332 include/net/neighbour.h int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); tbl 333 include/net/neighbour.h int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev); tbl 337 include/net/neighbour.h struct neighbour *neigh_event_ns(struct neigh_table *tbl, tbl 342 include/net/neighbour.h struct neigh_table *tbl); tbl 343 include/net/neighbour.h void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); tbl 353 include/net/neighbour.h void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, tbl 355 include/net/neighbour.h struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, tbl 358 include/net/neighbour.h struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net, tbl 360 include/net/neighbour.h int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, tbl 369 include/net/neighbour.h void neigh_for_each(struct neigh_table *tbl, tbl 371 include/net/neighbour.h void __neigh_for_each_release(struct neigh_table *tbl, tbl 374 include/net/neighbour.h void pneigh_for_each(struct neigh_table *tbl, tbl 379 include/net/neighbour.h struct neigh_table *tbl; tbl 514 include/net/neighbour.h __neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) tbl 516 include/net/neighbour.h struct neighbour *n = neigh_lookup(tbl, pkey, dev); tbl 521 include/net/neighbour.h n = neigh_create(tbl, pkey, dev); tbl 526 include/net/neighbour.h __neigh_lookup_errno(struct neigh_table *tbl, const void *pkey, tbl 529 include/net/neighbour.h struct neighbour *n = neigh_lookup(tbl, pkey, dev); tbl 534 include/net/neighbour.h return neigh_create(tbl, pkey, dev); tbl 308 include/net/udp.h struct udp_table *tbl, struct sk_buff *skb); tbl 318 include/net/udp.h int dif, int sdif, struct udp_table *tbl, tbl 25 include/trace/events/neigh.h TP_PROTO(struct neigh_table *tbl, struct net_device *dev, tbl 29 include/trace/events/neigh.h TP_ARGS(tbl, dev, pkey, n, exempt_from_gc), tbl 45 include/trace/events/neigh.h __entry->family = 
tbl->family; tbl 47 include/trace/events/neigh.h __entry->entries = atomic_read(&tbl->gc_entries); tbl 53 include/trace/events/neigh.h if (tbl->family == AF_INET) tbl 59 include/trace/events/neigh.h if (tbl->family == AF_INET6) { tbl 105 include/trace/events/neigh.h __entry->family = n->tbl->family; tbl 117 include/trace/events/neigh.h if (n->tbl->family == AF_INET) tbl 123 include/trace/events/neigh.h if (n->tbl->family == AF_INET6) { tbl 182 include/trace/events/neigh.h __entry->family = n->tbl->family; tbl 194 include/trace/events/neigh.h if (n->tbl->family == AF_INET) tbl 200 include/trace/events/neigh.h if (n->tbl->family == AF_INET6) { tbl 83 kernel/ucount.c struct ctl_table *tbl; tbl 85 kernel/ucount.c tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL); tbl 86 kernel/ucount.c if (tbl) { tbl 89 kernel/ucount.c tbl[i].data = &ns->ucount_max[i]; tbl 91 kernel/ucount.c ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl); tbl 94 kernel/ucount.c kfree(tbl); tbl 105 kernel/ucount.c struct ctl_table *tbl; tbl 107 kernel/ucount.c tbl = ns->sysctls->ctl_table_arg; tbl 110 kernel/ucount.c kfree(tbl); tbl 5841 kernel/workqueue.c cpumask_var_t *tbl; tbl 5860 kernel/workqueue.c tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL); tbl 5861 kernel/workqueue.c BUG_ON(!tbl); tbl 5864 kernel/workqueue.c BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, tbl 5874 kernel/workqueue.c cpumask_set_cpu(cpu, tbl[node]); tbl 5877 kernel/workqueue.c wq_numa_possible_cpumask = tbl; tbl 341 lib/devres.c void __iomem **tbl; tbl 345 lib/devres.c tbl = (void __iomem **)pcim_iomap_table(pdev); tbl 346 lib/devres.c if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ tbl 349 lib/devres.c tbl[bar] = pci_iomap(pdev, bar, maxlen); tbl 350 lib/devres.c return tbl[bar]; tbl 363 lib/devres.c void __iomem **tbl; tbl 368 lib/devres.c tbl = (void __iomem **)pcim_iomap_table(pdev); tbl 369 lib/devres.c BUG_ON(!tbl); tbl 372 lib/devres.c if (tbl[i] == addr) { tbl 373 lib/devres.c tbl[i] = NULL; tbl 38 lib/rhashtable.c const struct bucket_table *tbl, tbl 41 lib/rhashtable.c return rht_head_hashfn(ht, tbl, he, ht->p); tbl 53 lib/rhashtable.c int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) tbl 57 lib/rhashtable.c if (unlikely(tbl->nest)) tbl 59 lib/rhashtable.c return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]); tbl 85 lib/rhashtable.c static void nested_bucket_table_free(const struct bucket_table *tbl) tbl 87 lib/rhashtable.c unsigned int size = tbl->size >> tbl->nest; tbl 88 lib/rhashtable.c unsigned int len = 1 << tbl->nest; tbl 92 lib/rhashtable.c ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); tbl 100 lib/rhashtable.c static void bucket_table_free(const struct bucket_table *tbl) tbl 102 lib/rhashtable.c if (tbl->nest) tbl 103 lib/rhashtable.c nested_bucket_table_free(tbl); tbl 105 lib/rhashtable.c kvfree(tbl); tbl 143 lib/rhashtable.c struct bucket_table *tbl; tbl 149 lib/rhashtable.c size = sizeof(*tbl) + sizeof(tbl->buckets[0]); tbl 151 lib/rhashtable.c tbl = kzalloc(size, gfp); tbl 152 lib/rhashtable.c if (!tbl) tbl 155 lib/rhashtable.c if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, tbl 157 lib/rhashtable.c kfree(tbl); tbl 161 lib/rhashtable.c tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; tbl 163 lib/rhashtable.c return tbl; tbl 170 lib/rhashtable.c struct bucket_table *tbl = NULL; tbl 175 lib/rhashtable.c tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp); tbl 179 lib/rhashtable.c if (tbl == NULL && 
(gfp & ~__GFP_NOFAIL) != GFP_KERNEL) { tbl 180 lib/rhashtable.c tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); tbl 184 lib/rhashtable.c if (tbl == NULL) tbl 187 lib/rhashtable.c lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0); tbl 189 lib/rhashtable.c tbl->size = size; tbl 191 lib/rhashtable.c rcu_head_init(&tbl->rcu); tbl 192 lib/rhashtable.c INIT_LIST_HEAD(&tbl->walkers); tbl 194 lib/rhashtable.c tbl->hash_rnd = get_random_u32(); tbl 197 lib/rhashtable.c INIT_RHT_NULLS_HEAD(tbl->buckets[i]); tbl 199 lib/rhashtable.c return tbl; tbl 203 lib/rhashtable.c struct bucket_table *tbl) tbl 208 lib/rhashtable.c new_tbl = tbl; tbl 209 lib/rhashtable.c tbl = rht_dereference_rcu(tbl->future_tbl, ht); tbl 210 lib/rhashtable.c } while (tbl); tbl 219 lib/rhashtable.c struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); tbl 268 lib/rhashtable.c struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); tbl 305 lib/rhashtable.c struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); tbl 323 lib/rhashtable.c rcu_assign_pointer(ht->tbl, new_tbl); tbl 327 lib/rhashtable.c walker->tbl = NULL; tbl 380 lib/rhashtable.c struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); tbl 401 lib/rhashtable.c struct bucket_table *tbl; tbl 407 lib/rhashtable.c tbl = rht_dereference(ht->tbl, ht); tbl 408 lib/rhashtable.c tbl = rhashtable_last_table(ht, tbl); tbl 410 lib/rhashtable.c if (rht_grow_above_75(ht, tbl)) tbl 411 lib/rhashtable.c err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); tbl 412 lib/rhashtable.c else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) tbl 414 lib/rhashtable.c else if (tbl->nest) tbl 415 lib/rhashtable.c err = rhashtable_rehash_alloc(ht, tbl, tbl->size); tbl 431 lib/rhashtable.c struct bucket_table *tbl) tbl 438 lib/rhashtable.c old_tbl = rht_dereference_rcu(ht->tbl, ht); tbl 440 lib/rhashtable.c size = tbl->size; tbl 444 lib/rhashtable.c if (rht_grow_above_75(ht, tbl)) tbl 447 lib/rhashtable.c else if (old_tbl != tbl) tbl 456 lib/rhashtable.c err = rhashtable_rehash_attach(ht, tbl, new_tbl); tbl 468 lib/rhashtable.c if (likely(rcu_access_pointer(tbl->future_tbl))) tbl 480 lib/rhashtable.c struct bucket_table *tbl, unsigned int hash, tbl 492 lib/rhashtable.c rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { tbl 512 lib/rhashtable.c head = rht_dereference_bucket(head->next, tbl, hash); tbl 531 lib/rhashtable.c struct bucket_table *tbl, tbl 545 lib/rhashtable.c new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); tbl 552 lib/rhashtable.c if (unlikely(rht_grow_above_max(ht, tbl))) tbl 555 lib/rhashtable.c if (unlikely(rht_grow_above_100(ht, tbl))) tbl 558 lib/rhashtable.c head = rht_ptr(bkt, tbl, hash); tbl 574 lib/rhashtable.c if (rht_grow_above_75(ht, tbl)) tbl 584 lib/rhashtable.c struct bucket_table *tbl; tbl 589 lib/rhashtable.c new_tbl = rcu_dereference(ht->tbl); tbl 592 lib/rhashtable.c tbl = new_tbl; tbl 593 lib/rhashtable.c hash = rht_head_hashfn(ht, tbl, obj, ht->p); tbl 594 lib/rhashtable.c if (rcu_access_pointer(tbl->future_tbl)) tbl 596 lib/rhashtable.c bkt = rht_bucket_var(tbl, hash); tbl 598 lib/rhashtable.c bkt = rht_bucket_insert(ht, tbl, hash); tbl 600 lib/rhashtable.c new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); tbl 603 lib/rhashtable.c rht_lock(tbl, bkt); tbl 604 lib/rhashtable.c data = rhashtable_lookup_one(ht, bkt, tbl, tbl 606 lib/rhashtable.c new_tbl = rhashtable_insert_one(ht, bkt, tbl, tbl 611 lib/rhashtable.c rht_unlock(tbl, bkt); tbl 616 lib/rhashtable.c data = 
ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: tbl 667 lib/rhashtable.c iter->walker.tbl = tbl 668 lib/rhashtable.c rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); tbl 669 lib/rhashtable.c list_add(&iter->walker.list, &iter->walker.tbl->walkers); tbl 683 lib/rhashtable.c if (iter->walker.tbl) tbl 716 lib/rhashtable.c if (iter->walker.tbl) tbl 722 lib/rhashtable.c if (!iter->walker.tbl) { tbl 723 lib/rhashtable.c iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); tbl 736 lib/rhashtable.c rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { tbl 751 lib/rhashtable.c rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { tbl 782 lib/rhashtable.c struct bucket_table *tbl = iter->walker.tbl; tbl 788 lib/rhashtable.c if (!tbl) tbl 791 lib/rhashtable.c for (; iter->slot < tbl->size; iter->slot++) { tbl 794 lib/rhashtable.c rht_for_each_rcu(p, tbl, iter->slot) { tbl 828 lib/rhashtable.c iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); tbl 829 lib/rhashtable.c if (iter->walker.tbl) { tbl 927 lib/rhashtable.c struct bucket_table *tbl = iter->walker.tbl; tbl 929 lib/rhashtable.c if (!tbl) tbl 935 lib/rhashtable.c if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu)) tbl 937 lib/rhashtable.c iter->walker.tbl = NULL; tbl 939 lib/rhashtable.c list_add(&iter->walker.list, &tbl->walkers); tbl 1011 lib/rhashtable.c struct bucket_table *tbl; tbl 1054 lib/rhashtable.c tbl = bucket_table_alloc(ht, size, GFP_KERNEL); tbl 1055 lib/rhashtable.c if (unlikely(tbl == NULL)) { tbl 1057 lib/rhashtable.c tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL); tbl 1062 lib/rhashtable.c RCU_INIT_POINTER(ht->tbl, tbl); tbl 1127 lib/rhashtable.c struct bucket_table *tbl, *next_tbl; tbl 1133 lib/rhashtable.c tbl = rht_dereference(ht->tbl, ht); tbl 1136 lib/rhashtable.c for (i = 0; i < tbl->size; i++) { tbl 1140 lib/rhashtable.c for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)), tbl 1151 lib/rhashtable.c next_tbl = rht_dereference(tbl->future_tbl, ht); tbl 1152 lib/rhashtable.c bucket_table_free(tbl); tbl 1154 lib/rhashtable.c tbl = next_tbl; tbl 1167 lib/rhashtable.c struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, tbl 1171 lib/rhashtable.c unsigned int index = hash & ((1 << tbl->nest) - 1); tbl 1172 lib/rhashtable.c unsigned int size = tbl->size >> tbl->nest; tbl 1176 lib/rhashtable.c ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); tbl 1177 lib/rhashtable.c ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); tbl 1178 lib/rhashtable.c subhash >>= tbl->nest; tbl 1183 lib/rhashtable.c tbl, hash); tbl 1196 lib/rhashtable.c struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, tbl 1203 lib/rhashtable.c return __rht_bucket_nested(tbl, hash) ?: &rhnull; tbl 1208 lib/rhashtable.c struct bucket_table *tbl, tbl 1212 lib/rhashtable.c unsigned int index = hash & ((1 << tbl->nest) - 1); tbl 1213 lib/rhashtable.c unsigned int size = tbl->size >> tbl->nest; tbl 1216 lib/rhashtable.c ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); tbl 1217 lib/rhashtable.c hash >>= tbl->nest; tbl 488 lib/test_rhashtable.c const struct bucket_table *tbl; tbl 495 lib/test_rhashtable.c tbl = rht_dereference(ht->tbl, ht); tbl 496 lib/test_rhashtable.c for (i = 0; i < tbl->size; i++) { tbl 500 lib/test_rhashtable.c pos = rht_ptr_exclusive(tbl->buckets + i); tbl 138 net/802/hippi.c if (p->tbl->family != AF_INET6) tbl 294 net/atm/clip.c if (neigh->tbl->family != AF_INET) tbl 832 net/atm/lec.c static void *lec_tbl_walk(struct 
lec_state *state, struct hlist_head *tbl, tbl 838 net/atm/lec.c e = tbl->first; tbl 840 net/atm/lec.c e = tbl->first; tbl 89 net/bridge/br_fdb.c static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl, tbl 100 net/bridge/br_fdb.c return rhashtable_lookup(tbl, &key, br_fdb_rht_params); tbl 32 net/bridge/br_vlan.c static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid) tbl 34 net/bridge/br_vlan.c return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params); tbl 37 net/bridge/br_vlan_tunnel.c static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl, tbl 40 net/bridge/br_vlan_tunnel.c return rhashtable_lookup_fast(tbl, &tunnel_id, tbl 58 net/core/neighbour.c static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, tbl 124 net/core/neighbour.c atomic_dec(&n->tbl->gc_entries); tbl 132 net/core/neighbour.c write_lock_bh(&n->tbl->lock); tbl 144 net/core/neighbour.c atomic_dec(&n->tbl->gc_entries); tbl 147 net/core/neighbour.c list_add_tail(&n->gc_list, &n->tbl->gc_list); tbl 148 net/core/neighbour.c atomic_inc(&n->tbl->gc_entries); tbl 152 net/core/neighbour.c write_unlock_bh(&n->tbl->lock); tbl 178 net/core/neighbour.c struct neigh_table *tbl) tbl 187 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 198 net/core/neighbour.c bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl) tbl 206 net/core/neighbour.c nht = rcu_dereference_protected(tbl->nht, tbl 207 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 208 net/core/neighbour.c hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd); tbl 213 net/core/neighbour.c lockdep_is_held(&tbl->lock)))) { tbl 215 net/core/neighbour.c return neigh_del(n, np, tbl); tbl 221 net/core/neighbour.c static int neigh_forced_gc(struct neigh_table *tbl) tbl 223 net/core/neighbour.c int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2; tbl 228 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs); tbl 230 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 232 net/core/neighbour.c list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) { tbl 242 net/core/neighbour.c if (remove && neigh_remove_one(n, tbl)) tbl 249 net/core/neighbour.c tbl->last_flush = jiffies; tbl 251 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 286 net/core/neighbour.c static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev, tbl 292 net/core/neighbour.c nht = rcu_dereference_protected(tbl->nht, tbl 293 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 300 net/core/neighbour.c lockdep_is_held(&tbl->lock))) != NULL) { tbl 311 net/core/neighbour.c lockdep_is_held(&tbl->lock))); tbl 340 net/core/neighbour.c void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) tbl 342 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 343 net/core/neighbour.c neigh_flush_dev(tbl, dev, false); tbl 344 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 348 net/core/neighbour.c static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, tbl 351 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 352 net/core/neighbour.c neigh_flush_dev(tbl, dev, skip_perm); tbl 353 net/core/neighbour.c pneigh_ifdown_and_unlock(tbl, dev); tbl 355 net/core/neighbour.c del_timer_sync(&tbl->proxy_timer); tbl 356 net/core/neighbour.c pneigh_queue_purge(&tbl->proxy_queue); tbl 360 net/core/neighbour.c int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev) tbl 362 net/core/neighbour.c __neigh_ifdown(tbl, dev, true); tbl 367 net/core/neighbour.c int 
neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) tbl 369 net/core/neighbour.c __neigh_ifdown(tbl, dev, false); tbl 374 net/core/neighbour.c static struct neighbour *neigh_alloc(struct neigh_table *tbl, tbl 385 net/core/neighbour.c entries = atomic_inc_return(&tbl->gc_entries) - 1; tbl 386 net/core/neighbour.c if (entries >= tbl->gc_thresh3 || tbl 387 net/core/neighbour.c (entries >= tbl->gc_thresh2 && tbl 388 net/core/neighbour.c time_after(now, tbl->last_flush + 5 * HZ))) { tbl 389 net/core/neighbour.c if (!neigh_forced_gc(tbl) && tbl 390 net/core/neighbour.c entries >= tbl->gc_thresh3) { tbl 392 net/core/neighbour.c tbl->id); tbl 393 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, table_fulls); tbl 399 net/core/neighbour.c n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC); tbl 410 net/core/neighbour.c n->parms = neigh_parms_clone(&tbl->parms); tbl 413 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, allocs); tbl 414 net/core/neighbour.c n->tbl = tbl; tbl 419 net/core/neighbour.c atomic_inc(&tbl->entries); tbl 425 net/core/neighbour.c atomic_dec(&tbl->gc_entries); tbl 480 net/core/neighbour.c static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl, tbl 486 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, hash_grows); tbl 488 net/core/neighbour.c old_nht = rcu_dereference_protected(tbl->nht, tbl 489 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 498 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 501 net/core/neighbour.c hash = tbl->hash(n->primary_key, n->dev, tbl 506 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 511 net/core/neighbour.c lockdep_is_held(&tbl->lock))); tbl 516 net/core/neighbour.c rcu_assign_pointer(tbl->nht, new_nht); tbl 521 net/core/neighbour.c struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, tbl 526 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, lookups); tbl 529 net/core/neighbour.c n = __neigh_lookup_noref(tbl, pkey, dev); tbl 533 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, hits); tbl 541 net/core/neighbour.c struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, tbl 545 net/core/neighbour.c unsigned int key_len = tbl->key_len; tbl 549 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, lookups); tbl 552 net/core/neighbour.c nht = rcu_dereference_bh(tbl->nht); tbl 553 net/core/neighbour.c hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift); tbl 562 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, hits); tbl 572 net/core/neighbour.c static struct neighbour *___neigh_create(struct neigh_table *tbl, tbl 577 net/core/neighbour.c struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc); tbl 579 net/core/neighbour.c unsigned int key_len = tbl->key_len; tbl 583 net/core/neighbour.c trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc); tbl 595 net/core/neighbour.c if (tbl->constructor && (error = tbl->constructor(n)) < 0) { tbl 617 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 618 net/core/neighbour.c nht = rcu_dereference_protected(tbl->nht, tbl 619 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 621 net/core/neighbour.c if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) tbl 622 net/core/neighbour.c nht = neigh_hash_grow(tbl, nht->hash_shift + 1); tbl 624 net/core/neighbour.c hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); tbl 632 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 635 net/core/neighbour.c lockdep_is_held(&tbl->lock))) { tbl 646 net/core/neighbour.c 
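The __neigh_lookup() helper above captures the canonical lookup-or-create pattern for neighbour entries; spelled out against the IPv4 table (arp_tbl), including the reference drop that both paths require:

        #include <net/arp.h>
        #include <net/neighbour.h>

        static int ex_touch_neigh(struct net_device *dev, __be32 ip)
        {
                struct neighbour *n;

                n = neigh_lookup(&arp_tbl, &ip, dev);   /* takes a reference */
                if (!n) {
                        n = neigh_create(&arp_tbl, &ip, dev);
                        if (IS_ERR(n))
                                return PTR_ERR(n);
                }
                /* ... inspect n->ha, n->nud_state, etc. ... */
                neigh_release(n);
                return 0;
        }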
list_add_tail(&n->gc_list, &n->tbl->gc_list); tbl 652 net/core/neighbour.c lockdep_is_held(&tbl->lock))); tbl 654 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 660 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 663 net/core/neighbour.c atomic_dec(&tbl->gc_entries); tbl 668 net/core/neighbour.c struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, tbl 671 net/core/neighbour.c return ___neigh_create(tbl, pkey, dev, false, want_ref); tbl 701 net/core/neighbour.c struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, tbl 704 net/core/neighbour.c unsigned int key_len = tbl->key_len; tbl 707 net/core/neighbour.c return __pneigh_lookup_1(tbl->phash_buckets[hash_val], tbl 712 net/core/neighbour.c struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, tbl 717 net/core/neighbour.c unsigned int key_len = tbl->key_len; tbl 720 net/core/neighbour.c read_lock_bh(&tbl->lock); tbl 721 net/core/neighbour.c n = __pneigh_lookup_1(tbl->phash_buckets[hash_val], tbl 723 net/core/neighbour.c read_unlock_bh(&tbl->lock); tbl 741 net/core/neighbour.c if (tbl->pconstructor && tbl->pconstructor(n)) { tbl 749 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 750 net/core/neighbour.c n->next = tbl->phash_buckets[hash_val]; tbl 751 net/core/neighbour.c tbl->phash_buckets[hash_val] = n; tbl 752 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 759 net/core/neighbour.c int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, tbl 763 net/core/neighbour.c unsigned int key_len = tbl->key_len; tbl 766 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 767 net/core/neighbour.c for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; tbl 772 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 773 net/core/neighbour.c if (tbl->pdestructor) tbl 774 net/core/neighbour.c tbl->pdestructor(n); tbl 781 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 785 net/core/neighbour.c static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, tbl 792 net/core/neighbour.c np = &tbl->phash_buckets[h]; tbl 803 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 807 net/core/neighbour.c if (tbl->pdestructor) tbl 808 net/core/neighbour.c tbl->pdestructor(n); tbl 832 net/core/neighbour.c NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); tbl 856 net/core/neighbour.c atomic_dec(&neigh->tbl->entries); tbl 887 net/core/neighbour.c struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); tbl 893 net/core/neighbour.c NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); tbl 895 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 896 net/core/neighbour.c nht = rcu_dereference_protected(tbl->nht, tbl 897 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 903 net/core/neighbour.c if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { tbl 905 net/core/neighbour.c tbl->last_rand = jiffies; tbl 906 net/core/neighbour.c list_for_each_entry(p, &tbl->parms_list, list) tbl 911 net/core/neighbour.c if (atomic_read(&tbl->entries) < tbl->gc_thresh1) tbl 918 net/core/neighbour.c lockdep_is_held(&tbl->lock))) != NULL) { tbl 951 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 953 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 954 net/core/neighbour.c nht = rcu_dereference_protected(tbl->nht, tbl 955 net/core/neighbour.c lockdep_is_held(&tbl->lock)); tbl 962 net/core/neighbour.c queue_delayed_work(system_power_efficient_wq, &tbl->gc_work, tbl 963 net/core/neighbour.c NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1); tbl 964 net/core/neighbour.c 
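The pneigh_lookup()/pneigh_delete() pair above is the proxy-entry analogue of that pattern, and is essentially what proxy ARP administration boils down to; a sketch, with creat=1 asking for creation on a miss, as the ioctl path in net/ipv4/arp.c does:

        static int ex_add_proxy(struct net *net, struct net_device *dev,
                                __be32 ip)
        {
                struct pneigh_entry *pn;

                pn = pneigh_lookup(&arp_tbl, net, &ip, dev, 1);
                if (!pn)
                        return -ENOBUFS;
                /* ... later, to remove the proxy entry again: ... */
                pneigh_delete(&arp_tbl, net, &ip, dev);
                return 0;
        }

Unlike neigh_lookup(), pneigh entries are not refcounted; they live in tbl->phash_buckets under tbl->lock, as the bucket walks above show.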
write_unlock_bh(&tbl->lock); tbl 981 net/core/neighbour.c NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); tbl 1159 net/core/neighbour.c NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); tbl 1434 net/core/neighbour.c struct neighbour *neigh_event_ns(struct neigh_table *tbl, tbl 1438 net/core/neighbour.c struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev, tbl 1451 net/core/neighbour.c __be16 prot = n->tbl->protocol; tbl 1533 net/core/neighbour.c struct neigh_table *tbl = from_timer(tbl, t, proxy_timer); tbl 1538 net/core/neighbour.c spin_lock(&tbl->proxy_queue.lock); tbl 1540 net/core/neighbour.c skb_queue_walk_safe(&tbl->proxy_queue, skb, n) { tbl 1546 net/core/neighbour.c __skb_unlink(skb, &tbl->proxy_queue); tbl 1547 net/core/neighbour.c if (tbl->proxy_redo && netif_running(dev)) { tbl 1549 net/core/neighbour.c tbl->proxy_redo(skb); tbl 1559 net/core/neighbour.c del_timer(&tbl->proxy_timer); tbl 1561 net/core/neighbour.c mod_timer(&tbl->proxy_timer, jiffies + sched_next); tbl 1562 net/core/neighbour.c spin_unlock(&tbl->proxy_queue.lock); tbl 1565 net/core/neighbour.c void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, tbl 1573 net/core/neighbour.c if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) { tbl 1581 net/core/neighbour.c spin_lock(&tbl->proxy_queue.lock); tbl 1582 net/core/neighbour.c if (del_timer(&tbl->proxy_timer)) { tbl 1583 net/core/neighbour.c if (time_before(tbl->proxy_timer.expires, sched_next)) tbl 1584 net/core/neighbour.c sched_next = tbl->proxy_timer.expires; tbl 1588 net/core/neighbour.c __skb_queue_tail(&tbl->proxy_queue, skb); tbl 1589 net/core/neighbour.c mod_timer(&tbl->proxy_timer, sched_next); tbl 1590 net/core/neighbour.c spin_unlock(&tbl->proxy_queue.lock); tbl 1594 net/core/neighbour.c static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, tbl 1599 net/core/neighbour.c list_for_each_entry(p, &tbl->parms_list, list) { tbl 1609 net/core/neighbour.c struct neigh_table *tbl) tbl 1615 net/core/neighbour.c p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL); tbl 1617 net/core/neighbour.c p->tbl = tbl; tbl 1632 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 1633 net/core/neighbour.c list_add(&p->list, &tbl->parms.list); tbl 1634 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 1650 net/core/neighbour.c void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) tbl 1652 net/core/neighbour.c if (!parms || parms == &tbl->parms) tbl 1654 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 1657 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 1673 net/core/neighbour.c void neigh_table_init(int index, struct neigh_table *tbl) tbl 1678 net/core/neighbour.c INIT_LIST_HEAD(&tbl->parms_list); tbl 1679 net/core/neighbour.c INIT_LIST_HEAD(&tbl->gc_list); tbl 1680 net/core/neighbour.c list_add(&tbl->parms.list, &tbl->parms_list); tbl 1681 net/core/neighbour.c write_pnet(&tbl->parms.net, &init_net); tbl 1682 net/core/neighbour.c refcount_set(&tbl->parms.refcnt, 1); tbl 1683 net/core/neighbour.c tbl->parms.reachable_time = tbl 1684 net/core/neighbour.c neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME)); tbl 1686 net/core/neighbour.c tbl->stats = alloc_percpu(struct neigh_statistics); tbl 1687 net/core/neighbour.c if (!tbl->stats) tbl 1691 net/core/neighbour.c if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat, tbl 1692 net/core/neighbour.c &neigh_stat_seq_ops, tbl)) tbl 1696 net/core/neighbour.c RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3)); tbl 1699 net/core/neighbour.c 
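neigh_table_init(), whose internals appear above, is how a protocol publishes its table. The registration shape below is trimmed from what net/ipv4/arp.c does; the parms.data tunables are elided, ex_hash/ex_key_eq/ex_constructor are hypothetical callbacks, and the field list is only as authoritative as the hits above (family, key_len, hash, key_eq, constructor, id, entry_size handling):

        static struct neigh_table ex_tbl = {
                .family         = AF_INET,
                .key_len        = sizeof(__be32),
                .protocol       = cpu_to_be16(ETH_P_IP),
                .hash           = ex_hash,        /* hashes pkey+dev with hash_rnd */
                .key_eq         = ex_key_eq,
                .constructor    = ex_constructor, /* fills n->ops and n->output */
                .id             = "ex_cache",     /* also the /proc/net/stat name */
                .parms = {
                        .tbl            = &ex_tbl,
                        .reachable_time = 30 * HZ,
                },
                .gc_interval    = 30 * HZ,
                .gc_thresh1     = 128,
                .gc_thresh2     = 512,
                .gc_thresh3     = 1024,
        };

        /* registered once at protocol init, under one of the fixed
         * NEIGH_*_TABLE indices consumed by neigh_find_table() above */
        neigh_table_init(NEIGH_ARP_TABLE, &ex_tbl);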
tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL); tbl 1701 net/core/neighbour.c if (!tbl->nht || !tbl->phash_buckets) tbl 1704 net/core/neighbour.c if (!tbl->entry_size) tbl 1705 net/core/neighbour.c tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) + tbl 1706 net/core/neighbour.c tbl->key_len, NEIGH_PRIV_ALIGN); tbl 1708 net/core/neighbour.c WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN); tbl 1710 net/core/neighbour.c rwlock_init(&tbl->lock); tbl 1711 net/core/neighbour.c INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work); tbl 1712 net/core/neighbour.c queue_delayed_work(system_power_efficient_wq, &tbl->gc_work, tbl 1713 net/core/neighbour.c tbl->parms.reachable_time); tbl 1714 net/core/neighbour.c timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0); tbl 1715 net/core/neighbour.c skb_queue_head_init_class(&tbl->proxy_queue, tbl 1718 net/core/neighbour.c tbl->last_flush = now; tbl 1719 net/core/neighbour.c tbl->last_rand = now + tbl->parms.reachable_time * 20; tbl 1721 net/core/neighbour.c neigh_tables[index] = tbl; tbl 1725 net/core/neighbour.c int neigh_table_clear(int index, struct neigh_table *tbl) tbl 1729 net/core/neighbour.c cancel_delayed_work_sync(&tbl->gc_work); tbl 1730 net/core/neighbour.c del_timer_sync(&tbl->proxy_timer); tbl 1731 net/core/neighbour.c pneigh_queue_purge(&tbl->proxy_queue); tbl 1732 net/core/neighbour.c neigh_ifdown(tbl, NULL); tbl 1733 net/core/neighbour.c if (atomic_read(&tbl->entries)) tbl 1736 net/core/neighbour.c call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu, tbl 1738 net/core/neighbour.c tbl->nht = NULL; tbl 1740 net/core/neighbour.c kfree(tbl->phash_buckets); tbl 1741 net/core/neighbour.c tbl->phash_buckets = NULL; tbl 1743 net/core/neighbour.c remove_proc_entry(tbl->id, init_net.proc_net_stat); tbl 1745 net/core/neighbour.c free_percpu(tbl->stats); tbl 1746 net/core/neighbour.c tbl->stats = NULL; tbl 1754 net/core/neighbour.c struct neigh_table *tbl = NULL; tbl 1758 net/core/neighbour.c tbl = neigh_tables[NEIGH_ARP_TABLE]; tbl 1761 net/core/neighbour.c tbl = neigh_tables[NEIGH_ND_TABLE]; tbl 1764 net/core/neighbour.c tbl = neigh_tables[NEIGH_DN_TABLE]; tbl 1768 net/core/neighbour.c return tbl; tbl 1790 net/core/neighbour.c struct neigh_table *tbl; tbl 1814 net/core/neighbour.c tbl = neigh_find_table(ndm->ndm_family); tbl 1815 net/core/neighbour.c if (tbl == NULL) tbl 1818 net/core/neighbour.c if (nla_len(dst_attr) < (int)tbl->key_len) { tbl 1824 net/core/neighbour.c err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); tbl 1831 net/core/neighbour.c neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); tbl 1840 net/core/neighbour.c write_lock_bh(&tbl->lock); tbl 1842 net/core/neighbour.c neigh_remove_one(neigh, tbl); tbl 1843 net/core/neighbour.c write_unlock_bh(&tbl->lock); tbl 1857 net/core/neighbour.c struct neigh_table *tbl; tbl 1890 net/core/neighbour.c tbl = neigh_find_table(ndm->ndm_family); tbl 1891 net/core/neighbour.c if (tbl == NULL) tbl 1894 net/core/neighbour.c if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) { tbl 1909 net/core/neighbour.c pn = pneigh_lookup(tbl, net, dst, dev, 1); tbl 1924 net/core/neighbour.c if (tbl->allow_add && !tbl->allow_add(dev, extack)) { tbl 1929 net/core/neighbour.c neigh = neigh_lookup(tbl, dst, dev); tbl 1940 net/core/neighbour.c neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true); tbl 2027 net/core/neighbour.c static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, tbl 2039 net/core/neighbour.c read_lock_bh(&tbl->lock); tbl 2040 net/core/neighbour.c 
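Just below, neightbl_fill_info() walks tbl->stats with per_cpu_ptr() to sum each CPU's counters before answering an RTM_GETNEIGHTBL dump; the same aggregation pattern in isolation:

        static unsigned long ex_total_lookups(struct neigh_table *tbl)
        {
                unsigned long total = 0;
                int cpu;

                for_each_possible_cpu(cpu) {
                        const struct neigh_statistics *st =
                                per_cpu_ptr(tbl->stats, cpu);

                        /* NEIGH_CACHE_STAT_INC() writes these lock-free,
                         * one neigh_statistics instance per CPU */
                        total += st->lookups;
                }
                return total;
        }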
tbl 2044 net/core/neighbour.c if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
tbl 2045 net/core/neighbour.c nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
tbl 2046 net/core/neighbour.c nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
tbl 2047 net/core/neighbour.c nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
tbl 2048 net/core/neighbour.c nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
tbl 2052 net/core/neighbour.c long flush_delta = now - tbl->last_flush;
tbl 2053 net/core/neighbour.c long rand_delta = now - tbl->last_rand;
tbl 2056 net/core/neighbour.c .ndtc_key_len = tbl->key_len,
tbl 2057 net/core/neighbour.c .ndtc_entry_size = tbl->entry_size,
tbl 2058 net/core/neighbour.c .ndtc_entries = atomic_read(&tbl->entries),
tbl 2061 net/core/neighbour.c .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
tbl 2065 net/core/neighbour.c nht = rcu_dereference_bh(tbl->nht);
tbl 2083 net/core/neighbour.c st = per_cpu_ptr(tbl->stats, cpu);
tbl 2102 net/core/neighbour.c BUG_ON(tbl->parms.dev);
tbl 2103 net/core/neighbour.c if (neightbl_fill_parms(skb, &tbl->parms) < 0)
tbl 2106 net/core/neighbour.c read_unlock_bh(&tbl->lock);
tbl 2111 net/core/neighbour.c read_unlock_bh(&tbl->lock);
tbl 2117 net/core/neighbour.c struct neigh_table *tbl,
tbl 2131 net/core/neighbour.c read_lock_bh(&tbl->lock);
tbl 2132 net/core/neighbour.c ndtmsg->ndtm_family = tbl->family;
tbl 2136 net/core/neighbour.c if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
tbl 2140 net/core/neighbour.c read_unlock_bh(&tbl->lock);
tbl 2144 net/core/neighbour.c read_unlock_bh(&tbl->lock);
tbl 2179 net/core/neighbour.c struct neigh_table *tbl;
tbl 2198 net/core/neighbour.c tbl = neigh_tables[tidx];
tbl 2199 net/core/neighbour.c if (!tbl)
tbl 2201 net/core/neighbour.c if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
tbl 2203 net/core/neighbour.c if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
tbl 2216 net/core/neighbour.c write_lock_bh(&tbl->lock);
tbl 2232 net/core/neighbour.c p = lookup_neigh_parms(tbl, net, ifindex);
tbl 2318 net/core/neighbour.c tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
tbl 2321 net/core/neighbour.c tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
tbl 2324 net/core/neighbour.c tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
tbl 2327 net/core/neighbour.c tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
tbl 2332 net/core/neighbour.c write_unlock_bh(&tbl->lock);
tbl 2368 net/core/neighbour.c struct neigh_table *tbl;
tbl 2382 net/core/neighbour.c tbl = neigh_tables[tidx];
tbl 2383 net/core/neighbour.c if (!tbl)
tbl 2386 net/core/neighbour.c if (tidx < tbl_skip || (family && tbl->family != family))
tbl 2389 net/core/neighbour.c if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
tbl 2395 net/core/neighbour.c p = list_next_entry(&tbl->parms, list);
tbl 2396 net/core/neighbour.c list_for_each_entry_from(p, &tbl->parms_list, list) {
tbl 2403 net/core/neighbour.c if (neightbl_fill_param_info(skb, tbl, p,
tbl 2442 net/core/neighbour.c if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
tbl 2480 net/core/neighbour.c struct neigh_table *tbl)
tbl 2490 net/core/neighbour.c ndm->ndm_family = tbl->family;
tbl 2498 net/core/neighbour.c if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
tbl 2545 net/core/neighbour.c static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
tbl 2560 net/core/neighbour.c nht = rcu_dereference_bh(tbl->nht);
tbl 2592 net/core/neighbour.c static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
tbl 2605 net/core/neighbour.c read_lock_bh(&tbl->lock);
tbl 2610 net/core/neighbour.c for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
tbl 2618 net/core/neighbour.c RTM_NEWNEIGH, flags, tbl) < 0) {
tbl 2619 net/core/neighbour.c read_unlock_bh(&tbl->lock);
tbl 2628 net/core/neighbour.c read_unlock_bh(&tbl->lock);
tbl 2702 net/core/neighbour.c struct neigh_table *tbl;
tbl 2723 net/core/neighbour.c tbl = neigh_tables[t];
tbl 2725 net/core/neighbour.c if (!tbl)
tbl 2727 net/core/neighbour.c if (t < s_t || (family && tbl->family != family))
tbl 2733 net/core/neighbour.c err = pneigh_dump_table(tbl, skb, cb, &filter);
tbl 2735 net/core/neighbour.c err = neigh_dump_table(tbl, skb, cb, &filter);
tbl 2745 net/core/neighbour.c struct neigh_table **tbl,
tbl 2777 net/core/neighbour.c *tbl = neigh_find_table(ndm->ndm_family);
tbl 2778 net/core/neighbour.c if (*tbl == NULL) {
tbl 2789 net/core/neighbour.c if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
tbl 2843 net/core/neighbour.c u32 pid, u32 seq, struct neigh_table *tbl)
tbl 2852 net/core/neighbour.c err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
tbl 2868 net/core/neighbour.c struct neigh_table *tbl = NULL;
tbl 2875 net/core/neighbour.c err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
tbl 2896 net/core/neighbour.c pn = pneigh_lookup(tbl, net, dst, dev, 0);
tbl 2902 net/core/neighbour.c nlh->nlmsg_seq, tbl);
tbl 2910 net/core/neighbour.c neigh = neigh_lookup(tbl, dst, dev);
tbl 2924 net/core/neighbour.c void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
tbl 2930 net/core/neighbour.c nht = rcu_dereference_bh(tbl->nht);
tbl 2932 net/core/neighbour.c read_lock(&tbl->lock); /* avoid resizes */
tbl 2941 net/core/neighbour.c read_unlock(&tbl->lock);
tbl 2947 net/core/neighbour.c void __neigh_for_each_release(struct neigh_table *tbl,
tbl 2953 net/core/neighbour.c nht = rcu_dereference_protected(tbl->nht,
tbl 2954 net/core/neighbour.c lockdep_is_held(&tbl->lock));
tbl 2961 net/core/neighbour.c lockdep_is_held(&tbl->lock))) != NULL) {
tbl 2969 net/core/neighbour.c lockdep_is_held(&tbl->lock)));
tbl 2986 net/core/neighbour.c struct neigh_table *tbl;
tbl 2989 net/core/neighbour.c tbl = neigh_tables[index];
tbl 2990 net/core/neighbour.c if (!tbl)
tbl 2998 net/core/neighbour.c neigh = __neigh_lookup_noref(tbl, addr, dev);
tbl 3001 net/core/neighbour.c neigh = __neigh_create(tbl, addr, dev, false);
tbl 3133 net/core/neighbour.c struct neigh_table *tbl = state->tbl;
tbl 3139 net/core/neighbour.c pn = tbl->phash_buckets[bucket];
tbl 3156 net/core/neighbour.c struct neigh_table *tbl = state->tbl;
tbl 3165 net/core/neighbour.c pn = tbl->phash_buckets[state->bucket];
tbl 3206 net/core/neighbour.c void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
tbl 3207 net/core/neighbour.c __acquires(tbl->lock)
tbl 3212 net/core/neighbour.c state->tbl = tbl;
tbl 3217 net/core/neighbour.c state->nht = rcu_dereference_bh(tbl->nht);
tbl 3218 net/core/neighbour.c read_lock(&tbl->lock);
tbl 3252 net/core/neighbour.c __releases(tbl->lock)
tbl 3256 net/core/neighbour.c struct neigh_table *tbl = state->tbl;
tbl 3258 net/core/neighbour.c read_unlock(&tbl->lock);
tbl 3267 net/core/neighbour.c struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
tbl 3277 net/core/neighbour.c return per_cpu_ptr(tbl->stats, cpu);
tbl 3284 net/core/neighbour.c struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
tbl 3291 net/core/neighbour.c return per_cpu_ptr(tbl->stats, cpu);
tbl 3303 net/core/neighbour.c struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
tbl 3313 net/core/neighbour.c atomic_read(&tbl->entries),
tbl 3644 net/core/neighbour.c struct neigh_table *tbl = p->tbl;
tbl 3646 net/core/neighbour.c t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
tbl 3647 net/core/neighbour.c t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
tbl 3648 net/core/neighbour.c t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
tbl 3649 net/core/neighbour.c t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
tbl 223 net/core/sysctl_net_core.c struct ctl_table tbl = {
tbl 231 net/core/sysctl_net_core.c ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
tbl 597 net/core/sysctl_net_core.c struct ctl_table *tbl;
tbl 599 net/core/sysctl_net_core.c tbl = netns_core_table;
tbl 601 net/core/sysctl_net_core.c tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
tbl 602 net/core/sysctl_net_core.c if (tbl == NULL)
tbl 605 net/core/sysctl_net_core.c tbl[0].data = &net->core.sysctl_somaxconn;
tbl 609 net/core/sysctl_net_core.c tbl[0].procname = NULL;
tbl 613 net/core/sysctl_net_core.c net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
tbl 620 net/core/sysctl_net_core.c if (tbl != netns_core_table)
tbl 621 net/core/sysctl_net_core.c kfree(tbl);
tbl 628 net/core/sysctl_net_core.c struct ctl_table *tbl;
tbl 630 net/core/sysctl_net_core.c tbl = net->core.sysctl_hdr->ctl_table_arg;
tbl 632 net/core/sysctl_net_core.c BUG_ON(tbl == netns_core_table);
tbl 633 net/core/sysctl_net_core.c kfree(tbl);
tbl 88 net/decnet/dn_neigh.c .tbl = &dn_neigh_table,
tbl 73 net/decnet/dn_rules.c struct dn_fib_table *tbl;
tbl 93 net/decnet/dn_rules.c tbl = dn_fib_get_table(rule->table, 0);
tbl 94 net/decnet/dn_rules.c if (tbl == NULL)
tbl 97 net/decnet/dn_rules.c err = tbl->lookup(tbl, fld, (struct dn_fib_res *)arg->result);
tbl 161 net/ipv4/arp.c .tbl = &arp_tbl,
tbl 1117 net/ipv4/arp.c struct neigh_table *tbl = &arp_tbl;
tbl 1124 net/ipv4/arp.c write_lock_bh(&tbl->lock);
tbl 1126 net/ipv4/arp.c neigh_remove_one(neigh, tbl);
tbl 1127 net/ipv4/arp.c write_unlock_bh(&tbl->lock);
tbl 2650 net/ipv4/devinet.c struct ctl_table *tbl;
tbl 2664 net/ipv4/devinet.c tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
tbl 2665 net/ipv4/devinet.c if (!tbl)
tbl 2668 net/ipv4/devinet.c tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
tbl 2669 net/ipv4/devinet.c tbl[0].extra1 = all;
tbl 2670 net/ipv4/devinet.c tbl[0].extra2 = net;
tbl 2691 net/ipv4/devinet.c forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
tbl 2707 net/ipv4/devinet.c kfree(tbl);
tbl 2720 net/ipv4/devinet.c struct ctl_table *tbl;
tbl 2722 net/ipv4/devinet.c tbl = net->ipv4.forw_hdr->ctl_table_arg;
tbl 2728 net/ipv4/devinet.c kfree(tbl);
tbl 109 net/ipv4/fib_rules.c struct fib_table *tbl;
tbl 130 net/ipv4/fib_rules.c tbl = fib_get_table(rule->fr_net, tb_id);
tbl 131 net/ipv4/fib_rules.c if (tbl)
tbl 132 net/ipv4/fib_rules.c err = fib_table_lookup(tbl, &flp->u.ip4,
tbl 1091 net/ipv4/fib_semantics.c struct fib_table *tbl = NULL;
tbl 1104 net/ipv4/fib_semantics.c tbl = fib_get_table(net, table);
tbl 1106 net/ipv4/fib_semantics.c if (tbl)
tbl 1107 net/ipv4/fib_semantics.c err = fib_table_lookup(tbl, &fl4, &res,
tbl 1115 net/ipv4/fib_semantics.c if (!tbl || err) {
tbl 2738 net/ipv4/ipmr.c struct mr_table *tbl;
tbl 2742 net/ipv4/ipmr.c tbl = NULL;
tbl 2743 net/ipv4/ipmr.c ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
tbl 2749 net/ipv4/ipmr.c return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
tbl 2751 net/ipv4/ipmr.c return ipmr_mfc_delete(tbl, &mfcc, parent);
tbl 3367 net/ipv4/route.c struct ctl_table *tbl;
tbl 3369 net/ipv4/route.c tbl = ipv4_route_flush_table;
tbl 3371 net/ipv4/route.c tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
tbl 3372 net/ipv4/route.c if (!tbl)
tbl 3377 net/ipv4/route.c if (tbl[0].procname != ipv4_route_flush_procname)
tbl 3378 net/ipv4/route.c tbl[0].procname = NULL;
tbl 3381 net/ipv4/route.c tbl[0].extra1 = net;
tbl 3383 net/ipv4/route.c net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
tbl 3389 net/ipv4/route.c if (tbl != ipv4_route_flush_table)
tbl 3390 net/ipv4/route.c kfree(tbl);
tbl 3397 net/ipv4/route.c struct ctl_table *tbl;
tbl 3399 net/ipv4/route.c tbl = net->ipv4.route_hdr->ctl_table_arg;
tbl 3401 net/ipv4/route.c BUG_ON(tbl == ipv4_route_flush_table);
tbl 3402 net/ipv4/route.c kfree(tbl);
tbl 229 net/ipv4/sysctl_net_ipv4.c struct ctl_table tbl = {
tbl 237 net/ipv4/sysctl_net_ipv4.c ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
tbl 248 net/ipv4/sysctl_net_ipv4.c struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
tbl 251 net/ipv4/sysctl_net_ipv4.c tbl.data = kmalloc(tbl.maxlen, GFP_USER);
tbl 252 net/ipv4/sysctl_net_ipv4.c if (!tbl.data)
tbl 254 net/ipv4/sysctl_net_ipv4.c tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);
tbl 255 net/ipv4/sysctl_net_ipv4.c ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
tbl 256 net/ipv4/sysctl_net_ipv4.c kfree(tbl.data);
tbl 265 net/ipv4/sysctl_net_ipv4.c struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
tbl 268 net/ipv4/sysctl_net_ipv4.c tbl.data = kmalloc(tbl.maxlen, GFP_USER);
tbl 269 net/ipv4/sysctl_net_ipv4.c if (!tbl.data)
tbl 272 net/ipv4/sysctl_net_ipv4.c tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);
tbl 273 net/ipv4/sysctl_net_ipv4.c ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
tbl 275 net/ipv4/sysctl_net_ipv4.c ret = tcp_set_allowed_congestion_control(tbl.data);
tbl 276 net/ipv4/sysctl_net_ipv4.c kfree(tbl.data);
tbl 307 net/ipv4/sysctl_net_ipv4.c struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
tbl 316 net/ipv4/sysctl_net_ipv4.c tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
tbl 317 net/ipv4/sysctl_net_ipv4.c if (!tbl.data)
tbl 337 net/ipv4/sysctl_net_ipv4.c off += snprintf(tbl.data + off, tbl.maxlen - off,
tbl 344 net/ipv4/sysctl_net_ipv4.c off += snprintf(tbl.data + off, tbl.maxlen - off, ",");
tbl 347 net/ipv4/sysctl_net_ipv4.c ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
tbl 350 net/ipv4/sysctl_net_ipv4.c backup_data = strchr(tbl.data, ',');
tbl 355 net/ipv4/sysctl_net_ipv4.c if (sscanf_key(tbl.data, key)) {
tbl 370 net/ipv4/sysctl_net_ipv4.c kfree(tbl.data);
tbl 450 net/ipv4/sysctl_net_ipv4.c struct ctl_table tbl = { .maxlen = TCP_ULP_BUF_MAX, };
tbl 453 net/ipv4/sysctl_net_ipv4.c tbl.data = kmalloc(tbl.maxlen, GFP_USER);
tbl 454 net/ipv4/sysctl_net_ipv4.c if (!tbl.data)
tbl 456 net/ipv4/sysctl_net_ipv4.c tcp_get_available_ulp(tbl.data, TCP_ULP_BUF_MAX);
tbl 457 net/ipv4/sysctl_net_ipv4.c ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
tbl 458 net/ipv4/sysctl_net_ipv4.c kfree(tbl.data);
tbl 30 net/ipv4/udp_diag.c static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
tbl 45 net/ipv4/udp_diag.c req->id.idiag_if, 0, tbl, NULL);
tbl 53 net/ipv4/udp_diag.c req->id.idiag_if, 0, tbl, NULL);
tbl 171 net/ipv4/udp_diag.c struct udp_table *tbl)
tbl 183 net/ipv4/udp_diag.c req->id.idiag_if, 0, tbl, NULL);
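
The sysctl registrations indexed above (net/core/sysctl_net_core.c, net/ipv4/devinet.c, net/ipv4/route.c, and net/rds/tcp.c further down) all follow one per-namespace pattern: kmemdup() a static ctl_table, repoint its .data at per-netns storage, register_net_sysctl(), and kfree() the clone on error or on namespace exit. A minimal sketch, with hypothetical table, field and path names:

	static int my_pernet_value;	/* stand-in for real per-netns storage */

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_value",
			.data		= &my_pernet_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static int __net_init my_sysctl_init(struct net *net)
	{
		struct ctl_table *tbl = my_table;
		struct ctl_table_header *hdr;

		if (!net_eq(net, &init_net)) {
			tbl = kmemdup(tbl, sizeof(my_table), GFP_KERNEL);
			if (!tbl)
				return -ENOMEM;
			/* real code repoints tbl[0].data at &net->... here */
		}
		hdr = register_net_sysctl(net, "net/my_subsys", tbl);
		if (!hdr)
			goto err_free;
		/* stash hdr so the exit hook can unregister and kfree */
		return 0;
	err_free:
		if (tbl != my_table)
			kfree(tbl);
		return -ENOMEM;
	}
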
tbl 191 net/ipv4/udp_diag.c req->id.idiag_if, 0, tbl, NULL);
tbl 199 net/ipv4/udp_diag.c req->id.idiag_if, 0, tbl, NULL);
tbl 2415 net/ipv6/ip6_fib.c lockdep_is_held(&iter->tbl->tb6_lock));
tbl 2429 net/ipv6/ip6_fib.c iter->w.root = &iter->tbl->tb6_root;
tbl 2438 net/ipv6/ip6_fib.c static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
tbl 2444 net/ipv6/ip6_fib.c if (tbl) {
tbl 2445 net/ipv6/ip6_fib.c h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
tbl 2446 net/ipv6/ip6_fib.c node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
tbl 2488 net/ipv6/ip6_fib.c spin_lock_bh(&iter->tbl->tb6_lock);
tbl 2490 net/ipv6/ip6_fib.c spin_unlock_bh(&iter->tbl->tb6_lock);
tbl 2501 net/ipv6/ip6_fib.c iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
tbl 2502 net/ipv6/ip6_fib.c if (!iter->tbl)
tbl 2516 net/ipv6/ip6_fib.c iter->tbl = ipv6_route_seq_next_table(NULL, net);
tbl 2519 net/ipv6/ip6_fib.c if (iter->tbl) {
tbl 121 net/ipv6/ndisc.c .tbl = &nd_tbl,
tbl 18 net/mac80211/mesh_pathtbl.c static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
tbl 45 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl = tblptr;
tbl 47 net/mac80211/mesh_pathtbl.c mesh_path_free_rcu(tbl, mpath);
tbl 67 net/mac80211/mesh_pathtbl.c static void mesh_table_free(struct mesh_table *tbl)
tbl 69 net/mac80211/mesh_pathtbl.c rhashtable_free_and_destroy(&tbl->rhead,
tbl 70 net/mac80211/mesh_pathtbl.c mesh_path_rht_free, tbl);
tbl 71 net/mac80211/mesh_pathtbl.c kfree(tbl);
tbl 212 net/mac80211/mesh_pathtbl.c static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
tbl 217 net/mac80211/mesh_pathtbl.c mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);
tbl 249 net/mac80211/mesh_pathtbl.c __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
tbl 254 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
tbl 306 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl;
tbl 310 net/mac80211/mesh_pathtbl.c tbl = mpath->sdata->u.mesh.mesh_paths;
tbl 321 net/mac80211/mesh_pathtbl.c spin_lock(&tbl->gates_lock);
tbl 322 net/mac80211/mesh_pathtbl.c hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
tbl 323 net/mac80211/mesh_pathtbl.c spin_unlock(&tbl->gates_lock);
tbl 341 net/mac80211/mesh_pathtbl.c static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
tbl 348 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->gates_lock);
tbl 351 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->gates_lock);
tbl 402 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl;
tbl 419 net/mac80211/mesh_pathtbl.c tbl = sdata->u.mesh.mesh_paths;
tbl 420 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->walk_lock);
tbl 421 net/mac80211/mesh_pathtbl.c mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
tbl 425 net/mac80211/mesh_pathtbl.c hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
tbl 426 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 444 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl;
tbl 461 net/mac80211/mesh_pathtbl.c tbl = sdata->u.mesh.mpp_paths;
tbl 463 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->walk_lock);
tbl 464 net/mac80211/mesh_pathtbl.c ret = rhashtable_lookup_insert_fast(&tbl->rhead,
tbl 468 net/mac80211/mesh_pathtbl.c hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
tbl 469 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 490 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
tbl 495 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
tbl 512 net/mac80211/mesh_pathtbl.c static void mesh_path_free_rcu(struct mesh_table *tbl,
tbl 519 net/mac80211/mesh_pathtbl.c mesh_gate_del(tbl, mpath);
tbl 523 net/mac80211/mesh_pathtbl.c atomic_dec(&tbl->entries);
tbl 527 net/mac80211/mesh_pathtbl.c static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
tbl 530 net/mac80211/mesh_pathtbl.c rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
tbl 531 net/mac80211/mesh_pathtbl.c mesh_path_free_rcu(tbl, mpath);
tbl 548 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
tbl 552 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->walk_lock);
tbl 553 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
tbl 555 net/mac80211/mesh_pathtbl.c __mesh_path_del(tbl, mpath);
tbl 557 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 563 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
tbl 567 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->walk_lock);
tbl 568 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
tbl 570 net/mac80211/mesh_pathtbl.c __mesh_path_del(tbl, mpath);
tbl 572 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 575 net/mac80211/mesh_pathtbl.c static void table_flush_by_iface(struct mesh_table *tbl)
tbl 580 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->walk_lock);
tbl 581 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
tbl 582 net/mac80211/mesh_pathtbl.c __mesh_path_del(tbl, mpath);
tbl 584 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 610 net/mac80211/mesh_pathtbl.c static int table_path_del(struct mesh_table *tbl,
tbl 616 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->walk_lock);
tbl 617 net/mac80211/mesh_pathtbl.c mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
tbl 619 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 623 net/mac80211/mesh_pathtbl.c __mesh_path_del(tbl, mpath);
tbl 624 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 677 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl;
tbl 682 net/mac80211/mesh_pathtbl.c tbl = sdata->u.mesh.mesh_paths;
tbl 685 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
tbl 698 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
tbl 792 net/mac80211/mesh_pathtbl.c struct mesh_table *tbl)
tbl 797 net/mac80211/mesh_pathtbl.c spin_lock_bh(&tbl->walk_lock);
tbl 798 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
tbl 802 net/mac80211/mesh_pathtbl.c __mesh_path_del(tbl, mpath);
tbl 804 net/mac80211/mesh_pathtbl.c spin_unlock_bh(&tbl->walk_lock);
tbl 3984 net/netfilter/ipvs/ip_vs_ctl.c struct ctl_table *tbl;
tbl 3992 net/netfilter/ipvs/ip_vs_ctl.c tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
tbl 3993 net/netfilter/ipvs/ip_vs_ctl.c if (tbl == NULL)
tbl 3998 net/netfilter/ipvs/ip_vs_ctl.c tbl[0].procname = NULL;
tbl 4000 net/netfilter/ipvs/ip_vs_ctl.c tbl = vs_vars;
tbl 4003 net/netfilter/ipvs/ip_vs_ctl.c if (tbl[idx].proc_handler == proc_do_defense_mode)
tbl 4004 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx].extra2 = ipvs;
tbl 4008 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_amemthresh;
tbl 4010 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_am_droprate;
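
The mesh_pathtbl.c entries above pair every rhashtable with a plain hlist ("walk list") guarded by a spinlock, so lookups go through the hash while flush and dump paths iterate the list. A condensed sketch of that insert path, with hypothetical type names (the table's rhead is assumed to have been set up with rhashtable_init()):

	struct path_entry {
		u8 dst[ETH_ALEN];
		struct rhash_head rhash;	/* keyed lookup */
		struct hlist_node walk_list;	/* linear iteration */
	};

	struct path_table {
		struct rhashtable rhead;
		struct hlist_head walk_head;
		spinlock_t walk_lock;
	};

	static const struct rhashtable_params path_rht_params = {
		.key_len     = ETH_ALEN,
		.key_offset  = offsetof(struct path_entry, dst),
		.head_offset = offsetof(struct path_entry, rhash),
	};

	static int path_add(struct path_table *tbl, struct path_entry *new)
	{
		int ret;

		spin_lock_bh(&tbl->walk_lock);
		ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new->rhash,
						    path_rht_params);
		if (!ret)
			hlist_add_head_rcu(&new->walk_list, &tbl->walk_head);
		spin_unlock_bh(&tbl->walk_lock);
		return ret;
	}
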
tbl 4011 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_drop_entry;
tbl 4012 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_drop_packet;
tbl 4014 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_conntrack;
tbl 4016 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_secure_tcp;
tbl 4018 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_snat_reroute;
tbl 4020 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sync_ver;
tbl 4022 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sync_ports;
tbl 4023 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sync_persist_mode;
tbl 4025 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
tbl 4027 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
tbl 4028 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl 4029 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl 4030 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sloppy_tcp;
tbl 4031 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sloppy_sctp;
tbl 4032 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
tbl 4035 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx].data = &ipvs->sysctl_sync_threshold;
tbl 4036 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
tbl 4038 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
tbl 4040 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_sync_retries;
tbl 4041 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
tbl 4043 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
tbl 4044 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_backup_only;
tbl 4046 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl 4047 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl 4048 net/netfilter/ipvs/ip_vs_ctl.c tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
tbl 4050 net/netfilter/ipvs/ip_vs_ctl.c ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
tbl 4053 net/netfilter/ipvs/ip_vs_ctl.c kfree(tbl);
tbl 4057 net/netfilter/ipvs/ip_vs_ctl.c ipvs->sysctl_tbl = tbl;
tbl 168 net/netfilter/ipvs/ip_vs_lblc.c ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
tbl 172 net/netfilter/ipvs/ip_vs_lblc.c hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
tbl 173 net/netfilter/ipvs/ip_vs_lblc.c atomic_inc(&tbl->entries);
tbl 179 net/netfilter/ipvs/ip_vs_lblc.c ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
tbl 185 net/netfilter/ipvs/ip_vs_lblc.c hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
tbl 198 net/netfilter/ipvs/ip_vs_lblc.c ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
tbl 203 net/netfilter/ipvs/ip_vs_lblc.c en = ip_vs_lblc_get(af, tbl, daddr);
tbl 220 net/netfilter/ipvs/ip_vs_lblc.c ip_vs_lblc_hash(tbl, en);
tbl 231 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_lblc_table *tbl = svc->sched_data;
tbl 237 net/netfilter/ipvs/ip_vs_lblc.c tbl->dead = true;
tbl 239 net/netfilter/ipvs/ip_vs_lblc.c hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
tbl 241 net/netfilter/ipvs/ip_vs_lblc.c atomic_dec(&tbl->entries);
tbl 258 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_lblc_table *tbl = svc->sched_data;
tbl 264 net/netfilter/ipvs/ip_vs_lblc.c for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
tbl 268 net/netfilter/ipvs/ip_vs_lblc.c hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
tbl 275 net/netfilter/ipvs/ip_vs_lblc.c atomic_dec(&tbl->entries);
tbl 279 net/netfilter/ipvs/ip_vs_lblc.c tbl->rover = j;
tbl 296 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_lblc_table *tbl = from_timer(tbl, t, periodic_timer);
tbl 297 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_service *svc = tbl->svc;
tbl 304 net/netfilter/ipvs/ip_vs_lblc.c if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
tbl 307 net/netfilter/ipvs/ip_vs_lblc.c tbl->counter = 1;
tbl 311 net/netfilter/ipvs/ip_vs_lblc.c if (atomic_read(&tbl->entries) <= tbl->max_size) {
tbl 312 net/netfilter/ipvs/ip_vs_lblc.c tbl->counter++;
tbl 316 net/netfilter/ipvs/ip_vs_lblc.c goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
tbl 317 net/netfilter/ipvs/ip_vs_lblc.c if (goal > tbl->max_size/2)
tbl 318 net/netfilter/ipvs/ip_vs_lblc.c goal = tbl->max_size/2;
tbl 320 net/netfilter/ipvs/ip_vs_lblc.c for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
tbl 324 net/netfilter/ipvs/ip_vs_lblc.c hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
tbl 329 net/netfilter/ipvs/ip_vs_lblc.c atomic_dec(&tbl->entries);
tbl 336 net/netfilter/ipvs/ip_vs_lblc.c tbl->rover = j;
tbl 339 net/netfilter/ipvs/ip_vs_lblc.c mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
tbl 346 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_lblc_table *tbl;
tbl 351 net/netfilter/ipvs/ip_vs_lblc.c tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
tbl 352 net/netfilter/ipvs/ip_vs_lblc.c if (tbl == NULL)
tbl 355 net/netfilter/ipvs/ip_vs_lblc.c svc->sched_data = tbl;
tbl 357 net/netfilter/ipvs/ip_vs_lblc.c "current service\n", sizeof(*tbl));
tbl 363 net/netfilter/ipvs/ip_vs_lblc.c INIT_HLIST_HEAD(&tbl->bucket[i]);
tbl 365 net/netfilter/ipvs/ip_vs_lblc.c tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
tbl 366 net/netfilter/ipvs/ip_vs_lblc.c tbl->rover = 0;
tbl 367 net/netfilter/ipvs/ip_vs_lblc.c tbl->counter = 1;
tbl 368 net/netfilter/ipvs/ip_vs_lblc.c tbl->dead = false;
tbl 369 net/netfilter/ipvs/ip_vs_lblc.c tbl->svc = svc;
tbl 370 net/netfilter/ipvs/ip_vs_lblc.c atomic_set(&tbl->entries, 0);
tbl 375 net/netfilter/ipvs/ip_vs_lblc.c timer_setup(&tbl->periodic_timer, ip_vs_lblc_check_expire, 0);
tbl 376 net/netfilter/ipvs/ip_vs_lblc.c mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
tbl 384 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_lblc_table *tbl = svc->sched_data;
tbl 387 net/netfilter/ipvs/ip_vs_lblc.c del_timer_sync(&tbl->periodic_timer);
tbl 393 net/netfilter/ipvs/ip_vs_lblc.c kfree_rcu(tbl, rcu_head);
tbl 395 net/netfilter/ipvs/ip_vs_lblc.c sizeof(*tbl));
tbl 484 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_lblc_table *tbl = svc->sched_data;
tbl 491 net/netfilter/ipvs/ip_vs_lblc.c en = ip_vs_lblc_get(svc->af, tbl, &iph->daddr);
tbl 520 net/netfilter/ipvs/ip_vs_lblc.c if (!tbl->dead)
tbl 521 net/netfilter/ipvs/ip_vs_lblc.c ip_vs_lblc_new(tbl, &iph->daddr, svc->af, dest);
tbl 331 net/netfilter/ipvs/ip_vs_lblcr.c ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
tbl 335 net/netfilter/ipvs/ip_vs_lblcr.c hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
tbl 336 net/netfilter/ipvs/ip_vs_lblcr.c atomic_inc(&tbl->entries);
tbl 342 net/netfilter/ipvs/ip_vs_lblcr.c ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
tbl 348 net/netfilter/ipvs/ip_vs_lblcr.c hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
tbl 361 net/netfilter/ipvs/ip_vs_lblcr.c ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
tbl 366 net/netfilter/ipvs/ip_vs_lblcr.c en = ip_vs_lblcr_get(af, tbl, daddr);
tbl 382 net/netfilter/ipvs/ip_vs_lblcr.c ip_vs_lblcr_hash(tbl, en);
tbl 397 net/netfilter/ipvs/ip_vs_lblcr.c struct ip_vs_lblcr_table *tbl = svc->sched_data;
tbl 403 net/netfilter/ipvs/ip_vs_lblcr.c tbl->dead = true;
tbl 405 net/netfilter/ipvs/ip_vs_lblcr.c hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
tbl 423 net/netfilter/ipvs/ip_vs_lblcr.c struct ip_vs_lblcr_table *tbl = svc->sched_data;
tbl 429 net/netfilter/ipvs/ip_vs_lblcr.c for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
tbl 433 net/netfilter/ipvs/ip_vs_lblcr.c hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
tbl 439 net/netfilter/ipvs/ip_vs_lblcr.c atomic_dec(&tbl->entries);
tbl 443 net/netfilter/ipvs/ip_vs_lblcr.c tbl->rover = j;
tbl 460 net/netfilter/ipvs/ip_vs_lblcr.c struct ip_vs_lblcr_table *tbl = from_timer(tbl, t, periodic_timer);
tbl 461 net/netfilter/ipvs/ip_vs_lblcr.c struct ip_vs_service *svc = tbl->svc;
tbl 468 net/netfilter/ipvs/ip_vs_lblcr.c if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
tbl 471 net/netfilter/ipvs/ip_vs_lblcr.c tbl->counter = 1;
tbl 475 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&tbl->entries) <= tbl->max_size) {
tbl 476 net/netfilter/ipvs/ip_vs_lblcr.c tbl->counter++;
tbl 480 net/netfilter/ipvs/ip_vs_lblcr.c goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
tbl 481 net/netfilter/ipvs/ip_vs_lblcr.c if (goal > tbl->max_size/2)
tbl 482 net/netfilter/ipvs/ip_vs_lblcr.c goal = tbl->max_size/2;
tbl 484 net/netfilter/ipvs/ip_vs_lblcr.c for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
tbl 488 net/netfilter/ipvs/ip_vs_lblcr.c hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
tbl 493 net/netfilter/ipvs/ip_vs_lblcr.c atomic_dec(&tbl->entries);
tbl 500 net/netfilter/ipvs/ip_vs_lblcr.c tbl->rover = j;
tbl 503 net/netfilter/ipvs/ip_vs_lblcr.c mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
tbl 509 net/netfilter/ipvs/ip_vs_lblcr.c struct ip_vs_lblcr_table *tbl;
tbl 514 net/netfilter/ipvs/ip_vs_lblcr.c tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
tbl 515 net/netfilter/ipvs/ip_vs_lblcr.c if (tbl == NULL)
tbl 518 net/netfilter/ipvs/ip_vs_lblcr.c svc->sched_data = tbl;
tbl 520 net/netfilter/ipvs/ip_vs_lblcr.c "current service\n", sizeof(*tbl));
tbl 526 net/netfilter/ipvs/ip_vs_lblcr.c INIT_HLIST_HEAD(&tbl->bucket[i]);
tbl 528 net/netfilter/ipvs/ip_vs_lblcr.c tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
tbl 529 net/netfilter/ipvs/ip_vs_lblcr.c tbl->rover = 0;
tbl 530 net/netfilter/ipvs/ip_vs_lblcr.c tbl->counter = 1;
tbl 531 net/netfilter/ipvs/ip_vs_lblcr.c tbl->dead = false;
tbl 532 net/netfilter/ipvs/ip_vs_lblcr.c tbl->svc = svc;
tbl 533 net/netfilter/ipvs/ip_vs_lblcr.c atomic_set(&tbl->entries, 0);
tbl 538 net/netfilter/ipvs/ip_vs_lblcr.c timer_setup(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 0);
tbl 539 net/netfilter/ipvs/ip_vs_lblcr.c mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
tbl 547 net/netfilter/ipvs/ip_vs_lblcr.c struct ip_vs_lblcr_table *tbl = svc->sched_data;
tbl 550 net/netfilter/ipvs/ip_vs_lblcr.c del_timer_sync(&tbl->periodic_timer);
tbl 556 net/netfilter/ipvs/ip_vs_lblcr.c kfree_rcu(tbl, rcu_head);
tbl 558 net/netfilter/ipvs/ip_vs_lblcr.c sizeof(*tbl));
tbl 648 net/netfilter/ipvs/ip_vs_lblcr.c struct ip_vs_lblcr_table *tbl = svc->sched_data;
tbl 655 net/netfilter/ipvs/ip_vs_lblcr.c en = ip_vs_lblcr_get(svc->af, tbl, &iph->daddr);
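
The ip_vs_lblc/ip_vs_lblcr expiration entries above implement a "rover" scan: each timer tick walks only a handful of buckets, remembering in ->rover where the next tick should resume, so no single pass holds the lock across the whole table. The idea in isolation, with hypothetical types:

	#define TAB_SIZE 1024			/* must be a power of two */

	struct cache_entry {
		struct hlist_node list;
		unsigned long lastuse;
	};

	struct cache_table {
		struct hlist_head bucket[TAB_SIZE];
		int rover;
	};

	static void cache_expire_slice(struct cache_table *tbl, int nbuckets,
				       unsigned long timeout)
	{
		struct cache_entry *en;
		struct hlist_node *next;
		int i, j;

		for (i = 0, j = tbl->rover; i < nbuckets; i++) {
			j = (j + 1) & (TAB_SIZE - 1);	/* wrap around */
			hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
				if (time_before(jiffies, en->lastuse + timeout))
					continue;	/* still fresh */
				hlist_del(&en->list);
				kfree(en);
			}
		}
		tbl->rover = j;				/* resume here next tick */
	}
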
tbl 690 net/netfilter/ipvs/ip_vs_lblcr.c if (!tbl->dead)
tbl 705 net/netfilter/ipvs/ip_vs_lblcr.c if (!tbl->dead)
tbl 706 net/netfilter/ipvs/ip_vs_lblcr.c ip_vs_lblcr_new(tbl, &iph->daddr, svc->af, dest);
tbl 24 net/netfilter/xt_repldata.h } *tbl; \
tbl 26 net/netfilter/xt_repldata.h size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
tbl 28 net/netfilter/xt_repldata.h tbl = kzalloc(term_offset + sizeof(*term), GFP_KERNEL); \
tbl 29 net/netfilter/xt_repldata.h if (tbl == NULL) \
tbl 31 net/netfilter/xt_repldata.h term = (struct type##_error *)&(((char *)tbl)[term_offset]); \
tbl 32 net/netfilter/xt_repldata.h strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
tbl 34 net/netfilter/xt_repldata.h tbl->repl.valid_hooks = hook_mask; \
tbl 35 net/netfilter/xt_repldata.h tbl->repl.num_entries = nhooks + 1; \
tbl 36 net/netfilter/xt_repldata.h tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
tbl 41 net/netfilter/xt_repldata.h tbl->repl.hook_entry[hooknum] = bytes; \
tbl 42 net/netfilter/xt_repldata.h tbl->repl.underflow[hooknum] = bytes; \
tbl 43 net/netfilter/xt_repldata.h tbl->entries[i++] = (struct type##_standard) \
tbl 47 net/netfilter/xt_repldata.h tbl; \
tbl 36 net/netlabel/netlabel_domainhash.c struct list_head *tbl;
tbl 145 net/netlabel/netlabel_domainhash.c bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];
tbl 372 net/netlabel/netlabel_domainhash.c hsh_tbl->tbl = kcalloc(hsh_tbl->size,
tbl 375 net/netlabel/netlabel_domainhash.c if (hsh_tbl->tbl == NULL) {
tbl 380 net/netlabel/netlabel_domainhash.c INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
tbl 436 net/netlabel/netlabel_domainhash.c &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
tbl 953 net/netlabel/netlabel_domainhash.c iter_list = &rcu_dereference(netlbl_domhsh)->tbl[iter_bkt];
tbl 63 net/netlabel/netlabel_unlabeled.c struct list_head *tbl;
tbl 209 net/netlabel/netlabel_unlabeled.c bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt];
tbl 331 net/netlabel/netlabel_unlabeled.c &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]);
tbl 1185 net/netlabel/netlabel_unlabeled.c iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt];
tbl 1421 net/netlabel/netlabel_unlabeled.c hsh_tbl->tbl = kcalloc(hsh_tbl->size,
tbl 1424 net/netlabel/netlabel_unlabeled.c if (hsh_tbl->tbl == NULL) {
tbl 1429 net/netlabel/netlabel_unlabeled.c INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
tbl 536 net/netlink/af_netlink.c struct netlink_table *tbl = &nl_table[sk->sk_protocol];
tbl 541 net/netlink/af_netlink.c listeners = nl_deref_protected(tbl->listeners);
tbl 545 net/netlink/af_netlink.c for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
tbl 547 net/netlink/af_netlink.c sk_for_each_bound(sk, &tbl->mc_list) {
tbl 2113 net/netlink/af_netlink.c struct netlink_table *tbl = &nl_table[sk->sk_protocol];
tbl 2118 net/netlink/af_netlink.c if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
tbl 2122 net/netlink/af_netlink.c old = nl_deref_protected(tbl->listeners);
tbl 2123 net/netlink/af_netlink.c memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
tbl 2124 net/netlink/af_netlink.c rcu_assign_pointer(tbl->listeners, new);
tbl 2128 net/netlink/af_netlink.c tbl->groups = groups;
tbl 2159 net/netlink/af_netlink.c struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
tbl 2161 net/netlink/af_netlink.c sk_for_each_bound(sk, &tbl->mc_list)
tbl 93 net/netlink/diag.c struct netlink_table *tbl = &nl_table[protocol];
tbl 117 net/netlink/diag.c rhashtable_walk_enter(&tbl->hash, hti);
tbl 156 net/netlink/diag.c sk_for_each_bound(sk, &tbl->mc_list) {
tbl 1720 net/openvswitch/flow_netlink.c const struct ovs_len_tbl *tbl)
tbl 1727 net/openvswitch/flow_netlink.c if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
tbl 1728 net/openvswitch/flow_netlink.c nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
tbl 446 net/openvswitch/flow_table.c struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
tbl 450 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
tbl 455 net/openvswitch/flow_table.c list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
tbl 464 net/openvswitch/flow_table.c struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
tbl 469 net/openvswitch/flow_table.c return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
tbl 472 net/openvswitch/flow_table.c struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
tbl 475 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
tbl 480 net/openvswitch/flow_table.c list_for_each_entry(mask, &tbl->mask_list, list) {
tbl 511 net/openvswitch/flow_table.c struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
tbl 514 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
tbl 547 net/openvswitch/flow_table.c static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
tbl 606 net/openvswitch/flow_table.c static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
tbl 611 net/openvswitch/flow_table.c list_for_each(ml, &tbl->mask_list) {
tbl 622 net/openvswitch/flow_table.c static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
tbl 626 net/openvswitch/flow_table.c mask = flow_mask_find(tbl, new);
tbl 634 net/openvswitch/flow_table.c list_add_rcu(&mask->list, &tbl->mask_list);
tbl 67 net/openvswitch/flow_table.h struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
tbl 540 net/rds/tcp.c struct ctl_table *tbl;
tbl 549 net/rds/tcp.c tbl = rds_tcp_sysctl_table;
tbl 551 net/rds/tcp.c tbl = kmemdup(rds_tcp_sysctl_table,
tbl 553 net/rds/tcp.c if (!tbl) {
tbl 557 net/rds/tcp.c rtn->ctl_table = tbl;
tbl 559 net/rds/tcp.c tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
tbl 560 net/rds/tcp.c tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
tbl 561 net/rds/tcp.c rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
tbl 594 net/rds/tcp.c kfree(tbl);
tbl 776 net/sched/sch_netem.c static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
tbl 799 net/sched/sch_netem.c swap(*tbl, d);
tbl 230 net/sched/sch_teql.c mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
tbl 330 net/sctp/sysctl.c struct ctl_table tbl;
tbl 336 net/sctp/sysctl.c memset(&tbl, 0, sizeof(struct ctl_table));
tbl 339 net/sctp/sysctl.c tbl.data = tmp;
tbl 340 net/sctp/sysctl.c tbl.maxlen = sizeof(tmp);
tbl 342 net/sctp/sysctl.c tbl.data = net->sctp.sctp_hmac_alg ? : none;
tbl 343 net/sctp/sysctl.c tbl.maxlen = strlen(tbl.data);
tbl 346 net/sctp/sysctl.c ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
tbl 378 net/sctp/sysctl.c struct ctl_table tbl;
tbl 381 net/sctp/sysctl.c memset(&tbl, 0, sizeof(struct ctl_table));
tbl 382 net/sctp/sysctl.c tbl.maxlen = sizeof(unsigned int);
tbl 385 net/sctp/sysctl.c tbl.data = &new_value;
tbl 387 net/sctp/sysctl.c tbl.data = &net->sctp.rto_min;
tbl 389 net/sctp/sysctl.c ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
tbl 407 net/sctp/sysctl.c struct ctl_table tbl;
tbl 410 net/sctp/sysctl.c memset(&tbl, 0, sizeof(struct ctl_table));
tbl 411 net/sctp/sysctl.c tbl.maxlen = sizeof(unsigned int);
tbl 414 net/sctp/sysctl.c tbl.data = &new_value;
tbl 416 net/sctp/sysctl.c tbl.data = &net->sctp.rto_max;
tbl 418 net/sctp/sysctl.c ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
tbl 445 net/sctp/sysctl.c struct ctl_table tbl;
tbl 448 net/sctp/sysctl.c memset(&tbl, 0, sizeof(struct ctl_table));
tbl 449 net/sctp/sysctl.c tbl.maxlen = sizeof(unsigned int);
tbl 452 net/sctp/sysctl.c tbl.data = &new_value;
tbl 454 net/sctp/sysctl.c tbl.data = &net->sctp.auth_enable;
tbl 456 net/sctp/sysctl.c ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
tbl 668 scripts/dtc/livetree.c struct reserve_info *ri, **tbl;
tbl 679 scripts/dtc/livetree.c tbl = xmalloc(n * sizeof(*tbl));
tbl 684 scripts/dtc/livetree.c tbl[i++] = ri;
tbl 686 scripts/dtc/livetree.c qsort(tbl, n, sizeof(*tbl), cmp_reserve_info);
tbl 688 scripts/dtc/livetree.c dti->reservelist = tbl[0];
tbl 690 scripts/dtc/livetree.c tbl[i]->next = tbl[i+1];
tbl 691 scripts/dtc/livetree.c tbl[n-1]->next = NULL;
tbl 693 scripts/dtc/livetree.c free(tbl);
tbl 709 scripts/dtc/livetree.c struct property *prop, **tbl;
tbl 717 scripts/dtc/livetree.c tbl = xmalloc(n * sizeof(*tbl));
tbl 720 scripts/dtc/livetree.c tbl[i++] = prop;
tbl 722 scripts/dtc/livetree.c qsort(tbl, n, sizeof(*tbl), cmp_prop);
tbl 724 scripts/dtc/livetree.c node->proplist = tbl[0];
tbl 726 scripts/dtc/livetree.c tbl[i]->next = tbl[i+1];
tbl 727 scripts/dtc/livetree.c tbl[n-1]->next = NULL;
tbl 729 scripts/dtc/livetree.c free(tbl);
tbl 745 scripts/dtc/livetree.c struct node *subnode, **tbl;
tbl 753 scripts/dtc/livetree.c tbl = xmalloc(n * sizeof(*tbl));
tbl 756 scripts/dtc/livetree.c tbl[i++] = subnode;
tbl 758 scripts/dtc/livetree.c qsort(tbl, n, sizeof(*tbl), cmp_subnode);
tbl 760 scripts/dtc/livetree.c node->children = tbl[0];
tbl 762 scripts/dtc/livetree.c tbl[i]->next_sibling = tbl[i+1];
tbl 763 scripts/dtc/livetree.c tbl[n-1]->next_sibling = NULL;
tbl 765 scripts/dtc/livetree.c free(tbl);
tbl 1174 sound/core/oss/mixer_oss.c struct snd_mixer_oss_assign_table *tbl;
tbl 1207 sound/core/oss/mixer_oss.c tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
tbl 1208 sound/core/oss/mixer_oss.c if (!tbl)
tbl 1210 sound/core/oss/mixer_oss.c tbl->oss_id = ch;
tbl 1211 sound/core/oss/mixer_oss.c tbl->name = kstrdup(str, GFP_KERNEL);
tbl 1212 sound/core/oss/mixer_oss.c if (! tbl->name) {
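
The net/sctp/sysctl.c handlers above all use the same stack-copy trick: build a throwaway ctl_table whose .data points at a local, let the generic proc_dointvec() do the parsing, then validate before committing. A condensed sketch modeled on the rto_min handler whose entries appear above (the 1..rto_max bounds mirror that handler; error paths are abbreviated):

	static int proc_my_rto_min(struct ctl_table *ctl, int write,
				   void __user *buffer, size_t *lenp, loff_t *ppos)
	{
		struct net *net = current->nsproxy->net_ns;
		unsigned int new_value;
		struct ctl_table tbl;
		int ret;

		memset(&tbl, 0, sizeof(struct ctl_table));
		tbl.maxlen = sizeof(unsigned int);
		tbl.data = write ? &new_value : &net->sctp.rto_min;

		ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
		if (write && !ret) {
			if (new_value < 1 || new_value > net->sctp.rto_max)
				return -EINVAL;		/* reject out of range */
			net->sctp.rto_min = new_value;	/* commit validated value */
		}
		return ret;
	}
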
tbl 1213 sound/core/oss/mixer_oss.c kfree(tbl);
tbl 1216 sound/core/oss/mixer_oss.c tbl->index = idx;
tbl 1217 sound/core/oss/mixer_oss.c if (snd_mixer_oss_build_input(mixer, tbl, 1, 1) <= 0) {
tbl 1218 sound/core/oss/mixer_oss.c kfree(tbl->name);
tbl 1219 sound/core/oss/mixer_oss.c kfree(tbl);
tbl 18 sound/core/sgbuf.c #define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)
tbl 1083 sound/pci/ac97/ac97_codec.c const struct snd_ac97_res_table *tbl;
tbl 1084 sound/pci/ac97/ac97_codec.c for (tbl = ac97->res_table; tbl->reg; tbl++) {
tbl 1085 sound/pci/ac97/ac97_codec.c if (tbl->reg == reg) {
tbl 1086 sound/pci/ac97/ac97_codec.c *lo_max = tbl->bits & 0xff;
tbl 1087 sound/pci/ac97/ac97_codec.c *hi_max = (tbl->bits >> 8) & 0xff;
tbl 7606 sound/pci/hda/patch_ca0132.c struct hda_jack_tbl *tbl;
tbl 7611 sound/pci/hda/patch_ca0132.c tbl = snd_hda_jack_tbl_get(codec, cb->nid);
tbl 7612 sound/pci/hda/patch_ca0132.c if (tbl)
tbl 7613 sound/pci/hda/patch_ca0132.c tbl->block_report = 1;
tbl 1020 sound/pci/hda/patch_cirrus.c struct hda_jack_callback *tbl)
tbl 2446 sound/pci/hda/patch_hdmi.c struct hda_jack_tbl *tbl;
tbl 2448 sound/pci/hda/patch_hdmi.c tbl = snd_hda_jack_tbl_get(codec, nid);
tbl 2449 sound/pci/hda/patch_hdmi.c if (tbl) {
tbl 2453 sound/pci/hda/patch_hdmi.c unsigned int val = use_acomp ? 0 : (AC_USRSP_EN | tbl->tag);
tbl 2273 sound/pci/ice1712/ice1712.c struct snd_ice1712_card_info * const *tbl, *c;
tbl 2296 sound/pci/ice1712/ice1712.c for (tbl = card_tables; *tbl; tbl++) {
tbl 2297 sound/pci/ice1712/ice1712.c for (c = *tbl; c->subvendor; c++) {
tbl 2619 sound/pci/ice1712/ice1712.c struct snd_ice1712_card_info * const *tbl, *c;
tbl 2643 sound/pci/ice1712/ice1712.c for (tbl = card_tables; *tbl; tbl++) {
tbl 2644 sound/pci/ice1712/ice1712.c for (c = *tbl; c->subvendor; c++) {
tbl 2294 sound/pci/ice1712/ice1724.c struct snd_ice1712_card_info * const *tbl, *c;
tbl 2323 sound/pci/ice1712/ice1724.c for (tbl = card_tables; *tbl; tbl++) {
tbl 2324 sound/pci/ice1712/ice1724.c for (c = *tbl; c->name; c++) {
tbl 2608 sound/pci/ice1712/ice1724.c struct snd_ice1712_card_info * const *tbl, *c;
tbl 2634 sound/pci/ice1712/ice1724.c for (tbl = card_tables; *tbl; tbl++) {
tbl 2635 sound/pci/ice1712/ice1724.c for (c = *tbl; c->name; c++) {
tbl 1611 sound/pci/intel8x0.c struct ich_pcm_table *tbl, *rec;
tbl 1615 sound/pci/intel8x0.c tbl = intel_pcms;
tbl 1621 sound/pci/intel8x0.c tbl = nforce_pcms;
tbl 1627 sound/pci/intel8x0.c tbl = ali_pcms;
tbl 1631 sound/pci/intel8x0.c tbl = intel_pcms;
tbl 1638 sound/pci/intel8x0.c rec = tbl + i;
tbl 2947 sound/pci/intel8x0.c struct ich_reg_info *tbl;
tbl 3020 sound/pci/intel8x0.c tbl = nforce_regs;
tbl 3023 sound/pci/intel8x0.c tbl = ali_regs;
tbl 3026 sound/pci/intel8x0.c tbl = intel_regs;
tbl 3032 sound/pci/intel8x0.c ichdev->reg_offset = tbl[i].offset;
tbl 3033 sound/pci/intel8x0.c ichdev->int_sta_mask = tbl[i].int_sta_mask;
tbl 757 sound/pci/intel8x0m.c struct ich_pcm_table *tbl, *rec;
tbl 760 sound/pci/intel8x0m.c tbl = intel_pcms;
tbl 765 sound/pci/intel8x0m.c tbl = nforce_pcms;
tbl 769 sound/pci/intel8x0m.c tbl = ali_pcms;
tbl 773 sound/pci/intel8x0m.c tbl = intel_pcms;
tbl 780 sound/pci/intel8x0m.c rec = tbl + i;
tbl 1104 sound/pci/intel8x0m.c struct ich_reg_info *tbl;
tbl 1156 sound/pci/intel8x0m.c tbl = intel_regs;
tbl 1161 sound/pci/intel8x0m.c ichdev->reg_offset = tbl[i].offset;
tbl 1162 sound/pci/intel8x0m.c ichdev->int_sta_mask = tbl[i].int_sta_mask;
tbl 335 sound/soc/soc-jack.c struct jack_gpio_tbl *tbl = res;
tbl 337 sound/soc/soc-jack.c jack_free_gpios(tbl->jack, tbl->count, tbl->gpios);
tbl 354 sound/soc/soc-jack.c struct jack_gpio_tbl *tbl;
tbl 356 sound/soc/soc-jack.c tbl = devres_alloc(jack_devres_free_gpios, sizeof(*tbl), GFP_KERNEL);
tbl 357 sound/soc/soc-jack.c if (!tbl)
tbl 359 sound/soc/soc-jack.c tbl->jack = jack;
tbl 360 sound/soc/soc-jack.c tbl->count = count;
tbl 361 sound/soc/soc-jack.c tbl->gpios = gpios;
tbl 439 sound/soc/soc-jack.c devres_add(jack->card->dev, tbl);
tbl 446 sound/soc/soc-jack.c devres_free(tbl);
tbl 47 tools/perf/arch/powerpc/util/kvm-stat.c struct exit_reasons_table *tbl = hcall_reasons;
tbl 49 tools/perf/arch/powerpc/util/kvm-stat.c while (tbl->reason != NULL) {
tbl 50 tools/perf/arch/powerpc/util/kvm-stat.c if (tbl->exit_code == exit_code)
tbl 51 tools/perf/arch/powerpc/util/kvm-stat.c return tbl->reason;
tbl 52 tools/perf/arch/powerpc/util/kvm-stat.c tbl++;
tbl 103 tools/perf/builtin-kvm.c struct exit_reasons_table *tbl,
tbl 106 tools/perf/builtin-kvm.c while (tbl->reason != NULL) {
tbl 107 tools/perf/builtin-kvm.c if (tbl->exit_code == exit_code)
tbl 108 tools/perf/builtin-kvm.c return tbl->reason;
tbl 109 tools/perf/builtin-kvm.c tbl++;
tbl 28 tools/perf/util/dwarf-regs.c #define __get_dwarf_regstr(tbl, n) (((n) < ARRAY_SIZE(tbl)) ? (tbl)[(n)] : NULL)
tbl 59 tools/perf/util/syscalltbl.c static int syscalltbl__init_native(struct syscalltbl *tbl)
tbl 68 tools/perf/util/syscalltbl.c entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
tbl 69 tools/perf/util/syscalltbl.c if (tbl->syscalls.entries == NULL)
tbl 80 tools/perf/util/syscalltbl.c qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);
tbl 81 tools/perf/util/syscalltbl.c tbl->syscalls.nr_entries = nr_entries;
tbl 82 tools/perf/util/syscalltbl.c tbl->syscalls.max_id = syscalltbl_native_max_id;
tbl 88 tools/perf/util/syscalltbl.c struct syscalltbl *tbl = malloc(sizeof(*tbl));
tbl 89 tools/perf/util/syscalltbl.c if (tbl) {
tbl 90 tools/perf/util/syscalltbl.c if (syscalltbl__init_native(tbl)) {
tbl 91 tools/perf/util/syscalltbl.c free(tbl);
tbl 95 tools/perf/util/syscalltbl.c return tbl;
tbl 98 tools/perf/util/syscalltbl.c void syscalltbl__delete(struct syscalltbl *tbl)
tbl 100 tools/perf/util/syscalltbl.c zfree(&tbl->syscalls.entries);
tbl 101 tools/perf/util/syscalltbl.c free(tbl);
tbl 104 tools/perf/util/syscalltbl.c const char *syscalltbl__name(const struct syscalltbl *tbl __maybe_unused, int id)
tbl 109 tools/perf/util/syscalltbl.c int syscalltbl__id(struct syscalltbl *tbl, const char *name)
tbl 111 tools/perf/util/syscalltbl.c struct syscall *sc = bsearch(name, tbl->syscalls.entries,
tbl 112 tools/perf/util/syscalltbl.c tbl->syscalls.nr_entries, sizeof(*sc),
tbl 118 tools/perf/util/syscalltbl.c int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
tbl 121 tools/perf/util/syscalltbl.c struct syscall *syscalls = tbl->syscalls.entries;
tbl 123 tools/perf/util/syscalltbl.c for (i = *idx + 1; i < tbl->syscalls.nr_entries; ++i) {
tbl 133 tools/perf/util/syscalltbl.c int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
tbl 136 tools/perf/util/syscalltbl.c return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
tbl 145 tools/perf/util/syscalltbl.c struct syscalltbl *tbl = malloc(sizeof(*tbl));
tbl 146 tools/perf/util/syscalltbl.c if (tbl)
tbl 147 tools/perf/util/syscalltbl.c tbl->audit_machine = audit_detect_machine();
tbl 148 tools/perf/util/syscalltbl.c return tbl;
tbl 151 tools/perf/util/syscalltbl.c void syscalltbl__delete(struct syscalltbl *tbl)
tbl 153 tools/perf/util/syscalltbl.c free(tbl);
tbl 156 tools/perf/util/syscalltbl.c const char *syscalltbl__name(const struct syscalltbl *tbl, int id)
tbl 158 tools/perf/util/syscalltbl.c return audit_syscall_to_name(id, tbl->audit_machine);
tbl 161 tools/perf/util/syscalltbl.c int syscalltbl__id(struct syscalltbl *tbl, const char *name)
tbl 163 tools/perf/util/syscalltbl.c return audit_name_to_syscall(name, tbl->audit_machine);
tbl 166 tools/perf/util/syscalltbl.c int syscalltbl__strglobmatch_next(struct syscalltbl *tbl __maybe_unused,
tbl 172 tools/perf/util/syscalltbl.c int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
tbl 174 tools/perf/util/syscalltbl.c return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
tbl 17 tools/perf/util/syscalltbl.h void syscalltbl__delete(struct syscalltbl *tbl);
tbl 19 tools/perf/util/syscalltbl.h const char *syscalltbl__name(const struct syscalltbl *tbl, int id);
tbl 20 tools/perf/util/syscalltbl.h int syscalltbl__id(struct syscalltbl *tbl, const char *name);
tbl 22 tools/perf/util/syscalltbl.h int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx);
tbl 23 tools/perf/util/syscalltbl.h int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx);
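
tools/perf/util/syscalltbl.c above sorts its (id, name) pairs once with qsort() and then serves name-to-id queries via bsearch(). The same scheme in a self-contained userspace sketch (the three syscall rows are placeholders):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct syscall { int id; const char *name; };

	static int syscallcmp(const void *va, const void *vb)
	{
		const struct syscall *a = va, *b = vb;
		return strcmp(a->name, b->name);	/* sort by name */
	}

	static int syscallkeycmp(const void *vkey, const void *ventry)
	{
		const struct syscall *entry = ventry;
		return strcmp(vkey, entry->name);	/* key is a bare string */
	}

	int main(void)
	{
		struct syscall tbl[] = {
			{ 1, "write" }, { 0, "read" }, { 57, "fork" },
		};
		size_t n = sizeof(tbl) / sizeof(tbl[0]);
		struct syscall *sc;

		qsort(tbl, n, sizeof(tbl[0]), syscallcmp);	/* sort once */
		sc = bsearch("read", tbl, n, sizeof(tbl[0]), syscallkeycmp);
		printf("read -> %d\n", sc ? sc->id : -1);	/* prints 0 */
		return 0;
	}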